// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <[email protected]>
 *
 * Based on the original DAC960 driver,
 * Copyright 1998-2001 by Leonard N. Zubkoff <[email protected]>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include "myrb.h"

static struct raid_template *myrb_raid_template;

static void myrb_monitor(struct work_struct *work);
static inline void myrb_translate_devstate(void *DeviceState);

static inline int myrb_logical_channel(struct Scsi_Host *shost)
{
	return shost->max_channel - 1;
}

static struct myrb_devstate_name_entry {
	enum myrb_devstate state;
	const char *name;
} myrb_devstate_name_list[] = {
	{ MYRB_DEVICE_DEAD, "Dead" },
	{ MYRB_DEVICE_WO, "WriteOnly" },
	{ MYRB_DEVICE_ONLINE, "Online" },
	{ MYRB_DEVICE_CRITICAL, "Critical" },
	{ MYRB_DEVICE_STANDBY, "Standby" },
	{ MYRB_DEVICE_OFFLINE, "Offline" },
};

static const char *myrb_devstate_name(enum myrb_devstate state)
{
	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return "Unknown";
}

static struct myrb_raidlevel_name_entry {
	enum myrb_raidlevel level;
	const char *name;
} myrb_raidlevel_name_list[] = {
	{ MYRB_RAID_LEVEL0, "RAID0" },
	{ MYRB_RAID_LEVEL1, "RAID1" },
	{ MYRB_RAID_LEVEL3, "RAID3" },
	{ MYRB_RAID_LEVEL5, "RAID5" },
	{ MYRB_RAID_LEVEL6, "RAID6" },
	{ MYRB_RAID_JBOD, "JBOD" },
};

static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
{
	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}

/**
 * myrb_create_mempools - allocates auxiliary data structures
 * @pdev: pointer to the PCI device
 * @cb: pointer to the hba structure
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
{
	size_t elem_size, elem_align;

	elem_align = sizeof(struct myrb_sge);
	elem_size = cb->host->sg_tablesize * elem_align;
	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
				      elem_size, elem_align, 0);
	if (cb->sg_pool == NULL) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate SG pool\n");
		return false;
	}

	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
					sizeof(struct myrb_dcdb),
					sizeof(unsigned int), 0);
	if (!cb->dcdb_pool) {
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate DCDB pool\n");
		return false;
	}

	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
		 "myrb_wq_%d", cb->host->host_no);
	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
	if (!cb->work_q) {
		dma_pool_destroy(cb->dcdb_pool);
		cb->dcdb_pool = NULL;
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to create workqueue\n");
		return false;
	}

	/*
	 * Initialize the Monitoring Timer.
	 */
	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);

	return true;
}

/**
 * myrb_destroy_mempools - tears down the memory pools for the controller
 * @cb: pointer to the hba structure
 */
static void myrb_destroy_mempools(struct myrb_hba *cb)
{
	cancel_delayed_work_sync(&cb->monitor_work);
	destroy_workqueue(cb->work_q);

	dma_pool_destroy(cb->sg_pool);
	dma_pool_destroy(cb->dcdb_pool);
}

/**
 * myrb_reset_cmd - reset command block
 * @cmd_blk: pointer to the command block
 */
static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
	cmd_blk->status = 0;
}

/**
 * myrb_qcmd - queues command block for execution
 * @cb: pointer to the hba structure
 * @cmd_blk: pointer to the command block
 */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

	cb->write_cmd_mbox(next_mbox, mbox);
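	/*
	 * Only kick the controller if one of the two previously
	 * submitted mailboxes has already been consumed (words[0]
	 * cleared back to zero); otherwise it is still scanning the
	 * mailbox ring and will pick up the new command by itself.
	 */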
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}

/**
 * myrb_exec_cmd - executes command block and waits for completion.
 * @cb: pointer to the hba structure
 * @cmd_blk: pointer to the command block
 *
 * Return: command status
 */
static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
		struct myrb_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	unsigned long flags;

	cmd_blk->completion = &cmpl;

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

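	/* This helper sleeps, so it must not be called from interrupt context */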
	WARN_ON(in_interrupt());
	wait_for_completion(&cmpl);
	return cmd_blk->status;
}

/**
 * myrb_exec_type3 - executes a type 3 command and waits for completion.
 * @cb: pointer to the hba structure
 * @op: command opcode
 * @addr: DMA address of the command payload
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, dma_addr_t addr)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_DCMD_TAG;
	mbox->type3.opcode = op;
	mbox->type3.addr = addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	return status;
}

/**
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 * @cb: pointer to the hba structure
 * @op: command opcode
 * @sdev: SCSI device the command is addressed to
 * @pdev_info: physical device state buffer
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, struct scsi_device *sdev,
		struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
	if (status == MYRB_STATUS_SUCCESS &&
	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
		myrb_translate_devstate(pdev_info);

	return status;
}

static char *myrb_event_msg[] = {
	"killed because write recovery failed",
	"killed because of SCSI bus reset failure",
	"killed because of double check condition",
	"killed because it was removed",
	"killed because of gross error on SCSI chip",
	"killed because of bad tag returned from drive",
	"killed because of timeout on SCSI command",
	"killed because of reset SCSI command issued from system",
	"killed because busy or parity error count exceeded limit",
	"killed because of 'kill drive' command from system",
	"killed because of selection timeout",
	"killed due to SCSI phase sequence error",
	"killed due to unknown status",
};

/**
 * myrb_get_event - get event log from HBA
 * @cb: pointer to the hba structure
 * @event: number of the event
 *
 * Executes a type 3E command and logs the event message.
 */
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_log_entry *ev_buf;
	dma_addr_t ev_addr;
	unsigned short status;

	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
				    sizeof(struct myrb_log_entry),
				    &ev_addr, GFP_KERNEL);
	if (!ev_buf)
		return;

	myrb_reset_cmd(cmd_blk);
	mbox->type3E.id = MYRB_MCMD_TAG;
	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
	mbox->type3E.opqual = 1;
	mbox->type3E.ev_seq = event;
	mbox->type3E.addr = ev_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status != MYRB_STATUS_SUCCESS)
		shost_printk(KERN_INFO, cb->host,
			     "Failed to get event log %d, status %04x\n",
			     event, status);
	else if (ev_buf->seq_num == event) {
		struct scsi_sense_hdr sshdr;

		memset(&sshdr, 0, sizeof(sshdr));
		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);

		if (sshdr.sense_key == VENDOR_SPECIFIC &&
		    sshdr.asc == 0x80 &&
		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: %s\n",
				     ev_buf->channel, ev_buf->target,
				     myrb_event_msg[sshdr.ascq]);
		else
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
				     ev_buf->channel, ev_buf->target,
				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
	}

	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
			  ev_buf, ev_addr);
}

/**
 * myrb_get_errtable - retrieves the error table from the controller
 * @cb: pointer to the hba structure
 *
 * Executes a type 3 command and logs the error table from the controller.
 */
static void myrb_get_errtable(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];

	memcpy(&old_table, cb->err_table, sizeof(old_table));

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
	mbox->type3.addr = cb->err_table_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		struct myrb_error_entry *table = cb->err_table;
		struct myrb_error_entry *new, *old;
		size_t err_table_offset;
		struct scsi_device *sdev;

		shost_for_each_device(sdev, cb->host) {
			if (sdev->channel >= myrb_logical_channel(cb->host))
				continue;
			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
				+ sdev->id;
			new = table + err_table_offset;
			old = &old_table[err_table_offset];
			if (new->parity_err == old->parity_err &&
			    new->soft_err == old->soft_err &&
			    new->hard_err == old->hard_err &&
			    new->misc_err == old->misc_err)
				continue;
			sdev_printk(KERN_CRIT, sdev,
				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
				    new->parity_err, new->soft_err,
				    new->hard_err, new->misc_err);
		}
	}
}

/**
 * myrb_get_ldev_info - retrieves the logical device table from the controller
 * @cb: pointer to the hba structure
 *
 * Executes a type 3 command and updates the logical device table.
 *
 * Return: command status
 */
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
{
	unsigned short status;
	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
	struct Scsi_Host *shost = cb->host;

	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
				 cb->ldev_info_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
		struct myrb_ldev_info *old = NULL;
		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
					  ldev_num, 0);
		if (!sdev) {
			if (new->state == MYRB_DEVICE_OFFLINE)
				continue;
			shost_printk(KERN_INFO, shost,
				     "Adding Logical Drive %d in state %s\n",
				     ldev_num, myrb_devstate_name(new->state));
			scsi_add_device(shost, myrb_logical_channel(shost),
					ldev_num, 0);
			continue;
		}
		old = sdev->hostdata;
		if (new->state != old->state)
			shost_printk(KERN_INFO, shost,
				     "Logical Drive %d is now %s\n",
				     ldev_num, myrb_devstate_name(new->state));
		if (new->wb_enabled != old->wb_enabled)
			sdev_printk(KERN_INFO, sdev,
				    "Logical Drive is now WRITE %s\n",
				    (new->wb_enabled ? "BACK" : "THRU"));
		memcpy(old, new, sizeof(*new));
		scsi_device_put(sdev);
	}
	return status;
}

/**
 * myrb_get_rbld_progress - get rebuild progress information
 * @cb: pointer to the hba structure
 * @rbld: buffer the progress information is copied into, may be NULL
 *
 * Executes a type 3 command and returns the rebuild progress
 * information.
 *
 * Return: command status
 */
static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
		struct myrb_rbld_progress *rbld)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf)
		return MYRB_STATUS_RBLD_NOT_CHECKED;

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (rbld)
		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
	return status;
}

/**
 * myrb_update_rbld_progress - updates the rebuild status
 * @cb: pointer to the hba structure
 *
 * Updates the rebuild status for the attached logical devices.
 */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
		status = MYRB_STATUS_RBLD_SUCCESS;
	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
		unsigned int blocks_done =
			rbld_buf.ldev_size - rbld_buf.blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  rbld_buf.ldev_num, 0);
		if (!sdev)
			return;

		switch (status) {
		case MYRB_STATUS_SUCCESS:
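			/*
			 * Scale both counts down by 128 blocks so the
			 * multiplication by 100 stays within 32 bits.
			 */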
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild in Progress, %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (rbld_buf.ldev_size >> 7));
			break;
		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Logical Drive Failure\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Completed Successfully\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Successfully Terminated\n");
			break;
		default:
			break;
		}
		scsi_device_put(sdev);
	}
	cb->last_rbld_status = status;
}

/**
 * myrb_get_cc_progress - retrieve the consistency check status
 * @cb: pointer to the hba structure
 *
 * Executes a type 3 command and fetches the rebuild / consistency check
 * status.
 */
static void myrb_get_cc_progress(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf) {
		cb->need_cc_status = true;
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		unsigned int ldev_num = rbld_buf->ldev_num;
		unsigned int ldev_size = rbld_buf->ldev_size;
		unsigned int blocks_done =
			ldev_size - rbld_buf->blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  ldev_num, 0);
		if (sdev) {
			sdev_printk(KERN_INFO, sdev,
				    "Consistency Check in Progress: %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (ldev_size >> 7));
			scsi_device_put(sdev);
		}
	}
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
}

/**
 * myrb_bgi_control - updates background initialisation status
 * @cb: pointer to the hba structure
 *
 * Executes a type 3B command and updates the background initialisation status.
 */
static void myrb_bgi_control(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_bgi_status *bgi, *last_bgi;
	dma_addr_t bgi_addr;
	struct scsi_device *sdev = NULL;
	unsigned short status;

	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
				 &bgi_addr, GFP_KERNEL);
	if (!bgi) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate bgi memory\n");
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3B.id = MYRB_DCMD_TAG;
	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
	mbox->type3B.optype = 0x20;
	mbox->type3B.addr = bgi_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	last_bgi = &cb->bgi_status;
	sdev = scsi_device_lookup(cb->host,
				  myrb_logical_channel(cb->host),
				  bgi->ldev_num, 0);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		switch (bgi->status) {
		case MYRB_BGI_INVALID:
			break;
		case MYRB_BGI_STARTED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Started\n");
			break;
		case MYRB_BGI_INPROGRESS:
			if (!sdev)
				break;
			if (bgi->blocks_done == last_bgi->blocks_done &&
			    bgi->ldev_num == last_bgi->ldev_num)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization in Progress: %d%% completed\n",
				    (100 * (bgi->blocks_done >> 7))
				    / (bgi->ldev_size >> 7));
			break;
		case MYRB_BGI_SUSPENDED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Suspended\n");
			break;
		case MYRB_BGI_CANCELLED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Cancelled\n");
			break;
		}
		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
		break;
	case MYRB_STATUS_BGI_SUCCESS:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Completed Successfully\n");
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	case MYRB_STATUS_BGI_ABORTED:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Aborted\n");
		/* Fallthrough */
	case MYRB_STATUS_NO_BGI_INPROGRESS:
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	}
	if (sdev)
		scsi_device_put(sdev);
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
			  bgi, bgi_addr);
}

/**
 * myrb_hba_enquiry - updates the controller status
 * @cb: pointer to the hba structure
 *
 * Executes a DAC_V1_Enquiry command and updates the controller status.
 *
 * Return: command status
 */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
	struct myrb_enquiry old, *new;
	unsigned short status;

	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	new = cb->enquiry;
	if (new->ldev_count > old.ldev_count) {
		int ldev_num = old.ldev_count - 1;

		while (++ldev_num < new->ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d Now Exists\n",
				     ldev_num);
	}
	if (new->ldev_count < old.ldev_count) {
		int ldev_num = new->ldev_count - 1;

		while (++ldev_num < old.ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d No Longer Exists\n",
				     ldev_num);
	}
	if (new->status.deferred != old.status.deferred)
		shost_printk(KERN_CRIT, cb->host,
			     "Deferred Write Error Flag is now %s\n",
			     (new->status.deferred ? "TRUE" : "FALSE"));
	if (new->ev_seq != old.ev_seq) {
		cb->new_ev_seq = new->ev_seq;
		cb->need_err_info = true;
		shost_printk(KERN_INFO, cb->host,
			     "Event log %d/%d (%d/%d) available\n",
			     cb->old_ev_seq, cb->new_ev_seq,
			     old.ev_seq, new->ev_seq);
	}
	if ((new->ldev_critical > 0 &&
	     new->ldev_critical != old.ldev_critical) ||
	    (new->ldev_offline > 0 &&
	     new->ldev_offline != old.ldev_offline) ||
	    (new->ldev_count != old.ldev_count)) {
		shost_printk(KERN_INFO, cb->host,
			     "Logical drive count changed (%d/%d/%d)\n",
			     new->ldev_critical,
			     new->ldev_offline,
			     new->ldev_count);
		cb->need_ldev_info = true;
	}
	if (new->pdev_dead > 0 ||
	    new->pdev_dead != old.pdev_dead ||
	    time_after_eq(jiffies, cb->secondary_monitor_time
			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
		cb->need_bgi_status = cb->bgi_status_supported;
		cb->secondary_monitor_time = jiffies;
	}
	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
		cb->need_rbld = true;
		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
	}
	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
		switch (new->rbld) {
		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed Successfully\n");
			break;
		case MYRB_STDBY_RBLD_IN_PROGRESS:
		case MYRB_BG_RBLD_IN_PROGRESS:
			break;
		case MYRB_BG_CHECK_IN_PROGRESS:
			cb->need_cc_status = true;
			break;
		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed with Error\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Physical Device Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Logical Drive Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Other Causes\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Successfully Terminated\n");
			break;
		}
	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
		cb->need_cc_status = true;

	return MYRB_STATUS_SUCCESS;
}

/**
 * myrb_set_pdev_state - sets the device state for a physical device
 * @cb: pointer to the hba structure
 * @sdev: SCSI device to set the state for
 * @state: new device state
 *
 * Return: command status
 */
static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
		struct scsi_device *sdev, enum myrb_devstate state)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.state = state & 0x1F;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);

	return status;
}

/**
 * myrb_enable_mmio - enables the Memory Mailbox Interface
 * @cb: pointer to the hba structure
 * @mmio_init_fn: controller-specific mailbox initialization function
 *
 * PD and P controller types have no memory mailbox, but still need the
 * other DMA-mapped memory.
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
{
	void __iomem *base = cb->io_base;
	struct pci_dev *pdev = cb->pdev;
	size_t err_table_size;
	size_t ldev_info_size;
	union myrb_cmd_mbox *cmd_mbox_mem;
	struct myrb_stat_mbox *stat_mbox_mem;
	union myrb_cmd_mbox mbox;
	unsigned short status;

	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "DMA mask out of range\n");
		return false;
	}

	cb->enquiry = dma_alloc_coherent(&pdev->dev,
					 sizeof(struct myrb_enquiry),
					 &cb->enquiry_addr, GFP_KERNEL);
	if (!cb->enquiry)
		return false;

	err_table_size = sizeof(struct myrb_error_entry) *
		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
					   &cb->err_table_addr, GFP_KERNEL);
	if (!cb->err_table)
		return false;

	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
					       &cb->ldev_info_addr, GFP_KERNEL);
	if (!cb->ldev_info_buf)
		return false;

	/*
	 * Skip mailbox initialisation for PD and P Controllers
	 */
	if (!mmio_init_fn)
		return true;

	/* These are the base addresses for the command memory mailbox array */
	cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
						cb->cmd_mbox_size,
						&cb->cmd_mbox_addr,
						GFP_KERNEL);
	if (!cb->first_cmd_mbox)
		return false;

	cmd_mbox_mem = cb->first_cmd_mbox;
	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
	cb->last_cmd_mbox = cmd_mbox_mem;
	cb->next_cmd_mbox = cb->first_cmd_mbox;
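	/*
	 * The two 'previous' mailbox pointers track the most recently
	 * submitted slots; myrb_qcmd() inspects them to decide when
	 * the controller needs a doorbell kick.
	 */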
	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;

	/* These are the base addresses for the status memory mailbox array */
	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
		sizeof(struct myrb_stat_mbox);
	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
						 cb->stat_mbox_size,
						 &cb->stat_mbox_addr,
						 GFP_KERNEL);
	if (!cb->first_stat_mbox)
		return false;

	stat_mbox_mem = cb->first_stat_mbox;
	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
	cb->last_stat_mbox = stat_mbox_mem;
	cb->next_stat_mbox = cb->first_stat_mbox;

	/* Enable the Memory Mailbox Interface. */
	cb->dual_mode_interface = true;
	mbox.typeX.opcode = 0x2B;
	mbox.typeX.id = 0;
	mbox.typeX.opcode2 = 0x14;
	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;

	status = mmio_init_fn(pdev, base, &mbox);
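	/* If dual-mode setup (opcode2 0x14) fails, retry in single mode (0x10) */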
	if (status != MYRB_STATUS_SUCCESS) {
		cb->dual_mode_interface = false;
		mbox.typeX.opcode2 = 0x10;
		status = mmio_init_fn(pdev, base, &mbox);
		if (status != MYRB_STATUS_SUCCESS) {
			dev_err(&pdev->dev,
				"Failed to enable mailbox, status %02X\n",
				status);
			return false;
		}
	}
	return true;
}

/**
 * myrb_get_hba_config - reads the configuration information
 * @cb: pointer to the hba structure
 *
 * Reads the configuration information from the controller and
 * initializes the controller structure.
 *
 * Return: 0 on success, errno otherwise
 */
static int myrb_get_hba_config(struct myrb_hba *cb)
{
	struct myrb_enquiry2 *enquiry2;
	dma_addr_t enquiry2_addr;
	struct myrb_config2 *config2;
	dma_addr_t config2_addr;
	struct Scsi_Host *shost = cb->host;
	struct pci_dev *pdev = cb->pdev;
	int pchan_max = 0, pchan_cur = 0;
	unsigned short status;
	int ret = -ENODEV, memsize = 0;

	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				      &enquiry2_addr, GFP_KERNEL);
	if (!enquiry2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 enquiry2 memory\n");
		return -ENOMEM;
	}
	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
				     &config2_addr, GFP_KERNEL);
	if (!config2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 config2 memory\n");
		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				  enquiry2, enquiry2_addr);
		return -ENOMEM;
	}
	mutex_lock(&cb->dma_mutex);
	status = myrb_hba_enquiry(cb);
	mutex_unlock(&cb->dma_mutex);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry2\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue ReadConfig2\n");
		goto out_free;
	}

	status = myrb_get_ldev_info(cb);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to get logical drive information\n");
		goto out_free;
	}

	/*
	 * Initialize the Controller Model Name and Full Model Name fields.
	 */
	switch (enquiry2->hw.sub_model) {
	case DAC960_V1_P_PD_PU:
		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
			strcpy(cb->model_name, "DAC960PU");
		else
			strcpy(cb->model_name, "DAC960PD");
		break;
	case DAC960_V1_PL:
		strcpy(cb->model_name, "DAC960PL");
		break;
	case DAC960_V1_PG:
		strcpy(cb->model_name, "DAC960PG");
		break;
	case DAC960_V1_PJ:
		strcpy(cb->model_name, "DAC960PJ");
		break;
	case DAC960_V1_PR:
		strcpy(cb->model_name, "DAC960PR");
		break;
	case DAC960_V1_PT:
		strcpy(cb->model_name, "DAC960PT");
		break;
	case DAC960_V1_PTL0:
		strcpy(cb->model_name, "DAC960PTL0");
		break;
	case DAC960_V1_PRL:
		strcpy(cb->model_name, "DAC960PRL");
		break;
	case DAC960_V1_PTL1:
		strcpy(cb->model_name, "DAC960PTL1");
		break;
	case DAC960_V1_1164P:
		strcpy(cb->model_name, "eXtremeRAID 1100");
		break;
	default:
		shost_printk(KERN_WARNING, cb->host,
			     "Unknown Model %X\n",
			     enquiry2->hw.sub_model);
		goto out;
	}
	/*
	 * Initialize the Controller Firmware Version field and verify that it
	 * is a supported firmware version.
	 * The supported firmware versions are:
	 *
	 * DAC1164P		5.06 and above
	 * DAC960PTL/PRL/PJ/PG	4.06 and above
	 * DAC960PU/PD/PL	3.51 and above
	 * DAC960PU/PD/PL/P	2.73 and above
	 */
#if defined(CONFIG_ALPHA)
	/*
	 * DEC Alpha machines were often equipped with DAC960 cards that were
	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
	 * the last custom FW revision to be released by DEC for these older
	 * controllers, appears to work quite well with this driver.
	 *
	 * Cards tested successfully were several versions each of the PD and
	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
	 * back of the board, of:
	 *
	 * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
	 *         or D040349 (3-channel)
	 * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
	 *         or D040397 (3-channel)
	 */
# define FIRMWARE_27X	"2.70"
#else
# define FIRMWARE_27X	"2.73"
#endif

	if (enquiry2->fw.major_version == 0) {
		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
		enquiry2->fw.firmware_type = '0';
		enquiry2->fw.turn_id = 0;
	}
	snprintf(cb->fw_version, sizeof(cb->fw_version),
		 "%d.%02d-%c-%02d",
		 enquiry2->fw.major_version,
		 enquiry2->fw.minor_version,
		 enquiry2->fw.firmware_type,
		 enquiry2->fw.turn_id);
	if (!((enquiry2->fw.major_version == 5 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 4 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 3 &&
	       enquiry2->fw.minor_version >= 51) ||
	      (enquiry2->fw.major_version == 2 &&
	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
		shost_printk(KERN_WARNING, cb->host,
			     "Firmware Version '%s' unsupported\n",
			     cb->fw_version);
		goto out;
	}
	/*
	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
	 * Enclosure Management Enabled fields.
	 */
	switch (enquiry2->hw.model) {
	case MYRB_5_CHANNEL_BOARD:
		pchan_max = 5;
		break;
	case MYRB_3_CHANNEL_BOARD:
	case MYRB_3_CHANNEL_ASIC_DAC:
		pchan_max = 3;
		break;
	case MYRB_2_CHANNEL_BOARD:
		pchan_max = 2;
		break;
	default:
		pchan_max = enquiry2->cfg_chan;
		break;
	}
	pchan_cur = enquiry2->cur_chan;
	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
		cb->bus_width = 32;
	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
		cb->bus_width = 16;
	else
		cb->bus_width = 8;
	cb->ldev_block_size = enquiry2->ldev_block_size;
	shost->max_channel = pchan_cur;
	shost->max_id = enquiry2->max_targets;
	memsize = enquiry2->mem_size >> 20;
	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most one less than the
	 * Controller Queue Depth to allow for an automatic drive
	 * rebuild operation.
	 */
	shost->can_queue = cb->enquiry->max_tcq;
	if (shost->can_queue < 3)
		shost->can_queue = enquiry2->max_cmds;
	if (shost->can_queue < 3)
		/* Play safe and disable TCQ */
		shost->can_queue = 1;

	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
	shost->max_sectors = enquiry2->max_sectors;
	shost->sg_tablesize = enquiry2->max_sge;
	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
	/*
	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
	 */
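	/*
	 * blocks_per_stripe * block_factor is a count of device blocks;
	 * shifting right by (10 - MYRB_BLKSIZE_BITS) converts it to KB.
	 */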
	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	/* Assume 255/63 translation */
	cb->ldev_geom_heads = 255;
	cb->ldev_geom_sectors = 63;
	if (config2->drive_geometry) {
		cb->ldev_geom_heads = 128;
		cb->ldev_geom_sectors = 32;
	}

	/*
	 * Initialize the Background Initialization Status.
	 */
	if ((cb->fw_version[0] == '4' &&
	     strcmp(cb->fw_version, "4.08") >= 0) ||
	    (cb->fw_version[0] == '5' &&
	     strcmp(cb->fw_version, "5.08") >= 0)) {
		cb->bgi_status_supported = true;
		myrb_bgi_control(cb);
	}
	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
	ret = 0;

out:
	shost_printk(KERN_INFO, cb->host,
		     "Configuring %s PCI RAID Controller\n", cb->model_name);
	shost_printk(KERN_INFO, cb->host,
		     "  Firmware Version: %s, Memory Size: %dMB\n",
		     cb->fw_version, memsize);
	if (cb->io_addr == 0)
		shost_printk(KERN_INFO, cb->host,
			     "  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
			     (unsigned long)cb->pci_addr, cb->irq);
	else
		shost_printk(KERN_INFO, cb->host,
			     "  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
			     (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
			     cb->irq);
	shost_printk(KERN_INFO, cb->host,
		     "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		     cb->host->can_queue, cb->host->max_sectors);
	shost_printk(KERN_INFO, cb->host,
		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     cb->host->can_queue, cb->host->sg_tablesize,
		     MYRB_SCATTER_GATHER_LIMIT);
	shost_printk(KERN_INFO, cb->host,
		     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
		     cb->stripe_size, cb->segment_size,
		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
		     cb->safte_enabled ?
		     "  SAF-TE Enclosure Management Enabled" : "");
	shost_printk(KERN_INFO, cb->host,
		     "  Physical: %d/%d channels %d/%d/%d devices\n",
		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
		     cb->host->max_id);

	shost_printk(KERN_INFO, cb->host,
		     "  Logical: 1/1 channels, %d/%d disks\n",
		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
			  enquiry2, enquiry2_addr);
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
			  config2, config2_addr);

	return ret;
}

/**
 * myrb_unmap - unmaps controller structures
 * @cb: pointer to the hba structure
 */
static void myrb_unmap(struct myrb_hba *cb)
{
	if (cb->ldev_info_buf) {
		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
			MYRB_MAX_LDEVS;
		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
				  cb->ldev_info_buf, cb->ldev_info_addr);
		cb->ldev_info_buf = NULL;
	}
	if (cb->err_table) {
		size_t err_table_size = sizeof(struct myrb_error_entry) *
			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
		dma_free_coherent(&cb->pdev->dev, err_table_size,
				  cb->err_table, cb->err_table_addr);
		cb->err_table = NULL;
	}
	if (cb->enquiry) {
		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
				  cb->enquiry, cb->enquiry_addr);
		cb->enquiry = NULL;
	}
	if (cb->first_stat_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
				  cb->first_stat_mbox, cb->stat_mbox_addr);
		cb->first_stat_mbox = NULL;
	}
	if (cb->first_cmd_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
		cb->first_cmd_mbox = NULL;
	}
}

/**
 * myrb_cleanup - cleanup controller structures
 * @cb: pointer to the hba structure
 */
static void myrb_cleanup(struct myrb_hba *cb)
{
	struct pci_dev *pdev = cb->pdev;

	/* Free the memory mailbox, status, and related structures */
	myrb_unmap(cb);

	if (cb->mmio_base) {
		cb->disable_intr(cb->io_base);
		iounmap(cb->mmio_base);
	}
	if (cb->irq)
		free_irq(cb->irq, cb);
	if (cb->io_addr)
		release_region(cb->io_addr, 0x80);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	scsi_host_put(cb->host);
}

static int myrb_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	struct myrb_hba *cb = shost_priv(shost);

	cb->reset(cb->io_base);
	return SUCCESS;
}

static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_dcdb *dcdb;
	dma_addr_t dcdb_addr;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	int nsge;

	myrb_reset_cmd(cmd_blk);
	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
	if (!dcdb)
		return SCSI_MLQUEUE_HOST_BUSY;
	nsge = scsi_dma_map(scmd);
	if (nsge > 1) {
		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
		scmd->result = (DID_ERROR << 16);
		scmd->scsi_done(scmd);
		return 0;
	}

	mbox->type3.opcode = MYRB_CMD_DCDB;
	mbox->type3.id = scmd->request->tag + 3;
	mbox->type3.addr = dcdb_addr;
	dcdb->channel = sdev->channel;
	dcdb->target = sdev->id;
	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
		break;
	case DMA_TO_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
		break;
	case DMA_FROM_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
		break;
	default:
		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
		break;
	}
	dcdb->early_status = false;
	if (scmd->request->timeout <= 10)
		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
	else if (scmd->request->timeout <= 60)
		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
	else if (scmd->request->timeout <= 600)
		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
	else
		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
	dcdb->no_autosense = false;
	dcdb->allow_disconnect = true;
	sgl = scsi_sglist(scmd);
	dcdb->dma_addr = sg_dma_address(sgl);
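	/*
	 * The DCDB encodes the transfer length as a 16-bit low word
	 * plus a 4-bit high nibble, i.e. at most 1MB per command.
	 */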
	if (sg_dma_len(sgl) > USHRT_MAX) {
		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
	} else {
		dcdb->xfer_len_lo = sg_dma_len(sgl);
		dcdb->xfer_len_hi4 = 0;
	}
	dcdb->cdb_len = scmd->cmd_len;
	dcdb->sense_len = sizeof(dcdb->sense);
	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return 0;
}

static void myrb_inquiry(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
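	/*
	 * Static INQUIRY data for a direct-access device with vendor
	 * ID "MYLEX"; the model name and firmware version are patched
	 * in below.
	 */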
	unsigned char inq[36] = {
		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20,
	};

	if (cb->bus_width > 16)
		inq[7] |= 1 << 6;
	if (cb->bus_width > 8)
		inq[7] |= 1 << 5;
	memcpy(&inq[16], cb->model_name, 16);
	memcpy(&inq[32], cb->fw_version, 1);
	memcpy(&inq[33], &cb->fw_version[2], 2);
	memcpy(&inq[35], &cb->fw_version[7], 1);

	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
}

static void
myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char modes[32], *mode_pg;
	bool dbd;
	size_t mode_len;

	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
	if (dbd) {
		mode_len = 24;
		mode_pg = &modes[4];
	} else {
		mode_len = 32;
		mode_pg = &modes[12];
	}
	memset(modes, 0, sizeof(modes));
	modes[0] = mode_len - 1;
	if (!dbd) {
		unsigned char *block_desc = &modes[4];

		modes[3] = 8;
		put_unaligned_be32(ldev_info->size, &block_desc[0]);
		put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
	}
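	/*
	 * Caching mode page (0x08): set WCE when write-back is enabled
	 * and report the cache segment size when known.
	 */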
1387 | mode_pg[0] = 0x08; | |
1388 | mode_pg[1] = 0x12; | |
1389 | if (ldev_info->wb_enabled) | |
1390 | mode_pg[2] |= 0x04; | |
1391 | if (cb->segment_size) { | |
1392 | mode_pg[2] |= 0x08; | |
1393 | put_unaligned_be16(cb->segment_size, &mode_pg[14]); | |
1394 | } | |
1395 | ||
1396 | scsi_sg_copy_from_buffer(scmd, modes, mode_len); | |
1397 | } | |
1398 | ||
1399 | static void myrb_request_sense(struct myrb_hba *cb, | |
1400 | struct scsi_cmnd *scmd) | |
1401 | { | |
1402 | scsi_build_sense_buffer(0, scmd->sense_buffer, | |
1403 | NO_SENSE, 0, 0); | |
1404 | scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer, | |
1405 | SCSI_SENSE_BUFFERSIZE); | |
1406 | } | |
1407 | ||
1408 | static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd, | |
1409 | struct myrb_ldev_info *ldev_info) | |
1410 | { | |
1411 | unsigned char data[8]; | |
1412 | ||
1413 | dev_dbg(&scmd->device->sdev_gendev, | |
1414 | "Capacity %u, blocksize %u\n", | |
1415 | ldev_info->size, cb->ldev_block_size); | |
1416 | put_unaligned_be32(ldev_info->size - 1, &data[0]); | |
1417 | put_unaligned_be32(cb->ldev_block_size, &data[4]); | |
1418 | scsi_sg_copy_from_buffer(scmd, data, 8); | |
1419 | } | |
1420 | ||
1421 | static int myrb_ldev_queuecommand(struct Scsi_Host *shost, | |
1422 | struct scsi_cmnd *scmd) | |
1423 | { | |
1424 | struct myrb_hba *cb = shost_priv(shost); | |
1425 | struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd); | |
1426 | union myrb_cmd_mbox *mbox = &cmd_blk->mbox; | |
1427 | struct myrb_ldev_info *ldev_info; | |
1428 | struct scsi_device *sdev = scmd->device; | |
1429 | struct scatterlist *sgl; | |
1430 | unsigned long flags; | |
1431 | u64 lba; | |
1432 | u32 block_cnt; | |
1433 | int nsge; | |
1434 | ||
1435 | ldev_info = sdev->hostdata; | |
1436 | if (ldev_info->state != MYRB_DEVICE_ONLINE && | |
1437 | ldev_info->state != MYRB_DEVICE_WO) { | |
1438 | dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n", | |
1439 | sdev->id, ldev_info ? ldev_info->state : 0xff); | |
1440 | scmd->result = (DID_BAD_TARGET << 16); | |
1441 | scmd->scsi_done(scmd); | |
1442 | return 0; | |
1443 | } | |
1444 | switch (scmd->cmnd[0]) { | |
1445 | case TEST_UNIT_READY: | |
1446 | scmd->result = (DID_OK << 16); | |
1447 | scmd->scsi_done(scmd); | |
1448 | return 0; | |
1449 | case INQUIRY: | |
1450 | if (scmd->cmnd[1] & 1) { | |
1451 | /* Illegal request, invalid field in CDB */ | |
1452 | scsi_build_sense_buffer(0, scmd->sense_buffer, | |
1453 | ILLEGAL_REQUEST, 0x24, 0); | |
1454 | scmd->result = (DRIVER_SENSE << 24) | | |
1455 | SAM_STAT_CHECK_CONDITION; | |
1456 | } else { | |
1457 | myrb_inquiry(cb, scmd); | |
1458 | scmd->result = (DID_OK << 16); | |
1459 | } | |
1460 | scmd->scsi_done(scmd); | |
1461 | return 0; | |
1462 | case SYNCHRONIZE_CACHE: | |
1463 | scmd->result = (DID_OK << 16); | |
1464 | scmd->scsi_done(scmd); | |
1465 | return 0; | |
1466 | case MODE_SENSE: | |
1467 | if ((scmd->cmnd[2] & 0x3F) != 0x3F && | |
1468 | (scmd->cmnd[2] & 0x3F) != 0x08) { | |
1469 | /* Illegal request, invalid field in CDB */ | |
1470 | scsi_build_sense_buffer(0, scmd->sense_buffer, | |
1471 | ILLEGAL_REQUEST, 0x24, 0); | |
1472 | scmd->result = (DRIVER_SENSE << 24) | | |
1473 | SAM_STAT_CHECK_CONDITION; | |
1474 | } else { | |
1475 | myrb_mode_sense(cb, scmd, ldev_info); | |
1476 | scmd->result = (DID_OK << 16); | |
1477 | } | |
1478 | scmd->scsi_done(scmd); | |
1479 | return 0; | |
1480 | case READ_CAPACITY: | |
1481 | if ((scmd->cmnd[1] & 1) || | |
1482 | (scmd->cmnd[8] & 1)) { | |
1483 | /* Illegal request, invalid field in CDB */ | |
1484 | scsi_build_sense_buffer(0, scmd->sense_buffer, | |
1485 | ILLEGAL_REQUEST, 0x24, 0); | |
1486 | scmd->result = (DRIVER_SENSE << 24) | | |
1487 | SAM_STAT_CHECK_CONDITION; | |
1488 | scmd->scsi_done(scmd); | |
1489 | return 0; | |
1490 | } | |
1491 | lba = get_unaligned_be32(&scmd->cmnd[2]); | |
1492 | if (lba) { | |
1493 | /* Illegal request, invalid field in CDB */ | |
1494 | scsi_build_sense_buffer(0, scmd->sense_buffer, | |
1495 | ILLEGAL_REQUEST, 0x24, 0); | |
1496 | scmd->result = (DRIVER_SENSE << 24) | | |
1497 | SAM_STAT_CHECK_CONDITION; | |
1498 | scmd->scsi_done(scmd); | |
1499 | return 0; | |
1500 | } | |
1501 | myrb_read_capacity(cb, scmd, ldev_info); | |
1502 | scmd->scsi_done(scmd); | |
1503 | return 0; | |
1504 | case REQUEST_SENSE: | |
1505 | myrb_request_sense(cb, scmd); | |
1506 | scmd->result = (DID_OK << 16); | |
1507 | return 0; | |
1508 | case SEND_DIAGNOSTIC: | |
1509 | if (scmd->cmnd[1] != 0x04) { | |
1510 | /* Illegal request, invalid field in CDB */ | |
1511 | scsi_build_sense_buffer(0, scmd->sense_buffer, | |
1512 | ILLEGAL_REQUEST, 0x24, 0); | |
1513 | scmd->result = (DRIVER_SENSE << 24) | | |
1514 | SAM_STAT_CHECK_CONDITION; | |
1515 | } else { | |
1516 | /* Assume good status */ | |
1517 | scmd->result = (DID_OK << 16); | |
1518 | } | |
1519 | scmd->scsi_done(scmd); | |
1520 | return 0; | |
1521 | case READ_6: | |
1522 | if (ldev_info->state == MYRB_DEVICE_WO) { | |
1523 | /* Data protect, attempt to read invalid data */ | |
1524 | scsi_build_sense_buffer(0, scmd->sense_buffer, | |
1525 | DATA_PROTECT, 0x21, 0x06); | |
1526 | scmd->result = (DRIVER_SENSE << 24) | | |
1527 | SAM_STAT_CHECK_CONDITION; | |
1528 | scmd->scsi_done(scmd); | |
1529 | return 0; | |
1530 | } | |
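| /* fall through */ | |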
1531 | case WRITE_6: | |
1532 | lba = (((scmd->cmnd[1] & 0x1F) << 16) | | |
1533 | (scmd->cmnd[2] << 8) | | |
1534 | scmd->cmnd[3]); | |
1535 | block_cnt = scmd->cmnd[4]; | |
1536 | break; | |
1537 | case READ_10: | |
1538 | if (ldev_info->state == MYRB_DEVICE_WO) { | |
1539 | /* Data protect, attempt to read invalid data */ | |
1540 | scsi_build_sense_buffer(0, scmd->sense_buffer, | |
1541 | DATA_PROTECT, 0x21, 0x06); | |
1542 | scmd->result = (DRIVER_SENSE << 24) | | |
1543 | SAM_STAT_CHECK_CONDITION; | |
1544 | scmd->scsi_done(scmd); | |
1545 | return 0; | |
1546 | } | |
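| /* fall through */ | |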
1547 | case WRITE_10: | |
1548 | case VERIFY: /* 0x2F */ | |
1549 | case WRITE_VERIFY: /* 0x2E */ | |
1550 | lba = get_unaligned_be32(&scmd->cmnd[2]); | |
1551 | block_cnt = get_unaligned_be16(&scmd->cmnd[7]); | |
1552 | break; | |
1553 | case READ_12: | |
1554 | if (ldev_info->state == MYRB_DEVICE_WO) { | |
1555 | /* Data protect, attempt to read invalid data */ | |
1556 | scsi_build_sense_buffer(0, scmd->sense_buffer, | |
1557 | DATA_PROTECT, 0x21, 0x06); | |
1558 | scmd->result = (DRIVER_SENSE << 24) | | |
1559 | SAM_STAT_CHECK_CONDITION; | |
1560 | scmd->scsi_done(scmd); | |
1561 | return 0; | |
1562 | } | |
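| /* fall through */ | |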
1563 | case WRITE_12: | |
1564 | case VERIFY_12: /* 0xAF */ | |
1565 | case WRITE_VERIFY_12: /* 0xAE */ | |
1566 | lba = get_unaligned_be32(&scmd->cmnd[2]); | |
1567 | block_cnt = get_unaligned_be32(&scmd->cmnd[6]); | |
1568 | break; | |
1569 | default: | |
1570 | /* Illegal request, invalid opcode */ | |
1571 | scsi_build_sense_buffer(0, scmd->sense_buffer, | |
1572 | ILLEGAL_REQUEST, 0x20, 0); | |
1573 | scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; | |
1574 | scmd->scsi_done(scmd); | |
1575 | return 0; | |
1576 | } | |
1577 | ||
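| /* | |
| * Build the controller command: a transfer that maps to a single | |
| * SG element is sent as a direct Type 5 read/write; anything longer | |
| * uses the scatter/gather variants with an SGE list allocated from | |
| * the per-HBA DMA pool. | |
| */ | |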
1578 | myrb_reset_cmd(cmd_blk); | |
1579 | mbox->type5.id = scmd->request->tag + 3; | |
1580 | if (scmd->sc_data_direction == DMA_NONE) | |
1581 | goto submit; | |
1582 | nsge = scsi_dma_map(scmd); | |
1583 | if (nsge == 1) { | |
1584 | sgl = scsi_sglist(scmd); | |
1585 | if (scmd->sc_data_direction == DMA_FROM_DEVICE) | |
1586 | mbox->type5.opcode = MYRB_CMD_READ; | |
1587 | else | |
1588 | mbox->type5.opcode = MYRB_CMD_WRITE; | |
1589 | ||
1590 | mbox->type5.ld.xfer_len = block_cnt; | |
1591 | mbox->type5.ld.ldev_num = sdev->id; | |
1592 | mbox->type5.lba = lba; | |
1593 | mbox->type5.addr = (u32)sg_dma_address(sgl); | |
1594 | } else { | |
1595 | struct myrb_sge *hw_sgl; | |
1596 | dma_addr_t hw_sgl_addr; | |
1597 | int i; | |
1598 | ||
1599 | hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr); | |
1600 | if (!hw_sgl) | |
1601 | return SCSI_MLQUEUE_HOST_BUSY; | |
1602 | ||
1603 | cmd_blk->sgl = hw_sgl; | |
1604 | cmd_blk->sgl_addr = hw_sgl_addr; | |
1605 | ||
1606 | if (scmd->sc_data_direction == DMA_FROM_DEVICE) | |
1607 | mbox->type5.opcode = MYRB_CMD_READ_SG; | |
1608 | else | |
1609 | mbox->type5.opcode = MYRB_CMD_WRITE_SG; | |
1610 | ||
1611 | mbox->type5.ld.xfer_len = block_cnt; | |
1612 | mbox->type5.ld.ldev_num = sdev->id; | |
1613 | mbox->type5.lba = lba; | |
1614 | mbox->type5.addr = hw_sgl_addr; | |
1615 | mbox->type5.sg_count = nsge; | |
1616 | ||
1617 | scsi_for_each_sg(scmd, sgl, nsge, i) { | |
1618 | hw_sgl->sge_addr = (u32)sg_dma_address(sgl); | |
1619 | hw_sgl->sge_count = (u32)sg_dma_len(sgl); | |
1620 | hw_sgl++; | |
1621 | } | |
1622 | } | |
1623 | submit: | |
1624 | spin_lock_irqsave(&cb->queue_lock, flags); | |
1625 | cb->qcmd(cb, cmd_blk); | |
1626 | spin_unlock_irqrestore(&cb->queue_lock, flags); | |
1627 | ||
1628 | return 0; | |
1629 | } | |
1630 | ||
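| /* | |
| * Route the command: the last channel is the logical-drive channel | |
| * and is handled internally; lower channels are passed through to | |
| * the physical devices behind the controller. | |
| */ | |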
1631 | static int myrb_queuecommand(struct Scsi_Host *shost, | |
1632 | struct scsi_cmnd *scmd) | |
1633 | { | |
1634 | struct scsi_device *sdev = scmd->device; | |
1635 | ||
1636 | if (sdev->channel > myrb_logical_channel(shost)) { | |
1637 | scmd->result = (DID_BAD_TARGET << 16); | |
1638 | scmd->scsi_done(scmd); | |
1639 | return 0; | |
1640 | } | |
1641 | if (sdev->channel == myrb_logical_channel(shost)) | |
1642 | return myrb_ldev_queuecommand(shost, scmd); | |
1643 | ||
1644 | return myrb_pthru_queuecommand(shost, scmd); | |
1645 | } | |
1646 | ||
1647 | static int myrb_ldev_slave_alloc(struct scsi_device *sdev) | |
1648 | { | |
1649 | struct myrb_hba *cb = shost_priv(sdev->host); | |
1650 | struct myrb_ldev_info *ldev_info; | |
1651 | unsigned short ldev_num = sdev->id; | |
1652 | enum raid_level level; | |
1653 | ||
1654 | if (!cb->ldev_info_buf) | |
1655 | return -ENXIO; | |
1656 | ldev_info = cb->ldev_info_buf + ldev_num; | |
1657 | ||
1658 | sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL); | |
1659 | if (!sdev->hostdata) | |
1660 | return -ENOMEM; | |
1661 | dev_dbg(&sdev->sdev_gendev, | |
1662 | "slave alloc ldev %d state %x\n", | |
1663 | ldev_num, ldev_info->state); | |
1664 | memcpy(sdev->hostdata, ldev_info, | |
1665 | sizeof(*ldev_info)); | |
1666 | switch (ldev_info->raid_level) { | |
1667 | case MYRB_RAID_LEVEL0: | |
1668 | level = RAID_LEVEL_LINEAR; | |
1669 | break; | |
1670 | case MYRB_RAID_LEVEL1: | |
1671 | level = RAID_LEVEL_1; | |
1672 | break; | |
1673 | case MYRB_RAID_LEVEL3: | |
1674 | level = RAID_LEVEL_3; | |
1675 | break; | |
1676 | case MYRB_RAID_LEVEL5: | |
1677 | level = RAID_LEVEL_5; | |
1678 | break; | |
1679 | case MYRB_RAID_LEVEL6: | |
1680 | level = RAID_LEVEL_6; | |
1681 | break; | |
1682 | case MYRB_RAID_JBOD: | |
1683 | level = RAID_LEVEL_JBOD; | |
1684 | break; | |
1685 | default: | |
1686 | level = RAID_LEVEL_UNKNOWN; | |
1687 | break; | |
1688 | } | |
1689 | raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level); | |
1690 | return 0; | |
1691 | } | |
1692 | ||
1693 | static int myrb_pdev_slave_alloc(struct scsi_device *sdev) | |
1694 | { | |
1695 | struct myrb_hba *cb = shost_priv(sdev->host); | |
1696 | struct myrb_pdev_state *pdev_info; | |
1697 | unsigned short status; | |
1698 | ||
1699 | if (sdev->id > MYRB_MAX_TARGETS) | |
1700 | return -ENXIO; | |
1701 | ||
1702 | pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA); | |
1703 | if (!pdev_info) | |
1704 | return -ENOMEM; | |
1705 | ||
1706 | status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE, | |
1707 | sdev, pdev_info); | |
1708 | if (status != MYRB_STATUS_SUCCESS) { | |
1709 | dev_dbg(&sdev->sdev_gendev, | |
1710 | "Failed to get device state, status %x\n", | |
1711 | status); | |
1712 | kfree(pdev_info); | |
1713 | return -ENXIO; | |
1714 | } | |
1715 | if (!pdev_info->present) { | |
1716 | dev_dbg(&sdev->sdev_gendev, | |
1717 | "device not present, skip\n"); | |
1718 | kfree(pdev_info); | |
1719 | return -ENXIO; | |
1720 | } | |
1721 | dev_dbg(&sdev->sdev_gendev, | |
1722 | "slave alloc pdev %d:%d state %x\n", | |
1723 | sdev->channel, sdev->id, pdev_info->state); | |
1724 | sdev->hostdata = pdev_info; | |
1725 | ||
1726 | return 0; | |
1727 | } | |
1728 | ||
1729 | static int myrb_slave_alloc(struct scsi_device *sdev) | |
1730 | { | |
1731 | if (sdev->channel > myrb_logical_channel(sdev->host)) | |
1732 | return -ENXIO; | |
1733 | ||
1734 | if (sdev->lun > 0) | |
1735 | return -ENXIO; | |
1736 | ||
1737 | if (sdev->channel == myrb_logical_channel(sdev->host)) | |
1738 | return myrb_ldev_slave_alloc(sdev); | |
1739 | ||
1740 | return myrb_pdev_slave_alloc(sdev); | |
1741 | } | |
1742 | ||
1743 | static int myrb_slave_configure(struct scsi_device *sdev) | |
1744 | { | |
1745 | struct myrb_ldev_info *ldev_info; | |
1746 | ||
1747 | if (sdev->channel > myrb_logical_channel(sdev->host)) | |
1748 | return -ENXIO; | |
1749 | ||
1750 | if (sdev->channel < myrb_logical_channel(sdev->host)) { | |
1751 | sdev->no_uld_attach = 1; | |
1752 | return 0; | |
1753 | } | |
1754 | if (sdev->lun != 0) | |
1755 | return -ENXIO; | |
1756 | ||
1757 | ldev_info = sdev->hostdata; | |
1758 | if (!ldev_info) | |
1759 | return -ENXIO; | |
1760 | if (ldev_info->state != MYRB_DEVICE_ONLINE) | |
1761 | sdev_printk(KERN_INFO, sdev, | |
1762 | "Logical drive is %s\n", | |
1763 | myrb_devstate_name(ldev_info->state)); | |
1764 | ||
1765 | sdev->tagged_supported = 1; | |
1766 | return 0; | |
1767 | } | |
1768 | ||
1769 | static void myrb_slave_destroy(struct scsi_device *sdev) | |
1770 | { | |
1771 | kfree(sdev->hostdata); | |
1772 | } | |
1773 | ||
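| /* | |
| * BIOS disk geometry: heads and sectors per track come from the | |
| * controller; the cylinder count is the capacity divided by | |
| * heads * sectors. | |
| */ | |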
1774 | static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev, | |
1775 | sector_t capacity, int geom[]) | |
1776 | { | |
1777 | struct myrb_hba *cb = shost_priv(sdev->host); | |
1778 | ||
1779 | geom[0] = cb->ldev_geom_heads; | |
1780 | geom[1] = cb->ldev_geom_sectors; | |
1781 | sector_div(capacity, geom[0] * geom[1]); | |
| geom[2] = (int)capacity; | |
1782 | ||
1783 | return 0; | |
1784 | } | |
1785 | ||
1786 | static ssize_t raid_state_show(struct device *dev, | |
1787 | struct device_attribute *attr, char *buf) | |
1788 | { | |
1789 | struct scsi_device *sdev = to_scsi_device(dev); | |
1790 | struct myrb_hba *cb = shost_priv(sdev->host); | |
1791 | int ret; | |
1792 | ||
1793 | if (!sdev->hostdata) | |
1794 | return snprintf(buf, 16, "Unknown\n"); | |
1795 | ||
1796 | if (sdev->channel == myrb_logical_channel(sdev->host)) { | |
1797 | struct myrb_ldev_info *ldev_info = sdev->hostdata; | |
1798 | const char *name; | |
1799 | ||
1800 | name = myrb_devstate_name(ldev_info->state); | |
1801 | if (name) | |
1802 | ret = snprintf(buf, 32, "%s\n", name); | |
1803 | else | |
1804 | ret = snprintf(buf, 32, "Invalid (%02X)\n", | |
1805 | ldev_info->state); | |
1806 | } else { | |
1807 | struct myrb_pdev_state *pdev_info = sdev->hostdata; | |
1808 | unsigned short status; | |
1809 | const char *name; | |
1810 | ||
1811 | status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE, | |
1812 | sdev, pdev_info); | |
1813 | if (status != MYRB_STATUS_SUCCESS) | |
1814 | sdev_printk(KERN_INFO, sdev, | |
1815 | "Failed to get device state, status %x\n", | |
1816 | status); | |
1817 | ||
1818 | if (!pdev_info->present) | |
1819 | name = "Removed"; | |
1820 | else | |
1821 | name = myrb_devstate_name(pdev_info->state); | |
1822 | if (name) | |
1823 | ret = snprintf(buf, 32, "%s\n", name); | |
1824 | else | |
1825 | ret = snprintf(buf, 32, "Invalid (%02X)\n", | |
1826 | pdev_info->state); | |
1827 | } | |
1828 | return ret; | |
1829 | } | |
1830 | ||
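| /* | |
| * Writing "kill"/"offline", "online" or "standby" requests the | |
| * matching state change for a physical drive via | |
| * myrb_set_pdev_state(). | |
| */ | |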
1831 | static ssize_t raid_state_store(struct device *dev, | |
1832 | struct device_attribute *attr, const char *buf, size_t count) | |
1833 | { | |
1834 | struct scsi_device *sdev = to_scsi_device(dev); | |
1835 | struct myrb_hba *cb = shost_priv(sdev->host); | |
1836 | struct myrb_pdev_state *pdev_info; | |
1837 | enum myrb_devstate new_state; | |
1838 | unsigned short status; | |
1839 | ||
1840 | if (!strncmp(buf, "kill", 4) || | |
1841 | !strncmp(buf, "offline", 7)) | |
1842 | new_state = MYRB_DEVICE_DEAD; | |
1843 | else if (!strncmp(buf, "online", 6)) | |
1844 | new_state = MYRB_DEVICE_ONLINE; | |
1845 | else if (!strncmp(buf, "standby", 7)) | |
1846 | new_state = MYRB_DEVICE_STANDBY; | |
1847 | else | |
1848 | return -EINVAL; | |
1849 | ||
1850 | pdev_info = sdev->hostdata; | |
1851 | if (!pdev_info) { | |
1852 | sdev_printk(KERN_INFO, sdev, | |
1853 | "Failed - no physical device information\n"); | |
1854 | return -ENXIO; | |
1855 | } | |
1856 | if (!pdev_info->present) { | |
1857 | sdev_printk(KERN_INFO, sdev, | |
1858 | "Failed - device not present\n"); | |
1859 | return -ENXIO; | |
1860 | } | |
1861 | ||
1862 | if (pdev_info->state == new_state) | |
1863 | return count; | |
1864 | ||
1865 | status = myrb_set_pdev_state(cb, sdev, new_state); | |
1866 | switch (status) { | |
1867 | case MYRB_STATUS_SUCCESS: | |
1868 | break; | |
1869 | case MYRB_STATUS_START_DEVICE_FAILED: | |
1870 | sdev_printk(KERN_INFO, sdev, | |
1871 | "Failed - Unable to Start Device\n"); | |
1872 | count = -EAGAIN; | |
1873 | break; | |
1874 | case MYRB_STATUS_NO_DEVICE: | |
1875 | sdev_printk(KERN_INFO, sdev, | |
1876 | "Failed - No Device at Address\n"); | |
1877 | count = -ENODEV; | |
1878 | break; | |
1879 | case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET: | |
1880 | sdev_printk(KERN_INFO, sdev, | |
1881 | "Failed - Invalid Channel or Target or Modifier\n"); | |
1882 | count = -EINVAL; | |
1883 | break; | |
1884 | case MYRB_STATUS_CHANNEL_BUSY: | |
1885 | sdev_printk(KERN_INFO, sdev, | |
1886 | "Failed - Channel Busy\n"); | |
1887 | count = -EBUSY; | |
1888 | break; | |
1889 | default: | |
1890 | sdev_printk(KERN_INFO, sdev, | |
1891 | "Failed - Unexpected Status %04X\n", status); | |
1892 | count = -EIO; | |
1893 | break; | |
1894 | } | |
1895 | return count; | |
1896 | } | |
1897 | static DEVICE_ATTR_RW(raid_state); | |
1898 | ||
1899 | static ssize_t raid_level_show(struct device *dev, | |
1900 | struct device_attribute *attr, char *buf) | |
1901 | { | |
1902 | struct scsi_device *sdev = to_scsi_device(dev); | |
1903 | ||
1904 | if (sdev->channel == myrb_logical_channel(sdev->host)) { | |
1905 | struct myrb_ldev_info *ldev_info = sdev->hostdata; | |
1906 | const char *name; | |
1907 | ||
1908 | if (!ldev_info) | |
1909 | return -ENXIO; | |
1910 | ||
1911 | name = myrb_raidlevel_name(ldev_info->raid_level); | |
1912 | if (!name) | |
1913 | return snprintf(buf, 32, "Invalid (%02X)\n", | |
1914 | ldev_info->state); | |
1915 | return snprintf(buf, 32, "%s\n", name); | |
1916 | } | |
1917 | return snprintf(buf, 32, "Physical Drive\n"); | |
1918 | } | |
1919 | static DEVICE_ATTR_RO(raid_level); | |
1920 | ||
1921 | static ssize_t rebuild_show(struct device *dev, | |
1922 | struct device_attribute *attr, char *buf) | |
1923 | { | |
1924 | struct scsi_device *sdev = to_scsi_device(dev); | |
1925 | struct myrb_hba *cb = shost_priv(sdev->host); | |
1926 | struct myrb_rbld_progress rbld_buf; | |
1927 | unsigned short status; | |
1928 | ||
1929 | if (sdev->channel < myrb_logical_channel(sdev->host)) | |
1930 | return snprintf(buf, 32, "physical device - not rebuilding\n"); | |
1931 | ||
1932 | status = myrb_get_rbld_progress(cb, &rbld_buf); | |
1933 | ||
1934 | if (rbld_buf.ldev_num != sdev->id || | |
1935 | status != MYRB_STATUS_SUCCESS) | |
1936 | return snprintf(buf, 32, "not rebuilding\n"); | |
1937 | ||
1938 | return snprintf(buf, 32, "rebuilding block %u of %u\n", | |
1939 | rbld_buf.ldev_size - rbld_buf.blocks_left, | |
1940 | rbld_buf.ldev_size); | |
1941 | } | |
1942 | ||
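| /* | |
| * Writing a non-zero value starts an asynchronous rebuild of the | |
| * physical drive; writing 0 cancels a running rebuild by resetting | |
| * the rebuild rate through MYRB_CMD_REBUILD_CONTROL. | |
| */ | |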
1943 | static ssize_t rebuild_store(struct device *dev, | |
1944 | struct device_attribute *attr, const char *buf, size_t count) | |
1945 | { | |
1946 | struct scsi_device *sdev = to_scsi_device(dev); | |
1947 | struct myrb_hba *cb = shost_priv(sdev->host); | |
1948 | struct myrb_cmdblk *cmd_blk; | |
1949 | union myrb_cmd_mbox *mbox; | |
1950 | unsigned short status; | |
1951 | int rc, start; | |
1952 | const char *msg; | |
1953 | ||
1954 | rc = kstrtoint(buf, 0, &start); | |
1955 | if (rc) | |
1956 | return rc; | |
1957 | ||
1958 | if (sdev->channel >= myrb_logical_channel(sdev->host)) | |
1959 | return -ENXIO; | |
1960 | ||
1961 | status = myrb_get_rbld_progress(cb, NULL); | |
1962 | if (start) { | |
1963 | if (status == MYRB_STATUS_SUCCESS) { | |
1964 | sdev_printk(KERN_INFO, sdev, | |
1965 | "Rebuild Not Initiated; already in progress\n"); | |
1966 | return -EALREADY; | |
1967 | } | |
1968 | mutex_lock(&cb->dcmd_mutex); | |
1969 | cmd_blk = &cb->dcmd_blk; | |
1970 | myrb_reset_cmd(cmd_blk); | |
1971 | mbox = &cmd_blk->mbox; | |
1972 | mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC; | |
1973 | mbox->type3D.id = MYRB_DCMD_TAG; | |
1974 | mbox->type3D.channel = sdev->channel; | |
1975 | mbox->type3D.target = sdev->id; | |
1976 | status = myrb_exec_cmd(cb, cmd_blk); | |
1977 | mutex_unlock(&cb->dcmd_mutex); | |
1978 | } else { | |
1979 | struct pci_dev *pdev = cb->pdev; | |
1980 | unsigned char *rate; | |
1981 | dma_addr_t rate_addr; | |
1982 | ||
1983 | if (status != MYRB_STATUS_SUCCESS) { | |
1984 | sdev_printk(KERN_INFO, sdev, | |
1985 | "Rebuild Not Cancelled; not in progress\n"); | |
1986 | return 0; | |
1987 | } | |
1988 | ||
1989 | rate = dma_alloc_coherent(&pdev->dev, sizeof(char), | |
1990 | &rate_addr, GFP_KERNEL); | |
1991 | if (rate == NULL) { | |
1992 | sdev_printk(KERN_INFO, sdev, | |
1993 | "Cancellation of Rebuild Failed - Out of Memory\n"); | |
1994 | return -ENOMEM; | |
1995 | } | |
1996 | mutex_lock(&cb->dcmd_mutex); | |
1997 | cmd_blk = &cb->dcmd_blk; | |
1998 | myrb_reset_cmd(cmd_blk); | |
1999 | mbox = &cmd_blk->mbox; | |
2000 | mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL; | |
2001 | mbox->type3R.id = MYRB_DCMD_TAG; | |
2002 | mbox->type3R.rbld_rate = 0xFF; | |
2003 | mbox->type3R.addr = rate_addr; | |
2004 | status = myrb_exec_cmd(cb, cmd_blk); | |
2005 | dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr); | |
2006 | mutex_unlock(&cb->dcmd_mutex); | |
2007 | } | |
2008 | if (status == MYRB_STATUS_SUCCESS) { | |
2009 | sdev_printk(KERN_INFO, sdev, "Rebuild %s\n", | |
2010 | start ? "Initiated" : "Cancelled"); | |
2011 | return count; | |
2012 | } | |
2013 | if (!start) { | |
2014 | sdev_printk(KERN_INFO, sdev, | |
2015 | "Rebuild Not Cancelled, status 0x%x\n", | |
2016 | status); | |
2017 | return -EIO; | |
2018 | } | |
2019 | ||
2020 | switch (status) { | |
2021 | case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE: | |
2022 | msg = "Attempt to Rebuild Online or Unresponsive Drive"; | |
2023 | break; | |
2024 | case MYRB_STATUS_RBLD_NEW_DISK_FAILED: | |
2025 | msg = "New Disk Failed During Rebuild"; | |
2026 | break; | |
2027 | case MYRB_STATUS_INVALID_ADDRESS: | |
2028 | msg = "Invalid Device Address"; | |
2029 | break; | |
2030 | case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS: | |
2031 | msg = "Already in Progress"; | |
2032 | break; | |
2033 | default: | |
2034 | msg = NULL; | |
2035 | break; | |
2036 | } | |
2037 | if (msg) | |
2038 | sdev_printk(KERN_INFO, sdev, | |
2039 | "Rebuild Failed - %s\n", msg); | |
2040 | else | |
2041 | sdev_printk(KERN_INFO, sdev, | |
2042 | "Rebuild Failed, status 0x%x\n", status); | |
2043 | ||
2044 | return -EIO; | |
2045 | } | |
2046 | static DEVICE_ATTR_RW(rebuild); | |
2047 | ||
2048 | static ssize_t consistency_check_store(struct device *dev, | |
2049 | struct device_attribute *attr, const char *buf, size_t count) | |
2050 | { | |
2051 | struct scsi_device *sdev = to_scsi_device(dev); | |
2052 | struct myrb_hba *cb = shost_priv(sdev->host); | |
2053 | struct myrb_rbld_progress rbld_buf; | |
2054 | struct myrb_cmdblk *cmd_blk; | |
2055 | union myrb_cmd_mbox *mbox; | |
2057 | unsigned short status; | |
2058 | int rc, start; | |
2059 | const char *msg; | |
2060 | ||
2061 | rc = kstrtoint(buf, 0, &start); | |
2062 | if (rc) | |
2063 | return rc; | |
2064 | ||
2065 | if (sdev->channel < myrb_logical_channel(sdev->host)) | |
2066 | return -ENXIO; | |
2067 | ||
2068 | status = myrb_get_rbld_progress(cb, &rbld_buf); | |
2069 | if (start) { | |
2070 | if (status == MYRB_STATUS_SUCCESS) { | |
2071 | sdev_printk(KERN_INFO, sdev, | |
2072 | "Check Consistency Not Initiated; already in progress\n"); | |
2073 | return -EALREADY; | |
2074 | } | |
2075 | mutex_lock(&cb->dcmd_mutex); | |
2076 | cmd_blk = &cb->dcmd_blk; | |
2077 | myrb_reset_cmd(cmd_blk); | |
2078 | mbox = &cmd_blk->mbox; | |
2079 | mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC; | |
2080 | mbox->type3C.id = MYRB_DCMD_TAG; | |
2081 | mbox->type3C.ldev_num = sdev->id; | |
2082 | mbox->type3C.auto_restore = true; | |
2083 | ||
2084 | status = myrb_exec_cmd(cb, cmd_blk); | |
2085 | mutex_unlock(&cb->dcmd_mutex); | |
2086 | } else { | |
2087 | struct pci_dev *pdev = cb->pdev; | |
2088 | unsigned char *rate; | |
2089 | dma_addr_t rate_addr; | |
2090 | ||
2091 | if (status != MYRB_STATUS_SUCCESS || | |
| rbld_buf.ldev_num != sdev->id) { | |
2092 | sdev_printk(KERN_INFO, sdev, | |
2093 | "Check Consistency Not Cancelled; not in progress\n"); | |
2094 | return 0; | |
2095 | } | |
2096 | rate = dma_alloc_coherent(&pdev->dev, sizeof(char), | |
2097 | &rate_addr, GFP_KERNEL); | |
2098 | if (rate == NULL) { | |
2099 | sdev_printk(KERN_INFO, sdev, | |
2100 | "Cancellation of Check Consistency Failed - Out of Memory\n"); | |
2101 | return -ENOMEM; | |
2102 | } | |
2103 | mutex_lock(&cb->dcmd_mutex); | |
2104 | cmd_blk = &cb->dcmd_blk; | |
2105 | myrb_reset_cmd(cmd_blk); | |
2106 | mbox = &cmd_blk->mbox; | |
2107 | mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL; | |
2108 | mbox->type3R.id = MYRB_DCMD_TAG; | |
2109 | mbox->type3R.rbld_rate = 0xFF; | |
2110 | mbox->type3R.addr = rate_addr; | |
2111 | status = myrb_exec_cmd(cb, cmd_blk); | |
2112 | dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr); | |
2113 | mutex_unlock(&cb->dcmd_mutex); | |
2114 | } | |
2115 | if (status == MYRB_STATUS_SUCCESS) { | |
2116 | sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n", | |
2117 | start ? "Initiated" : "Cancelled"); | |
2118 | return count; | |
2119 | } | |
2120 | if (!start) { | |
2121 | sdev_printk(KERN_INFO, sdev, | |
2122 | "Check Consistency Not Cancelled, status 0x%x\n", | |
2123 | status); | |
2124 | return -EIO; | |
2125 | } | |
2126 | ||
2127 | switch (status) { | |
2128 | case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE: | |
2129 | msg = "Dependent Physical Device is DEAD"; | |
2130 | break; | |
2131 | case MYRB_STATUS_RBLD_NEW_DISK_FAILED: | |
2132 | msg = "New Disk Failed During Rebuild"; | |
2133 | break; | |
2134 | case MYRB_STATUS_INVALID_ADDRESS: | |
2135 | msg = "Invalid or Nonredundant Logical Drive"; | |
2136 | break; | |
2137 | case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS: | |
2138 | msg = "Already in Progress"; | |
2139 | break; | |
2140 | default: | |
2141 | msg = NULL; | |
2142 | break; | |
2143 | } | |
2144 | if (msg) | |
2145 | sdev_printk(KERN_INFO, sdev, | |
2146 | "Check Consistency Failed - %s\n", msg); | |
2147 | else | |
2148 | sdev_printk(KERN_INFO, sdev, | |
2149 | "Check Consistency Failed, status 0x%x\n", status); | |
2150 | ||
2151 | return -EIO; | |
2152 | } | |
2153 | ||
2154 | static ssize_t consistency_check_show(struct device *dev, | |
2155 | struct device_attribute *attr, char *buf) | |
2156 | { | |
2157 | return rebuild_show(dev, attr, buf); | |
2158 | } | |
2159 | static DEVICE_ATTR_RW(consistency_check); | |
2160 | ||
2161 | static ssize_t ctlr_num_show(struct device *dev, | |
2162 | struct device_attribute *attr, char *buf) | |
2163 | { | |
2164 | struct Scsi_Host *shost = class_to_shost(dev); | |
2165 | struct myrb_hba *cb = shost_priv(shost); | |
2166 | ||
2167 | return snprintf(buf, 20, "%d\n", cb->ctlr_num); | |
2168 | } | |
2169 | static DEVICE_ATTR_RO(ctlr_num); | |
2170 | ||
2171 | static ssize_t firmware_show(struct device *dev, | |
2172 | struct device_attribute *attr, char *buf) | |
2173 | { | |
2174 | struct Scsi_Host *shost = class_to_shost(dev); | |
2175 | struct myrb_hba *cb = shost_priv(shost); | |
2176 | ||
2177 | return snprintf(buf, 16, "%s\n", cb->fw_version); | |
2178 | } | |
2179 | static DEVICE_ATTR_RO(firmware); | |
2180 | ||
2181 | static ssize_t model_show(struct device *dev, | |
2182 | struct device_attribute *attr, char *buf) | |
2183 | { | |
2184 | struct Scsi_Host *shost = class_to_shost(dev); | |
2185 | struct myrb_hba *cb = shost_priv(shost); | |
2186 | ||
2187 | return snprintf(buf, 16, "%s\n", cb->model_name); | |
2188 | } | |
2189 | static DEVICE_ATTR_RO(model); | |
2190 | ||
2191 | static ssize_t flush_cache_store(struct device *dev, | |
2192 | struct device_attribute *attr, const char *buf, size_t count) | |
2193 | { | |
2194 | struct Scsi_Host *shost = class_to_shost(dev); | |
2195 | struct myrb_hba *cb = shost_priv(shost); | |
2196 | unsigned short status; | |
2197 | ||
2198 | status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0); | |
2199 | if (status == MYRB_STATUS_SUCCESS) { | |
2200 | shost_printk(KERN_INFO, shost, | |
2201 | "Cache Flush Completed\n"); | |
2202 | return count; | |
2203 | } | |
2204 | shost_printk(KERN_INFO, shost, | |
2205 | "Cache Flush Failed, status %x\n", status); | |
2206 | return -EIO; | |
2207 | } | |
2208 | static DEVICE_ATTR_WO(flush_cache); | |
2209 | ||
2210 | static struct device_attribute *myrb_sdev_attrs[] = { | |
2211 | &dev_attr_rebuild, | |
2212 | &dev_attr_consistency_check, | |
2213 | &dev_attr_raid_state, | |
2214 | &dev_attr_raid_level, | |
2215 | NULL, | |
2216 | }; | |
2217 | ||
2218 | static struct device_attribute *myrb_shost_attrs[] = { | |
2219 | &dev_attr_ctlr_num, | |
2220 | &dev_attr_model, | |
2221 | &dev_attr_firmware, | |
2222 | &dev_attr_flush_cache, | |
2223 | NULL, | |
2224 | }; | |
2225 | ||
2226 | struct scsi_host_template myrb_template = { | |
2227 | .module = THIS_MODULE, | |
2228 | .name = "DAC960", | |
2229 | .proc_name = "myrb", | |
2230 | .queuecommand = myrb_queuecommand, | |
2231 | .eh_host_reset_handler = myrb_host_reset, | |
2232 | .slave_alloc = myrb_slave_alloc, | |
2233 | .slave_configure = myrb_slave_configure, | |
2234 | .slave_destroy = myrb_slave_destroy, | |
2235 | .bios_param = myrb_biosparam, | |
2236 | .cmd_size = sizeof(struct myrb_cmdblk), | |
2237 | .shost_attrs = myrb_shost_attrs, | |
2238 | .sdev_attrs = myrb_sdev_attrs, | |
2239 | .this_id = -1, | |
2240 | }; | |
2241 | ||
2242 | /** | |
2243 | * myrb_is_raid - return boolean indicating device is raid volume | |
2244 | * @dev: the device struct object | |
2245 | */ | |
2246 | static int myrb_is_raid(struct device *dev) | |
2247 | { | |
2248 | struct scsi_device *sdev = to_scsi_device(dev); | |
2249 | ||
2250 | return sdev->channel == myrb_logical_channel(sdev->host); | |
2251 | } | |
2252 | ||
2253 | /** | |
2254 | * myrb_get_resync - get raid volume resync percent complete | |
2255 | * @dev: the device struct object | |
2256 | */ | |
2257 | static void myrb_get_resync(struct device *dev) | |
2258 | { | |
2259 | struct scsi_device *sdev = to_scsi_device(dev); | |
2260 | struct myrb_hba *cb = shost_priv(sdev->host); | |
2261 | struct myrb_rbld_progress rbld_buf; | |
2262 | unsigned int percent_complete = 0; | |
2263 | unsigned short status; | |
2264 | unsigned int ldev_size = 0, remaining = 0; | |
2265 | ||
2266 | if (sdev->channel < myrb_logical_channel(sdev->host)) | |
2267 | return; | |
2268 | status = myrb_get_rbld_progress(cb, &rbld_buf); | |
2269 | if (status == MYRB_STATUS_SUCCESS) { | |
2270 | if (rbld_buf.ldev_num == sdev->id) { | |
2271 | ldev_size = rbld_buf.ldev_size; | |
2272 | remaining = rbld_buf.blocks_left; | |
2273 | } | |
2274 | } | |
2275 | if (remaining && ldev_size) | |
2276 | percent_complete = (ldev_size - remaining) * 100 / ldev_size; | |
2277 | raid_set_resync(myrb_raid_template, dev, percent_complete); | |
2278 | } | |
2279 | ||
2280 | /** | |
2281 | * myrb_get_state - get raid volume status | |
2282 | * @dev: the device struct object | |
2283 | */ | |
2284 | static void myrb_get_state(struct device *dev) | |
2285 | { | |
2286 | struct scsi_device *sdev = to_scsi_device(dev); | |
2287 | struct myrb_hba *cb = shost_priv(sdev->host); | |
2288 | struct myrb_ldev_info *ldev_info = sdev->hostdata; | |
2289 | enum raid_state state = RAID_STATE_UNKNOWN; | |
2290 | unsigned short status; | |
2291 | ||
2292 | if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info) | |
2293 | state = RAID_STATE_UNKNOWN; | |
2294 | else { | |
2295 | status = myrb_get_rbld_progress(cb, NULL); | |
2296 | if (status == MYRB_STATUS_SUCCESS) | |
2297 | state = RAID_STATE_RESYNCING; | |
2298 | else { | |
2299 | switch (ldev_info->state) { | |
2300 | case MYRB_DEVICE_ONLINE: | |
2301 | state = RAID_STATE_ACTIVE; | |
2302 | break; | |
2303 | case MYRB_DEVICE_WO: | |
2304 | case MYRB_DEVICE_CRITICAL: | |
2305 | state = RAID_STATE_DEGRADED; | |
2306 | break; | |
2307 | default: | |
2308 | state = RAID_STATE_OFFLINE; | |
2309 | } | |
2310 | } | |
2311 | } | |
2312 | raid_set_state(myrb_raid_template, dev, state); | |
2313 | } | |
2314 | ||
2315 | struct raid_function_template myrb_raid_functions = { | |
2316 | .cookie = &myrb_template, | |
2317 | .is_raid = myrb_is_raid, | |
2318 | .get_resync = myrb_get_resync, | |
2319 | .get_state = myrb_get_state, | |
2320 | }; | |
2321 | ||
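| /* | |
| * Completion handling: unmap the data buffer, release any DCDB or | |
| * SGE pool entries, then translate the controller status into a | |
| * SCSI result, building sense data where needed. | |
| */ | |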
2322 | static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk, | |
2323 | struct scsi_cmnd *scmd) | |
2324 | { | |
2325 | unsigned short status; | |
2326 | ||
2327 | if (!cmd_blk) | |
2328 | return; | |
2329 | ||
2330 | scsi_dma_unmap(scmd); | |
2331 | ||
2332 | if (cmd_blk->dcdb) { | |
2333 | memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64); | |
2334 | dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb, | |
2335 | cmd_blk->dcdb_addr); | |
2336 | cmd_blk->dcdb = NULL; | |
2337 | } | |
2338 | if (cmd_blk->sgl) { | |
2339 | dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr); | |
2340 | cmd_blk->sgl = NULL; | |
2341 | cmd_blk->sgl_addr = 0; | |
2342 | } | |
2343 | status = cmd_blk->status; | |
2344 | switch (status) { | |
2345 | case MYRB_STATUS_SUCCESS: | |
2346 | case MYRB_STATUS_DEVICE_BUSY: | |
2347 | scmd->result = (DID_OK << 16) | status; | |
2348 | break; | |
2349 | case MYRB_STATUS_BAD_DATA: | |
2350 | dev_dbg(&scmd->device->sdev_gendev, | |
2351 | "Bad Data Encountered\n"); | |
2352 | if (scmd->sc_data_direction == DMA_FROM_DEVICE) | |
2353 | /* Unrecovered read error */ | |
2354 | scsi_build_sense_buffer(0, scmd->sense_buffer, | |
2355 | MEDIUM_ERROR, 0x11, 0); | |
2356 | else | |
2357 | /* Write error */ | |
2358 | scsi_build_sense_buffer(0, scmd->sense_buffer, | |
2359 | MEDIUM_ERROR, 0x0C, 0); | |
2360 | scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION; | |
2361 | break; | |
2362 | case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR: | |
2363 | scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n"); | |
2364 | if (scmd->sc_data_direction == DMA_FROM_DEVICE) | |
2365 | /* Unrecovered read error, auto-reallocation failed */ | |
2366 | scsi_build_sense_buffer(0, scmd->sense_buffer, | |
2367 | MEDIUM_ERROR, 0x11, 0x04); | |
2368 | else | |
2369 | /* Write error, auto-reallocation failed */ | |
2370 | scsi_build_sense_buffer(0, scmd->sense_buffer, | |
2371 | MEDIUM_ERROR, 0x0C, 0x02); | |
2372 | scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION; | |
2373 | break; | |
2374 | case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE: | |
2375 | dev_dbg(&scmd->device->sdev_gendev, | |
2376 | "Logical Drive Nonexistent or Offline"); | |
2377 | scmd->result = (DID_BAD_TARGET << 16); | |
2378 | break; | |
2379 | case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV: | |
2380 | dev_dbg(&scmd->device->sdev_gendev, | |
2381 | "Attempt to Access Beyond End of Logical Drive"); | |
2382 | /* Logical block address out of range */ | |
2383 | scsi_build_sense_buffer(0, scmd->sense_buffer, | |
2384 | NOT_READY, 0x21, 0); | |
2385 | break; | |
2386 | case MYRB_STATUS_DEVICE_NONRESPONSIVE: | |
2387 | dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n"); | |
2388 | scmd->result = (DID_BAD_TARGET << 16); | |
2389 | break; | |
2390 | default: | |
2391 | scmd_printk(KERN_ERR, scmd, | |
2392 | "Unexpected Error Status %04X", status); | |
2393 | scmd->result = (DID_ERROR << 16); | |
2394 | break; | |
2395 | } | |
2396 | scmd->scsi_done(scmd); | |
2397 | } | |
2398 | ||
2399 | static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) | |
2400 | { | |
2401 | if (!cmd_blk) | |
2402 | return; | |
2403 | ||
2404 | if (cmd_blk->completion) { | |
2405 | complete(cmd_blk->completion); | |
2406 | cmd_blk->completion = NULL; | |
2407 | } | |
2408 | } | |
2409 | ||
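| /* | |
| * Background monitor: drains the controller event log and pending | |
| * status updates one item per invocation, rescheduling itself at a | |
| * short interval while work remains and at the primary interval | |
| * otherwise. | |
| */ | |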
2410 | static void myrb_monitor(struct work_struct *work) | |
2411 | { | |
2412 | struct myrb_hba *cb = container_of(work, | |
2413 | struct myrb_hba, monitor_work.work); | |
2414 | struct Scsi_Host *shost = cb->host; | |
2415 | unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL; | |
2416 | ||
2417 | dev_dbg(&shost->shost_gendev, "monitor tick\n"); | |
2418 | ||
2419 | if (cb->new_ev_seq > cb->old_ev_seq) { | |
2420 | int event = cb->old_ev_seq; | |
2421 | ||
2422 | dev_dbg(&shost->shost_gendev, | |
2423 | "get event log no %d/%d\n", | |
2424 | cb->new_ev_seq, event); | |
2425 | myrb_get_event(cb, event); | |
2426 | cb->old_ev_seq = event + 1; | |
2427 | interval = 10; | |
2428 | } else if (cb->need_err_info) { | |
2429 | cb->need_err_info = false; | |
2430 | dev_dbg(&shost->shost_gendev, "get error table\n"); | |
2431 | myrb_get_errtable(cb); | |
2432 | interval = 10; | |
2433 | } else if (cb->need_rbld && cb->rbld_first) { | |
2434 | cb->need_rbld = false; | |
2435 | dev_dbg(&shost->shost_gendev, | |
2436 | "get rebuild progress\n"); | |
2437 | myrb_update_rbld_progress(cb); | |
2438 | interval = 10; | |
2439 | } else if (cb->need_ldev_info) { | |
2440 | cb->need_ldev_info = false; | |
2441 | dev_dbg(&shost->shost_gendev, | |
2442 | "get logical drive info\n"); | |
2443 | myrb_get_ldev_info(cb); | |
2444 | interval = 10; | |
2445 | } else if (cb->need_rbld) { | |
2446 | cb->need_rbld = false; | |
2447 | dev_dbg(&shost->shost_gendev, | |
2448 | "get rebuild progress\n"); | |
2449 | myrb_update_rbld_progress(cb); | |
2450 | interval = 10; | |
2451 | } else if (cb->need_cc_status) { | |
2452 | cb->need_cc_status = false; | |
2453 | dev_dbg(&shost->shost_gendev, | |
2454 | "get consistency check progress\n"); | |
2455 | myrb_get_cc_progress(cb); | |
2456 | interval = 10; | |
2457 | } else if (cb->need_bgi_status) { | |
2458 | cb->need_bgi_status = false; | |
2459 | dev_dbg(&shost->shost_gendev, "get background init status\n"); | |
2460 | myrb_bgi_control(cb); | |
2461 | interval = 10; | |
2462 | } else { | |
2463 | dev_dbg(&shost->shost_gendev, "new enquiry\n"); | |
2464 | mutex_lock(&cb->dma_mutex); | |
2465 | myrb_hba_enquiry(cb); | |
2466 | mutex_unlock(&cb->dma_mutex); | |
2467 | if ((cb->new_ev_seq - cb->old_ev_seq > 0) || | |
2468 | cb->need_err_info || cb->need_rbld || | |
2469 | cb->need_ldev_info || cb->need_cc_status || | |
2470 | cb->need_bgi_status) { | |
2471 | dev_dbg(&shost->shost_gendev, | |
2472 | "reschedule monitor\n"); | |
2473 | interval = 0; | |
2474 | } | |
2475 | } | |
2476 | if (interval > 1) | |
2477 | cb->primary_monitor_time = jiffies; | |
2478 | queue_delayed_work(cb->work_q, &cb->monitor_work, interval); | |
2479 | } | |
2480 | ||
2481 | /** | |
2482 | * myrb_err_status - reports controller BIOS messages | |
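| * @cb: HBA to report against | |
| * @error: error code read from the Error Status Register | |
| * @parm0: first parameter byte accompanying the error | |
| * @parm1: second parameter byte accompanying the error | |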
2483 | * | |
2484 | * Controller BIOS messages are passed through the Error Status Register | |
2485 | * when the driver performs the BIOS handshaking. | |
2486 | * | |
2487 | * Return: true for fatal errors and false otherwise. | |
2488 | */ | |
2489 | bool myrb_err_status(struct myrb_hba *cb, unsigned char error, | |
2490 | unsigned char parm0, unsigned char parm1) | |
2491 | { | |
2492 | struct pci_dev *pdev = cb->pdev; | |
2493 | ||
2494 | switch (error) { | |
2495 | case 0x00: | |
2496 | dev_info(&pdev->dev, | |
2497 | "Physical Device %d:%d Not Responding\n", | |
2498 | parm1, parm0); | |
2499 | break; | |
2500 | case 0x08: | |
2501 | dev_notice(&pdev->dev, "Spinning Up Drives\n"); | |
2502 | break; | |
2503 | case 0x30: | |
2504 | dev_notice(&pdev->dev, "Configuration Checksum Error\n"); | |
2505 | break; | |
2506 | case 0x60: | |
2507 | dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n"); | |
2508 | break; | |
2509 | case 0x70: | |
2510 | dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n"); | |
2511 | break; | |
2512 | case 0x90: | |
2513 | dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n", | |
2514 | parm1, parm0); | |
2515 | break; | |
2516 | case 0xA0: | |
2517 | dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n"); | |
2518 | break; | |
2519 | case 0xB0: | |
2520 | dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n"); | |
2521 | break; | |
2522 | case 0xD0: | |
2523 | dev_notice(&pdev->dev, "New Controller Configuration Found\n"); | |
2524 | break; | |
2525 | case 0xF0: | |
2526 | dev_err(&pdev->dev, "Fatal Memory Parity Error\n"); | |
2527 | return true; | |
2528 | default: | |
2529 | dev_err(&pdev->dev, "Unknown Initialization Error %02X\n", | |
2530 | error); | |
2531 | return true; | |
2532 | } | |
2533 | return false; | |
2534 | } | |
2535 | ||
2536 | /* | |
2537 | * Hardware-specific functions | |
2538 | */ | |
2539 | ||
2540 | /* | |
2541 | * DAC960 LA Series Controllers | |
2542 | */ | |
2543 | ||
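| /* | |
| * Doorbell-driven interface: commands are posted through the | |
| * inbound doorbell (IDB), completions acknowledged through the | |
| * outbound doorbell (ODB), with a separate interrupt mask register. | |
| */ | |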
2544 | static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base) | |
2545 | { | |
2546 | writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET); | |
2547 | } | |
2548 | ||
2549 | static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base) | |
2550 | { | |
2551 | writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET); | |
2552 | } | |
2553 | ||
2554 | static inline void DAC960_LA_gen_intr(void __iomem *base) | |
2555 | { | |
2556 | writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET); | |
2557 | } | |
2558 | ||
2559 | static inline void DAC960_LA_reset_ctrl(void __iomem *base) | |
2560 | { | |
2561 | writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET); | |
2562 | } | |
2563 | ||
2564 | static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base) | |
2565 | { | |
2566 | writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET); | |
2567 | } | |
2568 | ||
2569 | static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base) | |
2570 | { | |
2571 | unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET); | |
2572 | ||
2573 | return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY); | |
2574 | } | |
2575 | ||
2576 | static inline bool DAC960_LA_init_in_progress(void __iomem *base) | |
2577 | { | |
2578 | unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET); | |
2579 | ||
2580 | return !(idb & DAC960_LA_IDB_INIT_DONE); | |
2581 | } | |
2582 | ||
2583 | static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base) | |
2584 | { | |
2585 | writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET); | |
2586 | } | |
2587 | ||
2588 | static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base) | |
2589 | { | |
2590 | writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET); | |
2591 | } | |
2592 | ||
2593 | static inline void DAC960_LA_ack_intr(void __iomem *base) | |
2594 | { | |
2595 | writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ, | |
2596 | base + DAC960_LA_ODB_OFFSET); | |
2597 | } | |
2598 | ||
2599 | static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base) | |
2600 | { | |
2601 | unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET); | |
2602 | ||
2603 | return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL; | |
2604 | } | |
2605 | ||
2606 | static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base) | |
2607 | { | |
2608 | unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET); | |
2609 | ||
2610 | return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL; | |
2611 | } | |
2612 | ||
2613 | static inline void DAC960_LA_enable_intr(void __iomem *base) | |
2614 | { | |
2615 | unsigned char odb = 0xFF; | |
2616 | ||
2617 | odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ; | |
2618 | writeb(odb, base + DAC960_LA_IRQMASK_OFFSET); | |
2619 | } | |
2620 | ||
2621 | static inline void DAC960_LA_disable_intr(void __iomem *base) | |
2622 | { | |
2623 | unsigned char odb = 0xFF; | |
2624 | ||
2625 | odb |= DAC960_LA_IRQMASK_DISABLE_IRQ; | |
2626 | writeb(odb, base + DAC960_LA_IRQMASK_OFFSET); | |
2627 | } | |
2628 | ||
2629 | static inline bool DAC960_LA_intr_enabled(void __iomem *base) | |
2630 | { | |
2631 | unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET); | |
2632 | ||
2633 | return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ); | |
2634 | } | |
2635 | ||
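| /* | |
| * Word 0 carries the opcode and command ID, so it is written last: | |
| * the wmb() ensures the rest of the mailbox is visible before the | |
| * entry is published to the controller. | |
| */ | |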
2636 | static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox, | |
2637 | union myrb_cmd_mbox *mbox) | |
2638 | { | |
2639 | mem_mbox->words[1] = mbox->words[1]; | |
2640 | mem_mbox->words[2] = mbox->words[2]; | |
2641 | mem_mbox->words[3] = mbox->words[3]; | |
2642 | /* Memory barrier to prevent reordering */ | |
2643 | wmb(); | |
2644 | mem_mbox->words[0] = mbox->words[0]; | |
2645 | /* Memory barrier to force PCI access */ | |
2646 | mb(); | |
2647 | } | |
2648 | ||
2649 | static inline void DAC960_LA_write_hw_mbox(void __iomem *base, | |
2650 | union myrb_cmd_mbox *mbox) | |
2651 | { | |
2652 | writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET); | |
2653 | writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET); | |
2654 | writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET); | |
2655 | writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET); | |
2656 | } | |
2657 | ||
2658 | static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base) | |
2659 | { | |
2660 | return readb(base + DAC960_LA_STSID_OFFSET); | |
2661 | } | |
2662 | ||
2663 | static inline unsigned short DAC960_LA_read_status(void __iomem *base) | |
2664 | { | |
2665 | return readw(base + DAC960_LA_STS_OFFSET); | |
2666 | } | |
2667 | ||
2668 | static inline bool | |
2669 | DAC960_LA_read_error_status(void __iomem *base, unsigned char *error, | |
2670 | unsigned char *param0, unsigned char *param1) | |
2671 | { | |
2672 | unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET); | |
2673 | ||
2674 | if (!(errsts & DAC960_LA_ERRSTS_PENDING)) | |
2675 | return false; | |
2676 | errsts &= ~DAC960_LA_ERRSTS_PENDING; | |
2677 | ||
2678 | *error = errsts; | |
2679 | *param0 = readb(base + DAC960_LA_CMDOP_OFFSET); | |
2680 | *param1 = readb(base + DAC960_LA_CMDID_OFFSET); | |
2681 | writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET); | |
2682 | return true; | |
2683 | } | |
2684 | ||
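| /* | |
| * Polled hardware-mailbox handshake used before the memory mailbox | |
| * interface is up: wait for the mailbox to drain, post the command, | |
| * poll for status, then acknowledge both interrupt and status. | |
| */ | |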
2685 | static inline unsigned short | |
2686 | DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base, | |
2687 | union myrb_cmd_mbox *mbox) | |
2688 | { | |
2689 | unsigned short status; | |
2690 | int timeout = 0; | |
2691 | ||
2692 | while (timeout < MYRB_MAILBOX_TIMEOUT) { | |
2693 | if (!DAC960_LA_hw_mbox_is_full(base)) | |
2694 | break; | |
2695 | udelay(10); | |
2696 | timeout++; | |
2697 | } | |
2698 | if (DAC960_LA_hw_mbox_is_full(base)) { | |
2699 | dev_err(&pdev->dev, | |
2700 | "Timeout waiting for empty mailbox\n"); | |
2701 | return MYRB_STATUS_SUBSYS_TIMEOUT; | |
2702 | } | |
2703 | DAC960_LA_write_hw_mbox(base, mbox); | |
2704 | DAC960_LA_hw_mbox_new_cmd(base); | |
2705 | timeout = 0; | |
2706 | while (timeout < MYRB_MAILBOX_TIMEOUT) { | |
2707 | if (DAC960_LA_hw_mbox_status_available(base)) | |
2708 | break; | |
2709 | udelay(10); | |
2710 | timeout++; | |
2711 | } | |
2712 | if (!DAC960_LA_hw_mbox_status_available(base)) { | |
2713 | dev_err(&pdev->dev, "Timeout waiting for mailbox status\n"); | |
2714 | return MYRB_STATUS_SUBSYS_TIMEOUT; | |
2715 | } | |
2716 | status = DAC960_LA_read_status(base); | |
2717 | DAC960_LA_ack_hw_mbox_intr(base); | |
2718 | DAC960_LA_ack_hw_mbox_status(base); | |
2719 | ||
2720 | return status; | |
2721 | } | |
2722 | ||
2723 | static int DAC960_LA_hw_init(struct pci_dev *pdev, | |
2724 | struct myrb_hba *cb, void __iomem *base) | |
2725 | { | |
2726 | int timeout = 0; | |
2727 | unsigned char error, parm0, parm1; | |
2728 | ||
2729 | DAC960_LA_disable_intr(base); | |
2730 | DAC960_LA_ack_hw_mbox_status(base); | |
2731 | udelay(1000); | |
2732 | timeout = 0; | |
2733 | while (DAC960_LA_init_in_progress(base) && | |
2734 | timeout < MYRB_MAILBOX_TIMEOUT) { | |
2735 | if (DAC960_LA_read_error_status(base, &error, | |
2736 | &parm0, &parm1) && | |
2737 | myrb_err_status(cb, error, parm0, parm1)) | |
2738 | return -ENODEV; | |
2739 | udelay(10); | |
2740 | timeout++; | |
2741 | } | |
2742 | if (timeout == MYRB_MAILBOX_TIMEOUT) { | |
2743 | dev_err(&pdev->dev, | |
2744 | "Timeout waiting for Controller Initialisation\n"); | |
2745 | return -ETIMEDOUT; | |
2746 | } | |
2747 | if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) { | |
2748 | dev_err(&pdev->dev, | |
2749 | "Unable to Enable Memory Mailbox Interface\n"); | |
2750 | DAC960_LA_reset_ctrl(base); | |
2751 | return -ENODEV; | |
2752 | } | |
2753 | DAC960_LA_enable_intr(base); | |
2754 | cb->qcmd = myrb_qcmd; | |
2755 | cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox; | |
2756 | if (cb->dual_mode_interface) | |
2757 | cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd; | |
2758 | else | |
2759 | cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd; | |
2760 | cb->disable_intr = DAC960_LA_disable_intr; | |
2761 | cb->reset = DAC960_LA_reset_ctrl; | |
2762 | ||
2763 | return 0; | |
2764 | } | |
2765 | ||
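| /* | |
| * Walk the status mailbox ring: match each completion to its | |
| * command block by tag (tags below 3 are driver-internal, SCSI | |
| * command tags start at 3), clear the slot and wrap at the end | |
| * of the ring. | |
| */ | |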
2766 | static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg) | |
2767 | { | |
2768 | struct myrb_hba *cb = arg; | |
2769 | void __iomem *base = cb->io_base; | |
2770 | struct myrb_stat_mbox *next_stat_mbox; | |
2771 | unsigned long flags; | |
2772 | ||
2773 | spin_lock_irqsave(&cb->queue_lock, flags); | |
2774 | DAC960_LA_ack_intr(base); | |
2775 | next_stat_mbox = cb->next_stat_mbox; | |
2776 | while (next_stat_mbox->valid) { | |
2777 | unsigned char id = next_stat_mbox->id; | |
2778 | struct scsi_cmnd *scmd = NULL; | |
2779 | struct myrb_cmdblk *cmd_blk = NULL; | |
2780 | ||
2781 | if (id == MYRB_DCMD_TAG) | |
2782 | cmd_blk = &cb->dcmd_blk; | |
2783 | else if (id == MYRB_MCMD_TAG) | |
2784 | cmd_blk = &cb->mcmd_blk; | |
2785 | else { | |
2786 | scmd = scsi_host_find_tag(cb->host, id - 3); | |
2787 | if (scmd) | |
2788 | cmd_blk = scsi_cmd_priv(scmd); | |
2789 | } | |
2790 | if (cmd_blk) | |
2791 | cmd_blk->status = next_stat_mbox->status; | |
2792 | else | |
2793 | dev_err(&cb->pdev->dev, | |
2794 | "Unhandled command completion %d\n", id); | |
2795 | ||
2796 | memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox)); | |
2797 | if (++next_stat_mbox > cb->last_stat_mbox) | |
2798 | next_stat_mbox = cb->first_stat_mbox; | |
2799 | ||
2800 | if (cmd_blk) { | |
2801 | if (id < 3) | |
2802 | myrb_handle_cmdblk(cb, cmd_blk); | |
2803 | else | |
2804 | myrb_handle_scsi(cb, cmd_blk, scmd); | |
2805 | } | |
2806 | } | |
2807 | cb->next_stat_mbox = next_stat_mbox; | |
2808 | spin_unlock_irqrestore(&cb->queue_lock, flags); | |
2809 | return IRQ_HANDLED; | |
2810 | } | |
2811 | ||
2812 | struct myrb_privdata DAC960_LA_privdata = { | |
2813 | .hw_init = DAC960_LA_hw_init, | |
2814 | .irq_handler = DAC960_LA_intr_handler, | |
2815 | .mmio_size = DAC960_LA_mmio_size, | |
2816 | }; | |
2817 | ||
2818 | /* | |
2819 | * DAC960 PG Series Controllers | |
2820 | */ | |
2821 | static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base) | |
2822 | { | |
2823 | writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET); | |
2824 | } | |
2825 | ||
2826 | static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base) | |
2827 | { | |
2828 | writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET); | |
2829 | } | |
2830 | ||
2831 | static inline void DAC960_PG_gen_intr(void __iomem *base) | |
2832 | { | |
2833 | writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET); | |
2834 | } | |
2835 | ||
2836 | static inline void DAC960_PG_reset_ctrl(void __iomem *base) | |
2837 | { | |
2838 | writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET); | |
2839 | } | |
2840 | ||
2841 | static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base) | |
2842 | { | |
2843 | writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET); | |
2844 | } | |
2845 | ||
2846 | static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base) | |
2847 | { | |
2848 | unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET); | |
2849 | ||
2850 | return idb & DAC960_PG_IDB_HWMBOX_FULL; | |
2851 | } | |
2852 | ||
2853 | static inline bool DAC960_PG_init_in_progress(void __iomem *base) | |
2854 | { | |
2855 | unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET); | |
2856 | ||
2857 | return idb & DAC960_PG_IDB_INIT_IN_PROGRESS; | |
2858 | } | |
2859 | ||
2860 | static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base) | |
2861 | { | |
2862 | writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET); | |
2863 | } | |
2864 | ||
2865 | static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base) | |
2866 | { | |
2867 | writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET); | |
2868 | } | |
2869 | ||
2870 | static inline void DAC960_PG_ack_intr(void __iomem *base) | |
2871 | { | |
2872 | writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ, | |
2873 | base + DAC960_PG_ODB_OFFSET); | |
2874 | } | |
2875 | ||
2876 | static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base) | |
2877 | { | |
2878 | unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET); | |
2879 | ||
2880 | return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL; | |
2881 | } | |
2882 | ||
2883 | static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base) | |
2884 | { | |
2885 | unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET); | |
2886 | ||
2887 | return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL; | |
2888 | } | |
2889 | ||
2890 | static inline void DAC960_PG_enable_intr(void __iomem *base) | |
2891 | { | |
2892 | unsigned int imask = (unsigned int)-1; | |
2893 | ||
2894 | imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ; | |
2895 | writel(imask, base + DAC960_PG_IRQMASK_OFFSET); | |
2896 | } | |
2897 | ||
2898 | static inline void DAC960_PG_disable_intr(void __iomem *base) | |
2899 | { | |
2900 | unsigned int imask = (unsigned int)-1; | |
2901 | ||
2902 | writel(imask, base + DAC960_PG_IRQMASK_OFFSET); | |
2903 | } | |
2904 | ||
2905 | static inline bool DAC960_PG_intr_enabled(void __iomem *base) | |
2906 | { | |
2907 | unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET); | |
2908 | ||
2909 | return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ); | |
2910 | } | |
2911 | ||
2912 | static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox, | |
2913 | union myrb_cmd_mbox *mbox) | |
2914 | { | |
2915 | mem_mbox->words[1] = mbox->words[1]; | |
2916 | mem_mbox->words[2] = mbox->words[2]; | |
2917 | mem_mbox->words[3] = mbox->words[3]; | |
2918 | /* Memory barrier to prevent reordering */ | |
2919 | wmb(); | |
2920 | mem_mbox->words[0] = mbox->words[0]; | |
2921 | /* Memory barrier to force PCI access */ | |
2922 | mb(); | |
2923 | } | |
2924 | ||
2925 | static inline void DAC960_PG_write_hw_mbox(void __iomem *base, | |
2926 | union myrb_cmd_mbox *mbox) | |
2927 | { | |
2928 | writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET); | |
2929 | writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET); | |
2930 | writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET); | |
2931 | writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET); | |
2932 | } | |
2933 | ||
2934 | static inline unsigned char | |
2935 | DAC960_PG_read_status_cmd_ident(void __iomem *base) | |
2936 | { | |
2937 | return readb(base + DAC960_PG_STSID_OFFSET); | |
2938 | } | |
2939 | ||
2940 | static inline unsigned short | |
2941 | DAC960_PG_read_status(void __iomem *base) | |
2942 | { | |
2943 | return readw(base + DAC960_PG_STS_OFFSET); | |
2944 | } | |
2945 | ||
2946 | static inline bool | |
2947 | DAC960_PG_read_error_status(void __iomem *base, unsigned char *error, | |
2948 | unsigned char *param0, unsigned char *param1) | |
2949 | { | |
2950 | unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET); | |
2951 | ||
2952 | if (!(errsts & DAC960_PG_ERRSTS_PENDING)) | |
2953 | return false; | |
2954 | errsts &= ~DAC960_PG_ERRSTS_PENDING; | |
2955 | *error = errsts; | |
2956 | *param0 = readb(base + DAC960_PG_CMDOP_OFFSET); | |
2957 | *param1 = readb(base + DAC960_PG_CMDID_OFFSET); | |
2958 | writeb(0, base + DAC960_PG_ERRSTS_OFFSET); | |
2959 | return true; | |
2960 | } | |
2961 | ||
2962 | static inline unsigned short | |
2963 | DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base, | |
2964 | union myrb_cmd_mbox *mbox) | |
2965 | { | |
2966 | unsigned short status; | |
2967 | int timeout = 0; | |
2968 | ||
2969 | while (timeout < MYRB_MAILBOX_TIMEOUT) { | |
2970 | if (!DAC960_PG_hw_mbox_is_full(base)) | |
2971 | break; | |
2972 | udelay(10); | |
2973 | timeout++; | |
2974 | } | |
2975 | if (DAC960_PG_hw_mbox_is_full(base)) { | |
2976 | dev_err(&pdev->dev, | |
2977 | "Timeout waiting for empty mailbox\n"); | |
2978 | return MYRB_STATUS_SUBSYS_TIMEOUT; | |
2979 | } | |
2980 | DAC960_PG_write_hw_mbox(base, mbox); | |
2981 | DAC960_PG_hw_mbox_new_cmd(base); | |
2982 | ||
2983 | timeout = 0; | |
2984 | while (timeout < MYRB_MAILBOX_TIMEOUT) { | |
2985 | if (DAC960_PG_hw_mbox_status_available(base)) | |
2986 | break; | |
2987 | udelay(10); | |
2988 | timeout++; | |
2989 | } | |
2990 | if (!DAC960_PG_hw_mbox_status_available(base)) { | |
2991 | dev_err(&pdev->dev, | |
2992 | "Timeout waiting for mailbox status\n"); | |
2993 | return MYRB_STATUS_SUBSYS_TIMEOUT; | |
2994 | } | |
2995 | status = DAC960_PG_read_status(base); | |
2996 | DAC960_PG_ack_hw_mbox_intr(base); | |
2997 | DAC960_PG_ack_hw_mbox_status(base); | |
2998 | ||
2999 | return status; | |
3000 | } | |
3001 | ||
3002 | static int DAC960_PG_hw_init(struct pci_dev *pdev, | |
3003 | struct myrb_hba *cb, void __iomem *base) | |
3004 | { | |
3005 | int timeout = 0; | |
3006 | unsigned char error, parm0, parm1; | |
3007 | ||
3008 | DAC960_PG_disable_intr(base); | |
3009 | DAC960_PG_ack_hw_mbox_status(base); | |
3010 | udelay(1000); | |
3011 | while (DAC960_PG_init_in_progress(base) && | |
3012 | timeout < MYRB_MAILBOX_TIMEOUT) { | |
3013 | if (DAC960_PG_read_error_status(base, &error, | |
3014 | &parm0, &parm1) && | |
3015 | myrb_err_status(cb, error, parm0, parm1)) | |
3016 | return -EIO; | |
3017 | udelay(10); | |
3018 | timeout++; | |
3019 | } | |
3020 | if (timeout == MYRB_MAILBOX_TIMEOUT) { | |
3021 | dev_err(&pdev->dev, | |
3022 | "Timeout waiting for Controller Initialisation\n"); | |
3023 | return -ETIMEDOUT; | |
3024 | } | |
3025 | if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) { | |
3026 | dev_err(&pdev->dev, | |
3027 | "Unable to Enable Memory Mailbox Interface\n"); | |
3028 | DAC960_PG_reset_ctrl(base); | |
3029 | return -ENODEV; | |
3030 | } | |
3031 | DAC960_PG_enable_intr(base); | |
3032 | cb->qcmd = myrb_qcmd; | |
3033 | cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox; | |
3034 | if (cb->dual_mode_interface) | |
3035 | cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd; | |
3036 | else | |
3037 | cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd; | |
3038 | cb->disable_intr = DAC960_PG_disable_intr; | |
3039 | cb->reset = DAC960_PG_reset_ctrl; | |
3040 | ||
3041 | return 0; | |
3042 | } | |
3043 | ||
3044 | static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg) | |
3045 | { | |
3046 | struct myrb_hba *cb = arg; | |
3047 | void __iomem *base = cb->io_base; | |
3048 | struct myrb_stat_mbox *next_stat_mbox; | |
3049 | unsigned long flags; | |
3050 | ||
3051 | spin_lock_irqsave(&cb->queue_lock, flags); | |
3052 | DAC960_PG_ack_intr(base); | |
3053 | next_stat_mbox = cb->next_stat_mbox; | |
3054 | while (next_stat_mbox->valid) { | |
3055 | unsigned char id = next_stat_mbox->id; | |
3056 | struct scsi_cmnd *scmd = NULL; | |
3057 | struct myrb_cmdblk *cmd_blk = NULL; | |
3058 | ||
3059 | if (id == MYRB_DCMD_TAG) | |
3060 | cmd_blk = &cb->dcmd_blk; | |
3061 | else if (id == MYRB_MCMD_TAG) | |
3062 | cmd_blk = &cb->mcmd_blk; | |
3063 | else { | |
3064 | scmd = scsi_host_find_tag(cb->host, id - 3); | |
3065 | if (scmd) | |
3066 | cmd_blk = scsi_cmd_priv(scmd); | |
3067 | } | |
3068 | if (cmd_blk) | |
3069 | cmd_blk->status = next_stat_mbox->status; | |
3070 | else | |
3071 | dev_err(&cb->pdev->dev, | |
3072 | "Unhandled command completion %d\n", id); | |
3073 | ||
3074 | memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox)); | |
3075 | if (++next_stat_mbox > cb->last_stat_mbox) | |
3076 | next_stat_mbox = cb->first_stat_mbox; | |
3077 | ||
3078 | if (id < 3) | |
3079 | myrb_handle_cmdblk(cb, cmd_blk); | |
3080 | else | |
3081 | myrb_handle_scsi(cb, cmd_blk, scmd); | |
3082 | } | |
3083 | cb->next_stat_mbox = next_stat_mbox; | |
3084 | spin_unlock_irqrestore(&cb->queue_lock, flags); | |
3085 | return IRQ_HANDLED; | |
3086 | } | |
3087 | ||
3088 | struct myrb_privdata DAC960_PG_privdata = { | |
3089 | .hw_init = DAC960_PG_hw_init, | |
3090 | .irq_handler = DAC960_PG_intr_handler, | |
3091 | .mmio_size = DAC960_PG_mmio_size, | |
3092 | }; | |
3093 | ||
3094 | ||
3095 | /* | |
3096 | * DAC960 PD Series Controllers | |
3097 | */ | |
3098 | ||
static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
}

static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
}

static inline void DAC960_PD_gen_intr(void __iomem *base)
{
	writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
}

static inline void DAC960_PD_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
}

static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_HWMBOX_FULL;
}

static inline bool DAC960_PD_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
}

static inline void DAC960_PD_ack_intr(void __iomem *base)
{
	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
}

static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);

	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
}

static inline void DAC960_PD_enable_intr(void __iomem *base)
{
	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
}

static inline void DAC960_PD_disable_intr(void __iomem *base)
{
	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
}

static inline bool DAC960_PD_intr_enabled(void __iomem *base)
{
	unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);

	return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
}

static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
}

static inline unsigned char
DAC960_PD_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_PD_STSID_OFFSET);
}

static inline unsigned short
DAC960_PD_read_status(void __iomem *base)
{
	return readw(base + DAC960_PD_STS_OFFSET);
}

static inline bool
DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PD_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
	return true;
}

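/*
 * Submit a command through the hardware mailbox: busy-wait until the
 * controller has consumed the previous command, write the new mailbox
 * contents, then ring the "new command" doorbell.
 */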
static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}

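/*
 * Bring up a PD series controller: reserve its I/O port range, quiesce
 * interrupts, poll (with a bounded timeout) until the firmware has
 * finished initialising, and wire up the PD-specific queueing hooks.
 */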
static int DAC960_PD_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_PD_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}

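/*
 * Interrupt handler for the PD series. There is no status mailbox ring;
 * the completion identifier and status are read directly from the
 * controller's registers, and each completion is acknowledged on both
 * the outbound doorbell and the hardware mailbox before being handed
 * to the common completion paths.
 */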
static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}

static struct myrb_privdata DAC960_PD_privdata = {
	.hw_init = DAC960_PD_hw_init,
	.irq_handler = DAC960_PD_intr_handler,
	.mmio_size = DAC960_PD_mmio_size,
};


/*
 * DAC960 P Series Controllers
 *
 * Similar to the DAC960 PD Series Controllers, but some commands have
 * to be translated.
 */

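/*
 * The P series firmware predates the mailbox and data formats used by
 * the rest of this driver, so data returned by the old ENQUIRY and
 * GET DEVICE STATE commands has to be shuffled into the current
 * structure layouts. The offsets below follow the old firmware's field
 * placement, as inherited from the original DAC960 driver.
 */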
static inline void myrb_translate_enquiry(void *enq)
{
	memcpy(enq + 132, enq + 36, 64);
	memset(enq + 36, 0, 96);
}

static inline void myrb_translate_devstate(void *state)
{
	memcpy(state + 2, state + 3, 1);
	memmove(state + 4, state + 5, 2);
	memmove(state + 6, state + 8, 4);
}

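/*
 * Old-style read/write commands use a different mailbox packing: the
 * logical drive number moves into byte 7 and the bits it displaces are
 * folded into the top of byte 3. These two helpers convert a command
 * block to the old layout before submission and back on completion.
 */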
static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->type5.ld.ldev_num;

	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= mbox->bytes[7] << 6;
	mbox->bytes[7] = ldev_num;
}

static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->bytes[7];

	mbox->bytes[7] = mbox->bytes[3] >> 6;
	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= ldev_num << 3;
}

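/*
 * Queue a command on a P series controller. The mailbox opcode is
 * rewritten to its _OLD equivalent (translating read/write commands to
 * the old packing where needed) before it is posted through the same
 * hardware mailbox mechanism as the PD series.
 */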
static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	switch (mbox->common.opcode) {
	case MYRB_CMD_ENQUIRY:
		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
		break;
	case MYRB_CMD_GET_DEVICE_STATE:
		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
		break;
	case MYRB_CMD_READ:
		mbox->common.opcode = MYRB_CMD_READ_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE:
		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_READ_SG:
		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE_SG:
		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	default:
		break;
	}
	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}

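/*
 * Bring-up for the P series mirrors DAC960_PD_hw_init, except that
 * DAC960_P_qcmd is installed so every command is translated to the old
 * firmware format on submission.
 */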
static int DAC960_P_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to allocate DMA mapped memory\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_P_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}

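/*
 * Completion handling for the P series: after the status has been read
 * back, the opcode translation done in DAC960_P_qcmd is undone, so the
 * rest of the driver only ever sees current-format mailboxes and data.
 */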
static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;
		union myrb_cmd_mbox *mbox;
		enum myrb_cmd_opcode op;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		if (!cmd_blk)
			continue;

		mbox = &cmd_blk->mbox;
		op = mbox->common.opcode;
		switch (op) {
		case MYRB_CMD_ENQUIRY_OLD:
			mbox->common.opcode = MYRB_CMD_ENQUIRY;
			myrb_translate_enquiry(cb->enquiry);
			break;
		case MYRB_CMD_READ_OLD:
			mbox->common.opcode = MYRB_CMD_READ;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_READ_SG_OLD:
			mbox->common.opcode = MYRB_CMD_READ_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_SG_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		default:
			break;
		}
		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}

static struct myrb_privdata DAC960_P_privdata = {
	.hw_init = DAC960_P_hw_init,
	.irq_handler = DAC960_P_intr_handler,
	.mmio_size = DAC960_PD_mmio_size,
};

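/*
 * Allocate a Scsi_Host for the controller, map its register window and
 * run the board-specific hw_init before hooking up the interrupt.
 * PD and P series boards expose both an I/O port range (BAR 0) and a
 * memory window (BAR 1); the other variants use a single memory BAR.
 */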
static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrb_privdata *privdata =
		(struct myrb_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct Scsi_Host *shost;
	struct myrb_hba *cb = NULL;

	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
	if (!shost) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	shost->max_cmd_len = 12;
	shost->max_lun = 256;
	cb = shost_priv(shost);
	mutex_init(&cb->dcmd_mutex);
	mutex_init(&cb->dma_mutex);
	cb->pdev = pdev;

	if (pci_enable_device(pdev))
		goto failure;

	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else {
		cb->pci_addr = pci_resource_start(pdev, 0);
	}

	pci_set_drvdata(pdev, cb);
	spin_lock_init(&cb->queue_lock);
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
	if (cb->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto failure;
	}

	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cb, cb->io_base))
		goto failure;

	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto failure;
	}
	cb->irq = pdev->irq;
	return cb;

failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrb_cleanup(cb);
	return NULL;
}

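/*
 * PCI probe: detect and initialise the controller, read its
 * configuration, set up the auxiliary memory pools and register the
 * SCSI host.
 */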
static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
{
	struct myrb_hba *cb;
	int ret;

	cb = myrb_detect(dev, entry);
	if (!cb)
		return -ENODEV;

	ret = myrb_get_hba_config(cb);
	if (ret < 0) {
		myrb_cleanup(cb);
		return ret;
	}

	if (!myrb_create_mempools(dev, cb)) {
		ret = -ENOMEM;
		goto failed;
	}

	ret = scsi_add_host(cb->host, &dev->dev);
	if (ret) {
		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
		myrb_destroy_mempools(cb);
		goto failed;
	}
	scsi_scan_host(cb->host);
	return 0;
failed:
	myrb_cleanup(cb);
	return ret;
}

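/*
 * PCI remove: flush the controller's write cache before tearing down
 * the host, so no dirty data is lost on an orderly unload.
 */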
static void myrb_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...\n");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	myrb_cleanup(cb);
	myrb_destroy_mempools(cb);
}

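/*
 * The LA series identifies itself with the DEC 21285 bridge ID, so it
 * is matched on the Mylex subsystem IDs; the remaining boards carry
 * their own Mylex PCI device IDs.
 */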
static const struct pci_device_id myrb_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
			       PCI_DEVICE_ID_DEC_21285,
			       PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
		.driver_data = (unsigned long) &DAC960_LA_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, myrb_id_table);

static struct pci_driver myrb_pci_driver = {
	.name		= "myrb",
	.id_table	= myrb_id_table,
	.probe		= myrb_probe,
	.remove		= myrb_remove,
};

static int __init myrb_init_module(void)
{
	int ret;

	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
	if (!myrb_raid_template)
		return -ENODEV;

	ret = pci_register_driver(&myrb_pci_driver);
	if (ret)
		raid_class_release(myrb_raid_template);

	return ret;
}

static void __exit myrb_cleanup_module(void)
{
	pci_unregister_driver(&myrb_pci_driver);
	raid_class_release(myrb_raid_template);
}

module_init(myrb_init_module);
module_exit(myrb_cleanup_module);

MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
MODULE_AUTHOR("Hannes Reinecke <[email protected]>");
MODULE_LICENSE("GPL");