// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * Based on the original DAC960 driver,
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include "myrb.h"

static struct raid_template *myrb_raid_template;

static void myrb_monitor(struct work_struct *work);
static inline void myrb_translate_devstate(void *DeviceState);

static inline int myrb_logical_channel(struct Scsi_Host *shost)
{
	return shost->max_channel - 1;
}

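/*
 * Logical drives are presented on the highest channel of the SCSI host;
 * every channel below it addresses physical (pass-through) devices. All
 * channel comparisons in this driver go through this helper.
 */
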
static struct myrb_devstate_name_entry {
	enum myrb_devstate state;
	const char *name;
} myrb_devstate_name_list[] = {
	{ MYRB_DEVICE_DEAD, "Dead" },
	{ MYRB_DEVICE_WO, "WriteOnly" },
	{ MYRB_DEVICE_ONLINE, "Online" },
	{ MYRB_DEVICE_CRITICAL, "Critical" },
	{ MYRB_DEVICE_STANDBY, "Standby" },
	{ MYRB_DEVICE_OFFLINE, "Offline" },
};

static const char *myrb_devstate_name(enum myrb_devstate state)
{
	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return NULL;
}

static struct myrb_raidlevel_name_entry {
	enum myrb_raidlevel level;
	const char *name;
} myrb_raidlevel_name_list[] = {
	{ MYRB_RAID_LEVEL0, "RAID0" },
	{ MYRB_RAID_LEVEL1, "RAID1" },
	{ MYRB_RAID_LEVEL3, "RAID3" },
	{ MYRB_RAID_LEVEL5, "RAID5" },
	{ MYRB_RAID_LEVEL6, "RAID6" },
	{ MYRB_RAID_JBOD, "JBOD" },
};

static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
{
	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}

/**
 * myrb_create_mempools - allocates auxiliary data structures
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
{
	size_t elem_size, elem_align;

	elem_align = sizeof(struct myrb_sge);
	elem_size = cb->host->sg_tablesize * elem_align;
	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
				      elem_size, elem_align, 0);
	if (cb->sg_pool == NULL) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate SG pool\n");
		return false;
	}

	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
					sizeof(struct myrb_dcdb),
					sizeof(unsigned int), 0);
	if (!cb->dcdb_pool) {
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate DCDB pool\n");
		return false;
	}

	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
		 "myrb_wq_%d", cb->host->host_no);
	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
	if (!cb->work_q) {
		dma_pool_destroy(cb->dcdb_pool);
		cb->dcdb_pool = NULL;
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to create workqueue\n");
		return false;
	}

	/*
	 * Initialize the Monitoring Timer.
	 */
	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);

	return true;
}

/**
 * myrb_destroy_mempools - tears down the memory pools for the controller
 */
static void myrb_destroy_mempools(struct myrb_hba *cb)
{
	cancel_delayed_work_sync(&cb->monitor_work);
	destroy_workqueue(cb->work_q);

	dma_pool_destroy(cb->sg_pool);
	dma_pool_destroy(cb->dcdb_pool);
}

/**
 * myrb_reset_cmd - reset command block
 */
static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
	cmd_blk->status = 0;
}

/**
 * myrb_qcmd - queues command block for execution
 */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

	cb->write_cmd_mbox(next_mbox, mbox);
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}

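/*
 * Note on the mailbox ring above: commands are posted into a circular
 * array of memory mailboxes. The hardware gate (the get_cmd_mbox hook)
 * only needs to be signalled when one of the two most recently posted
 * mailboxes has already been consumed (its first word cleared to 0);
 * otherwise the controller is still scanning the array and will pick up
 * the new mailbox on its own. Callers serialize via cb->queue_lock.
 */
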
/**
 * myrb_exec_cmd - executes command block and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
		struct myrb_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	unsigned long flags;

	cmd_blk->completion = &cmpl;

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	WARN_ON(in_interrupt());
	wait_for_completion(&cmpl);
	return cmd_blk->status;
}

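/*
 * myrb_exec_cmd() is strictly synchronous: cmd_blk->completion is
 * completed once the controller posts completion status for this
 * command, so it must only be called from process context, which the
 * WARN_ON(in_interrupt()) above asserts.
 */
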
/**
 * myrb_exec_type3 - executes a type 3 command and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, dma_addr_t addr)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_DCMD_TAG;
	mbox->type3.opcode = op;
	mbox->type3.addr = addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	return status;
}

/**
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, struct scsi_device *sdev,
		struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
	if (status == MYRB_STATUS_SUCCESS &&
	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
		myrb_translate_devstate(pdev_info);

	return status;
}

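/*
 * Note: the device state buffer above is streaming-mapped with
 * dma_map_single() rather than coming from a coherent pool, so it must
 * not be touched between queueing and unmapping. Old firmware
 * (MYRB_CMD_GET_DEVICE_STATE_OLD) returns the state in a legacy layout
 * which myrb_translate_devstate() converts in place.
 */
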
static char *myrb_event_msg[] = {
	"killed because write recovery failed",
	"killed because of SCSI bus reset failure",
	"killed because of double check condition",
	"killed because it was removed",
	"killed because of gross error on SCSI chip",
	"killed because of bad tag returned from drive",
	"killed because of timeout on SCSI command",
	"killed because of reset SCSI command issued from system",
	"killed because busy or parity error count exceeded limit",
	"killed because of 'kill drive' command from system",
	"killed because of selection timeout",
	"killed due to SCSI phase sequence error",
	"killed due to unknown status",
};

/**
 * myrb_get_event - get event log from HBA
 * @cb: pointer to the hba structure
 * @event: number of the event
 *
 * Executes a type 3E command and logs the event message.
 */
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_log_entry *ev_buf;
	dma_addr_t ev_addr;
	unsigned short status;

	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
				    sizeof(struct myrb_log_entry),
				    &ev_addr, GFP_KERNEL);
	if (!ev_buf)
		return;

	myrb_reset_cmd(cmd_blk);
	mbox->type3E.id = MYRB_MCMD_TAG;
	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
	mbox->type3E.opqual = 1;
	mbox->type3E.ev_seq = event;
	mbox->type3E.addr = ev_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status != MYRB_STATUS_SUCCESS)
		shost_printk(KERN_INFO, cb->host,
			     "Failed to get event log %d, status %04x\n",
			     event, status);
	else if (ev_buf->seq_num == event) {
		struct scsi_sense_hdr sshdr;

		memset(&sshdr, 0, sizeof(sshdr));
		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);

		if (sshdr.sense_key == VENDOR_SPECIFIC &&
		    sshdr.asc == 0x80 &&
		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: %s\n",
				     ev_buf->channel, ev_buf->target,
				     myrb_event_msg[sshdr.ascq]);
		else
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
				     ev_buf->channel, ev_buf->target,
				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
	}

	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
			  ev_buf, ev_addr);
}

/**
 * myrb_get_errtable - retrieves the error table from the controller
 *
 * Executes a type 3 command and logs the error table from the controller.
 */
static void myrb_get_errtable(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];

	memcpy(&old_table, cb->err_table, sizeof(old_table));

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
	mbox->type3.addr = cb->err_table_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		struct myrb_error_entry *table = cb->err_table;
		struct myrb_error_entry *new, *old;
		size_t err_table_offset;
		struct scsi_device *sdev;

		shost_for_each_device(sdev, cb->host) {
			if (sdev->channel >= myrb_logical_channel(cb->host))
				continue;
			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
				+ sdev->id;
			new = table + err_table_offset;
			old = &old_table[err_table_offset];
			if (new->parity_err == old->parity_err &&
			    new->soft_err == old->soft_err &&
			    new->hard_err == old->hard_err &&
			    new->misc_err == old->misc_err)
				continue;
			sdev_printk(KERN_CRIT, sdev,
				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
				    new->parity_err, new->soft_err,
				    new->hard_err, new->misc_err);
		}
	}
}

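/*
 * The error table is fetched into cb->err_table by controller DMA; a
 * snapshot of the previous contents (old_table) is kept on the stack so
 * that only devices whose parity/soft/hard/misc counters actually
 * changed since the last poll are logged.
 */
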
/**
 * myrb_get_ldev_info - retrieves the logical device table from the controller
 *
 * Executes a type 3 command and updates the logical device table.
 *
 * Return: command status
 */
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
{
	unsigned short status;
	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
	struct Scsi_Host *shost = cb->host;

	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
				 cb->ldev_info_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
		struct myrb_ldev_info *old = NULL;
		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
					  ldev_num, 0);
		if (!sdev) {
			if (new->state == MYRB_DEVICE_OFFLINE)
				continue;
			shost_printk(KERN_INFO, shost,
				     "Adding Logical Drive %d in state %s\n",
				     ldev_num, myrb_devstate_name(new->state));
			scsi_add_device(shost, myrb_logical_channel(shost),
					ldev_num, 0);
			continue;
		}
		old = sdev->hostdata;
		if (new->state != old->state)
			shost_printk(KERN_INFO, shost,
				     "Logical Drive %d is now %s\n",
				     ldev_num, myrb_devstate_name(new->state));
		if (new->wb_enabled != old->wb_enabled)
			sdev_printk(KERN_INFO, sdev,
				    "Logical Drive is now WRITE %s\n",
				    (new->wb_enabled ? "BACK" : "THRU"));
		memcpy(old, new, sizeof(*new));
		scsi_device_put(sdev);
	}
	return status;
}

/**
 * myrb_get_rbld_progress - get rebuild progress information
 *
 * Executes a type 3 command and returns the rebuild progress
 * information.
 *
 * Return: command status
 */
static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
		struct myrb_rbld_progress *rbld)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf)
		return MYRB_STATUS_RBLD_NOT_CHECKED;

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (rbld)
		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
	return status;
}

/**
 * myrb_update_rbld_progress - updates the rebuild status
 *
 * Updates the rebuild status for the attached logical devices.
 */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
		status = MYRB_STATUS_RBLD_SUCCESS;
	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
		unsigned int blocks_done =
			rbld_buf.ldev_size - rbld_buf.blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  rbld_buf.ldev_num, 0);
		if (!sdev)
			return;

		switch (status) {
		case MYRB_STATUS_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild in Progress, %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (rbld_buf.ldev_size >> 7));
			break;
		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Logical Drive Failure\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Completed Successfully\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Successfully Terminated\n");
			break;
		default:
			break;
		}
		scsi_device_put(sdev);
	}
	cb->last_rbld_status = status;
}

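/*
 * The percentage above is computed as (100 * (done >> 7)) / (size >> 7);
 * shifting both operands down by 7 bits before multiplying by 100 keeps
 * the intermediate product from overflowing 32 bits on large logical
 * drives.
 */
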
/**
 * myrb_get_cc_progress - retrieve the rebuild status
 *
 * Executes a type 3 command and fetches the rebuild / consistency check
 * status.
 */
static void myrb_get_cc_progress(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf) {
		cb->need_cc_status = true;
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		unsigned int ldev_num = rbld_buf->ldev_num;
		unsigned int ldev_size = rbld_buf->ldev_size;
		unsigned int blocks_done =
			ldev_size - rbld_buf->blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  ldev_num, 0);
		if (sdev) {
			sdev_printk(KERN_INFO, sdev,
				    "Consistency Check in Progress: %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (ldev_size >> 7));
			scsi_device_put(sdev);
		}
	}
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
}

/**
 * myrb_bgi_control - updates background initialisation status
 *
 * Executes a type 3B command and updates the background initialisation status.
 */
static void myrb_bgi_control(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_bgi_status *bgi, *last_bgi;
	dma_addr_t bgi_addr;
	struct scsi_device *sdev = NULL;
	unsigned short status;

	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
				 &bgi_addr, GFP_KERNEL);
	if (!bgi) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate bgi memory\n");
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3B.id = MYRB_DCMD_TAG;
	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
	mbox->type3B.optype = 0x20;
	mbox->type3B.addr = bgi_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	last_bgi = &cb->bgi_status;
	sdev = scsi_device_lookup(cb->host,
				  myrb_logical_channel(cb->host),
				  bgi->ldev_num, 0);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		switch (bgi->status) {
		case MYRB_BGI_INVALID:
			break;
		case MYRB_BGI_STARTED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Started\n");
			break;
		case MYRB_BGI_INPROGRESS:
			if (!sdev)
				break;
			if (bgi->blocks_done == last_bgi->blocks_done &&
			    bgi->ldev_num == last_bgi->ldev_num)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization in Progress: %d%% completed\n",
				    (100 * (bgi->blocks_done >> 7))
				    / (bgi->ldev_size >> 7));
			break;
		case MYRB_BGI_SUSPENDED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Suspended\n");
			break;
		case MYRB_BGI_CANCELLED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Cancelled\n");
			break;
		}
		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
		break;
	case MYRB_STATUS_BGI_SUCCESS:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Completed Successfully\n");
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	case MYRB_STATUS_BGI_ABORTED:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Aborted\n");
		fallthrough;
	case MYRB_STATUS_NO_BGI_INPROGRESS:
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	}
	if (sdev)
		scsi_device_put(sdev);
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
			  bgi, bgi_addr);
}

/**
 * myrb_hba_enquiry - updates the controller status
 *
 * Executes a DAC_V1_Enquiry command and updates the controller status.
 *
 * Return: command status
 */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
	struct myrb_enquiry old, *new;
	unsigned short status;

	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	new = cb->enquiry;
	if (new->ldev_count > old.ldev_count) {
		int ldev_num = old.ldev_count - 1;

		while (++ldev_num < new->ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d Now Exists\n",
				     ldev_num);
	}
	if (new->ldev_count < old.ldev_count) {
		int ldev_num = new->ldev_count - 1;

		while (++ldev_num < old.ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d No Longer Exists\n",
				     ldev_num);
	}
	if (new->status.deferred != old.status.deferred)
		shost_printk(KERN_CRIT, cb->host,
			     "Deferred Write Error Flag is now %s\n",
			     (new->status.deferred ? "TRUE" : "FALSE"));
	if (new->ev_seq != old.ev_seq) {
		cb->new_ev_seq = new->ev_seq;
		cb->need_err_info = true;
		shost_printk(KERN_INFO, cb->host,
			     "Event log %d/%d (%d/%d) available\n",
			     cb->old_ev_seq, cb->new_ev_seq,
			     old.ev_seq, new->ev_seq);
	}
	if ((new->ldev_critical > 0 &&
	     new->ldev_critical != old.ldev_critical) ||
	    (new->ldev_offline > 0 &&
	     new->ldev_offline != old.ldev_offline) ||
	    (new->ldev_count != old.ldev_count)) {
		shost_printk(KERN_INFO, cb->host,
			     "Logical drive count changed (%d/%d/%d)\n",
			     new->ldev_critical,
			     new->ldev_offline,
			     new->ldev_count);
		cb->need_ldev_info = true;
	}
	if (new->pdev_dead > 0 ||
	    new->pdev_dead != old.pdev_dead ||
	    time_after_eq(jiffies, cb->secondary_monitor_time
			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
		cb->need_bgi_status = cb->bgi_status_supported;
		cb->secondary_monitor_time = jiffies;
	}
	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
		cb->need_rbld = true;
		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
	}
	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
		switch (new->rbld) {
		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed Successfully\n");
			break;
		case MYRB_STDBY_RBLD_IN_PROGRESS:
		case MYRB_BG_RBLD_IN_PROGRESS:
			break;
		case MYRB_BG_CHECK_IN_PROGRESS:
			cb->need_cc_status = true;
			break;
		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed with Error\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Physical Device Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Logical Drive Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Other Causes\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Successfully Terminated\n");
			break;
		}
	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
		cb->need_cc_status = true;

	return MYRB_STATUS_SUCCESS;
}

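/*
 * myrb_hba_enquiry() only records what changed; the actual follow-up
 * work (fetching the event log, error table, logical device info,
 * rebuild and consistency-check progress) is deferred to the monitor
 * work item via the need_* flags set above.
 */
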
/**
 * myrb_set_pdev_state - sets the device state for a physical device
 *
 * Return: command status
 */
static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
		struct scsi_device *sdev, enum myrb_devstate state)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.state = state & 0x1F;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);

	return status;
}

/**
 * myrb_enable_mmio - enables the Memory Mailbox Interface
 *
 * PD and P controller types have no memory mailbox, but still need the
 * other dma mapped memory.
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
{
	void __iomem *base = cb->io_base;
	struct pci_dev *pdev = cb->pdev;
	size_t err_table_size;
	size_t ldev_info_size;
	union myrb_cmd_mbox *cmd_mbox_mem;
	struct myrb_stat_mbox *stat_mbox_mem;
	union myrb_cmd_mbox mbox;
	unsigned short status;

	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "DMA mask out of range\n");
		return false;
	}

	cb->enquiry = dma_alloc_coherent(&pdev->dev,
					 sizeof(struct myrb_enquiry),
					 &cb->enquiry_addr, GFP_KERNEL);
	if (!cb->enquiry)
		return false;

	err_table_size = sizeof(struct myrb_error_entry) *
		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
					   &cb->err_table_addr, GFP_KERNEL);
	if (!cb->err_table)
		return false;

	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
					       &cb->ldev_info_addr, GFP_KERNEL);
	if (!cb->ldev_info_buf)
		return false;

	/*
	 * Skip mailbox initialisation for PD and P Controllers
	 */
	if (!mmio_init_fn)
		return true;

	/* These are the base addresses for the command memory mailbox array */
	cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
						cb->cmd_mbox_size,
						&cb->cmd_mbox_addr,
						GFP_KERNEL);
	if (!cb->first_cmd_mbox)
		return false;

	cmd_mbox_mem = cb->first_cmd_mbox;
	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
	cb->last_cmd_mbox = cmd_mbox_mem;
	cb->next_cmd_mbox = cb->first_cmd_mbox;
	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;

	/* These are the base addresses for the status memory mailbox array */
	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
		sizeof(struct myrb_stat_mbox);
	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
						 cb->stat_mbox_size,
						 &cb->stat_mbox_addr,
						 GFP_KERNEL);
	if (!cb->first_stat_mbox)
		return false;

	stat_mbox_mem = cb->first_stat_mbox;
	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
	cb->last_stat_mbox = stat_mbox_mem;
	cb->next_stat_mbox = cb->first_stat_mbox;

	/* Enable the Memory Mailbox Interface. */
	cb->dual_mode_interface = true;
	mbox.typeX.opcode = 0x2B;
	mbox.typeX.id = 0;
	mbox.typeX.opcode2 = 0x14;
	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;

	status = mmio_init_fn(pdev, base, &mbox);
	if (status != MYRB_STATUS_SUCCESS) {
		cb->dual_mode_interface = false;
		mbox.typeX.opcode2 = 0x10;
		status = mmio_init_fn(pdev, base, &mbox);
		if (status != MYRB_STATUS_SUCCESS) {
			dev_err(&pdev->dev,
				"Failed to enable mailbox, status %02X\n",
				status);
			return false;
		}
	}
	return true;
}

/**
 * myrb_get_hba_config - reads the configuration information
 *
 * Reads the configuration information from the controller and
 * initializes the controller structure.
 *
 * Return: 0 on success, errno otherwise
 */
static int myrb_get_hba_config(struct myrb_hba *cb)
{
	struct myrb_enquiry2 *enquiry2;
	dma_addr_t enquiry2_addr;
	struct myrb_config2 *config2;
	dma_addr_t config2_addr;
	struct Scsi_Host *shost = cb->host;
	struct pci_dev *pdev = cb->pdev;
	int pchan_max = 0, pchan_cur = 0;
	unsigned short status;
	int ret = -ENODEV, memsize = 0;

	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				      &enquiry2_addr, GFP_KERNEL);
	if (!enquiry2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 enquiry2 memory\n");
		return -ENOMEM;
	}
	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
				     &config2_addr, GFP_KERNEL);
	if (!config2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 config2 memory\n");
		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				  enquiry2, enquiry2_addr);
		return -ENOMEM;
	}
	mutex_lock(&cb->dma_mutex);
	status = myrb_hba_enquiry(cb);
	mutex_unlock(&cb->dma_mutex);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry2\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue ReadConfig2\n");
		goto out_free;
	}

	status = myrb_get_ldev_info(cb);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to get logical drive information\n");
		goto out_free;
	}

	/*
	 * Initialize the Controller Model Name and Full Model Name fields.
	 */
	switch (enquiry2->hw.sub_model) {
	case DAC960_V1_P_PD_PU:
		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
			strcpy(cb->model_name, "DAC960PU");
		else
			strcpy(cb->model_name, "DAC960PD");
		break;
	case DAC960_V1_PL:
		strcpy(cb->model_name, "DAC960PL");
		break;
	case DAC960_V1_PG:
		strcpy(cb->model_name, "DAC960PG");
		break;
	case DAC960_V1_PJ:
		strcpy(cb->model_name, "DAC960PJ");
		break;
	case DAC960_V1_PR:
		strcpy(cb->model_name, "DAC960PR");
		break;
	case DAC960_V1_PT:
		strcpy(cb->model_name, "DAC960PT");
		break;
	case DAC960_V1_PTL0:
		strcpy(cb->model_name, "DAC960PTL0");
		break;
	case DAC960_V1_PRL:
		strcpy(cb->model_name, "DAC960PRL");
		break;
	case DAC960_V1_PTL1:
		strcpy(cb->model_name, "DAC960PTL1");
		break;
	case DAC960_V1_1164P:
		strcpy(cb->model_name, "eXtremeRAID 1100");
		break;
	default:
		shost_printk(KERN_WARNING, cb->host,
			     "Unknown Model %X\n",
			     enquiry2->hw.sub_model);
		goto out;
	}
	/*
	 * Initialize the Controller Firmware Version field and verify that it
	 * is a supported firmware version.
	 * The supported firmware versions are:
	 *
	 * DAC1164P		    5.06 and above
	 * DAC960PTL/PRL/PJ/PG	    4.06 and above
	 * DAC960PU/PD/PL	    3.51 and above
	 * DAC960PU/PD/PL/P	    2.73 and above
	 */
#if defined(CONFIG_ALPHA)
	/*
	 * DEC Alpha machines were often equipped with DAC960 cards that were
	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
	 * the last custom FW revision to be released by DEC for these older
	 * controllers, appears to work quite well with this driver.
	 *
	 * Cards tested successfully were several versions each of the PD and
	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
	 * back of the board, of:
	 *
	 * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
	 *	   or D040349 (3-channel)
	 * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
	 *	   or D040397 (3-channel)
	 */
# define FIRMWARE_27X	"2.70"
#else
# define FIRMWARE_27X	"2.73"
#endif

	if (enquiry2->fw.major_version == 0) {
		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
		enquiry2->fw.firmware_type = '0';
		enquiry2->fw.turn_id = 0;
	}
	sprintf(cb->fw_version, "%d.%02d-%c-%02d",
		enquiry2->fw.major_version,
		enquiry2->fw.minor_version,
		enquiry2->fw.firmware_type,
		enquiry2->fw.turn_id);
	if (!((enquiry2->fw.major_version == 5 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 4 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 3 &&
	       enquiry2->fw.minor_version >= 51) ||
	      (enquiry2->fw.major_version == 2 &&
	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
		shost_printk(KERN_WARNING, cb->host,
			     "Firmware Version '%s' unsupported\n",
			     cb->fw_version);
		goto out;
	}
	/*
	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
	 * Enclosure Management Enabled fields.
	 */
	switch (enquiry2->hw.model) {
	case MYRB_5_CHANNEL_BOARD:
		pchan_max = 5;
		break;
	case MYRB_3_CHANNEL_BOARD:
	case MYRB_3_CHANNEL_ASIC_DAC:
		pchan_max = 3;
		break;
	case MYRB_2_CHANNEL_BOARD:
		pchan_max = 2;
		break;
	default:
		pchan_max = enquiry2->cfg_chan;
		break;
	}
	pchan_cur = enquiry2->cur_chan;
	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
		cb->bus_width = 32;
	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
		cb->bus_width = 16;
	else
		cb->bus_width = 8;
	cb->ldev_block_size = enquiry2->ldev_block_size;
	shost->max_channel = pchan_cur;
	shost->max_id = enquiry2->max_targets;
	memsize = enquiry2->mem_size >> 20;
	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most one less than the
	 * Controller Queue Depth to allow for an automatic drive
	 * rebuild operation.
	 */
	shost->can_queue = cb->enquiry->max_tcq;
	if (shost->can_queue < 3)
		shost->can_queue = enquiry2->max_cmds;
	if (shost->can_queue < 3)
		/* Play safe and disable TCQ */
		shost->can_queue = 1;

	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
	shost->max_sectors = enquiry2->max_sectors;
	shost->sg_tablesize = enquiry2->max_sge;
	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
	/*
	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
	 */
	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	/* Assume 255/63 translation */
	cb->ldev_geom_heads = 255;
	cb->ldev_geom_sectors = 63;
	if (config2->drive_geometry) {
		cb->ldev_geom_heads = 128;
		cb->ldev_geom_sectors = 32;
	}

	/*
	 * Initialize the Background Initialization Status.
	 */
	if ((cb->fw_version[0] == '4' &&
	     strcmp(cb->fw_version, "4.08") >= 0) ||
	    (cb->fw_version[0] == '5' &&
	     strcmp(cb->fw_version, "5.08") >= 0)) {
		cb->bgi_status_supported = true;
		myrb_bgi_control(cb);
	}
	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
	ret = 0;

out:
	shost_printk(KERN_INFO, cb->host,
		     "Configuring %s PCI RAID Controller\n", cb->model_name);
	shost_printk(KERN_INFO, cb->host,
		     "  Firmware Version: %s, Memory Size: %dMB\n",
		     cb->fw_version, memsize);
	if (cb->io_addr == 0)
		shost_printk(KERN_INFO, cb->host,
			     "  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
			     (unsigned long)cb->pci_addr, cb->irq);
	else
		shost_printk(KERN_INFO, cb->host,
			     "  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
			     (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
			     cb->irq);
	shost_printk(KERN_INFO, cb->host,
		     "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		     cb->host->can_queue, cb->host->max_sectors);
	shost_printk(KERN_INFO, cb->host,
		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     cb->host->can_queue, cb->host->sg_tablesize,
		     MYRB_SCATTER_GATHER_LIMIT);
	shost_printk(KERN_INFO, cb->host,
		     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
		     cb->stripe_size, cb->segment_size,
		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
		     cb->safte_enabled ?
		     "  SAF-TE Enclosure Management Enabled" : "");
	shost_printk(KERN_INFO, cb->host,
		     "  Physical: %d/%d channels %d/%d/%d devices\n",
		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
		     cb->host->max_id);

	shost_printk(KERN_INFO, cb->host,
		     "  Logical: 1/1 channels, %d/%d disks\n",
		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
			  enquiry2, enquiry2_addr);
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
			  config2, config2_addr);

	return ret;
}

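/*
 * A note on the sizing above: stripe_size and segment_size are reported
 * by the firmware in units of block_factor blocks and converted to
 * kilobytes via the (10 - MYRB_BLKSIZE_BITS) shift. can_queue is clamped
 * to MYRB_CMD_MBOX_COUNT - 2, presumably so the command mailbox ring can
 * never fill completely and internally issued (DCMD/MCMD) commands can
 * still be posted.
 */
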
/**
 * myrb_unmap - unmaps controller structures
 */
static void myrb_unmap(struct myrb_hba *cb)
{
	if (cb->ldev_info_buf) {
		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
			MYRB_MAX_LDEVS;
		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
				  cb->ldev_info_buf, cb->ldev_info_addr);
		cb->ldev_info_buf = NULL;
	}
	if (cb->err_table) {
		size_t err_table_size = sizeof(struct myrb_error_entry) *
			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
		dma_free_coherent(&cb->pdev->dev, err_table_size,
				  cb->err_table, cb->err_table_addr);
		cb->err_table = NULL;
	}
	if (cb->enquiry) {
		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
				  cb->enquiry, cb->enquiry_addr);
		cb->enquiry = NULL;
	}
	if (cb->first_stat_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
				  cb->first_stat_mbox, cb->stat_mbox_addr);
		cb->first_stat_mbox = NULL;
	}
	if (cb->first_cmd_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
		cb->first_cmd_mbox = NULL;
	}
}

/**
 * myrb_cleanup - cleanup controller structures
 */
static void myrb_cleanup(struct myrb_hba *cb)
{
	struct pci_dev *pdev = cb->pdev;

	/* Free the memory mailbox, status, and related structures */
	myrb_unmap(cb);

	if (cb->mmio_base) {
		cb->disable_intr(cb->io_base);
		iounmap(cb->mmio_base);
	}
	if (cb->irq)
		free_irq(cb->irq, cb);
	if (cb->io_addr)
		release_region(cb->io_addr, 0x80);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	scsi_host_put(cb->host);
}

static int myrb_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	struct myrb_hba *cb = shost_priv(shost);

	cb->reset(cb->io_base);
	return SUCCESS;
}

static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_dcdb *dcdb;
	dma_addr_t dcdb_addr;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	int nsge;

	myrb_reset_cmd(cmd_blk);
	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
	if (!dcdb)
		return SCSI_MLQUEUE_HOST_BUSY;
	nsge = scsi_dma_map(scmd);
	if (nsge > 1) {
		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
		scmd->result = (DID_ERROR << 16);
		scmd->scsi_done(scmd);
		return 0;
	}

	mbox->type3.opcode = MYRB_CMD_DCDB;
	mbox->type3.id = scmd->request->tag + 3;
	mbox->type3.addr = dcdb_addr;
	dcdb->channel = sdev->channel;
	dcdb->target = sdev->id;
	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
		break;
	case DMA_TO_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
		break;
	case DMA_FROM_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
		break;
	default:
		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
		break;
	}
	dcdb->early_status = false;
	if (scmd->request->timeout <= 10)
		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
	else if (scmd->request->timeout <= 60)
		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
	else if (scmd->request->timeout <= 600)
		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
	else
		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
	dcdb->no_autosense = false;
	dcdb->allow_disconnect = true;
	sgl = scsi_sglist(scmd);
	dcdb->dma_addr = sg_dma_address(sgl);
	if (sg_dma_len(sgl) > USHRT_MAX) {
		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
	} else {
		dcdb->xfer_len_lo = sg_dma_len(sgl);
		dcdb->xfer_len_hi4 = 0;
	}
	dcdb->cdb_len = scmd->cmd_len;
	dcdb->sense_len = sizeof(dcdb->sense);
	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	return 0;
}

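/*
 * DCDB pass-through commands carry a single data pointer, so any request
 * that maps to more than one scatter/gather element is failed with
 * DID_ERROR above rather than being split across multiple DCDBs.
 */
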
static void myrb_inquiry(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	unsigned char inq[36] = {
		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20,
	};

	if (cb->bus_width > 16)
		inq[7] |= 1 << 6;
	if (cb->bus_width > 8)
		inq[7] |= 1 << 5;
	memcpy(&inq[16], cb->model_name, 16);
	memcpy(&inq[32], cb->fw_version, 1);
	memcpy(&inq[33], &cb->fw_version[2], 2);
	memcpy(&inq[35], &cb->fw_version[7], 1);

	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
}

static void
myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char modes[32], *mode_pg;
	bool dbd;
	size_t mode_len;

	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
	if (dbd) {
		mode_len = 24;
		mode_pg = &modes[4];
	} else {
		mode_len = 32;
		mode_pg = &modes[12];
	}
	memset(modes, 0, sizeof(modes));
	modes[0] = mode_len - 1;
	if (!dbd) {
		unsigned char *block_desc = &modes[4];

		modes[3] = 8;
		put_unaligned_be32(ldev_info->size, &block_desc[0]);
		put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
	}
	mode_pg[0] = 0x08;
	mode_pg[1] = 0x12;
	if (ldev_info->wb_enabled)
		mode_pg[2] |= 0x04;
	if (cb->segment_size) {
		mode_pg[2] |= 0x08;
		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
	}

	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}

static void myrb_request_sense(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	scsi_build_sense_buffer(0, scmd->sense_buffer,
				NO_SENSE, 0, 0);
	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE);
}

static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char data[8];

	dev_dbg(&scmd->device->sdev_gendev,
		"Capacity %u, blocksize %u\n",
		ldev_info->size, cb->ldev_block_size);
	put_unaligned_be32(ldev_info->size - 1, &data[0]);
	put_unaligned_be32(cb->ldev_block_size, &data[4]);
	scsi_sg_copy_from_buffer(scmd, data, 8);
}

static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_ldev_info *ldev_info;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	u64 lba;
	u32 block_cnt;
	int nsge;

	ldev_info = sdev->hostdata;
	if (ldev_info->state != MYRB_DEVICE_ONLINE &&
	    ldev_info->state != MYRB_DEVICE_WO) {
		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
			sdev->id, ldev_info ? ldev_info->state : 0xff);
		scmd->result = (DID_BAD_TARGET << 16);
		scmd->scsi_done(scmd);
		return 0;
	}
	switch (scmd->cmnd[0]) {
	case TEST_UNIT_READY:
		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
	case INQUIRY:
		if (scmd->cmnd[1] & 1) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
		} else {
			myrb_inquiry(cb, scmd);
			scmd->result = (DID_OK << 16);
		}
		scmd->scsi_done(scmd);
		return 0;
	case SYNCHRONIZE_CACHE:
		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
	case MODE_SENSE:
		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
		    (scmd->cmnd[2] & 0x3F) != 0x08) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
		} else {
			myrb_mode_sense(cb, scmd, ldev_info);
			scmd->result = (DID_OK << 16);
		}
		scmd->scsi_done(scmd);
		return 0;
	case READ_CAPACITY:
		if ((scmd->cmnd[1] & 1) ||
		    (scmd->cmnd[8] & 1)) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		if (lba) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		myrb_read_capacity(cb, scmd, ldev_info);
		scmd->scsi_done(scmd);
		return 0;
	case REQUEST_SENSE:
		myrb_request_sense(cb, scmd);
		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
	case SEND_DIAGNOSTIC:
		if (scmd->cmnd[1] != 0x04) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
		} else {
			/* Assume good status */
			scmd->result = (DID_OK << 16);
		}
		scmd->scsi_done(scmd);
		return 0;
	case READ_6:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						DATA_PROTECT, 0x21, 0x06);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_6:
		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
		       (scmd->cmnd[2] << 8) |
		       scmd->cmnd[3]);
		block_cnt = scmd->cmnd[4];
		break;
	case READ_10:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						DATA_PROTECT, 0x21, 0x06);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_10:
	case VERIFY:		/* 0x2F */
	case WRITE_VERIFY:	/* 0x2E */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case READ_12:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						DATA_PROTECT, 0x21, 0x06);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_12:
	case VERIFY_12:		/* 0xAF */
	case WRITE_VERIFY_12:	/* 0xAE */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	default:
		/* Illegal request, invalid opcode */
		scsi_build_sense_buffer(0, scmd->sense_buffer,
					ILLEGAL_REQUEST, 0x20, 0);
		scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
		scmd->scsi_done(scmd);
		return 0;
	}

	myrb_reset_cmd(cmd_blk);
	mbox->type5.id = scmd->request->tag + 3;
	if (scmd->sc_data_direction == DMA_NONE)
		goto submit;
	nsge = scsi_dma_map(scmd);
	if (nsge == 1) {
		sgl = scsi_sglist(scmd);
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = (u32)sg_dma_address(sgl);
	} else {
		struct myrb_sge *hw_sgl;
		dma_addr_t hw_sgl_addr;
		int i;

		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
		if (!hw_sgl)
			return SCSI_MLQUEUE_HOST_BUSY;

		cmd_blk->sgl = hw_sgl;
		cmd_blk->sgl_addr = hw_sgl_addr;

		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ_SG;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE_SG;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = hw_sgl_addr;
		mbox->type5.sg_count = nsge;

		scsi_for_each_sg(scmd, sgl, nsge, i) {
			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
			hw_sgl++;
		}
	}
submit:
	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	return 0;
}

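/*
 * Read/write commands that map to a single DMA segment use the plain
 * READ/WRITE opcodes with the buffer address in the mailbox itself;
 * multi-segment requests switch to the scatter/gather variants and hand
 * the controller a hardware SG list allocated from cb->sg_pool, which
 * the completion path is expected to return to the pool.
 */
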
static int myrb_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;

	if (sdev->channel > myrb_logical_channel(shost)) {
		scmd->result = (DID_BAD_TARGET << 16);
		scmd->scsi_done(scmd);
		return 0;
	}
	if (sdev->channel == myrb_logical_channel(shost))
		return myrb_ldev_queuecommand(shost, scmd);

	return myrb_pthru_queuecommand(shost, scmd);
}

static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
{
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_ldev_info *ldev_info;
	unsigned short ldev_num = sdev->id;
	enum raid_level level;

	ldev_info = cb->ldev_info_buf + ldev_num;
	if (!ldev_info)
		return -ENXIO;

	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
	if (!sdev->hostdata)
		return -ENOMEM;
	dev_dbg(&sdev->sdev_gendev,
		"slave alloc ldev %d state %x\n",
		ldev_num, ldev_info->state);
	memcpy(sdev->hostdata, ldev_info,
	       sizeof(*ldev_info));
	switch (ldev_info->raid_level) {
	case MYRB_RAID_LEVEL0:
		level = RAID_LEVEL_LINEAR;
		break;
	case MYRB_RAID_LEVEL1:
		level = RAID_LEVEL_1;
		break;
	case MYRB_RAID_LEVEL3:
		level = RAID_LEVEL_3;
		break;
	case MYRB_RAID_LEVEL5:
		level = RAID_LEVEL_5;
		break;
	case MYRB_RAID_LEVEL6:
		level = RAID_LEVEL_6;
		break;
	case MYRB_RAID_JBOD:
		level = RAID_LEVEL_JBOD;
		break;
	default:
		level = RAID_LEVEL_UNKNOWN;
		break;
	}
	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
	return 0;
}

static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
{
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_pdev_state *pdev_info;
	unsigned short status;

	if (sdev->id > MYRB_MAX_TARGETS)
		return -ENXIO;

	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
	if (!pdev_info)
		return -ENOMEM;

	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
				  sdev, pdev_info);
	if (status != MYRB_STATUS_SUCCESS) {
		dev_dbg(&sdev->sdev_gendev,
			"Failed to get device state, status %x\n",
			status);
		kfree(pdev_info);
		return -ENXIO;
	}
	if (!pdev_info->present) {
		dev_dbg(&sdev->sdev_gendev,
			"device not present, skip\n");
		kfree(pdev_info);
		return -ENXIO;
	}
	dev_dbg(&sdev->sdev_gendev,
		"slave alloc pdev %d:%d state %x\n",
		sdev->channel, sdev->id, pdev_info->state);
	sdev->hostdata = pdev_info;

	return 0;
}

static int myrb_slave_alloc(struct scsi_device *sdev)
{
	if (sdev->channel > myrb_logical_channel(sdev->host))
		return -ENXIO;

	if (sdev->channel == myrb_logical_channel(sdev->host))
		return myrb_ldev_slave_alloc(sdev);

	return myrb_pdev_slave_alloc(sdev);
}

static int myrb_slave_configure(struct scsi_device *sdev)
{
	struct myrb_ldev_info *ldev_info;

	if (sdev->channel > myrb_logical_channel(sdev->host))
		return -ENXIO;

	if (sdev->channel < myrb_logical_channel(sdev->host)) {
		sdev->no_uld_attach = 1;
		return 0;
	}

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	if (ldev_info->state != MYRB_DEVICE_ONLINE)
		sdev_printk(KERN_INFO, sdev,
			    "Logical drive is %s\n",
			    myrb_devstate_name(ldev_info->state));

	sdev->tagged_supported = 1;
	return 0;
}

static void myrb_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
}

static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		sector_t capacity, int geom[])
{
	struct myrb_hba *cb = shost_priv(sdev->host);

	geom[0] = cb->ldev_geom_heads;
	geom[1] = cb->ldev_geom_sectors;
	geom[2] = sector_div(capacity, geom[0] * geom[1]);

	return 0;
}

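/*
 * The BIOS geometry reported here follows the controller configuration
 * read in myrb_get_hba_config(): 255 heads / 63 sectors by default, or
 * 128/32 when CONFIG2 selects the alternate drive geometry translation.
 */
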
static ssize_t raid_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	int ret;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel == myrb_logical_channel(sdev->host)) {
		struct myrb_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		name = myrb_devstate_name(ldev_info->state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       ldev_info->state);
	} else {
		struct myrb_pdev_state *pdev_info = sdev->hostdata;
		unsigned short status;
		const char *name;

		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
					  sdev, pdev_info);
		if (status != MYRB_STATUS_SUCCESS)
			sdev_printk(KERN_INFO, sdev,
				    "Failed to get device state, status %x\n",
				    status);

		if (!pdev_info->present)
			name = "Removed";
		else
			name = myrb_devstate_name(pdev_info->state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       pdev_info->state);
	}
	return ret;
}

static ssize_t raid_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_pdev_state *pdev_info;
	enum myrb_devstate new_state;
	unsigned short status;

	if (!strncmp(buf, "kill", 4) ||
	    !strncmp(buf, "offline", 7))
		new_state = MYRB_DEVICE_DEAD;
	else if (!strncmp(buf, "online", 6))
		new_state = MYRB_DEVICE_ONLINE;
	else if (!strncmp(buf, "standby", 7))
		new_state = MYRB_DEVICE_STANDBY;
	else
		return -EINVAL;

	pdev_info = sdev->hostdata;
	if (!pdev_info) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - no physical device information\n");
		return -ENXIO;
	}
	if (!pdev_info->present) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - device not present\n");
		return -ENXIO;
	}

	if (pdev_info->state == new_state)
		return count;

	status = myrb_set_pdev_state(cb, sdev, new_state);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		break;
	case MYRB_STATUS_START_DEVICE_FAILED:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Unable to Start Device\n");
		count = -EAGAIN;
		break;
	case MYRB_STATUS_NO_DEVICE:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - No Device at Address\n");
		count = -ENODEV;
		break;
	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Invalid Channel or Target or Modifier\n");
		count = -EINVAL;
		break;
	case MYRB_STATUS_CHANNEL_BUSY:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Channel Busy\n");
		count = -EBUSY;
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Unexpected Status %04X\n", status);
		count = -EIO;
		break;
	}
	return count;
}
static DEVICE_ATTR_RW(raid_state);

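/*
 * Example (hypothetical device path): a physical drive can be taken
 * offline or brought back from user space through this attribute, e.g.:
 *
 *   echo offline > /sys/class/scsi_device/1:0:3:0/device/raid_state
 *   echo online  > /sys/class/scsi_device/1:0:3:0/device/raid_state
 *
 * Writes other than "kill"/"offline", "online", or "standby" are
 * rejected with -EINVAL.
 */
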
static ssize_t raid_level_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	if (sdev->channel == myrb_logical_channel(sdev->host)) {
		struct myrb_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		if (!ldev_info)
			return -ENXIO;

		name = myrb_raidlevel_name(ldev_info->raid_level);
		if (!name)
			return snprintf(buf, 32, "Invalid (%02X)\n",
					ldev_info->state);
		return snprintf(buf, 32, "%s\n", name);
	}
	return snprintf(buf, 32, "Physical Drive\n");
}
static DEVICE_ATTR_RO(raid_level);

static ssize_t rebuild_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	if (sdev->channel < myrb_logical_channel(sdev->host))
		return snprintf(buf, 32, "physical device - not rebuilding\n");

	status = myrb_get_rbld_progress(cb, &rbld_buf);

	if (rbld_buf.ldev_num != sdev->id ||
	    status != MYRB_STATUS_SUCCESS)
		return snprintf(buf, 32, "not rebuilding\n");

	return snprintf(buf, 32, "rebuilding block %u of %u\n",
			rbld_buf.ldev_size - rbld_buf.blocks_left,
			rbld_buf.ldev_size);
}

static ssize_t rebuild_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_cmdblk *cmd_blk;
	union myrb_cmd_mbox *mbox;
	unsigned short status;
	int rc, start;
	const char *msg;

	rc = kstrtoint(buf, 0, &start);
	if (rc)
		return rc;

	if (sdev->channel >= myrb_logical_channel(sdev->host))
		return -ENXIO;

	status = myrb_get_rbld_progress(cb, NULL);
	if (start) {
		if (status == MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Initiated; already in progress\n");
			return -EALREADY;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
		mbox->type3D.id = MYRB_DCMD_TAG;
		mbox->type3D.channel = sdev->channel;
		mbox->type3D.target = sdev->id;
		status = myrb_exec_cmd(cb, cmd_blk);
		mutex_unlock(&cb->dcmd_mutex);
	} else {
		struct pci_dev *pdev = cb->pdev;
		unsigned char *rate;
		dma_addr_t rate_addr;

		if (status != MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Cancelled; not in progress\n");
			return 0;
		}

		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
					  &rate_addr, GFP_KERNEL);
		if (rate == NULL) {
			sdev_printk(KERN_INFO, sdev,
				    "Cancellation of Rebuild Failed - Out of Memory\n");
			return -ENOMEM;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
		mbox->type3R.id = MYRB_DCMD_TAG;
		mbox->type3R.rbld_rate = 0xFF;
		mbox->type3R.addr = rate_addr;
		status = myrb_exec_cmd(cb, cmd_blk);
		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
		mutex_unlock(&cb->dcmd_mutex);
	}
	if (status == MYRB_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
			    start ? "Initiated" : "Cancelled");
		return count;
	}
	if (!start) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Cancelled, status 0x%x\n",
			    status);
		return -EIO;
	}

	switch (status) {
	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
		msg = "Attempt to Rebuild Online or Unresponsive Drive";
		break;
	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
		msg = "New Disk Failed During Rebuild";
		break;
	case MYRB_STATUS_INVALID_ADDRESS:
		msg = "Invalid Device Address";
		break;
	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
		msg = "Already in Progress";
		break;
	default:
		msg = NULL;
		break;
	}
	if (msg)
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed - %s\n", msg);
	else
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed, status 0x%x\n", status);

	return -EIO;
}
static DEVICE_ATTR_RW(rebuild);

static ssize_t consistency_check_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_rbld_progress rbld_buf;
	struct myrb_cmdblk *cmd_blk;
	union myrb_cmd_mbox *mbox;
	unsigned short ldev_num = 0xFFFF;
	unsigned short status;
	int rc, start;
	const char *msg;

	rc = kstrtoint(buf, 0, &start);
	if (rc)
		return rc;

	if (sdev->channel < myrb_logical_channel(sdev->host))
		return -ENXIO;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (start) {
		if (status == MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Check Consistency Not Initiated; already in progress\n");
			return -EALREADY;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
		mbox->type3C.id = MYRB_DCMD_TAG;
		mbox->type3C.ldev_num = sdev->id;
		mbox->type3C.auto_restore = true;

		status = myrb_exec_cmd(cb, cmd_blk);
		mutex_unlock(&cb->dcmd_mutex);
	} else {
		struct pci_dev *pdev = cb->pdev;
		unsigned char *rate;
		dma_addr_t rate_addr;

		if (ldev_num != sdev->id) {
			sdev_printk(KERN_INFO, sdev,
				    "Check Consistency Not Cancelled; not in progress\n");
			return 0;
		}
		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
					  &rate_addr, GFP_KERNEL);
		if (rate == NULL) {
			sdev_printk(KERN_INFO, sdev,
				    "Cancellation of Check Consistency Failed - Out of Memory\n");
			return -ENOMEM;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
		mbox->type3R.id = MYRB_DCMD_TAG;
		mbox->type3R.rbld_rate = 0xFF;
		mbox->type3R.addr = rate_addr;
		status = myrb_exec_cmd(cb, cmd_blk);
		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
		mutex_unlock(&cb->dcmd_mutex);
	}
	if (status == MYRB_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
			    start ? "Initiated" : "Cancelled");
		return count;
	}
	if (!start) {
		sdev_printk(KERN_INFO, sdev,
			    "Check Consistency Not Cancelled, status 0x%x\n",
			    status);
		return -EIO;
	}

	switch (status) {
	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
		msg = "Dependent Physical Device is DEAD";
		break;
	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
		msg = "New Disk Failed During Rebuild";
		break;
	case MYRB_STATUS_INVALID_ADDRESS:
		msg = "Invalid or Nonredundant Logical Drive";
		break;
	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
		msg = "Already in Progress";
		break;
	default:
		msg = NULL;
		break;
	}
	if (msg)
		sdev_printk(KERN_INFO, sdev,
			    "Check Consistency Failed - %s\n", msg);
	else
		sdev_printk(KERN_INFO, sdev,
			    "Check Consistency Failed, status 0x%x\n", status);

	return -EIO;
}

static ssize_t consistency_check_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return rebuild_show(dev, attr, buf);
}
static DEVICE_ATTR_RW(consistency_check);

static ssize_t ctlr_num_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);

	return snprintf(buf, 20, "%d\n", cb->ctlr_num);
}
static DEVICE_ATTR_RO(ctlr_num);

static ssize_t firmware_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", cb->fw_version);
}
static DEVICE_ATTR_RO(firmware);

static ssize_t model_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", cb->model_name);
}
static DEVICE_ATTR_RO(model);
static ssize_t flush_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);
	unsigned short status;

	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	if (status == MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_INFO, shost,
			     "Cache Flush Completed\n");
		return count;
	}
	shost_printk(KERN_INFO, shost,
		     "Cache Flush Failed, status %x\n", status);
	return -EIO;
}
static DEVICE_ATTR_WO(flush_cache);
static struct device_attribute *myrb_sdev_attrs[] = {
	&dev_attr_rebuild,
	&dev_attr_consistency_check,
	&dev_attr_raid_state,
	&dev_attr_raid_level,
	NULL,
};

static struct device_attribute *myrb_shost_attrs[] = {
	&dev_attr_ctlr_num,
	&dev_attr_model,
	&dev_attr_firmware,
	&dev_attr_flush_cache,
	NULL,
};
struct scsi_host_template myrb_template = {
	.module = THIS_MODULE,
	.name = "DAC960",
	.proc_name = "myrb",
	.queuecommand = myrb_queuecommand,
	.eh_host_reset_handler = myrb_host_reset,
	.slave_alloc = myrb_slave_alloc,
	.slave_configure = myrb_slave_configure,
	.slave_destroy = myrb_slave_destroy,
	.bios_param = myrb_biosparam,
	.cmd_size = sizeof(struct myrb_cmdblk),
	.shost_attrs = myrb_shost_attrs,
	.sdev_attrs = myrb_sdev_attrs,
	.this_id = -1,
};
/**
 * myrb_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
 */
static int myrb_is_raid(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return sdev->channel == myrb_logical_channel(sdev->host);
}
/**
 * myrb_get_resync - get raid volume resync percent complete
 * @dev: the device struct object
 */
static void myrb_get_resync(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_rbld_progress rbld_buf;
	unsigned int percent_complete = 0;
	unsigned short status;
	unsigned int ldev_size = 0, remaining = 0;

	if (sdev->channel < myrb_logical_channel(sdev->host))
		return;
	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_STATUS_SUCCESS) {
		if (rbld_buf.ldev_num == sdev->id) {
			ldev_size = rbld_buf.ldev_size;
			remaining = rbld_buf.blocks_left;
		}
	}
	if (remaining && ldev_size)
		percent_complete = (ldev_size - remaining) * 100 / ldev_size;
	raid_set_resync(myrb_raid_template, dev, percent_complete);
}
/**
 * myrb_get_state - get raid volume status
 * @dev: the device struct object
 */
static void myrb_get_state(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_ldev_info *ldev_info = sdev->hostdata;
	enum raid_state state = RAID_STATE_UNKNOWN;
	unsigned short status;

	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
		state = RAID_STATE_UNKNOWN;
	else {
		status = myrb_get_rbld_progress(cb, NULL);
		if (status == MYRB_STATUS_SUCCESS)
			state = RAID_STATE_RESYNCING;
		else {
			switch (ldev_info->state) {
			case MYRB_DEVICE_ONLINE:
				state = RAID_STATE_ACTIVE;
				break;
			case MYRB_DEVICE_WO:
			case MYRB_DEVICE_CRITICAL:
				state = RAID_STATE_DEGRADED;
				break;
			default:
				state = RAID_STATE_OFFLINE;
			}
		}
	}
	raid_set_state(myrb_raid_template, dev, state);
}

struct raid_function_template myrb_raid_functions = {
	.cookie = &myrb_template,
	.is_raid = myrb_is_raid,
	.get_resync = myrb_get_resync,
	.get_state = myrb_get_state,
};
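/*
 * Command completion handling: myrb_handle_scsi() translates the
 * controller status of a finished command into a SCSI midlayer result,
 * synthesizing standard sense data (e.g. MEDIUM_ERROR/0x11 for an
 * unrecovered read error) where the firmware only returns a status
 * code, and releases the per-command DCDB and SG allocations.
 */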
static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
		struct scsi_cmnd *scmd)
{
	unsigned short status;

	if (!cmd_blk)
		return;

	scsi_dma_unmap(scmd);

	if (cmd_blk->dcdb) {
		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
			      cmd_blk->dcdb_addr);
		cmd_blk->dcdb = NULL;
	}
	if (cmd_blk->sgl) {
		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
		cmd_blk->sgl = NULL;
		cmd_blk->sgl_addr = 0;
	}
	status = cmd_blk->status;
	switch (status) {
	case MYRB_STATUS_SUCCESS:
	case MYRB_STATUS_DEVICE_BUSY:
		scmd->result = (DID_OK << 16) | status;
		break;
	case MYRB_STATUS_BAD_DATA:
		dev_dbg(&scmd->device->sdev_gendev,
			"Bad Data Encountered\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						MEDIUM_ERROR, 0x11, 0);
		else
			/* Write error */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						MEDIUM_ERROR, 0x0C, 0);
		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
		break;
	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error, auto-reallocation failed */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						MEDIUM_ERROR, 0x11, 0x04);
		else
			/* Write error, auto-reallocation failed */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						MEDIUM_ERROR, 0x0C, 0x02);
		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
		break;
	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
		dev_dbg(&scmd->device->sdev_gendev,
			"Logical Drive Nonexistent or Offline");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
		dev_dbg(&scmd->device->sdev_gendev,
			"Attempt to Access Beyond End of Logical Drive");
		/* Logical block address out of range */
		scsi_build_sense_buffer(0, scmd->sense_buffer,
					NOT_READY, 0x21, 0);
		break;
	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	default:
		scmd_printk(KERN_ERR, scmd,
			    "Unexpected Error Status %04X", status);
		scmd->result = (DID_ERROR << 16);
		break;
	}
	scmd->scsi_done(scmd);
}
static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	if (!cmd_blk)
		return;

	if (cmd_blk->completion) {
		complete(cmd_blk->completion);
		cmd_blk->completion = NULL;
	}
}
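/*
 * Periodic monitoring: the work item below re-queues itself on
 * cb->work_q and services at most one pending housekeeping task per
 * invocation (event log, error table, rebuild/consistency-check
 * progress, background initialization status), falling back to a
 * plain controller enquiry when nothing is flagged. The polling
 * interval is shortened to 10 jiffies while work remains outstanding.
 */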
static void myrb_monitor(struct work_struct *work)
{
	struct myrb_hba *cb = container_of(work,
			struct myrb_hba, monitor_work.work);
	struct Scsi_Host *shost = cb->host;
	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;

	dev_dbg(&shost->shost_gendev, "monitor tick\n");

	if (cb->new_ev_seq > cb->old_ev_seq) {
		int event = cb->old_ev_seq;

		dev_dbg(&shost->shost_gendev,
			"get event log no %d/%d\n",
			cb->new_ev_seq, event);
		myrb_get_event(cb, event);
		cb->old_ev_seq = event + 1;
		interval = 10;
	} else if (cb->need_err_info) {
		cb->need_err_info = false;
		dev_dbg(&shost->shost_gendev, "get error table\n");
		myrb_get_errtable(cb);
		interval = 10;
	} else if (cb->need_rbld && cb->rbld_first) {
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_ldev_info) {
		cb->need_ldev_info = false;
		dev_dbg(&shost->shost_gendev,
			"get logical drive info\n");
		myrb_get_ldev_info(cb);
		interval = 10;
	} else if (cb->need_rbld) {
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_cc_status) {
		cb->need_cc_status = false;
		dev_dbg(&shost->shost_gendev,
			"get consistency check progress\n");
		myrb_get_cc_progress(cb);
		interval = 10;
	} else if (cb->need_bgi_status) {
		cb->need_bgi_status = false;
		dev_dbg(&shost->shost_gendev, "get background init status\n");
		myrb_bgi_control(cb);
		interval = 10;
	} else {
		dev_dbg(&shost->shost_gendev, "new enquiry\n");
		mutex_lock(&cb->dma_mutex);
		myrb_hba_enquiry(cb);
		mutex_unlock(&cb->dma_mutex);
		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
		    cb->need_err_info || cb->need_rbld ||
		    cb->need_ldev_info || cb->need_cc_status ||
		    cb->need_bgi_status) {
			dev_dbg(&shost->shost_gendev,
				"reschedule monitor\n");
			interval = 0;
		}
	}
	if (interval > 1)
		cb->primary_monitor_time = jiffies;
	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
}
/**
 * myrb_err_status - reports controller BIOS messages
 *
 * Controller BIOS messages are passed through the Error Status Register
 * when the driver performs the BIOS handshaking.
 *
 * Return: true for fatal errors and false otherwise.
 */
bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
		unsigned char parm0, unsigned char parm1)
{
	struct pci_dev *pdev = cb->pdev;

	switch (error) {
	case 0x00:
		dev_info(&pdev->dev,
			 "Physical Device %d:%d Not Responding\n",
			 parm1, parm0);
		break;
	case 0x08:
		dev_notice(&pdev->dev, "Spinning Up Drives\n");
		break;
	case 0x30:
		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
		break;
	case 0x60:
		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
		break;
	case 0x70:
		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
		break;
	case 0x90:
		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
			   parm1, parm0);
		break;
	case 0xA0:
		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
		break;
	case 0xB0:
		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
		break;
	case 0xD0:
		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
		break;
	case 0xF0:
		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
		return true;
	default:
		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
			error);
		return true;
	}
	return false;
}
/*
 * Hardware-specific functions
 */

/*
 * DAC960 LA Series Controllers
 */
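/*
 * The LA series is programmed through two byte-wide doorbell
 * registers: commands are posted and acknowledged through the inbound
 * doorbell (IDB) and completion status is signalled through the
 * outbound doorbell (ODB). The helpers below wrap the individual
 * doorbell bits.
 */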
static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_gen_intr(void __iomem *base)
{
	writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}

static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
}

static inline bool DAC960_LA_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_INIT_DONE);
}

static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}

static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}

static inline void DAC960_LA_ack_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_LA_ODB_OFFSET);
}

static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);

	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
}

static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);

	return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
}

static inline void DAC960_LA_enable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}

static inline void DAC960_LA_disable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}

static inline bool DAC960_LA_intr_enabled(void __iomem *base)
{
	unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);

	return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
}

static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}

static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
}

static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_LA_STSID_OFFSET);
}

static inline unsigned short DAC960_LA_read_status(void __iomem *base)
{
	return readw(base + DAC960_LA_STS_OFFSET);
}

static inline bool
DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);

	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_LA_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
	return true;
}
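/*
 * DAC960_LA_mbox_init() posts a single command through the hardware
 * mailbox and polls for its completion. It is only used during
 * controller bring-up, before the memory mailbox interface is
 * enabled; after that, commands are queued through myrb_qcmd().
 */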
static inline unsigned short
DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_LA_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_LA_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_LA_write_hw_mbox(base, mbox);
	DAC960_LA_hw_mbox_new_cmd(base);
	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_LA_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_LA_read_status(base);
	DAC960_LA_ack_hw_mbox_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);

	return status;
}
static int DAC960_LA_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_LA_disable_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_LA_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_LA_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_LA_enable_intr(base);
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_LA_disable_intr;
	cb->reset = DAC960_LA_reset_ctrl;

	return 0;
}
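/*
 * Completed commands are reported through a ring of status mailboxes
 * in host memory. MYRB_DCMD_TAG and MYRB_MCMD_TAG identify internal
 * driver commands; any other id is mapped back to an outstanding SCSI
 * command via scsi_host_find_tag(cb->host, id - 3).
 */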
static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_LA_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrb_handle_cmdblk(cb, cmd_blk);
			else
				myrb_handle_scsi(cb, cmd_blk, scmd);
		}
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}

struct myrb_privdata DAC960_LA_privdata = {
	.hw_init = DAC960_LA_hw_init,
	.irq_handler = DAC960_LA_intr_handler,
	.mmio_size = DAC960_LA_mmio_size,
};
/*
 * DAC960 PG Series Controllers
 */
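/*
 * The PG series follows the same programming model as the LA series,
 * but its doorbell and interrupt mask registers are 32 bits wide, so
 * the accessors below use readl()/writel() instead of byte accesses.
 */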
static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_gen_intr(void __iomem *base)
{
	writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_reset_ctrl(void __iomem *base)
{
	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}

static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_HWMBOX_FULL;
}

static inline bool DAC960_PG_init_in_progress(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
}

static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}

static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}

static inline void DAC960_PG_ack_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_PG_ODB_OFFSET);
}

static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);

	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
}

static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);

	return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
}

static inline void DAC960_PG_enable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}

static inline void DAC960_PG_disable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}

static inline bool DAC960_PG_intr_enabled(void __iomem *base)
{
	unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);

	return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
}

static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}

static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
}

static inline unsigned char
DAC960_PG_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_PG_STSID_OFFSET);
}

static inline unsigned short
DAC960_PG_read_status(void __iomem *base)
{
	return readw(base + DAC960_PG_STS_OFFSET);
}

static inline bool
DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PG_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
	return true;
}
static inline unsigned short
DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_PG_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_PG_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_PG_write_hw_mbox(base, mbox);
	DAC960_PG_hw_mbox_new_cmd(base);
	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_PG_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_PG_read_status(base);
	DAC960_PG_ack_hw_mbox_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);

	return status;
}
static int DAC960_PG_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_PG_disable_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PG_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PG_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PG_enable_intr(base);
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_PG_disable_intr;
	cb->reset = DAC960_PG_reset_ctrl;

	return 0;
}
static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_PG_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrb_handle_cmdblk(cb, cmd_blk);
			else
				myrb_handle_scsi(cb, cmd_blk, scmd);
		}
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}

struct myrb_privdata DAC960_PG_privdata = {
	.hw_init = DAC960_PG_hw_init,
	.irq_handler = DAC960_PG_intr_handler,
	.mmio_size = DAC960_PG_mmio_size,
};
/*
 * DAC960 PD Series Controllers
 */
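/*
 * The PD series has no memory mailbox interface; commands are written
 * directly into the hardware mailbox registers one at a time, and each
 * completion is read back from the status registers in the interrupt
 * handler.
 */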
static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
}

static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
}

static inline void DAC960_PD_gen_intr(void __iomem *base)
{
	writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
}

static inline void DAC960_PD_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
}

static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_HWMBOX_FULL;
}

static inline bool DAC960_PD_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
}

static inline void DAC960_PD_ack_intr(void __iomem *base)
{
	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
}

static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);

	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
}

static inline void DAC960_PD_enable_intr(void __iomem *base)
{
	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
}

static inline void DAC960_PD_disable_intr(void __iomem *base)
{
	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
}

static inline bool DAC960_PD_intr_enabled(void __iomem *base)
{
	unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);

	return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
}

static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
}

static inline unsigned char
DAC960_PD_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_PD_STSID_OFFSET);
}

static inline unsigned short
DAC960_PD_read_status(void __iomem *base)
{
	return readw(base + DAC960_PD_STS_OFFSET);
}

static inline bool
DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PD_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
	return true;
}
static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
static int DAC960_PD_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_PD_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}
static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}

struct myrb_privdata DAC960_PD_privdata = {
	.hw_init = DAC960_PD_hw_init,
	.irq_handler = DAC960_PD_intr_handler,
	.mmio_size = DAC960_PD_mmio_size,
};
/*
 * DAC960 P Series Controllers
 *
 * Similar to the DAC960 PD Series Controllers, but some commands have
 * to be translated.
 */
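/*
 * The old V1 enquiry and device-state structures carry the same
 * fields at different offsets; the helpers below shuffle the buffers
 * in place so the rest of the driver can keep using the new layout.
 */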
static inline void myrb_translate_enquiry(void *enq)
{
	memcpy(enq + 132, enq + 36, 64);
	memset(enq + 36, 0, 96);
}

static inline void myrb_translate_devstate(void *state)
{
	memcpy(state + 2, state + 3, 1);
	memmove(state + 4, state + 5, 2);
	memmove(state + 6, state + 8, 4);
}

static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->type5.ld.ldev_num;

	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= mbox->bytes[7] << 6;
	mbox->bytes[7] = ldev_num;
}

static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->bytes[7];

	mbox->bytes[7] = mbox->bytes[3] >> 6;
	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= ldev_num << 3;
}
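/*
 * DAC960_P_qcmd() rewrites new-style opcodes to their old-style
 * equivalents (and re-packs the logical drive number for read/write
 * commands) on submission; DAC960_P_intr_handler() reverses the
 * translation once the command completes.
 */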
static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	switch (mbox->common.opcode) {
	case MYRB_CMD_ENQUIRY:
		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
		break;
	case MYRB_CMD_GET_DEVICE_STATE:
		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
		break;
	case MYRB_CMD_READ:
		mbox->common.opcode = MYRB_CMD_READ_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE:
		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_READ_SG:
		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE_SG:
		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	default:
		break;
	}
	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
static int DAC960_P_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to allocate DMA mapped memory\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_P_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}
static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;
		union myrb_cmd_mbox *mbox;
		enum myrb_cmd_opcode op;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		if (!cmd_blk)
			continue;

		mbox = &cmd_blk->mbox;
		op = mbox->common.opcode;
		switch (op) {
		case MYRB_CMD_ENQUIRY_OLD:
			mbox->common.opcode = MYRB_CMD_ENQUIRY;
			myrb_translate_enquiry(cb->enquiry);
			break;
		case MYRB_CMD_READ_OLD:
			mbox->common.opcode = MYRB_CMD_READ;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_READ_SG_OLD:
			mbox->common.opcode = MYRB_CMD_READ_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_SG_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		default:
			break;
		}
		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}

struct myrb_privdata DAC960_P_privdata = {
	.hw_init = DAC960_P_hw_init,
	.irq_handler = DAC960_P_intr_handler,
	.mmio_size = DAC960_PD_mmio_size,
};
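/*
 * Probe path: myrb_detect() allocates the Scsi_Host, maps the
 * controller register window, runs the board-specific hw_init routine
 * and attaches the interrupt handler; myrb_probe() then reads the
 * controller configuration and registers the host with the SCSI
 * midlayer.
 */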
static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrb_privdata *privdata =
		(struct myrb_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct Scsi_Host *shost;
	struct myrb_hba *cb = NULL;

	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
	if (!shost) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	shost->max_cmd_len = 12;
	shost->max_lun = 256;
	cb = shost_priv(shost);
	mutex_init(&cb->dcmd_mutex);
	mutex_init(&cb->dma_mutex);
	cb->pdev = pdev;

	if (pci_enable_device(pdev))
		goto failure;

	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else
		cb->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cb);
	spin_lock_init(&cb->queue_lock);
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
	if (cb->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto failure;
	}

	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cb, cb->io_base))
		goto failure;

	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto failure;
	}
	cb->irq = pdev->irq;
	return cb;

failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrb_cleanup(cb);
	return NULL;
}
static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
{
	struct myrb_hba *cb;
	int ret;

	cb = myrb_detect(dev, entry);
	if (!cb)
		return -ENODEV;

	ret = myrb_get_hba_config(cb);
	if (ret < 0) {
		myrb_cleanup(cb);
		return ret;
	}

	if (!myrb_create_mempools(dev, cb)) {
		ret = -ENOMEM;
		goto failed;
	}

	ret = scsi_add_host(cb->host, &dev->dev);
	if (ret) {
		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
		myrb_destroy_mempools(cb);
		goto failed;
	}
	scsi_scan_host(cb->host);
	return 0;

failed:
	myrb_cleanup(cb);
	return ret;
}
static void myrb_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	myrb_cleanup(cb);
	myrb_destroy_mempools(cb);
}
static const struct pci_device_id myrb_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
			       PCI_DEVICE_ID_DEC_21285,
			       PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
		.driver_data = (unsigned long) &DAC960_LA_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, myrb_id_table);
static struct pci_driver myrb_pci_driver = {
	.name = "myrb",
	.id_table = myrb_id_table,
	.probe = myrb_probe,
	.remove = myrb_remove,
};
static int __init myrb_init_module(void)
{
	int ret;

	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
	if (!myrb_raid_template)
		return -ENODEV;

	ret = pci_register_driver(&myrb_pci_driver);
	if (ret)
		raid_class_release(myrb_raid_template);

	return ret;
}

static void __exit myrb_cleanup_module(void)
{
	pci_unregister_driver(&myrb_pci_driver);
	raid_class_release(myrb_raid_template);
}

module_init(myrb_init_module);
module_exit(myrb_cleanup_module);
MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
MODULE_LICENSE("GPL");