1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* -*- mode: c; c-basic-offset: 8 -*- */
4 /* NCR (or Symbios) 53c700 and 53c700-66 Driver
7 **-----------------------------------------------------------------------------
10 **-----------------------------------------------------------------------------
15 * This driver is designed exclusively for these chips (virtually the
16 * earliest of the scripts engine chips). They need their own drivers
17 * because they are missing so many of the scripts and snazzy register
18 * features of their elder brothers (the 710, 720 and 770).
20  * The 700 is the lowliest of the line; it can only do async SCSI.
21 * The 700-66 can at least do synchronous SCSI up to 10MHz.
23 * The 700 chip has no host bus interface logic of its own. However,
24 * it is usually mapped to a location with well defined register
25 * offsets. Therefore, if you can determine the base address and the
26 * irq your board incorporating this chip uses, you can probably use
27  * this driver to run it (although you'll have to write a minimal
28  * wrapper for the purpose---see the NCR_D700 driver for details
29  * about how to do this; a rough sketch follows).
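 *
 * As a hedged sketch only (BOARD_BASE, BOARD_IRQ and board_template
 * are illustrative names, not part of this driver), such a wrapper
 * boils down to roughly:
 *
 *	hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters),
 *			   GFP_KERNEL);
 *	hostdata->dev   = dev;
 *	hostdata->base  = ioremap(BOARD_BASE, 0x100);
 *	hostdata->clock = 50;		(board SCSI clock in MHz)
 *	host = NCR_700_detect(&board_template, hostdata, dev);
 *	if (host != NULL)
 *		request_irq(BOARD_IRQ, NCR_700_intr, IRQF_SHARED,
 *			    "board", host);
 *
 * with NCR_700_release() and free_irq() undoing this on removal.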
34 * 1. Better statistics in the proc fs
36 * 2. Implement message queue (queues SCSI messages like commands) and make
37 * the abort and device reset functions use them.
44 * Fixed bad bug affecting tag starvation processing (previously the
45  * driver would hang the system if too many tags starved). Also fixed
46  * a bad bug having to do with 10 byte command processing and REQUEST
47 * SENSE (the command would loop forever getting a transfer length
48 * mismatch in the CMD phase).
52 * Fixed scripts problem which caused certain devices (notably CDRWs)
53 * to hang on initial INQUIRY. Updated NCR_700_readl/writel to use
54 * __raw_readl/writel for parisc compatibility (Thomas
55 * Bogendoerfer). Added missing SCp->request_bufflen initialisation
56 * for sense requests (Ryan Bradetich).
60 * Following test of the 64 bit parisc kernel by Richard Hirst,
61 * several problems have now been corrected. Also adds support for
62 * consistent memory allocation.
66  * More compatibility changes for the 710 (now actually works). Enhanced
67  * support for odd clock speeds which constrain SDTR negotiations.
68  * Correct cacheline separation for SCSI messages and status for
69  * incoherent architectures. Use of the PCI mapping functions on
70 * buffers to begin support for 64 bit drivers.
74 * Added support for the 53c710 chip (in 53c700 emulation mode only---no
75 * special 53c710 instructions or registers are used).
79 * More endianness/cache coherency changes.
81 * Better bad device handling (handles devices lying about tag
82 * queueing support and devices which fail to provide sense data on
83 * contingent allegiance conditions)
86 * debugging this driver on the parisc architecture and suggesting
87 * many improvements and bug fixes.
89 * Thanks also go to Linuxcare Inc. for providing several PARISC
90 * machines for me to debug the driver on.
94 * Made the driver mem or io mapped; added endian invariance; added
95 * dma cache flushing operations for architectures which need it;
96 * added support for more varied clocking speeds.
100 * Initial modularisation from the D700. See NCR_D700.c for the rest of
103 #define NCR_700_VERSION "2.8"
105 #include <linux/kernel.h>
106 #include <linux/types.h>
107 #include <linux/string.h>
108 #include <linux/slab.h>
109 #include <linux/ioport.h>
110 #include <linux/delay.h>
111 #include <linux/spinlock.h>
112 #include <linux/completion.h>
113 #include <linux/init.h>
114 #include <linux/proc_fs.h>
115 #include <linux/blkdev.h>
116 #include <linux/module.h>
117 #include <linux/interrupt.h>
118 #include <linux/device.h>
121 #include <asm/pgtable.h>
122 #include <asm/byteorder.h>
124 #include <scsi/scsi.h>
125 #include <scsi/scsi_cmnd.h>
126 #include <scsi/scsi_dbg.h>
127 #include <scsi/scsi_eh.h>
128 #include <scsi/scsi_host.h>
129 #include <scsi/scsi_tcq.h>
130 #include <scsi/scsi_transport.h>
131 #include <scsi/scsi_transport_spi.h>
135 /* NOTE: For 64 bit drivers there are points in the code where we use
136  * a non-dereferenceable pointer to point to a structure in dma-able
137 * memory (which is 32 bits) so that we can use all of the structure
138 * operations but take the address at the end. This macro allows us
139 * to truncate the 64 bit pointer down to 32 bits without the compiler
141 #define to32bit(x) ((__u32)((unsigned long)(x)))
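/* A concrete use from later in this file: the scripts engine is handed
 * the 32 bit bus-visible address of a slot's scatter-gather list with
 *
 *	script_patch_32_abs(hostdata->dev, hostdata->script,
 *			SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
 *
 * while the driver itself keeps using the full (possibly 64 bit)
 * pointer. */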
146 #define STATIC static
149 MODULE_AUTHOR("James Bottomley");
150 MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
151 MODULE_LICENSE("GPL");
153 /* This is the script */
154 #include "53c700_d.h"
157 STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
158 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
159 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
160 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
161 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
162 STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
163 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
164 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
165 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
167 STATIC struct device_attribute *NCR_700_dev_attrs[];
169 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
171 static char *NCR_700_phase[] = {
174 "before command phase",
175 "after command phase",
176 "after status phase",
177 "after data in phase",
178 "after data out phase",
182 static char *NCR_700_condition[] = {
190 "REJECT_MSG RECEIVED",
191 "DISCONNECT_MSG RECEIVED",
197 static char *NCR_700_fatal_messages[] = {
198 "unexpected message after reselection",
199 "still MSG_OUT after message injection",
200 "not MSG_IN after selection",
201 "Illegal message length received",
204 static char *NCR_700_SBCL_bits[] = {
215 static char *NCR_700_SBCL_to_phase[] = {
226 /* This translates the SDTR message offset and period to a value
227 * which can be loaded into the SXFER_REG.
229 * NOTE: According to SCSI-2, the true transfer period (in ns) is
230 * actually four times this period value */
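/* Worked example (illustrative numbers only): with a 50MHz synchronous
 * clock, an SDTR period byte of 50 (a true period of 200ns) and an
 * offset of 8, the code below computes
 *
 *	XFERP = (50*4 * 50)/1000 - 4 = 6
 *	SXFER = (8 & 0x0f) | ((6 & 0x07) << 4) = 0x68
 */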
232 NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
233 __u8 offset, __u8 period)
237 __u8 min_xferp = (hostdata->chip710
238 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
239 __u8 max_offset = (hostdata->chip710
240 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
245 if(period < hostdata->min_period) {
246		printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %dns\n", period*4, hostdata->min_period*4);
247 period = hostdata->min_period;
249 XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
250 if(offset > max_offset) {
251 printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
255 if(XFERP < min_xferp) {
258 return (offset & 0x0f) | (XFERP & 0x07)<<4;
262 NCR_700_get_SXFER(struct scsi_device *SDp)
264 struct NCR_700_Host_Parameters *hostdata =
265 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
267 return NCR_700_offset_period_to_sxfer(hostdata,
268 spi_offset(SDp->sdev_target),
269 spi_period(SDp->sdev_target));
273 NCR_700_detect(struct scsi_host_template *tpnt,
274 struct NCR_700_Host_Parameters *hostdata, struct device *dev)
276 dma_addr_t pScript, pSlots;
279 struct Scsi_Host *host;
280 static int banner = 0;
283 if(tpnt->sdev_attrs == NULL)
284 tpnt->sdev_attrs = NCR_700_dev_attrs;
286 memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
287 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
289 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
293 script = (__u32 *)memory;
294 hostdata->msgin = memory + MSGIN_OFFSET;
295 hostdata->msgout = memory + MSGOUT_OFFSET;
296 hostdata->status = memory + STATUS_OFFSET;
297 hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
300 pSlots = pScript + SLOTS_OFFSET;
302 /* Fill in the missing routines from the host template */
303 tpnt->queuecommand = NCR_700_queuecommand;
304 tpnt->eh_abort_handler = NCR_700_abort;
305 tpnt->eh_host_reset_handler = NCR_700_host_reset;
306 tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
307 tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
308 tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
309 tpnt->slave_configure = NCR_700_slave_configure;
310 tpnt->slave_destroy = NCR_700_slave_destroy;
311 tpnt->slave_alloc = NCR_700_slave_alloc;
312 tpnt->change_queue_depth = NCR_700_change_queue_depth;
314 if(tpnt->name == NULL)
315 tpnt->name = "53c700";
316 if(tpnt->proc_name == NULL)
317 tpnt->proc_name = "53c700";
319 host = scsi_host_alloc(tpnt, 4);
322 memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
323 * NCR_700_COMMAND_SLOTS_PER_HOST);
324 for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
325 dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
326 - (unsigned long)&hostdata->slots[0].SG[0]);
327 hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
329 hostdata->free_list = &hostdata->slots[j];
331 hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
332 hostdata->slots[j].state = NCR_700_SLOT_FREE;
335 for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
336 script[j] = bS_to_host(SCRIPT[j]);
338 /* adjust all labels to be bus physical */
339 for (j = 0; j < PATCHES; j++)
340 script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
341 /* now patch up fixed addresses. */
342 script_patch_32(hostdata->dev, script, MessageLocation,
343 pScript + MSGOUT_OFFSET);
344 script_patch_32(hostdata->dev, script, StatusAddress,
345 pScript + STATUS_OFFSET);
346 script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
347 pScript + MSGIN_OFFSET);
349 hostdata->script = script;
350 hostdata->pScript = pScript;
351 dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
352 hostdata->state = NCR_700_HOST_FREE;
353 hostdata->cmd = NULL;
355 host->max_lun = NCR_700_MAX_LUNS;
356 BUG_ON(NCR_700_transport_template == NULL);
357 host->transportt = NCR_700_transport_template;
358 host->unique_id = (unsigned long)hostdata->base;
359 hostdata->eh_complete = NULL;
360 host->hostdata[0] = (unsigned long)hostdata;
362 NCR_700_writeb(0xff, host, CTEST9_REG);
363 if (hostdata->chip710)
364 hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
366 hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
367 hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
372 printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
373 hostdata->chip710 ? "53c710" :
374 (hostdata->fast ? "53c700-66" : "53c700"),
375 hostdata->rev, hostdata->differential ?
376 "(Differential)" : "");
378 NCR_700_chip_reset(host);
380 if (scsi_add_host(host, dev)) {
381 dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
386 spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
393 NCR_700_release(struct Scsi_Host *host)
395 struct NCR_700_Host_Parameters *hostdata =
396 (struct NCR_700_Host_Parameters *)host->hostdata[0];
398 dma_free_attrs(hostdata->dev, TOTAL_MEM_SIZE, hostdata->script,
399 hostdata->pScript, DMA_ATTR_NON_CONSISTENT);
404 NCR_700_identify(int can_disconnect, __u8 lun)
406 return IDENTIFY_BASE |
407 ((can_disconnect) ? 0x40 : 0) |
408 (lun & NCR_700_LUN_MASK);
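	/* e.g. (illustrative): NCR_700_identify(1, 2) yields
	 * IDENTIFY_BASE | 0x40 | 2 = 0xc2, i.e. IDENTIFY with disconnect
	 * privilege for LUN 2, assuming the usual IDENTIFY_BASE of 0x80 */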
412  * Function : static int NCR_700_data_residual (struct Scsi_Host *host)
414 * Purpose : return residual data count of what's in the chip. If you
415 * really want to know what this function is doing, it's almost a
416 * direct transcription of the algorithm described in the 53c710
417 * guide, except that the DBC and DFIFO registers are only 6 bits
420 * Inputs : host - SCSI host */
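/* Hedged illustration: on an asynchronous send, a DFIFO/DBC difference
 * of 3 with SODL still full would be reported below as a residual of
 * 4 bytes. */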
422 NCR_700_data_residual (struct Scsi_Host *host) {
423 struct NCR_700_Host_Parameters *hostdata =
424 (struct NCR_700_Host_Parameters *)host->hostdata[0];
425 int count, synchronous = 0;
428 if(hostdata->chip710) {
429 count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
430 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
432 count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
433 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
437 synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
439 /* get the data direction */
440 ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
445 count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
447 if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
451 __u8 sstat = NCR_700_readb(host, SSTAT1_REG);
452 if (sstat & SODL_REG_FULL)
454 if (synchronous && (sstat & SODR_REG_FULL))
459 printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
464 /* print out the SCSI wires and corresponding phase from the SBCL register
467 sbcl_to_string(__u8 sbcl)
470 static char ret[256];
475 strcat(ret, NCR_700_SBCL_bits[i]);
477 strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
482 bitmap_to_number(__u8 bitmap)
486 for(i=0; i<8 && !(bitmap &(1<<i)); i++)
491 /* Pull a slot off the free list */
492 STATIC struct NCR_700_command_slot *
493 find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
495 struct NCR_700_command_slot *slot = hostdata->free_list;
499 if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
500 printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
504 if(slot->state != NCR_700_SLOT_FREE)
506 printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
509 hostdata->free_list = slot->ITL_forw;
510 slot->ITL_forw = NULL;
513 /* NOTE: set the state to busy here, not queued, since this
514 * indicates the slot is in use and cannot be run by the IRQ
515 * finish routine. If we cannot queue the command when it
516	 * is properly built, we then change to NCR_700_SLOT_QUEUED */
517 slot->state = NCR_700_SLOT_BUSY;
519 hostdata->command_slot_count++;
525 free_slot(struct NCR_700_command_slot *slot,
526 struct NCR_700_Host_Parameters *hostdata)
528 if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
529 printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
531 if(slot->state == NCR_700_SLOT_FREE) {
532 printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
535 slot->resume_offset = 0;
537 slot->state = NCR_700_SLOT_FREE;
538 slot->ITL_forw = hostdata->free_list;
539 hostdata->free_list = slot;
540 hostdata->command_slot_count--;
544 /* This routine really does very little. The command is indexed on
545 the ITL and (if tagged) the ITLQ lists in _queuecommand */
547 save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
548 struct scsi_cmnd *SCp, __u32 dsp)
550	/* It's just possible that this gets executed twice */
552 struct NCR_700_command_slot *slot =
553 (struct NCR_700_command_slot *)SCp->host_scribble;
555 slot->resume_offset = dsp;
557 hostdata->state = NCR_700_HOST_FREE;
558 hostdata->cmd = NULL;
562 NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
563 struct NCR_700_command_slot *slot)
565 if(SCp->sc_data_direction != DMA_NONE &&
566 SCp->sc_data_direction != DMA_BIDIRECTIONAL)
571 NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
572 struct scsi_cmnd *SCp, int result)
574 hostdata->state = NCR_700_HOST_FREE;
575 hostdata->cmd = NULL;
578 struct NCR_700_command_slot *slot =
579 (struct NCR_700_command_slot *)SCp->host_scribble;
581 dma_unmap_single(hostdata->dev, slot->pCmd,
582 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
583 if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
584 char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
586 dma_unmap_single(hostdata->dev, slot->dma_handle,
587 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
588 /* restore the old result if the request sense was
592 /* restore the original length */
593 SCp->cmd_len = cmnd[8];
595 NCR_700_unmap(hostdata, SCp, slot);
597 free_slot(slot, hostdata);
599 if(NCR_700_get_depth(SCp->device) == 0 ||
600 NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
601 printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
602 NCR_700_get_depth(SCp->device));
603 #endif /* NCR_700_DEBUG */
604 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
606 SCp->host_scribble = NULL;
607 SCp->result = result;
610 printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
616 NCR_700_internal_bus_reset(struct Scsi_Host *host)
619 NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
621 NCR_700_writeb(0, host, SCNTL1_REG);
626 NCR_700_chip_setup(struct Scsi_Host *host)
628 struct NCR_700_Host_Parameters *hostdata =
629 (struct NCR_700_Host_Parameters *)host->hostdata[0];
631 __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
633 if(hostdata->chip710) {
634 __u8 burst_disable = 0;
635 __u8 burst_length = 0;
637 switch (hostdata->burst_length) {
639 burst_length = BURST_LENGTH_1;
642 burst_length = BURST_LENGTH_2;
645 burst_length = BURST_LENGTH_4;
648 burst_length = BURST_LENGTH_8;
651 burst_disable = BURST_DISABLE;
654 hostdata->dcntl_extra |= COMPAT_700_MODE;
656 NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
657 NCR_700_writeb(burst_length | hostdata->dmode_extra,
658 host, DMODE_710_REG);
659 NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
660 (hostdata->differential ? DIFF : 0),
662 NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
663 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
664 | AUTO_ATN, host, SCNTL0_REG);
666 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
667 host, DMODE_700_REG);
668 NCR_700_writeb(hostdata->differential ?
669 DIFF : 0, host, CTEST7_REG);
671 /* this is for 700-66, does nothing on 700 */
672 NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
673 | GENERATE_RECEIVE_PARITY, host,
676 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
677 | PARITY | AUTO_ATN, host, SCNTL0_REG);
681 NCR_700_writeb(1 << host->this_id, host, SCID_REG);
682 NCR_700_writeb(0, host, SBCL_REG);
683 NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
685 NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
686 | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
688 NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
689 NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
690 if(hostdata->clock > 75) {
691		printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
692 /* do the best we can, but the async clock will be out
693 * of spec: sync divider 2, async divider 3 */
694 DEBUG(("53c700: sync 2 async 3\n"));
695 NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
696 NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
697 hostdata->sync_clock = hostdata->clock/2;
698 } else if(hostdata->clock > 50 && hostdata->clock <= 75) {
699 /* sync divider 1.5, async divider 3 */
700 DEBUG(("53c700: sync 1.5 async 3\n"));
701 NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
702 NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
703 hostdata->sync_clock = hostdata->clock*2;
704 hostdata->sync_clock /= 3;
706 } else if(hostdata->clock > 37 && hostdata->clock <= 50) {
707 /* sync divider 1, async divider 2 */
708 DEBUG(("53c700: sync 1 async 2\n"));
709 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
710 NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
711 hostdata->sync_clock = hostdata->clock;
712 } else if(hostdata->clock > 25 && hostdata->clock <=37) {
713 /* sync divider 1, async divider 1.5 */
714 DEBUG(("53c700: sync 1 async 1.5\n"));
715 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
716 NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
717 hostdata->sync_clock = hostdata->clock;
719 DEBUG(("53c700: sync 1 async 1\n"));
720 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
721 NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
722 /* sync divider 1, async divider 1 */
723 hostdata->sync_clock = hostdata->clock;
725 /* Calculate the actual minimum period that can be supported
726 * by our synchronous clock speed. See the 710 manual for
727 * exact details of this calculation which is based on a
728 * setting of the SXFER register */
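	/* Illustrative numbers only: a 50MHz sync clock and a min_xferp
	 * of 4 would give min_period = 1000*(4+4)/(4*50) = 40, i.e. a
	 * true period of 160ns. */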
729 min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
730 hostdata->min_period = NCR_700_MIN_PERIOD;
731 if(min_period > NCR_700_MIN_PERIOD)
732 hostdata->min_period = min_period;
736 NCR_700_chip_reset(struct Scsi_Host *host)
738 struct NCR_700_Host_Parameters *hostdata =
739 (struct NCR_700_Host_Parameters *)host->hostdata[0];
740 if(hostdata->chip710) {
741 NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
744 NCR_700_writeb(0, host, ISTAT_REG);
746 NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
749 NCR_700_writeb(0, host, DCNTL_REG);
754 NCR_700_chip_setup(host);
757 /* The heart of the message processing engine is that the instruction
758 * immediately after the INT is the normal case (and so must be CLEAR
759 * ACK). If we want to do something else, we call that routine in
760 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
761 * ACK) so that the routine returns correctly to resume its activity
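 *
 * As a concrete illustration, the REJECT paths below leave TEMP at
 * dsp + 8 and point resume_offset at Ent_SendMessageWithATN; when that
 * script routine returns through TEMP it lands just past the CLEAR ACK
 * and normal processing carries on.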
764 process_extended_message(struct Scsi_Host *host,
765 struct NCR_700_Host_Parameters *hostdata,
766 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
768 __u32 resume_offset = dsp, temp = dsp + 8;
769 __u8 pun = 0xff, lun = 0xff;
772 pun = SCp->device->id;
773 lun = SCp->device->lun;
776 switch(hostdata->msgin[2]) {
778 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
779 struct scsi_target *starget = SCp->device->sdev_target;
780 __u8 period = hostdata->msgin[3];
781 __u8 offset = hostdata->msgin[4];
783 if(offset == 0 || period == 0) {
788 spi_offset(starget) = offset;
789 spi_period(starget) = period;
791 if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
792 spi_display_xfer_agreement(starget);
793 NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
796 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
797 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
799 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
803 /* SDTR message out of the blue, reject it */
804 shost_printk(KERN_WARNING, host,
805 "Unexpected SDTR msg\n");
806 hostdata->msgout[0] = A_REJECT_MSG;
807 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
808 script_patch_16(hostdata->dev, hostdata->script,
810 /* SendMsgOut returns, so set up the return
812 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
817 printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
818 host->host_no, pun, lun);
819 hostdata->msgout[0] = A_REJECT_MSG;
820 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
821 script_patch_16(hostdata->dev, hostdata->script, MessageCount,
823 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
828 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
829 host->host_no, pun, lun,
830 NCR_700_phase[(dsps & 0xf00) >> 8]);
831 spi_print_msg(hostdata->msgin);
834 hostdata->msgout[0] = A_REJECT_MSG;
835 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
836 script_patch_16(hostdata->dev, hostdata->script, MessageCount,
838 /* SendMsgOut returns, so set up the return
840 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
842 NCR_700_writel(temp, host, TEMP_REG);
843 return resume_offset;
847 process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata,
848 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
850 /* work out where to return to */
851 __u32 temp = dsp + 8, resume_offset = dsp;
852 __u8 pun = 0xff, lun = 0xff;
855 pun = SCp->device->id;
856 lun = SCp->device->lun;
860 printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
861 NCR_700_phase[(dsps & 0xf00) >> 8]);
862 spi_print_msg(hostdata->msgin);
866 switch(hostdata->msgin[0]) {
869 resume_offset = process_extended_message(host, hostdata, SCp,
874 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
875 /* Rejected our sync negotiation attempt */
876 spi_period(SCp->device->sdev_target) =
877 spi_offset(SCp->device->sdev_target) = 0;
878 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
879 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
880 } else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
881 /* rejected our first simple tag message */
882 scmd_printk(KERN_WARNING, SCp,
883 "Rejected first tag queue attempt, turning off tag queueing\n");
884 /* we're done negotiating */
885 NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
886 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
888 SCp->device->tagged_supported = 0;
889 SCp->device->simple_tags = 0;
890 scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
892 shost_printk(KERN_WARNING, host,
893 "(%d:%d) Unexpected REJECT Message %s\n",
895 NCR_700_phase[(dsps & 0xf00) >> 8]);
896 /* however, just ignore it */
900 case A_PARITY_ERROR_MSG:
901 printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
903 NCR_700_internal_bus_reset(host);
905 case A_SIMPLE_TAG_MSG:
906 printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
907 pun, lun, hostdata->msgin[1],
908 NCR_700_phase[(dsps & 0xf00) >> 8]);
912 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
913 host->host_no, pun, lun,
914 NCR_700_phase[(dsps & 0xf00) >> 8]);
916 spi_print_msg(hostdata->msgin);
919 hostdata->msgout[0] = A_REJECT_MSG;
920 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
921 script_patch_16(hostdata->dev, hostdata->script, MessageCount,
923 /* SendMsgOut returns, so set up the return
925 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
929 NCR_700_writel(temp, host, TEMP_REG);
930 /* set us up to receive another message */
931 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
932 return resume_offset;
936 process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
937 struct Scsi_Host *host,
938 struct NCR_700_Host_Parameters *hostdata)
940 __u32 resume_offset = 0;
941 __u8 pun = 0xff, lun=0xff;
944 pun = SCp->device->id;
945 lun = SCp->device->lun;
948 if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
949 DEBUG((" COMMAND COMPLETE, status=%02x\n",
950 hostdata->status[0]));
951 /* OK, if TCQ still under negotiation, we now know it works */
952 if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
953 NCR_700_set_tag_neg_state(SCp->device,
954 NCR_700_FINISHED_TAG_NEGOTIATION);
956		/* check for contingent allegiance conditions */
957 if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
958 status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
959 struct NCR_700_command_slot *slot =
960 (struct NCR_700_command_slot *)SCp->host_scribble;
961 if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
962 /* OOPS: bad device, returning another
963 * contingent allegiance condition */
964 scmd_printk(KERN_ERR, SCp,
965 "broken device is looping in contingent allegiance: ignoring\n");
966 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
969 NCR_700_get_sense_cmnd(SCp->device);
971 scsi_print_command(SCp);
972 printk(" cmd %p has status %d, requesting sense\n",
973 SCp, hostdata->status[0]);
975 /* we can destroy the command here
976 * because the contingent allegiance
977 * condition will cause a retry which
978 * will re-copy the command from the
979 * saved data_cmnd. We also unmap any
980 * data associated with the command
982 NCR_700_unmap(hostdata, SCp, slot);
983 dma_unmap_single(hostdata->dev, slot->pCmd,
987 cmnd[0] = REQUEST_SENSE;
988 cmnd[1] = (lun & 0x7) << 5;
991 cmnd[4] = SCSI_SENSE_BUFFERSIZE;
993 /* Here's a quiet hack: the
994 * REQUEST_SENSE command is six bytes,
995 * so store a flag indicating that
996 * this was an internal sense request
997 * and the original status at the end
999 cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1000 cmnd[7] = hostdata->status[0];
1001 cmnd[8] = SCp->cmd_len;
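				/* so, purely as an illustration, a 10 byte
				 * command failing with status 0x02 ends up
				 * with cmnd[6] = MAGIC, cmnd[7] = 0x02 and
				 * cmnd[8] = 10 stashed behind the 6 byte
				 * REQUEST SENSE */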
1002 SCp->cmd_len = 6; /* command length for
1004 slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1005 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1006 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
1007 slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1008 slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1009 slot->SG[1].pAddr = 0;
1010 slot->resume_offset = hostdata->pScript;
1011 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1012 dma_cache_sync(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1014 /* queue the command for reissue */
1015 slot->state = NCR_700_SLOT_QUEUED;
1016 slot->flags = NCR_700_FLAG_AUTOSENSE;
1017 hostdata->state = NCR_700_HOST_FREE;
1018 hostdata->cmd = NULL;
1021 // Currently rely on the mid layer evaluation
1022 // of the tag queuing capability
1024 //if(status_byte(hostdata->status[0]) == GOOD &&
1025 // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1026 // /* Piggy back the tag queueing support
1027 // * on this command */
1028 // dma_sync_single_for_cpu(hostdata->dev,
1029 // slot->dma_handle,
1030 // SCp->request_bufflen,
1031 // DMA_FROM_DEVICE);
1032 // if(((char *)SCp->request_buffer)[7] & 0x02) {
1033 // scmd_printk(KERN_INFO, SCp,
1034 // "Enabling Tag Command Queuing\n");
1035 // hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1036 // NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1038 // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1039 // hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1042 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1044 } else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1045 __u8 i = (dsps & 0xf00) >> 8;
1047 scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1049 sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1050 scmd_printk(KERN_ERR, SCp, " len = %d, cmd =",
1052 scsi_print_command(SCp);
1054 NCR_700_internal_bus_reset(host);
1055 } else if((dsps & 0xfffff000) == A_FATAL) {
1056 int i = (dsps & 0xfff);
1058 printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1059 host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1060 if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1061 printk(KERN_ERR " msg begins %02x %02x\n",
1062 hostdata->msgin[0], hostdata->msgin[1]);
1064 NCR_700_internal_bus_reset(host);
1065 } else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1066 #ifdef NCR_700_DEBUG
1067 __u8 i = (dsps & 0xf00) >> 8;
1069 printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1070 host->host_no, pun, lun,
1071 i, NCR_700_phase[i]);
1073 save_for_reselection(hostdata, SCp, dsp);
1075 } else if(dsps == A_RESELECTION_IDENTIFIED) {
1077 struct NCR_700_command_slot *slot;
1078 __u8 reselection_id = hostdata->reselection_id;
1079 struct scsi_device *SDp;
1081 lun = hostdata->msgin[0] & 0x1f;
1083 hostdata->reselection_id = 0xff;
1084 DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1085 host->host_no, reselection_id, lun));
1086 /* clear the reselection indicator */
1087 SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1088 if(unlikely(SDp == NULL)) {
1089 printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1090 host->host_no, reselection_id, lun);
1093 if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1094 struct scsi_cmnd *SCp;
1096 SCp = scsi_host_find_tag(SDp->host, hostdata->msgin[2]);
1097 if(unlikely(SCp == NULL)) {
1098 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1099 host->host_no, reselection_id, lun, hostdata->msgin[2]);
1103 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1104 DDEBUG(KERN_DEBUG, SDp,
1105 "reselection is tag %d, slot %p(%d)\n",
1106 hostdata->msgin[2], slot, slot->tag);
1108 struct NCR_700_Device_Parameters *p = SDp->hostdata;
1109 struct scsi_cmnd *SCp = p->current_cmnd;
1111 if(unlikely(SCp == NULL)) {
1112 sdev_printk(KERN_ERR, SDp,
1113 "no saved request for untagged cmd\n");
1116 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1120 printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1121 host->host_no, reselection_id, lun,
1122 hostdata->msgin[0], hostdata->msgin[1],
1123 hostdata->msgin[2]);
1125 if(hostdata->state != NCR_700_HOST_BUSY)
1126 printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1128 resume_offset = slot->resume_offset;
1129 hostdata->cmd = slot->cmnd;
1131 /* re-patch for this command */
1132 script_patch_32_abs(hostdata->dev, hostdata->script,
1133 CommandAddress, slot->pCmd);
1134 script_patch_16(hostdata->dev, hostdata->script,
1135 CommandCount, slot->cmnd->cmd_len);
1136 script_patch_32_abs(hostdata->dev, hostdata->script,
1137 SGScriptStartAddress,
1138 to32bit(&slot->pSG[0].ins));
1140 /* Note: setting SXFER only works if we're
1141 * still in the MESSAGE phase, so it is vital
1142 * that ACK is still asserted when we process
1143 * the reselection message. The resume offset
1144 * should therefore always clear ACK */
1145 NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1147 dma_cache_sync(hostdata->dev, hostdata->msgin,
1148 MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1149 dma_cache_sync(hostdata->dev, hostdata->msgout,
1150 MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1151 /* I'm just being paranoid here, the command should
1152 * already have been flushed from the cache */
1153 dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
1154 slot->cmnd->cmd_len, DMA_TO_DEVICE);
1159 } else if(dsps == A_RESELECTED_DURING_SELECTION) {
1161 /* This section is full of debugging code because I've
1162 * never managed to reach it. I think what happens is
1163 * that, because the 700 runs with selection
1164		 * interrupts enabled the whole time, we take a
1165 * selection interrupt before we manage to get to the
1166 * reselected script interrupt */
1168 __u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1169 struct NCR_700_command_slot *slot;
1171 /* Take out our own ID */
1172 reselection_id &= ~(1<<host->this_id);
1174 /* I've never seen this happen, so keep this as a printk rather
1176 printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1177 host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1180 /* FIXME: DEBUGGING CODE */
1181 __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1184 for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1185 if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1186 && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1189 printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1190 SCp = hostdata->slots[i].cmnd;
1194 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1195 /* change slot from busy to queued to redo command */
1196 slot->state = NCR_700_SLOT_QUEUED;
1198 hostdata->cmd = NULL;
1200 if(reselection_id == 0) {
1201 if(hostdata->reselection_id == 0xff) {
1202 printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1205 printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1207 reselection_id = hostdata->reselection_id;
1211 /* convert to real ID */
1212 reselection_id = bitmap_to_number(reselection_id);
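		/* e.g. (illustrative): a reselection bitmap of 0x20 (only
		 * bit 5 set) converts to SCSI ID 5 */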
1214 hostdata->reselection_id = reselection_id;
1215 /* just in case we have a stale simple tag message, clear it */
1216 hostdata->msgin[1] = 0;
1217 dma_cache_sync(hostdata->dev, hostdata->msgin,
1218 MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1219 if(hostdata->tag_negotiated & (1<<reselection_id)) {
1220 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1222 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1224 } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1225 /* we've just disconnected from the bus, do nothing since
1226 * a return here will re-run the queued command slot
1227 * that may have been interrupted by the initial selection */
1228 DEBUG((" SELECTION COMPLETED\n"));
1229 } else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1230 resume_offset = process_message(host, hostdata, SCp,
1232 } else if((dsps & 0xfffff000) == 0) {
1233 __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1234 printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1235 host->host_no, pun, lun, NCR_700_condition[i],
1236 NCR_700_phase[j], dsp - hostdata->pScript);
1238 struct scatterlist *sg;
1240 scsi_print_command(SCp);
1241 scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
1242 printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1245 NCR_700_internal_bus_reset(host);
1246 } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1247 printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1248 host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1249 resume_offset = dsp;
1251 printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1252 host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1253 NCR_700_internal_bus_reset(host);
1255 return resume_offset;
1258 /* We run the 53c700 with selection interrupts always enabled. This
1259 * means that the chip may be selected as soon as the bus frees. On a
1260 * busy bus, this can be before the scripts engine finishes its
1261 * processing. Therefore, part of the selection processing has to be
1262 * to find out what the scripts engine is doing and complete the
1263 * function if necessary (i.e. process the pending disconnect or save
1264  * the interrupted initial selection) */
1266 process_selection(struct Scsi_Host *host, __u32 dsp)
1268 __u8 id = 0; /* Squash compiler warning */
1270 __u32 resume_offset = 0;
1271 struct NCR_700_Host_Parameters *hostdata =
1272 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1273 struct scsi_cmnd *SCp = hostdata->cmd;
1276 for(count = 0; count < 5; count++) {
1277 id = NCR_700_readb(host, hostdata->chip710 ?
1278 CTEST9_REG : SFBR_REG);
1280 /* Take out our own ID */
1281 id &= ~(1<<host->this_id);
1286 sbcl = NCR_700_readb(host, SBCL_REG);
1287 if((sbcl & SBCL_IO) == 0) {
1288 /* mark as having been selected rather than reselected */
1291 /* convert to real ID */
1292 hostdata->reselection_id = id = bitmap_to_number(id);
1293 DEBUG(("scsi%d: Reselected by %d\n",
1294 host->host_no, id));
1296 if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1297 struct NCR_700_command_slot *slot =
1298 (struct NCR_700_command_slot *)SCp->host_scribble;
1299 DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1301 switch(dsp - hostdata->pScript) {
1302 case Ent_Disconnect1:
1303 case Ent_Disconnect2:
1304 save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1306 case Ent_Disconnect3:
1307 case Ent_Disconnect4:
1308 save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1310 case Ent_Disconnect5:
1311 case Ent_Disconnect6:
1312 save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1314 case Ent_Disconnect7:
1315 case Ent_Disconnect8:
1316 save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1320 process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1324 slot->state = NCR_700_SLOT_QUEUED;
1328 hostdata->state = NCR_700_HOST_BUSY;
1329 hostdata->cmd = NULL;
1330 /* clear any stale simple tag message */
1331 hostdata->msgin[1] = 0;
1332 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1336 /* Selected as target, Ignore */
1337 resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1338 } else if(hostdata->tag_negotiated & (1<<id)) {
1339 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1341 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1343 return resume_offset;
1347 NCR_700_clear_fifo(struct Scsi_Host *host) {
1348 const struct NCR_700_Host_Parameters *hostdata
1349 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1350 if(hostdata->chip710) {
1351 NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1353 NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1358 NCR_700_flush_fifo(struct Scsi_Host *host) {
1359 const struct NCR_700_Host_Parameters *hostdata
1360 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1361 if(hostdata->chip710) {
1362 NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1364 NCR_700_writeb(0, host, CTEST8_REG);
1366 NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1368 NCR_700_writeb(0, host, DFIFO_REG);
1373 /* The queue lock with interrupts disabled must be held on entry to
1376 NCR_700_start_command(struct scsi_cmnd *SCp)
1378 struct NCR_700_command_slot *slot =
1379 (struct NCR_700_command_slot *)SCp->host_scribble;
1380 struct NCR_700_Host_Parameters *hostdata =
1381 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1382 __u16 count = 1; /* for IDENTIFY message */
1383 u8 lun = SCp->device->lun;
1385 if(hostdata->state != NCR_700_HOST_FREE) {
1386 /* keep this inside the lock to close the race window where
1387 * the running command finishes on another CPU while we don't
1388 * change the state to queued on this one */
1389 slot->state = NCR_700_SLOT_QUEUED;
1391 DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1392 SCp->device->host->host_no, slot->cmnd, slot));
1395 hostdata->state = NCR_700_HOST_BUSY;
1396 hostdata->cmd = SCp;
1397 slot->state = NCR_700_SLOT_BUSY;
1398 /* keep interrupts disabled until we have the command correctly
1399 * set up so we cannot take a selection interrupt */
1401 hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1402 slot->flags != NCR_700_FLAG_AUTOSENSE),
1404 /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1405 * if the negotiated transfer parameters still hold, so
1406 * always renegotiate them */
1407 if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1408 slot->flags == NCR_700_FLAG_AUTOSENSE) {
1409 NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1412 /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1413 * If a contingent allegiance condition exists, the device
1414 * will refuse all tags, so send the request sense as untagged
1416 if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1417 && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1418 slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1419 count += spi_populate_tag_msg(&hostdata->msgout[count], SCp);
1422 if(hostdata->fast &&
1423 NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1424 count += spi_populate_sync_msg(&hostdata->msgout[count],
1425 spi_period(SCp->device->sdev_target),
1426 spi_offset(SCp->device->sdev_target));
1427 NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
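	/* Hedged illustration of the resulting msgout[]: for a tagged
	 * command that also needs sync negotiation the bytes run
	 * IDENTIFY, SIMPLE TAG, <tag>, EXTENDED MESSAGE, 3, SDTR,
	 * <period>, <offset>, giving count = 8 for the MessageCount
	 * patch below (byte values are indicative only). */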
1430 script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
1433 script_patch_ID(hostdata->dev, hostdata->script,
1434 Device_ID, 1<<scmd_id(SCp));
1436 script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
1438 script_patch_16(hostdata->dev, hostdata->script, CommandCount,
1440 /* finally plumb the beginning of the SG list into the script
1442 script_patch_32_abs(hostdata->dev, hostdata->script,
1443 SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
1444 NCR_700_clear_fifo(SCp->device->host);
1446 if(slot->resume_offset == 0)
1447 slot->resume_offset = hostdata->pScript;
1448 /* now perform all the writebacks and invalidates */
1449 dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
1450 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1452 dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1453 dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
1455 /* set the synchronous period/offset */
1456 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1457 SCp->device->host, SXFER_REG);
1458 NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1459 NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1465 NCR_700_intr(int irq, void *dev_id)
1467 struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1468 struct NCR_700_Host_Parameters *hostdata =
1469 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1471 __u32 resume_offset = 0;
1472 __u8 pun = 0xff, lun = 0xff;
1473 unsigned long flags;
1476 /* Use the host lock to serialise access to the 53c700
1477 * hardware. Note: In future, we may need to take the queue
1478 * lock to enter the done routines. When that happens, we
1479 * need to ensure that for this driver, the host lock and the
1480 * queue lock point to the same thing. */
1481 spin_lock_irqsave(host->host_lock, flags);
1482 if((istat = NCR_700_readb(host, ISTAT_REG))
1483 & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1485 __u8 sstat0 = 0, dstat = 0;
1487 struct scsi_cmnd *SCp = hostdata->cmd;
1488 enum NCR_700_Host_State state;
1491 state = hostdata->state;
1492 SCp = hostdata->cmd;
1494 if(istat & SCSI_INT_PENDING) {
1497 sstat0 = NCR_700_readb(host, SSTAT0_REG);
1500 if(istat & DMA_INT_PENDING) {
1503 dstat = NCR_700_readb(host, DSTAT_REG);
1506 dsps = NCR_700_readl(host, DSPS_REG);
1507 dsp = NCR_700_readl(host, DSP_REG);
1509 DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1510 host->host_no, istat, sstat0, dstat,
1511 (dsp - (__u32)(hostdata->pScript))/4,
1515 pun = SCp->device->id;
1516 lun = SCp->device->lun;
1519 if(sstat0 & SCSI_RESET_DETECTED) {
1520 struct scsi_device *SDp;
1523 hostdata->state = NCR_700_HOST_BUSY;
1525 printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1526 host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1528 scsi_report_bus_reset(host, 0);
1530 /* clear all the negotiated parameters */
1531 __shost_for_each_device(SDp, host)
1532 NCR_700_clear_flag(SDp, ~0);
1534 /* clear all the slots and their pending commands */
1535 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1536 struct scsi_cmnd *SCp;
1537 struct NCR_700_command_slot *slot =
1538 &hostdata->slots[i];
1540 if(slot->state == NCR_700_SLOT_FREE)
1544 printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1546 free_slot(slot, hostdata);
1547 SCp->host_scribble = NULL;
1548 NCR_700_set_depth(SCp->device, 0);
1549 /* NOTE: deadlock potential here: we
1550 * rely on mid-layer guarantees that
1551 * scsi_done won't try to issue the
1552 * command again otherwise we'll
1554 * hostdata->state_lock */
1555 SCp->result = DID_RESET << 16;
1556 SCp->scsi_done(SCp);
1559 NCR_700_chip_setup(host);
1561 hostdata->state = NCR_700_HOST_FREE;
1562 hostdata->cmd = NULL;
1563 /* signal back if this was an eh induced reset */
1564 if(hostdata->eh_complete != NULL)
1565 complete(hostdata->eh_complete);
1567 } else if(sstat0 & SELECTION_TIMEOUT) {
1568 DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1569 host->host_no, pun, lun));
1570 NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1571 } else if(sstat0 & PHASE_MISMATCH) {
1572 struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1573 (struct NCR_700_command_slot *)SCp->host_scribble;
1575 if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1576 /* It wants to reply to some part of
1578 #ifdef NCR_700_DEBUG
1579 __u32 temp = NCR_700_readl(host, TEMP_REG);
1580 int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1581 printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1583 resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1584 } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1585 dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1586 int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1587 int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1588 int residual = NCR_700_data_residual(host);
1590 #ifdef NCR_700_DEBUG
1591 __u32 naddr = NCR_700_readl(host, DNAD_REG);
1593 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1594 host->host_no, pun, lun,
1595 SGcount, data_transfer);
1596 scsi_print_command(SCp);
1598 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1599 host->host_no, pun, lun,
1600 SGcount, data_transfer, residual);
1603 data_transfer += residual;
1605 if(data_transfer != 0) {
1611 count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1612 DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1613 slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1614 slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1615 pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1616 pAddr += (count - data_transfer);
1617 #ifdef NCR_700_DEBUG
1618 if(pAddr != naddr) {
1619 printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1622 slot->SG[SGcount].pAddr = bS_to_host(pAddr);
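				/* e.g. (illustrative): a 0x1000 byte MOVE
				 * interrupted with 0x400 left in DBC plus a
				 * 2 byte FIFO residual becomes a 0x402 byte
				 * MOVE starting 0xbfe further into the
				 * buffer */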
1624 /* set the executed moves to nops */
1625 for(i=0; i<SGcount; i++) {
1626 slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1627 slot->SG[i].pAddr = 0;
1629 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1630 /* and pretend we disconnected after
1631 * the command phase */
1632 resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1633 /* make sure all the data is flushed */
1634 NCR_700_flush_fifo(host);
1636 __u8 sbcl = NCR_700_readb(host, SBCL_REG);
1637 printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1638 host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1639 NCR_700_internal_bus_reset(host);
1642 } else if(sstat0 & SCSI_GROSS_ERROR) {
1643 printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1644 host->host_no, pun, lun);
1645 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1646 } else if(sstat0 & PARITY_ERROR) {
1647 printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1648 host->host_no, pun, lun);
1649 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1650 } else if(dstat & SCRIPT_INT_RECEIVED) {
1651 DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1652 host->host_no, pun, lun));
1653 resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1654 } else if(dstat & (ILGL_INST_DETECTED)) {
1655 printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1657 host->host_no, pun, lun,
1658 dsp, dsp - hostdata->pScript);
1659 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1660 } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1661 printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1662 host->host_no, pun, lun, dstat);
1663 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1667 /* NOTE: selection interrupt processing MUST occur
1668 * after script interrupt processing to correctly cope
1669 * with the case where we process a disconnect and
1670 * then get reselected before we process the
1672 if(sstat0 & SELECTED) {
1673 /* FIXME: It currently takes at least FOUR
1674 * interrupts to complete a command that
1675 * disconnects: one for the disconnect, one
1676 * for the reselection, one to get the
1677 * reselection data and one to complete the
1678 * command. If we guess the reselected
1679 * command here and prepare it, we only need
1680 * to get a reselection data interrupt if we
1681 * guessed wrongly. Since the interrupt
1682 * overhead is much greater than the command
1683 * setup, this would be an efficient
1684 * optimisation particularly as we probably
1685 * only have one outstanding command on a
1686 * target most of the time */
1688 resume_offset = process_selection(host, dsp);
1695 if(hostdata->state != NCR_700_HOST_BUSY) {
1696 printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1697 host->host_no, resume_offset, resume_offset - hostdata->pScript);
1698 hostdata->state = NCR_700_HOST_BUSY;
1701 DEBUG(("Attempting to resume at %x\n", resume_offset));
1702 NCR_700_clear_fifo(host);
1703 NCR_700_writel(resume_offset, host, DSP_REG);
1705 /* There is probably a technical no-no about this: If we're a
1706 * shared interrupt and we got this interrupt because the
1707	 * other device needs servicing, not us, we're still going to
1708 * check our queued commands here---of course, there shouldn't
1709 * be any outstanding.... */
1710 if(hostdata->state == NCR_700_HOST_FREE) {
1713 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1714 /* fairness: always run the queue from the last
1715 * position we left off */
1716 int j = (i + hostdata->saved_slot_position)
1717 % NCR_700_COMMAND_SLOTS_PER_HOST;
1719 if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1721 if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1722 DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1723 host->host_no, &hostdata->slots[j],
1724 hostdata->slots[j].cmnd));
1725 hostdata->saved_slot_position = j + 1;
1732 spin_unlock_irqrestore(host->host_lock, flags);
1733 return IRQ_RETVAL(handled);
1737 NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1739 struct NCR_700_Host_Parameters *hostdata =
1740 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1742 enum dma_data_direction direction;
1743 struct NCR_700_command_slot *slot;
1745 if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1746	/* We're over our allocation; this should never happen
1747 * since we report the max allocation to the mid layer */
1748 printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1751 /* check for untagged commands. We cannot have any outstanding
1752 * commands if we accept them. Commands could be untagged because:
1754 * - The tag negotiated bitmap is clear
1755	 * - The blk layer sent an untagged command
1757 if(NCR_700_get_depth(SCp->device) != 0
1758 && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1759 || !(SCp->flags & SCMD_TAGGED))) {
1760 CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1761 NCR_700_get_depth(SCp->device));
1762 return SCSI_MLQUEUE_DEVICE_BUSY;
1764 if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1765 CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1766 NCR_700_get_depth(SCp->device));
1767 return SCSI_MLQUEUE_DEVICE_BUSY;
1769 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1771 /* begin the command here */
1772 /* no need to check for NULL, test for command_slot_count above
1773 * ensures a slot is free */
1774 slot = find_empty_slot(hostdata);
1778 SCp->scsi_done = done;
1779 SCp->host_scribble = (unsigned char *)slot;
1780 SCp->SCp.ptr = NULL;
1781 SCp->SCp.buffer = NULL;
1783 #ifdef NCR_700_DEBUG
1784 printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1785 scsi_print_command(SCp);
1787 if ((SCp->flags & SCMD_TAGGED)
1788 && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1789 && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1790 scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1791 hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1792 NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1795 /* here we may have to process an untagged command. The gate
1796 * above ensures that this will be the only one outstanding,
1797 * so clear the tag negotiated bit.
1799 * FIXME: This will royally screw up on multiple LUN devices
1801 if (!(SCp->flags & SCMD_TAGGED)
1802 && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1803 scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1804 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1807 if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
1808 SCp->device->simple_tags) {
1809 slot->tag = SCp->request->tag;
1810 CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1813 struct NCR_700_Device_Parameters *p = SCp->device->hostdata;
1815 slot->tag = SCSI_NO_TAG;
1816 /* save current command for reselection */
1817 p->current_cmnd = SCp;
1819 /* sanity check: some of the commands generated by the mid-layer
1820 * have an eccentric idea of their sc_data_direction */
1821 if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
1822 SCp->sc_data_direction != DMA_NONE) {
1823 #ifdef NCR_700_DEBUG
1824 printk("53c700: Command");
1825 scsi_print_command(SCp);
1826 printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1828 SCp->sc_data_direction = DMA_NONE;
1831 switch (SCp->cmnd[0]) {
1833 /* clear the internal sense magic */
1837 /* OK, get it from the command */
1838 switch(SCp->sc_data_direction) {
1839 case DMA_BIDIRECTIONAL:
1841 printk(KERN_ERR "53c700: Unknown command for data direction ");
1842 scsi_print_command(SCp);
1849 case DMA_FROM_DEVICE:
1850 move_ins = SCRIPT_MOVE_DATA_IN;
1853 move_ins = SCRIPT_MOVE_DATA_OUT;
1858 /* now build the scatter gather list */
1859 direction = SCp->sc_data_direction;
1863 dma_addr_t vPtr = 0;
1864 struct scatterlist *sg;
1867 sg_count = scsi_dma_map(SCp);
1868 BUG_ON(sg_count < 0);
1870 scsi_for_each_sg(SCp, sg, sg_count, i) {
1871 vPtr = sg_dma_address(sg);
1872 count = sg_dma_len(sg);
1874 slot->SG[i].ins = bS_to_host(move_ins | count);
1875 DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1876 i, count, slot->SG[i].ins, (unsigned long)vPtr));
1877 slot->SG[i].pAddr = bS_to_host(vPtr);
1879 slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1880 slot->SG[i].pAddr = 0;
1881 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1882 DEBUG((" SETTING %p to %x\n",
1883 (&slot->pSG[i].ins),
1886 slot->resume_offset = 0;
1887 slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1888 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1889 NCR_700_start_command(SCp);
1893 STATIC DEF_SCSI_QCMD(NCR_700_queuecommand)
1896 NCR_700_abort(struct scsi_cmnd * SCp)
1898 struct NCR_700_command_slot *slot;
1900 scmd_printk(KERN_INFO, SCp, "abort command\n");
1902 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1905 /* no outstanding command to abort */
1907 if(SCp->cmnd[0] == TEST_UNIT_READY) {
1908 /* FIXME: This is because of a problem in the new
1909 * error handler. When it is in error recovery, it
1910 * will send a TUR to a device it thinks may still be
1911 * showing a problem. If the TUR isn't responded to,
1912 * it will abort it and mark the device off line.
1913 * Unfortunately, it does no other error recovery, so
1914 * this would leave us with an outstanding command
1915 * occupying a slot. Rather than allow this to
1916 * happen, we issue a bus reset to force all
1917 * outstanding commands to terminate here. */
1918 NCR_700_internal_bus_reset(SCp->device->host);
1919 /* still drop through and return failed */
1926 NCR_700_host_reset(struct scsi_cmnd * SCp)
1928 DECLARE_COMPLETION_ONSTACK(complete);
1929 struct NCR_700_Host_Parameters *hostdata =
1930 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1932 scmd_printk(KERN_INFO, SCp,
1933 "New error handler wants HOST reset, cmd %p\n\t", SCp);
1934 scsi_print_command(SCp);
1936 /* In theory, eh_complete should always be null because the
1937 * eh is single threaded, but just in case we're handling a
1938 * reset via sg or something */
1939 spin_lock_irq(SCp->device->host->host_lock);
1940 while (hostdata->eh_complete != NULL) {
1941 spin_unlock_irq(SCp->device->host->host_lock);
1942 msleep_interruptible(100);
1943 spin_lock_irq(SCp->device->host->host_lock);
1946 hostdata->eh_complete = &complete;
1947 NCR_700_internal_bus_reset(SCp->device->host);
1948 NCR_700_chip_reset(SCp->device->host);
1950 spin_unlock_irq(SCp->device->host->host_lock);
1951 wait_for_completion(&complete);
1952 spin_lock_irq(SCp->device->host->host_lock);
1954 hostdata->eh_complete = NULL;
1955 /* Revalidate the transport parameters of the failing device */
1957 spi_schedule_dv_device(SCp->device);
1959 spin_unlock_irq(SCp->device->host->host_lock);
1964 NCR_700_set_period(struct scsi_target *STp, int period)
1966 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1967 struct NCR_700_Host_Parameters *hostdata =
1968 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
1973 if(period < hostdata->min_period)
1974 period = hostdata->min_period;
1976 spi_period(STp) = period;
1977 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
1978 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1979 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
1983 NCR_700_set_offset(struct scsi_target *STp, int offset)
1985 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1986 struct NCR_700_Host_Parameters *hostdata =
1987 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
1988 int max_offset = hostdata->chip710
1989 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
1994 if(offset > max_offset)
1995 offset = max_offset;
1997 /* if we're currently async, make sure the period is reasonable */
1998 if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
1999 spi_period(STp) > 0xff))
2000 spi_period(STp) = hostdata->min_period;
2002 spi_offset(STp) = offset;
2003 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2004 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2005 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2009 NCR_700_slave_alloc(struct scsi_device *SDp)
2011 SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2021 NCR_700_slave_configure(struct scsi_device *SDp)
2023 struct NCR_700_Host_Parameters *hostdata =
2024 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2026 /* to do here: allocate memory; build a queue_full list */
2027 if(SDp->tagged_supported) {
2028 scsi_change_queue_depth(SDp, NCR_700_DEFAULT_TAGS);
2029 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2032 if(hostdata->fast) {
2033 /* Find the correct offset and period via domain validation */
2034 if (!spi_initial_dv(SDp->sdev_target))
2037 spi_offset(SDp->sdev_target) = 0;
2038 spi_period(SDp->sdev_target) = 0;
2044 NCR_700_slave_destroy(struct scsi_device *SDp)
2046 kfree(SDp->hostdata);
2047 SDp->hostdata = NULL;
2051 NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2053 if (depth > NCR_700_MAX_TAGS)
2054 depth = NCR_700_MAX_TAGS;
2055 return scsi_change_queue_depth(SDp, depth);
2059 NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2061 struct scsi_device *SDp = to_scsi_device(dev);
2063 return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2066 static struct device_attribute NCR_700_active_tags_attr = {
2068 .name = "active_tags",
2071 .show = NCR_700_show_active_tags,
2074 STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2075 &NCR_700_active_tags_attr,
2079 EXPORT_SYMBOL(NCR_700_detect);
2080 EXPORT_SYMBOL(NCR_700_release);
2081 EXPORT_SYMBOL(NCR_700_intr);
2083 static struct spi_function_template NCR_700_transport_functions = {
2084 .set_period = NCR_700_set_period,
2086 .set_offset = NCR_700_set_offset,
2090 static int __init NCR_700_init(void)
2092 NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2093 if(!NCR_700_transport_template)
2098 static void __exit NCR_700_exit(void)
2100 spi_release_transport(NCR_700_transport_template);
2103 module_init(NCR_700_init);
2104 module_exit(NCR_700_exit);