1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* NCR (or Symbios) 53c700 and 53c700-66 Driver
4 *
5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
6**-----------------------------------------------------------------------------
7**
8** This program is free software; you can redistribute it and/or modify
9** it under the terms of the GNU General Public License as published by
10** the Free Software Foundation; either version 2 of the License, or
11** (at your option) any later version.
12**
13** This program is distributed in the hope that it will be useful,
14** but WITHOUT ANY WARRANTY; without even the implied warranty of
15** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16** GNU General Public License for more details.
17**
18** You should have received a copy of the GNU General Public License
19** along with this program; if not, write to the Free Software
20** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21**
22**-----------------------------------------------------------------------------
23 */
24
25/* Notes:
26 *
27 * This driver is designed exclusively for these chips (virtually the
28 * earliest of the scripts engine chips). They need their own drivers
29 * because they are missing so many of the scripts and snazzy register
30 * features of their elder brothers (the 710, 720 and 770).
31 *
32 * The 700 is the lowliest of the line, it can only do async SCSI.
33 * The 700-66 can at least do synchronous SCSI up to 10MHz.
34 *
35 * The 700 chip has no host bus interface logic of its own. However,
36 * it is usually mapped to a location with well defined register
37 * offsets. Therefore, if you can determine the base address and the
38 * irq your board incorporating this chip uses, you can probably use
39 * this driver to run it (although you'll probably have to write a
40 * minimal wrapper for the purpose---see the NCR_D700 driver for
41 * details about how to do this).
42 *
43 *
44 * TODO List:
45 *
46 * 1. Better statistics in the proc fs
47 *
48 * 2. Implement message queue (queues SCSI messages like commands) and make
49 * the abort and device reset functions use them.
50 * */
51
52/* CHANGELOG
53 *
54 * Version 2.8
55 *
56 * Fixed bad bug affecting tag starvation processing (previously the
57 * driver would hang the system if too many tags starved). Also fixed
58 * bad bug having to do with 10 byte command processing and REQUEST
59 * SENSE (the command would loop forever getting a transfer length
60 * mismatch in the CMD phase).
61 *
62 * Version 2.7
63 *
64 * Fixed scripts problem which caused certain devices (notably CDRWs)
65 * to hang on initial INQUIRY. Updated NCR_700_readl/writel to use
66 * __raw_readl/writel for parisc compatibility (Thomas
67 * Bogendoerfer). Added missing SCp->request_bufflen initialisation
68 * for sense requests (Ryan Bradetich).
69 *
70 * Version 2.6
71 *
72 * Following test of the 64 bit parisc kernel by Richard Hirst,
73 * several problems have now been corrected. Also adds support for
74 * consistent memory allocation.
75 *
76 * Version 2.5
77 *
78 * More Compatibility changes for 710 (now actually works). Enhanced
79 * support for odd clock speeds which constrain SDTR negotiations.
80 * Correct cacheline separation for scsi messages and status for
81 * incoherent architectures. Use of the pci mapping functions on
82 * buffers to begin support for 64 bit drivers.
83 *
84 * Version 2.4
85 *
86 * Added support for the 53c710 chip (in 53c700 emulation mode only---no
87 * special 53c710 instructions or registers are used).
88 *
89 * Version 2.3
90 *
91 * More endianness/cache coherency changes.
92 *
93 * Better bad device handling (handles devices lying about tag
94 * queueing support and devices which fail to provide sense data on
95 * contingent allegiance conditions)
96 *
97 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
98 * debugging this driver on the parisc architecture and suggesting
99 * many improvements and bug fixes.
100 *
101 * Thanks also go to Linuxcare Inc. for providing several PARISC
102 * machines for me to debug the driver on.
103 *
104 * Version 2.2
105 *
106 * Made the driver mem or io mapped; added endian invariance; added
107 * dma cache flushing operations for architectures which need it;
108 * added support for more varied clocking speeds.
109 *
110 * Version 2.1
111 *
112 * Initial modularisation from the D700. See NCR_D700.c for the rest of
113 * the changelog.
114 * */
115#define NCR_700_VERSION "2.8"
116
117#include <linux/kernel.h>
118#include <linux/types.h>
119#include <linux/string.h>
120#include <linux/ioport.h>
121#include <linux/delay.h>
122#include <linux/spinlock.h>
123#include <linux/completion.h>
124#include <linux/sched.h>
125#include <linux/init.h>
126#include <linux/proc_fs.h>
127#include <linux/blkdev.h>
128#include <linux/module.h>
129#include <linux/interrupt.h>
130#include <linux/device.h>
131#include <asm/dma.h>
132#include <asm/system.h>
133#include <asm/io.h>
134#include <asm/pgtable.h>
135#include <asm/byteorder.h>
136
137#include <scsi/scsi.h>
138#include <scsi/scsi_cmnd.h>
139#include <scsi/scsi_dbg.h>
140#include <scsi/scsi_eh.h>
141#include <scsi/scsi_host.h>
142#include <scsi/scsi_tcq.h>
143#include <scsi/scsi_transport.h>
144#include <scsi/scsi_transport_spi.h>
145
146#include "53c700.h"
147
148/* NOTE: For 64 bit drivers there are points in the code where we use
149 * a non dereferenceable pointer to point to a structure in dma-able
150 * memory (which is 32 bits) so that we can use all of the structure
151 * operations but take the address at the end. This macro allows us
152 * to truncate the 64 bit pointer down to 32 bits without the compiler
153 * complaining */
154#define to32bit(x) ((__u32)((unsigned long)(x)))
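/* Illustrative usage only (not extra driver logic): the script patching
 * further down hands the chip the 32-bit address of a slot's SG list with
 * exactly this construct:
 *
 *	script_patch_32_abs(hostdata->dev, hostdata->script,
 *			SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
 */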
155
156#ifdef NCR_700_DEBUG
157#define STATIC
158#else
159#define STATIC static
160#endif
161
162MODULE_AUTHOR("James Bottomley");
163MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
164MODULE_LICENSE("GPL");
165
166/* This is the script */
167#include "53c700_d.h"
168
169
170STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
171STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
172STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
173STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
174STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
175STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
176STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
177STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
178STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
179static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
180static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
181
182STATIC struct device_attribute *NCR_700_dev_attrs[];
183
184STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
185
186static char *NCR_700_phase[] = {
187 "",
188 "after selection",
189 "before command phase",
190 "after command phase",
191 "after status phase",
192 "after data in phase",
193 "after data out phase",
194 "during data phase",
195};
196
197static char *NCR_700_condition[] = {
198 "",
199 "NOT MSG_OUT",
200 "UNEXPECTED PHASE",
201 "NOT MSG_IN",
202 "UNEXPECTED MSG",
203 "MSG_IN",
204 "SDTR_MSG RECEIVED",
205 "REJECT_MSG RECEIVED",
206 "DISCONNECT_MSG RECEIVED",
207 "MSG_OUT",
208 "DATA_IN",
209
210};
211
212static char *NCR_700_fatal_messages[] = {
213 "unexpected message after reselection",
214 "still MSG_OUT after message injection",
215 "not MSG_IN after selection",
216 "Illegal message length received",
217};
218
219static char *NCR_700_SBCL_bits[] = {
220 "IO ",
221 "CD ",
222 "MSG ",
223 "ATN ",
224 "SEL ",
225 "BSY ",
226 "ACK ",
227 "REQ ",
228};
229
230static char *NCR_700_SBCL_to_phase[] = {
231 "DATA_OUT",
232 "DATA_IN",
233 "CMD_OUT",
234 "STATE",
235 "ILLEGAL PHASE",
236 "ILLEGAL PHASE",
237 "MSG OUT",
238 "MSG IN",
239};
240
241/* This translates the SDTR message offset and period to a value
242 * which can be loaded into the SXFER_REG.
243 *
244 * NOTE: According to SCSI-2, the true transfer period (in ns) is
245 * actually four times this period value */
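/* Worked example (illustrative numbers only): with sync_clock = 50 (MHz),
 * a requested period of 25 (i.e. 25 * 4 = 100ns) and an offset of 8, and
 * assuming both values pass the offset/XFERP clamps below,
 *
 *	XFERP = (25*4 * 50)/1000 - 4 = 1
 *	SXFER = (8 & 0x0f) | (1 & 0x07)<<4 = 0x18
 */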
246static inline __u8
247NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
248 __u8 offset, __u8 period)
249{
250 int XFERP;
251
252 __u8 min_xferp = (hostdata->chip710
253 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
254 __u8 max_offset = (hostdata->chip710
255 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
256
257 if(offset == 0)
258 return 0;
259
260 if(period < hostdata->min_period) {
261 printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, NCR_700_MIN_PERIOD*4);
262 period = hostdata->min_period;
263 }
264 XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
265 if(offset > max_offset) {
266 printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
267 offset, max_offset);
268 offset = max_offset;
269 }
270 if(XFERP < min_xferp) {
271 printk(KERN_WARNING "53c700: XFERP %d is less than minimum, setting to %d\n",
272 XFERP, min_xferp);
273 XFERP = min_xferp;
274 }
275 return (offset & 0x0f) | (XFERP & 0x07)<<4;
276}
277
278static inline __u8
279NCR_700_get_SXFER(struct scsi_device *SDp)
280{
281 struct NCR_700_Host_Parameters *hostdata =
282 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
283
284 return NCR_700_offset_period_to_sxfer(hostdata,
285 spi_offset(SDp->sdev_target),
286 spi_period(SDp->sdev_target));
287}
288
289struct Scsi_Host *
290NCR_700_detect(struct scsi_host_template *tpnt,
291 struct NCR_700_Host_Parameters *hostdata, struct device *dev)
292{
293 dma_addr_t pScript, pSlots;
294 __u8 *memory;
295 __u32 *script;
296 struct Scsi_Host *host;
297 static int banner = 0;
298 int j;
299
300 if(tpnt->sdev_attrs == NULL)
301 tpnt->sdev_attrs = NCR_700_dev_attrs;
302
303 memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
304 &pScript, GFP_KERNEL);
305 if(memory == NULL) {
306 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
307 return NULL;
308 }
309
310 script = (__u32 *)memory;
311 hostdata->msgin = memory + MSGIN_OFFSET;
312 hostdata->msgout = memory + MSGOUT_OFFSET;
313 hostdata->status = memory + STATUS_OFFSET;
314 /* all of these offsets are L1_CACHE_BYTES separated. It is fatal
315 * if this isn't sufficient separation to avoid dma flushing issues */
316 BUG_ON(!dma_is_consistent(hostdata->dev, pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
317 hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
318 hostdata->dev = dev;
319
320 pSlots = pScript + SLOTS_OFFSET;
321
322 /* Fill in the missing routines from the host template */
323 tpnt->queuecommand = NCR_700_queuecommand;
324 tpnt->eh_abort_handler = NCR_700_abort;
325 tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
326 tpnt->eh_host_reset_handler = NCR_700_host_reset;
327 tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
328 tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
329 tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
330 tpnt->use_clustering = ENABLE_CLUSTERING;
331 tpnt->slave_configure = NCR_700_slave_configure;
332 tpnt->slave_destroy = NCR_700_slave_destroy;
333 tpnt->slave_alloc = NCR_700_slave_alloc;
334 tpnt->change_queue_depth = NCR_700_change_queue_depth;
335 tpnt->change_queue_type = NCR_700_change_queue_type;
336
337 if(tpnt->name == NULL)
338 tpnt->name = "53c700";
339 if(tpnt->proc_name == NULL)
340 tpnt->proc_name = "53c700";
341
342 host = scsi_host_alloc(tpnt, 4);
343 if (!host)
344 return NULL;
345 memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
346 * NCR_700_COMMAND_SLOTS_PER_HOST);
347 for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
348 dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
349 - (unsigned long)&hostdata->slots[0].SG[0]);
350 hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
351 if(j == 0)
352 hostdata->free_list = &hostdata->slots[j];
353 else
354 hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
355 hostdata->slots[j].state = NCR_700_SLOT_FREE;
356 }
357
358 for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
359 script[j] = bS_to_host(SCRIPT[j]);
360
361 /* adjust all labels to be bus physical */
362 for (j = 0; j < PATCHES; j++)
363 script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
364 /* now patch up fixed addresses. */
365 script_patch_32(hostdata->dev, script, MessageLocation,
366 pScript + MSGOUT_OFFSET);
367 script_patch_32(hostdata->dev, script, StatusAddress,
368 pScript + STATUS_OFFSET);
369 script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
370 pScript + MSGIN_OFFSET);
371
372 hostdata->script = script;
373 hostdata->pScript = pScript;
374 dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
375 hostdata->state = NCR_700_HOST_FREE;
376 hostdata->cmd = NULL;
377 host->max_id = 8;
378 host->max_lun = NCR_700_MAX_LUNS;
379 BUG_ON(NCR_700_transport_template == NULL);
380 host->transportt = NCR_700_transport_template;
381 host->unique_id = (unsigned long)hostdata->base;
382 hostdata->eh_complete = NULL;
383 host->hostdata[0] = (unsigned long)hostdata;
384 /* kick the chip */
385 NCR_700_writeb(0xff, host, CTEST9_REG);
386 if (hostdata->chip710)
387 hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
388 else
389 hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
390 hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
391 if (banner == 0) {
392 printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
393 banner = 1;
394 }
395 printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
396 hostdata->chip710 ? "53c710" :
397 (hostdata->fast ? "53c700-66" : "53c700"),
398 hostdata->rev, hostdata->differential ?
399 "(Differential)" : "");
400 /* reset the chip */
401 NCR_700_chip_reset(host);
402
403 if (scsi_add_host(host, dev)) {
404 dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
405 scsi_host_put(host);
406 return NULL;
407 }
408
409 spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
410 SPI_SIGNAL_SE;
411
412 return host;
413}
414
415int
416NCR_700_release(struct Scsi_Host *host)
417{
418 struct NCR_700_Host_Parameters *hostdata =
419 (struct NCR_700_Host_Parameters *)host->hostdata[0];
420
421 dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
422 hostdata->script, hostdata->pScript);
423 return 1;
424}
425
426static inline __u8
427NCR_700_identify(int can_disconnect, __u8 lun)
428{
429 return IDENTIFY_BASE |
430 ((can_disconnect) ? 0x40 : 0) |
431 (lun & NCR_700_LUN_MASK);
432}
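/* Worked example (illustrative; assumes the standard SCSI-2 value
 * IDENTIFY_BASE == 0x80 and that NCR_700_LUN_MASK keeps the low LUN bits):
 * NCR_700_identify(1, 2) builds 0x80 | 0x40 | 0x02 == 0xc2, i.e. an
 * IDENTIFY message for LUN 2 with disconnect permitted. */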
433
434/*
435 * Function : static int data_residual (Scsi_Host *host)
436 *
437 * Purpose : return residual data count of what's in the chip. If you
438 * really want to know what this function is doing, it's almost a
439 * direct transcription of the algorithm described in the 53c710
440 * guide, except that the DBC and DFIFO registers are only 6 bits
441 * wide on a 53c700.
442 *
443 * Inputs : host - SCSI host */
444static inline int
445NCR_700_data_residual (struct Scsi_Host *host) {
446 struct NCR_700_Host_Parameters *hostdata =
447 (struct NCR_700_Host_Parameters *)host->hostdata[0];
448 int count, synchronous = 0;
449 unsigned int ddir;
450
451 if(hostdata->chip710) {
452 count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
453 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
454 } else {
455 count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
456 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
457 }
458
459 if(hostdata->fast)
460 synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
461
462 /* get the data direction */
463 ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
464
465 if (ddir) {
466 /* Receive */
467 if (synchronous)
468 count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
469 else
470 if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
471 ++count;
472 } else {
473 /* Send */
474 __u8 sstat = NCR_700_readb(host, SSTAT1_REG);
475 if (sstat & SODL_REG_FULL)
476 ++count;
477 if (synchronous && (sstat & SODR_REG_FULL))
478 ++count;
479 }
480#ifdef NCR_700_DEBUG
481 if(count)
482 printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
483#endif
484 return count;
485}
486
487/* print out the SCSI wires and corresponding phase from the SBCL register
488 * in the chip */
489static inline char *
490sbcl_to_string(__u8 sbcl)
491{
492 int i;
493 static char ret[256];
494
495 ret[0]='\0';
496 for(i=0; i<8; i++) {
497 if((1<<i) & sbcl)
498 strcat(ret, NCR_700_SBCL_bits[i]);
499 }
500 strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
501 return ret;
502}
503
504static inline __u8
505bitmap_to_number(__u8 bitmap)
506{
507 __u8 i;
508
509 for(i=0; i<8 && !(bitmap &(1<<i)); i++)
510 ;
511 return i;
512}
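/* Illustrative only: the chip reports (re)selecting IDs as a bit mask, so a
 * reselection by SCSI ID 5 reads back as 0x20 and bitmap_to_number(0x20)
 * returns 5; an empty bitmap falls through the loop and returns 8. */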
513
514/* Pull a slot off the free list */
515STATIC struct NCR_700_command_slot *
516find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
517{
518 struct NCR_700_command_slot *slot = hostdata->free_list;
519
520 if(slot == NULL) {
521 /* sanity check */
522 if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
523 printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
524 return NULL;
525 }
526
527 if(slot->state != NCR_700_SLOT_FREE)
528 /* should panic! */
529 printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
530
531
532 hostdata->free_list = slot->ITL_forw;
533 slot->ITL_forw = NULL;
534
535
536 /* NOTE: set the state to busy here, not queued, since this
537 * indicates the slot is in use and cannot be run by the IRQ
538 * finish routine. If we cannot queue the command when it
539 * is properly built, we then change to NCR_700_SLOT_QUEUED */
540 slot->state = NCR_700_SLOT_BUSY;
541 slot->flags = 0;
542 hostdata->command_slot_count++;
543
544 return slot;
545}
546
547STATIC void
548free_slot(struct NCR_700_command_slot *slot,
549 struct NCR_700_Host_Parameters *hostdata)
550{
551 if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
552 printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
553 }
554 if(slot->state == NCR_700_SLOT_FREE) {
555 printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
556 }
557
558 slot->resume_offset = 0;
559 slot->cmnd = NULL;
560 slot->state = NCR_700_SLOT_FREE;
561 slot->ITL_forw = hostdata->free_list;
562 hostdata->free_list = slot;
563 hostdata->command_slot_count--;
564}
565
566
567/* This routine really does very little. The command is indexed on
568 the ITL and (if tagged) the ITLQ lists in _queuecommand */
569STATIC void
570save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
571 struct scsi_cmnd *SCp, __u32 dsp)
572{
573 /* It's just possible that this gets executed twice */
574 if(SCp != NULL) {
575 struct NCR_700_command_slot *slot =
576 (struct NCR_700_command_slot *)SCp->host_scribble;
577
578 slot->resume_offset = dsp;
579 }
580 hostdata->state = NCR_700_HOST_FREE;
581 hostdata->cmd = NULL;
582}
583
584STATIC inline void
585NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
586 struct NCR_700_command_slot *slot)
587{
588 if(SCp->sc_data_direction != DMA_NONE &&
589 SCp->sc_data_direction != DMA_BIDIRECTIONAL) {
590 if(SCp->use_sg) {
591 dma_unmap_sg(hostdata->dev, SCp->request_buffer,
592 SCp->use_sg, SCp->sc_data_direction);
593 } else {
594 dma_unmap_single(hostdata->dev, slot->dma_handle,
595 SCp->request_bufflen,
596 SCp->sc_data_direction);
597 }
598 }
599}
600
601STATIC inline void
602NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
603 struct scsi_cmnd *SCp, int result)
604{
605 hostdata->state = NCR_700_HOST_FREE;
606 hostdata->cmd = NULL;
607
608 if(SCp != NULL) {
609 struct NCR_700_command_slot *slot =
610 (struct NCR_700_command_slot *)SCp->host_scribble;
611
612 dma_unmap_single(hostdata->dev, slot->pCmd,
613 sizeof(SCp->cmnd), DMA_TO_DEVICE);
614 if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
615 char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
616#ifdef NCR_700_DEBUG
617 printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
618 SCp, SCp->cmnd[7], result);
619 scsi_print_sense("53c700", SCp);
620
621#endif
622 dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
623 /* restore the old result if the request sense was
624 * successful */
625 if (result == 0)
626 result = cmnd[7];
627 /* restore the original length */
628 SCp->cmd_len = cmnd[8];
629 } else
630 NCR_700_unmap(hostdata, SCp, slot);
631
632 free_slot(slot, hostdata);
633#ifdef NCR_700_DEBUG
634 if(NCR_700_get_depth(SCp->device) == 0 ||
635 NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
636 printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
637 NCR_700_get_depth(SCp->device));
638#endif /* NCR_700_DEBUG */
639 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
640
641 SCp->host_scribble = NULL;
642 SCp->result = result;
643 SCp->scsi_done(SCp);
644 } else {
645 printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
646 }
647}
648
649
650STATIC void
651NCR_700_internal_bus_reset(struct Scsi_Host *host)
652{
653 /* Bus reset */
654 NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
655 udelay(50);
656 NCR_700_writeb(0, host, SCNTL1_REG);
657
658}
659
660STATIC void
661NCR_700_chip_setup(struct Scsi_Host *host)
662{
663 struct NCR_700_Host_Parameters *hostdata =
664 (struct NCR_700_Host_Parameters *)host->hostdata[0];
665 __u32 dcntl_extra = 0;
666 __u8 min_period;
667 __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
668
669 if(hostdata->chip710) {
670 __u8 burst_disable = hostdata->burst_disable
671 ? BURST_DISABLE : 0;
672 dcntl_extra = COMPAT_700_MODE;
673
674 NCR_700_writeb(dcntl_extra, host, DCNTL_REG);
675 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
676 host, DMODE_710_REG);
677 NCR_700_writeb(burst_disable | (hostdata->differential ?
678 DIFF : 0), host, CTEST7_REG);
679 NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
680 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
681 | AUTO_ATN, host, SCNTL0_REG);
682 } else {
683 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
684 host, DMODE_700_REG);
685 NCR_700_writeb(hostdata->differential ?
686 DIFF : 0, host, CTEST7_REG);
687 if(hostdata->fast) {
688 /* this is for 700-66, does nothing on 700 */
689 NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
690 | GENERATE_RECEIVE_PARITY, host,
691 CTEST8_REG);
692 } else {
693 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
694 | PARITY | AUTO_ATN, host, SCNTL0_REG);
695 }
696 }
697
698 NCR_700_writeb(1 << host->this_id, host, SCID_REG);
699 NCR_700_writeb(0, host, SBCL_REG);
700 NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
701
702 NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
703 | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
704
705 NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
706 NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
707 if(hostdata->clock > 75) {
708 printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
709 /* do the best we can, but the async clock will be out
710 * of spec: sync divider 2, async divider 3 */
711 DEBUG(("53c700: sync 2 async 3\n"));
712 NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
713 NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
714 hostdata->sync_clock = hostdata->clock/2;
715 } else if(hostdata->clock > 50 && hostdata->clock <= 75) {
716 /* sync divider 1.5, async divider 3 */
717 DEBUG(("53c700: sync 1.5 async 3\n"));
718 NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
719 NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
720 hostdata->sync_clock = hostdata->clock*2;
721 hostdata->sync_clock /= 3;
722
723 } else if(hostdata->clock > 37 && hostdata->clock <= 50) {
724 /* sync divider 1, async divider 2 */
725 DEBUG(("53c700: sync 1 async 2\n"));
726 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
727 NCR_700_writeb(ASYNC_DIV_2_0 | dcntl_extra, host, DCNTL_REG);
728 hostdata->sync_clock = hostdata->clock;
729 } else if(hostdata->clock > 25 && hostdata->clock <=37) {
730 /* sync divider 1, async divider 1.5 */
731 DEBUG(("53c700: sync 1 async 1.5\n"));
732 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
733 NCR_700_writeb(ASYNC_DIV_1_5 | dcntl_extra, host, DCNTL_REG);
734 hostdata->sync_clock = hostdata->clock;
735 } else {
736 DEBUG(("53c700: sync 1 async 1\n"));
737 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
738 NCR_700_writeb(ASYNC_DIV_1_0 | dcntl_extra, host, DCNTL_REG);
739 /* sync divider 1, async divider 1 */
740 hostdata->sync_clock = hostdata->clock;
741 }
742 /* Calculate the actual minimum period that can be supported
743 * by our synchronous clock speed. See the 710 manual for
744 * exact details of this calculation which is based on a
745 * setting of the SXFER register */
746 min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
747 hostdata->min_period = NCR_700_MIN_PERIOD;
748 if(min_period > NCR_700_MIN_PERIOD)
749 hostdata->min_period = min_period;
750}
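/* Worked example (illustrative; assumes a 710 with a 40MHz clock and a
 * minimum XFERP of 0): 40MHz falls in the >37..50 bracket above, so the
 * sync divider is 1, the async divider is 2 and sync_clock = 40.  Then
 * min_period = 1000*(4+0)/(4*40) = 25, i.e. 25 * 4ns = 100ns, the expected
 * 10MT/s ceiling for a chip needing at least 4 clocks per transfer.
 * hostdata->min_period becomes the larger of this and NCR_700_MIN_PERIOD. */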
751
752STATIC void
753NCR_700_chip_reset(struct Scsi_Host *host)
754{
755 struct NCR_700_Host_Parameters *hostdata =
756 (struct NCR_700_Host_Parameters *)host->hostdata[0];
757 if(hostdata->chip710) {
758 NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
759 udelay(100);
760
761 NCR_700_writeb(0, host, ISTAT_REG);
762 } else {
763 NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
764 udelay(100);
765
766 NCR_700_writeb(0, host, DCNTL_REG);
767 }
768
769 mdelay(1000);
770
771 NCR_700_chip_setup(host);
772}
773
774/* The heart of the message processing engine is that the instruction
775 * immediately after the INT is the normal case (and so must be CLEAR
776 * ACK). If we want to do something else, we call that routine in
777 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
778 * ACK) so that the routine returns correctly to resume its activity
779 * */
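/* Concrete illustration from the reject paths below: on an unexpected SDTR
 * the handler leaves TEMP at dsp + 8 (one instruction past the CLEAR ACK
 * that follows the INT), loads A_REJECT_MSG into msgout, patches
 * MessageCount to 1 and resumes at Ent_SendMessageWithATN; when that script
 * routine returns through TEMP, execution carries on after the CLEAR ACK. */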
780STATIC __u32
781process_extended_message(struct Scsi_Host *host,
782 struct NCR_700_Host_Parameters *hostdata,
783 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
784{
785 __u32 resume_offset = dsp, temp = dsp + 8;
786 __u8 pun = 0xff, lun = 0xff;
787
788 if(SCp != NULL) {
789 pun = SCp->device->id;
790 lun = SCp->device->lun;
791 }
792
793 switch(hostdata->msgin[2]) {
794 case A_SDTR_MSG:
795 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
796 struct scsi_target *starget = SCp->device->sdev_target;
797 __u8 period = hostdata->msgin[3];
798 __u8 offset = hostdata->msgin[4];
799
800 if(offset == 0 || period == 0) {
801 offset = 0;
802 period = 0;
803 }
804
805 spi_offset(starget) = offset;
806 spi_period(starget) = period;
807
808 if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
809 spi_display_xfer_agreement(starget);
810 NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
811 }
812
813 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
814 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
815
816 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
817 host, SXFER_REG);
818
819 } else {
820 /* SDTR message out of the blue, reject it */
821 shost_printk(KERN_WARNING, host,
822 "Unexpected SDTR msg\n");
823 hostdata->msgout[0] = A_REJECT_MSG;
824 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
825 script_patch_16(hostdata->dev, hostdata->script,
826 MessageCount, 1);
827 /* SendMsgOut returns, so set up the return
828 * address */
829 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
830 }
831 break;
832
833 case A_WDTR_MSG:
834 printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
835 host->host_no, pun, lun);
836 hostdata->msgout[0] = A_REJECT_MSG;
837 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
838 script_patch_16(hostdata->dev, hostdata->script, MessageCount,
839 1);
840 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
841
842 break;
843
844 default:
845 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
846 host->host_no, pun, lun,
847 NCR_700_phase[(dsps & 0xf00) >> 8]);
848 spi_print_msg(hostdata->msgin);
849 printk("\n");
850 /* just reject it */
851 hostdata->msgout[0] = A_REJECT_MSG;
852 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
853 script_patch_16(hostdata->dev, hostdata->script, MessageCount,
854 1);
855 /* SendMsgOut returns, so set up the return
856 * address */
857 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
858 }
859 NCR_700_writel(temp, host, TEMP_REG);
860 return resume_offset;
861}
862
863STATIC __u32
864process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata,
865 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
866{
867 /* work out where to return to */
868 __u32 temp = dsp + 8, resume_offset = dsp;
869 __u8 pun = 0xff, lun = 0xff;
870
871 if(SCp != NULL) {
872 pun = SCp->device->id;
873 lun = SCp->device->lun;
874 }
875
876#ifdef NCR_700_DEBUG
877 printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
878 NCR_700_phase[(dsps & 0xf00) >> 8]);
879 spi_print_msg(hostdata->msgin);
880 printk("\n");
881#endif
882
883 switch(hostdata->msgin[0]) {
884
885 case A_EXTENDED_MSG:
886 resume_offset = process_extended_message(host, hostdata, SCp,
887 dsp, dsps);
888 break;
889
890 case A_REJECT_MSG:
891 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
892 /* Rejected our sync negotiation attempt */
893 spi_period(SCp->device->sdev_target) =
894 spi_offset(SCp->device->sdev_target) = 0;
895 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
896 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
897 } else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
898 /* rejected our first simple tag message */
899 scmd_printk(KERN_WARNING, SCp,
900 "Rejected first tag queue attempt, turning off tag queueing\n");
901 /* we're done negotiating */
902 NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
903 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
904 SCp->device->tagged_supported = 0;
905 scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
906 } else {
907 shost_printk(KERN_WARNING, host,
908 "(%d:%d) Unexpected REJECT Message %s\n",
909 pun, lun,
910 NCR_700_phase[(dsps & 0xf00) >> 8]);
911 /* however, just ignore it */
912 }
913 break;
914
915 case A_PARITY_ERROR_MSG:
916 printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
917 pun, lun);
918 NCR_700_internal_bus_reset(host);
919 break;
920 case A_SIMPLE_TAG_MSG:
921 printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
922 pun, lun, hostdata->msgin[1],
923 NCR_700_phase[(dsps & 0xf00) >> 8]);
924 /* just ignore it */
925 break;
926 default:
927 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
928 host->host_no, pun, lun,
929 NCR_700_phase[(dsps & 0xf00) >> 8]);
930
931 spi_print_msg(hostdata->msgin);
932 printk("\n");
933 /* just reject it */
934 hostdata->msgout[0] = A_REJECT_MSG;
935 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
936 script_patch_16(hostdata->dev, hostdata->script, MessageCount,
937 1);
938 /* SendMsgOut returns, so set up the return
939 * address */
940 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
941
942 break;
943 }
944 NCR_700_writel(temp, host, TEMP_REG);
945 /* set us up to receive another message */
946 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
947 return resume_offset;
948}
949
950STATIC __u32
951process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
952 struct Scsi_Host *host,
953 struct NCR_700_Host_Parameters *hostdata)
954{
955 __u32 resume_offset = 0;
956 __u8 pun = 0xff, lun=0xff;
957
958 if(SCp != NULL) {
959 pun = SCp->device->id;
960 lun = SCp->device->lun;
961 }
962
963 if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
964 DEBUG((" COMMAND COMPLETE, status=%02x\n",
965 hostdata->status[0]));
966 /* OK, if TCQ still under negotiation, we now know it works */
967 if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
968 NCR_700_set_tag_neg_state(SCp->device,
969 NCR_700_FINISHED_TAG_NEGOTIATION);
970
971 /* check for contingent allegiance conditions */
972 if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
973 status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
974 struct NCR_700_command_slot *slot =
975 (struct NCR_700_command_slot *)SCp->host_scribble;
976 if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
977 /* OOPS: bad device, returning another
978 * contingent allegiance condition */
979 scmd_printk(KERN_ERR, SCp,
980 "broken device is looping in contingent allegiance: ignoring\n");
981 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
982 } else {
983 char *cmnd =
984 NCR_700_get_sense_cmnd(SCp->device);
985#ifdef NCR_DEBUG
986 scsi_print_command(SCp);
987 printk(" cmd %p has status %d, requesting sense\n",
988 SCp, hostdata->status[0]);
989#endif
990 /* we can destroy the command here
991 * because the contingent allegiance
992 * condition will cause a retry which
993 * will re-copy the command from the
994 * saved data_cmnd. We also unmap any
995 * data associated with the command
996 * here */
997 NCR_700_unmap(hostdata, SCp, slot);
998 dma_unmap_single(hostdata->dev, slot->pCmd,
999 sizeof(SCp->cmnd),
1000 DMA_TO_DEVICE);
1001
1002 cmnd[0] = REQUEST_SENSE;
1003 cmnd[1] = (SCp->device->lun & 0x7) << 5;
1004 cmnd[2] = 0;
1005 cmnd[3] = 0;
1006 cmnd[4] = sizeof(SCp->sense_buffer);
1007 cmnd[5] = 0;
1008 /* Here's a quiet hack: the
1009 * REQUEST_SENSE command is six bytes,
1010 * so store a flag indicating that
1011 * this was an internal sense request
1012 * and the original status at the end
1013 * of the command */
1014 cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1015 cmnd[7] = hostdata->status[0];
1016 cmnd[8] = SCp->cmd_len;
1017 SCp->cmd_len = 6; /* command length for
1018 * REQUEST_SENSE */
1019 slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1020 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1021 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
1022 slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1023 slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1024 slot->SG[1].pAddr = 0;
1025 slot->resume_offset = hostdata->pScript;
1026 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1027 dma_cache_sync(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1028
1029 /* queue the command for reissue */
1030 slot->state = NCR_700_SLOT_QUEUED;
1031 slot->flags = NCR_700_FLAG_AUTOSENSE;
1032 hostdata->state = NCR_700_HOST_FREE;
1033 hostdata->cmd = NULL;
1034 }
1035 } else {
1036 // Currently rely on the mid layer evaluation
1037 // of the tag queuing capability
1038 //
1039 //if(status_byte(hostdata->status[0]) == GOOD &&
1040 // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1041 // /* Piggy back the tag queueing support
1042 // * on this command */
1043 // dma_sync_single_for_cpu(hostdata->dev,
1044 // slot->dma_handle,
1045 // SCp->request_bufflen,
1046 // DMA_FROM_DEVICE);
1047 // if(((char *)SCp->request_buffer)[7] & 0x02) {
1048 // scmd_printk(KERN_INFO, SCp,
1049 // "Enabling Tag Command Queuing\n");
1050 // hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1051 // NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1052 // } else {
1053 // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1054 // hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1055 // }
1056 //}
1057 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1058 }
1059 } else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1060 __u8 i = (dsps & 0xf00) >> 8;
1061
1062 scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1063 NCR_700_phase[i],
1064 sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1065 scmd_printk(KERN_ERR, SCp, " len = %d, cmd =",
1066 SCp->cmd_len);
1067 scsi_print_command(SCp);
1068
1069 NCR_700_internal_bus_reset(host);
1070 } else if((dsps & 0xfffff000) == A_FATAL) {
1071 int i = (dsps & 0xfff);
1072
1073 printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1074 host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1075 if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1076 printk(KERN_ERR " msg begins %02x %02x\n",
1077 hostdata->msgin[0], hostdata->msgin[1]);
1078 }
1079 NCR_700_internal_bus_reset(host);
1080 } else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1081#ifdef NCR_700_DEBUG
1082 __u8 i = (dsps & 0xf00) >> 8;
1083
1084 printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1085 host->host_no, pun, lun,
1086 i, NCR_700_phase[i]);
1087#endif
1088 save_for_reselection(hostdata, SCp, dsp);
1089
1090 } else if(dsps == A_RESELECTION_IDENTIFIED) {
1091 __u8 lun;
1092 struct NCR_700_command_slot *slot;
1093 __u8 reselection_id = hostdata->reselection_id;
1094 struct scsi_device *SDp;
1095
1096 lun = hostdata->msgin[0] & 0x1f;
1097
1098 hostdata->reselection_id = 0xff;
1099 DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1100 host->host_no, reselection_id, lun));
1101 /* clear the reselection indicator */
1102 SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1103 if(unlikely(SDp == NULL)) {
1104 printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1105 host->host_no, reselection_id, lun);
1106 BUG();
1107 }
1108 if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1109 struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1110 if(unlikely(SCp == NULL)) {
1111 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1112 host->host_no, reselection_id, lun, hostdata->msgin[2]);
1113 BUG();
1114 }
1115
1116 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1117 DDEBUG(KERN_DEBUG, SDp,
1118 "reselection is tag %d, slot %p(%d)\n",
1119 hostdata->msgin[2], slot, slot->tag);
1120 } else {
1121 struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1122 if(unlikely(SCp == NULL)) {
1123 sdev_printk(KERN_ERR, SDp,
1124 "no saved request for untagged cmd\n");
1125 BUG();
1126 }
1127 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1128 }
1129
1130 if(slot == NULL) {
1131 printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1132 host->host_no, reselection_id, lun,
1133 hostdata->msgin[0], hostdata->msgin[1],
1134 hostdata->msgin[2]);
1135 } else {
1136 if(hostdata->state != NCR_700_HOST_BUSY)
1137 printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1138 host->host_no);
1139 resume_offset = slot->resume_offset;
1140 hostdata->cmd = slot->cmnd;
1141
1142 /* re-patch for this command */
1143 script_patch_32_abs(hostdata->dev, hostdata->script,
1144 CommandAddress, slot->pCmd);
1145 script_patch_16(hostdata->dev, hostdata->script,
1146 CommandCount, slot->cmnd->cmd_len);
1147 script_patch_32_abs(hostdata->dev, hostdata->script,
1148 SGScriptStartAddress,
1149 to32bit(&slot->pSG[0].ins));
1150
1151 /* Note: setting SXFER only works if we're
1152 * still in the MESSAGE phase, so it is vital
1153 * that ACK is still asserted when we process
1154 * the reselection message. The resume offset
1155 * should therefore always clear ACK */
1156 NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1157 host, SXFER_REG);
1158 dma_cache_sync(hostdata->dev, hostdata->msgin,
1159 MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1160 dma_cache_sync(hostdata->dev, hostdata->msgout,
1161 MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1162 /* I'm just being paranoid here, the command should
1163 * already have been flushed from the cache */
1164 dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
1165 slot->cmnd->cmd_len, DMA_TO_DEVICE);
1166
1167
1168
1169 }
1170 } else if(dsps == A_RESELECTED_DURING_SELECTION) {
1171
1172 /* This section is full of debugging code because I've
1173 * never managed to reach it. I think what happens is
1174 * that, because the 700 runs with selection
1175 * interrupts enabled the whole time that we take a
1176 * selection interrupt before we manage to get to the
1177 * reselected script interrupt */
1178
1179 __u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1180 struct NCR_700_command_slot *slot;
1181
1182 /* Take out our own ID */
1183 reselection_id &= ~(1<<host->this_id);
1184
1185 /* I've never seen this happen, so keep this as a printk rather
1186 * than a debug */
1187 printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1188 host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1189
1190 {
1191 /* FIXME: DEBUGGING CODE */
1192 __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1193 int i;
1194
1195 for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1196 if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1197 && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1198 break;
1199 }
1200 printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1201 SCp = hostdata->slots[i].cmnd;
1202 }
1203
1204 if(SCp != NULL) {
1205 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1206 /* change slot from busy to queued to redo command */
1207 slot->state = NCR_700_SLOT_QUEUED;
1208 }
1209 hostdata->cmd = NULL;
1210
1211 if(reselection_id == 0) {
1212 if(hostdata->reselection_id == 0xff) {
1213 printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1214 return 0;
1215 } else {
1216 printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1217 host->host_no);
1218 reselection_id = hostdata->reselection_id;
1219 }
1220 } else {
1221
1222 /* convert to real ID */
1223 reselection_id = bitmap_to_number(reselection_id);
1224 }
1225 hostdata->reselection_id = reselection_id;
1226 /* just in case we have a stale simple tag message, clear it */
1227 hostdata->msgin[1] = 0;
1228 dma_cache_sync(hostdata->dev, hostdata->msgin,
1229 MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1230 if(hostdata->tag_negotiated & (1<<reselection_id)) {
1231 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1232 } else {
1233 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1234 }
1235 } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1236 /* we've just disconnected from the bus, do nothing since
1237 * a return here will re-run the queued command slot
1238 * that may have been interrupted by the initial selection */
1239 DEBUG((" SELECTION COMPLETED\n"));
1240 } else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1241 resume_offset = process_message(host, hostdata, SCp,
1242 dsp, dsps);
1243 } else if((dsps & 0xfffff000) == 0) {
1244 __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1245 printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1246 host->host_no, pun, lun, NCR_700_condition[i],
1247 NCR_700_phase[j], dsp - hostdata->pScript);
1248 if(SCp != NULL) {
1249 scsi_print_command(SCp);
1250
1251 if(SCp->use_sg) {
1252 for(i = 0; i < SCp->use_sg + 1; i++) {
1253 printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, ((struct scatterlist *)SCp->request_buffer)[i].length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1254 }
1255 }
1256 }
1257 NCR_700_internal_bus_reset(host);
1258 } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1259 printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1260 host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1261 resume_offset = dsp;
1262 } else {
1263 printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1264 host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1265 NCR_700_internal_bus_reset(host);
1266 }
1267 return resume_offset;
1268}
1269
1270/* We run the 53c700 with selection interrupts always enabled. This
1271 * means that the chip may be selected as soon as the bus frees. On a
1272 * busy bus, this can be before the scripts engine finishes its
1273 * processing. Therefore, part of the selection processing has to be
1274 * to find out what the scripts engine is doing and complete the
1275 * function if necessary (i.e. process the pending disconnect or save
1276 * the interrupted initial selection) */
1277STATIC inline __u32
1278process_selection(struct Scsi_Host *host, __u32 dsp)
1279{
1280 __u8 id = 0; /* Squash compiler warning */
1281 int count = 0;
1282 __u32 resume_offset = 0;
1283 struct NCR_700_Host_Parameters *hostdata =
1284 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1285 struct scsi_cmnd *SCp = hostdata->cmd;
1286 __u8 sbcl;
1287
1288 for(count = 0; count < 5; count++) {
1289 id = NCR_700_readb(host, hostdata->chip710 ?
1290 CTEST9_REG : SFBR_REG);
1291
1292 /* Take out our own ID */
1293 id &= ~(1<<host->this_id);
1294 if(id != 0)
1295 break;
1296 udelay(5);
1297 }
1298 sbcl = NCR_700_readb(host, SBCL_REG);
1299 if((sbcl & SBCL_IO) == 0) {
1300 /* mark as having been selected rather than reselected */
1301 id = 0xff;
1302 } else {
1303 /* convert to real ID */
1304 hostdata->reselection_id = id = bitmap_to_number(id);
1305 DEBUG(("scsi%d: Reselected by %d\n",
1306 host->host_no, id));
1307 }
1308 if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1309 struct NCR_700_command_slot *slot =
1310 (struct NCR_700_command_slot *)SCp->host_scribble;
1311 DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1312
1313 switch(dsp - hostdata->pScript) {
1314 case Ent_Disconnect1:
1315 case Ent_Disconnect2:
1316 save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1317 break;
1318 case Ent_Disconnect3:
1319 case Ent_Disconnect4:
1320 save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1321 break;
1322 case Ent_Disconnect5:
1323 case Ent_Disconnect6:
1324 save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1325 break;
1326 case Ent_Disconnect7:
1327 case Ent_Disconnect8:
1328 save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1329 break;
1330 case Ent_Finish1:
1331 case Ent_Finish2:
1332 process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1333 break;
1334
1335 default:
1336 slot->state = NCR_700_SLOT_QUEUED;
1337 break;
1338 }
1339 }
1340 hostdata->state = NCR_700_HOST_BUSY;
1341 hostdata->cmd = NULL;
1342 /* clear any stale simple tag message */
1343 hostdata->msgin[1] = 0;
1344 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1345 DMA_BIDIRECTIONAL);
1346
1347 if(id == 0xff) {
1348 /* Selected as target, Ignore */
1349 resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1350 } else if(hostdata->tag_negotiated & (1<<id)) {
1351 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1352 } else {
1353 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1354 }
1355 return resume_offset;
1356}
1357
1358static inline void
1359NCR_700_clear_fifo(struct Scsi_Host *host) {
1360 const struct NCR_700_Host_Parameters *hostdata
1361 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1362 if(hostdata->chip710) {
1363 NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1364 } else {
1365 NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1366 }
1367}
1368
1369static inline void
1370NCR_700_flush_fifo(struct Scsi_Host *host) {
1371 const struct NCR_700_Host_Parameters *hostdata
1372 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1373 if(hostdata->chip710) {
1374 NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1375 udelay(10);
1376 NCR_700_writeb(0, host, CTEST8_REG);
1377 } else {
1378 NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1379 udelay(10);
1380 NCR_700_writeb(0, host, DFIFO_REG);
1381 }
1382}
1383
1384
1385/* The queue lock with interrupts disabled must be held on entry to
1386 * this function */
1387STATIC int
1388NCR_700_start_command(struct scsi_cmnd *SCp)
1389{
1390 struct NCR_700_command_slot *slot =
1391 (struct NCR_700_command_slot *)SCp->host_scribble;
1392 struct NCR_700_Host_Parameters *hostdata =
1393 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1394 __u16 count = 1; /* for IDENTIFY message */
1395
1396 if(hostdata->state != NCR_700_HOST_FREE) {
1397 /* keep this inside the lock to close the race window where
1398 * the running command finishes on another CPU while we don't
1399 * change the state to queued on this one */
1400 slot->state = NCR_700_SLOT_QUEUED;
1401
1402 DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1403 SCp->device->host->host_no, slot->cmnd, slot));
1404 return 0;
1405 }
1406 hostdata->state = NCR_700_HOST_BUSY;
1407 hostdata->cmd = SCp;
1408 slot->state = NCR_700_SLOT_BUSY;
1409 /* keep interrupts disabled until we have the command correctly
1410 * set up so we cannot take a selection interrupt */
1411
1412 hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1413 slot->flags != NCR_700_FLAG_AUTOSENSE),
1414 SCp->device->lun);
1415 /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1416 * if the negotiated transfer parameters still hold, so
1417 * always renegotiate them */
1418 if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1419 slot->flags == NCR_700_FLAG_AUTOSENSE) {
1420 NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1421 }
1422
1423 /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1424 * If a contingent allegiance condition exists, the device
1425 * will refuse all tags, so send the request sense as untagged
1426 * */
1427 if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1428 && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1429 slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1430 count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
1431 }
1432
1433 if(hostdata->fast &&
1434 NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1435 count += spi_populate_sync_msg(&hostdata->msgout[count],
1436 spi_period(SCp->device->sdev_target),
1437 spi_offset(SCp->device->sdev_target));
1438 NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1439 }
1440
1441 script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
1442
1443
1444 script_patch_ID(hostdata->dev, hostdata->script,
1445 Device_ID, 1<<scmd_id(SCp));
1446
1447 script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
1448 slot->pCmd);
1449 script_patch_16(hostdata->dev, hostdata->script, CommandCount,
1450 SCp->cmd_len);
1451 /* finally plumb the beginning of the SG list into the script
1452 * */
1453 script_patch_32_abs(hostdata->dev, hostdata->script,
1454 SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
1455 NCR_700_clear_fifo(SCp->device->host);
1456
1457 if(slot->resume_offset == 0)
1458 slot->resume_offset = hostdata->pScript;
1459 /* now perform all the writebacks and invalidates */
1460 dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
1461 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1462 DMA_FROM_DEVICE);
1463 dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1464 dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
1465
1466 /* set the synchronous period/offset */
1467 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1468 SCp->device->host, SXFER_REG);
1469 NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1470 NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1471
1472 return 1;
1473}
1474
1475irqreturn_t
1476NCR_700_intr(int irq, void *dev_id)
1477{
1478 struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1479 struct NCR_700_Host_Parameters *hostdata =
1480 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1481 __u8 istat;
1482 __u32 resume_offset = 0;
1483 __u8 pun = 0xff, lun = 0xff;
1484 unsigned long flags;
1485 int handled = 0;
1486
1487 /* Use the host lock to serialise access to the 53c700
1488 * hardware. Note: In future, we may need to take the queue
1489 * lock to enter the done routines. When that happens, we
1490 * need to ensure that for this driver, the host lock and the
1491 * queue lock point to the same thing. */
1492 spin_lock_irqsave(host->host_lock, flags);
1493 if((istat = NCR_700_readb(host, ISTAT_REG))
1494 & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1495 __u32 dsps;
1496 __u8 sstat0 = 0, dstat = 0;
1497 __u32 dsp;
1498 struct scsi_cmnd *SCp = hostdata->cmd;
1499 enum NCR_700_Host_State state;
1500
1501 handled = 1;
1502 state = hostdata->state;
1503 SCp = hostdata->cmd;
1504
1505 if(istat & SCSI_INT_PENDING) {
1506 udelay(10);
1507
1508 sstat0 = NCR_700_readb(host, SSTAT0_REG);
1509 }
1510
1511 if(istat & DMA_INT_PENDING) {
1512 udelay(10);
1513
1514 dstat = NCR_700_readb(host, DSTAT_REG);
1515 }
1516
1517 dsps = NCR_700_readl(host, DSPS_REG);
1518 dsp = NCR_700_readl(host, DSP_REG);
1519
1520 DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1521 host->host_no, istat, sstat0, dstat,
1522 (dsp - (__u32)(hostdata->pScript))/4,
1523 dsp, dsps));
1524
1525 if(SCp != NULL) {
1526 pun = SCp->device->id;
1527 lun = SCp->device->lun;
1528 }
1529
1530 if(sstat0 & SCSI_RESET_DETECTED) {
1531 struct scsi_device *SDp;
1532 int i;
1533
1534 hostdata->state = NCR_700_HOST_BUSY;
1535
1536 printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1537 host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1538
1539 scsi_report_bus_reset(host, 0);
1540
1541 /* clear all the negotiated parameters */
1542 __shost_for_each_device(SDp, host)
1543 NCR_700_clear_flag(SDp, ~0);
1544
1545 /* clear all the slots and their pending commands */
1546 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1547 struct scsi_cmnd *SCp;
1548 struct NCR_700_command_slot *slot =
1549 &hostdata->slots[i];
1550
1551 if(slot->state == NCR_700_SLOT_FREE)
1552 continue;
1553
1554 SCp = slot->cmnd;
1555 printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1556 slot, SCp);
1557 free_slot(slot, hostdata);
1558 SCp->host_scribble = NULL;
1559 NCR_700_set_depth(SCp->device, 0);
1560 /* NOTE: deadlock potential here: we
1561 * rely on mid-layer guarantees that
1562 * scsi_done won't try to issue the
1563 * command again otherwise we'll
1564 * deadlock on the
1565 * hostdata->state_lock */
1566 SCp->result = DID_RESET << 16;
1567 SCp->scsi_done(SCp);
1568 }
1569 mdelay(25);
1570 NCR_700_chip_setup(host);
1571
1572 hostdata->state = NCR_700_HOST_FREE;
1573 hostdata->cmd = NULL;
1574 /* signal back if this was an eh induced reset */
1575 if(hostdata->eh_complete != NULL)
1576 complete(hostdata->eh_complete);
1577 goto out_unlock;
1578 } else if(sstat0 & SELECTION_TIMEOUT) {
1579 DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1580 host->host_no, pun, lun));
1581 NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1582 } else if(sstat0 & PHASE_MISMATCH) {
1583 struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1584 (struct NCR_700_command_slot *)SCp->host_scribble;
1585
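		/* A phase mismatch is expected in two places: while sending
		 * our message (the target wants to answer part way through)
		 * or inside the data move SG list (the target is
		 * disconnecting early).  Anything else is unexpected and
		 * forces an internal bus reset below. */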
1586 if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1587 /* It wants to reply to some part of
1588 * our message */
1589#ifdef NCR_700_DEBUG
1590 __u32 temp = NCR_700_readl(host, TEMP_REG);
1591 int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1592 printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1593#endif
1594 resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1595 } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1596 dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1597 int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1598 int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1599 int residual = NCR_700_data_residual(host);
1600 int i;
1601#ifdef NCR_700_DEBUG
1602 __u32 naddr = NCR_700_readl(host, DNAD_REG);
1603
1604 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1605 host->host_no, pun, lun,
1606 SGcount, data_transfer);
1607 scsi_print_command(SCp);
1608 if(residual) {
1609 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1610 host->host_no, pun, lun,
1611 SGcount, data_transfer, residual);
1612 }
1613#endif
1614 data_transfer += residual;
1615
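				/* data_transfer is now the portion of the
				 * interrupted move that never reached the
				 * target (DBC count plus fifo residual), so
				 * shrink the current SG move to that
				 * remainder and advance its address past the
				 * bytes already transferred */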
1616 if(data_transfer != 0) {
1617 int count;
1618 __u32 pAddr;
1619
1620 SGcount--;
1621
1622 count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1623 DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1624 slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1625 slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1626 pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1627 pAddr += (count - data_transfer);
1628#ifdef NCR_700_DEBUG
1629 if(pAddr != naddr) {
1630 printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1631 }
1632#endif
1633 slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1634 }
1635 /* set the executed moves to nops */
1636 for(i=0; i<SGcount; i++) {
1637 slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1638 slot->SG[i].pAddr = 0;
1639 }
1640				dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1641 /* and pretend we disconnected after
1642 * the command phase */
1643 resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1644 /* make sure all the data is flushed */
1645 NCR_700_flush_fifo(host);
1646 } else {
1647 __u8 sbcl = NCR_700_readb(host, SBCL_REG);
1648 printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1649 host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1650 NCR_700_internal_bus_reset(host);
1651 }
1652
1653 } else if(sstat0 & SCSI_GROSS_ERROR) {
1654 printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1655 host->host_no, pun, lun);
1656 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1657 } else if(sstat0 & PARITY_ERROR) {
1658 printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1659 host->host_no, pun, lun);
1660 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1661 } else if(dstat & SCRIPT_INT_RECEIVED) {
1662 DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1663 host->host_no, pun, lun));
1664 resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1665 } else if(dstat & (ILGL_INST_DETECTED)) {
1666 printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1667 " Please email James.Bottomley@HansenPartnership.com with the details\n",
1668 host->host_no, pun, lun,
1669 dsp, dsp - hostdata->pScript);
1670 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1671 } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1672 printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1673 host->host_no, pun, lun, dstat);
1674 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1675 }
1676
1677
1678 /* NOTE: selection interrupt processing MUST occur
1679 * after script interrupt processing to correctly cope
1680 * with the case where we process a disconnect and
1681 * then get reselected before we process the
1682 * disconnection */
1683 if(sstat0 & SELECTED) {
1684 /* FIXME: It currently takes at least FOUR
1685 * interrupts to complete a command that
1686 * disconnects: one for the disconnect, one
1687 * for the reselection, one to get the
1688 * reselection data and one to complete the
1689 * command. If we guess the reselected
1690 * command here and prepare it, we only need
1691 * to get a reselection data interrupt if we
1692 * guessed wrongly. Since the interrupt
1693 * overhead is much greater than the command
1694 * setup, this would be an efficient
1695 * optimisation particularly as we probably
1696 * only have one outstanding command on a
1697 * target most of the time */
1698
1699 resume_offset = process_selection(host, dsp);
1700
1701 }
1702
1703 }
1704
1705 if(resume_offset) {
1706 if(hostdata->state != NCR_700_HOST_BUSY) {
1707 printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1708 host->host_no, resume_offset, resume_offset - hostdata->pScript);
1709 hostdata->state = NCR_700_HOST_BUSY;
1710 }
1711
1712 DEBUG(("Attempting to resume at %x\n", resume_offset));
1713 NCR_700_clear_fifo(host);
1714 NCR_700_writel(resume_offset, host, DSP_REG);
1715 }
1716 /* There is probably a technical no-no about this: If we're a
1717 * shared interrupt and we got this interrupt because the
1718 * other device needs servicing not us, we're still going to
1719 * check our queued commands here---of course, there shouldn't
1720 * be any outstanding.... */
1721 if(hostdata->state == NCR_700_HOST_FREE) {
1722 int i;
1723
1724 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1725 /* fairness: always run the queue from the last
1726 * position we left off */
1727 int j = (i + hostdata->saved_slot_position)
1728 % NCR_700_COMMAND_SLOTS_PER_HOST;
1729
1730 if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1731 continue;
1732 if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1733 DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1734 host->host_no, &hostdata->slots[j],
1735 hostdata->slots[j].cmnd));
1736 hostdata->saved_slot_position = j + 1;
1737 }
1738
1739 break;
1740 }
1741 }
1742 out_unlock:
1743 spin_unlock_irqrestore(host->host_lock, flags);
1744 return IRQ_RETVAL(handled);
1745}
1746
1747STATIC int
1748NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1749{
1750 struct NCR_700_Host_Parameters *hostdata =
1751 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1752 __u32 move_ins;
1753 enum dma_data_direction direction;
1754 struct NCR_700_command_slot *slot;
1755
1756 if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1757 /* We're over our allocation, this should never happen
1758 * since we report the max allocation to the mid layer */
1759 printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1760 return 1;
1761 }
1762 /* check for untagged commands. We cannot have any outstanding
1763 * commands if we accept them. Commands could be untagged because:
1764 *
1765 * - The tag negotiated bitmap is clear
1766	 * - The blk layer sent an untagged command
1767 */
1768 if(NCR_700_get_depth(SCp->device) != 0
1769	   && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1770	       || !blk_rq_tagged(SCp->request))) {
1771 CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1772 NCR_700_get_depth(SCp->device));
1773 return SCSI_MLQUEUE_DEVICE_BUSY;
1774 }
1775 if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1776 CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1777 NCR_700_get_depth(SCp->device));
1778 return SCSI_MLQUEUE_DEVICE_BUSY;
1779 }
1780 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1781
1782 /* begin the command here */
1783 /* no need to check for NULL, test for command_slot_count above
1784 * ensures a slot is free */
1785 slot = find_empty_slot(hostdata);
1786
1787 slot->cmnd = SCp;
1788
1789 SCp->scsi_done = done;
1790 SCp->host_scribble = (unsigned char *)slot;
1791 SCp->SCp.ptr = NULL;
1792 SCp->SCp.buffer = NULL;
1793
1794#ifdef NCR_700_DEBUG
1795 printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1796 scsi_print_command(SCp);
1797#endif
1798 if(blk_rq_tagged(SCp->request)
1799	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1800	   && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1801 scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1802 hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1803 NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1804 }
1805
1806 /* here we may have to process an untagged command. The gate
1807 * above ensures that this will be the only one outstanding,
1808 * so clear the tag negotiated bit.
1809 *
1810 * FIXME: This will royally screw up on multiple LUN devices
1811 * */
1812 if(!blk_rq_tagged(SCp->request)
1813 && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1814 scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1815 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1816 }
1817
1818	if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
1819 && scsi_get_tag_type(SCp->device)) {
1820 slot->tag = SCp->request->tag;
1821 CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1822 slot->tag, slot);
1823 } else {
1824 slot->tag = SCSI_NO_TAG;
1825 /* must populate current_cmnd for scsi_find_tag to work */
1826 SCp->device->current_cmnd = SCp;
1827 }
1828 /* sanity check: some of the commands generated by the mid-layer
1829 * have an eccentric idea of their sc_data_direction */
1830 if(!SCp->use_sg && !SCp->request_bufflen
1831 && SCp->sc_data_direction != DMA_NONE) {
1832#ifdef NCR_700_DEBUG
1833 printk("53c700: Command");
1834 scsi_print_command(SCp);
1835 printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1836#endif
1837 SCp->sc_data_direction = DMA_NONE;
1838 }
1839
1840 switch (SCp->cmnd[0]) {
1841 case REQUEST_SENSE:
1842 /* clear the internal sense magic */
1843 SCp->cmnd[6] = 0;
1844 /* fall through */
1845 default:
1846 /* OK, get it from the command */
1847 switch(SCp->sc_data_direction) {
1848 case DMA_BIDIRECTIONAL:
1849 default:
1850 printk(KERN_ERR "53c700: Unknown command for data direction ");
1851 scsi_print_command(SCp);
1852
1853 move_ins = 0;
1854 break;
1855 case DMA_NONE:
1856 move_ins = 0;
1857 break;
1858 case DMA_FROM_DEVICE:
1859 move_ins = SCRIPT_MOVE_DATA_IN;
1860 break;
1861 case DMA_TO_DEVICE:
1862 move_ins = SCRIPT_MOVE_DATA_OUT;
1863 break;
1864 }
1865 }
1866
1867 /* now build the scatter gather list */
1868 direction = SCp->sc_data_direction;
1869 if(move_ins != 0) {
1870 int i;
1871 int sg_count;
1872 dma_addr_t vPtr = 0;
1873 __u32 count = 0;
1874
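		/* map the data for DMA: either the whole scatterlist or,
		 * for a single buffer, one mapping whose handle is kept in
		 * slot->dma_handle; each segment is then turned into a
		 * script data move below */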
1875 if(SCp->use_sg) {
1876 sg_count = dma_map_sg(hostdata->dev,
1877 SCp->request_buffer, SCp->use_sg,
1878 direction);
1879 } else {
1880 vPtr = dma_map_single(hostdata->dev,
1881 SCp->request_buffer,
1882 SCp->request_bufflen,
1883 direction);
1884 count = SCp->request_bufflen;
1885 slot->dma_handle = vPtr;
1886 sg_count = 1;
1887 }
1888
1889
1890 for(i = 0; i < sg_count; i++) {
1891
1892 if(SCp->use_sg) {
1893				struct scatterlist *sg = SCp->request_buffer;
1894
1895 vPtr = sg_dma_address(&sg[i]);
1896 count = sg_dma_len(&sg[i]);
1897 }
1898
1899 slot->SG[i].ins = bS_to_host(move_ins | count);
1900 DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1901 i, count, slot->SG[i].ins, (unsigned long)vPtr));
1902 slot->SG[i].pAddr = bS_to_host(vPtr);
1903 }
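		/* terminate the SG list with a script RETURN rather than
		 * another data move */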
1904 slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1905 slot->SG[i].pAddr = 0;
1906		dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1907		DEBUG((" SETTING %08lx to %x\n",
1908		       (&slot->pSG[i].ins),
1909 slot->SG[i].ins));
1910 }
1911 slot->resume_offset = 0;
1912 slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1913 sizeof(SCp->cmnd), DMA_TO_DEVICE);
1914 NCR_700_start_command(SCp);
1915 return 0;
1916}
1917
1918STATIC int
1919NCR_700_abort(struct scsi_cmnd * SCp)
1920{
1921 struct NCR_700_command_slot *slot;
1922
1923 scmd_printk(KERN_INFO, SCp,
1924 "New error handler wants to abort command\n\t");
1925 scsi_print_command(SCp);
1926
1927 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1928
1929 if(slot == NULL)
1930 /* no outstanding command to abort */
1931 return SUCCESS;
1932 if(SCp->cmnd[0] == TEST_UNIT_READY) {
1933 /* FIXME: This is because of a problem in the new
1934 * error handler. When it is in error recovery, it
1935 * will send a TUR to a device it thinks may still be
1936 * showing a problem. If the TUR isn't responded to,
1937 * it will abort it and mark the device off line.
1938 * Unfortunately, it does no other error recovery, so
1939 * this would leave us with an outstanding command
1940 * occupying a slot. Rather than allow this to
1941 * happen, we issue a bus reset to force all
1942 * outstanding commands to terminate here. */
1943 NCR_700_internal_bus_reset(SCp->device->host);
1944 /* still drop through and return failed */
1945 }
1946 return FAILED;
1947
1948}
1949
1950STATIC int
1951NCR_700_bus_reset(struct scsi_cmnd * SCp)
1952{
1953	DECLARE_COMPLETION_ONSTACK(complete);
1954 struct NCR_700_Host_Parameters *hostdata =
1955 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1956
1957 scmd_printk(KERN_INFO, SCp,
1958 "New error handler wants BUS reset, cmd %p\n\t", SCp);
1959	scsi_print_command(SCp);
1960
1961 /* In theory, eh_complete should always be null because the
1962 * eh is single threaded, but just in case we're handling a
1963 * reset via sg or something */
1964 spin_lock_irq(SCp->device->host->host_lock);
1965 while (hostdata->eh_complete != NULL) {
1966 spin_unlock_irq(SCp->device->host->host_lock);
1967 msleep_interruptible(100);
1968 spin_lock_irq(SCp->device->host->host_lock);
1969 }
1970
1971 hostdata->eh_complete = &complete;
1972 NCR_700_internal_bus_reset(SCp->device->host);
1973
1974 spin_unlock_irq(SCp->device->host->host_lock);
1975 wait_for_completion(&complete);
1976 spin_lock_irq(SCp->device->host->host_lock);
1977
1978 hostdata->eh_complete = NULL;
1979 /* Revalidate the transport parameters of the failing device */
1980 if(hostdata->fast)
1981 spi_schedule_dv_device(SCp->device);
1982
1983 spin_unlock_irq(SCp->device->host->host_lock);
1984 return SUCCESS;
1985}
1986
1987STATIC int
1988NCR_700_host_reset(struct scsi_cmnd * SCp)
1989{
1990	scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
1991 scsi_print_command(SCp);
1992
1993 spin_lock_irq(SCp->device->host->host_lock);
1994
1995 NCR_700_internal_bus_reset(SCp->device->host);
1996 NCR_700_chip_reset(SCp->device->host);
1997
1998 spin_unlock_irq(SCp->device->host->host_lock);
1999
2000 return SUCCESS;
2001}
2002
2003STATIC void
2004NCR_700_set_period(struct scsi_target *STp, int period)
2005{
2006 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2007 struct NCR_700_Host_Parameters *hostdata =
2008 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2009
2010 if(!hostdata->fast)
2011 return;
2012
2013 if(period < hostdata->min_period)
2014 period = hostdata->min_period;
2015
2016 spi_period(STp) = period;
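	/* clearing the negotiated/begin-negotiation flags below means sync
	 * is treated as not yet negotiated, so the new period takes effect
	 * through a fresh negotiation */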
2017 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2018 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2019 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2020}
2021
2022STATIC void
2023NCR_700_set_offset(struct scsi_target *STp, int offset)
2024{
2025 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2026 struct NCR_700_Host_Parameters *hostdata =
2027 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2028 int max_offset = hostdata->chip710
2029 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2030
2031 if(!hostdata->fast)
2032 return;
2033
2034 if(offset > max_offset)
2035 offset = max_offset;
2036
2037 /* if we're currently async, make sure the period is reasonable */
2038 if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2039 spi_period(STp) > 0xff))
2040 spi_period(STp) = hostdata->min_period;
2041
2042 spi_offset(STp) = offset;
2043 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2044 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2045 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2046}
2047
2048STATIC int
2049NCR_700_slave_alloc(struct scsi_device *SDp)
2050{
2051 SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2052 GFP_KERNEL);
2053
2054 if (!SDp->hostdata)
2055 return -ENOMEM;
2056
2057 return 0;
2058}
2059
2060STATIC int
2061NCR_700_slave_configure(struct scsi_device *SDp)
2062{
2063 struct NCR_700_Host_Parameters *hostdata =
2064 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2065
2066 /* to do here: allocate memory; build a queue_full list */
2067 if(SDp->tagged_supported) {
2068 scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
2069 scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
2070 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2071 } else {
2072 /* initialise to default depth */
2073 scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
2074 }
2075 if(hostdata->fast) {
2076 /* Find the correct offset and period via domain validation */
2077 if (!spi_initial_dv(SDp->sdev_target))
2078 spi_dv_device(SDp);
2079 } else {
2080 spi_offset(SDp->sdev_target) = 0;
2081 spi_period(SDp->sdev_target) = 0;
2082 }
2083 return 0;
2084}
2085
2086STATIC void
2087NCR_700_slave_destroy(struct scsi_device *SDp)
2088{
2089 kfree(SDp->hostdata);
2090 SDp->hostdata = NULL;
2091}
2092
2093static int
2094NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2095{
2096 if (depth > NCR_700_MAX_TAGS)
2097 depth = NCR_700_MAX_TAGS;
2098
2099 scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
2100 return depth;
2101}
2102
2103static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2104{
2105	int change_tag = ((tag_type == 0 && scsi_get_tag_type(SDp) != 0)
2106 || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
2107 struct NCR_700_Host_Parameters *hostdata =
2108 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2109
2110 scsi_set_tag_type(SDp, tag_type);
2111
2112 /* We have a global (per target) flag to track whether TCQ is
2113 * enabled, so we'll be turning it off for the entire target here.
2114 * our tag algorithm will fail if we mix tagged and untagged commands,
2115 * so quiesce the device before doing this */
2116 if (change_tag)
2117 scsi_target_quiesce(SDp->sdev_target);
2118
2119 if (!tag_type) {
2120 /* shift back to the default unqueued number of commands
2121 * (the user can still raise this) */
2122 scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
2123		hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
2124 } else {
2125 /* Here, we cleared the negotiation flag above, so this
2126 * will force the driver to renegotiate */
2127 scsi_activate_tcq(SDp, SDp->queue_depth);
2128 if (change_tag)
2129 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2130 }
2131 if (change_tag)
2132 scsi_target_resume(SDp->sdev_target);
2133
2134 return tag_type;
2135}
2136
2137static ssize_t
2138NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2139{
2140 struct scsi_device *SDp = to_scsi_device(dev);
2141
2142 return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2143}
2144
2145static struct device_attribute NCR_700_active_tags_attr = {
2146 .attr = {
2147 .name = "active_tags",
2148 .mode = S_IRUGO,
2149 },
2150 .show = NCR_700_show_active_tags,
2151};
2152
2153STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2154 &NCR_700_active_tags_attr,
2155 NULL,
2156};
2157
2158EXPORT_SYMBOL(NCR_700_detect);
2159EXPORT_SYMBOL(NCR_700_release);
2160EXPORT_SYMBOL(NCR_700_intr);
2161
2162static struct spi_function_template NCR_700_transport_functions = {
2163 .set_period = NCR_700_set_period,
2164 .show_period = 1,
2165 .set_offset = NCR_700_set_offset,
2166 .show_offset = 1,
2167};
2168
2169static int __init NCR_700_init(void)
2170{
2171 NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2172 if(!NCR_700_transport_template)
2173 return -ENODEV;
2174 return 0;
2175}
2176
2177static void __exit NCR_700_exit(void)
2178{
2179 spi_release_transport(NCR_700_transport_template);
2180}
2181
2182module_init(NCR_700_init);
2183module_exit(NCR_700_exit);
2184