/*
 * SuperTrak EX Series Storage Controller driver for Linux
 *
 * Copyright (C) 2005-2009 Promise Technology Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Ed Lin <promise_linux@promise.com>
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>

#define DRV_NAME "stex"
#define ST_DRIVER_VERSION "4.6.0000.4"
#define ST_VER_MAJOR		4
#define ST_VER_MINOR		6
#define ST_BUILD_VER		4

	/* MU register offset */
	IMR0	= 0x10,	/* MU_INBOUND_MESSAGE_REG0 */
	IMR1	= 0x14,	/* MU_INBOUND_MESSAGE_REG1 */
	OMR0	= 0x18,	/* MU_OUTBOUND_MESSAGE_REG0 */
	OMR1	= 0x1c,	/* MU_OUTBOUND_MESSAGE_REG1 */
	IDBL	= 0x20,	/* MU_INBOUND_DOORBELL */
	IIS	= 0x24,	/* MU_INBOUND_INTERRUPT_STATUS */
	IIM	= 0x28,	/* MU_INBOUND_INTERRUPT_MASK */
	ODBL	= 0x2c,	/* MU_OUTBOUND_DOORBELL */
	OIS	= 0x30,	/* MU_OUTBOUND_INTERRUPT_STATUS */
	OIM	= 0x3c,	/* MU_OUTBOUND_INTERRUPT_MASK */

	/* MU register value */
	MU_INBOUND_DOORBELL_HANDSHAKE		= (1 << 0),
	MU_INBOUND_DOORBELL_REQHEADCHANGED	= (1 << 1),
	MU_INBOUND_DOORBELL_STATUSTAILCHANGED	= (1 << 2),
	MU_INBOUND_DOORBELL_HMUSTOPPED		= (1 << 3),
	MU_INBOUND_DOORBELL_RESET		= (1 << 4),

	MU_OUTBOUND_DOORBELL_HANDSHAKE		= (1 << 0),
	MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED	= (1 << 1),
	MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED	= (1 << 2),
	MU_OUTBOUND_DOORBELL_BUSCHANGE		= (1 << 3),
	MU_OUTBOUND_DOORBELL_HASEVENT		= (1 << 4),
	MU_OUTBOUND_DOORBELL_REQUEST_RESET	= (1 << 27),

	MU_STATE_STARTING			= 1,
	MU_STATE_RESETTING			= 3,

	MU_HANDSHAKE_SIGNATURE			= 0x55aaaa55,
	MU_HANDSHAKE_SIGNATURE_HALF		= 0x5a5a0000,
	MU_HARD_RESET_WAIT			= 30000,

	/* firmware returned values */
	SRB_STATUS_SUCCESS			= 0x01,
	SRB_STATUS_ERROR			= 0x04,
	SRB_STATUS_BUSY				= 0x05,
	SRB_STATUS_INVALID_REQUEST		= 0x06,
	SRB_STATUS_SELECTION_TIMEOUT		= 0x0A,

	TASK_ATTRIBUTE_SIMPLE			= 0x0,
	TASK_ATTRIBUTE_HEADOFQUEUE		= 0x1,
	TASK_ATTRIBUTE_ORDERED			= 0x2,
	TASK_ATTRIBUTE_ACA			= 0x4,

	SS_STS_NORMAL				= 0x80000000,
	SS_STS_DONE				= 0x40000000,
	SS_STS_HANDSHAKE			= 0x20000000,

	SS_HEAD_HANDSHAKE			= 0x80,

	SS_H2I_INT_RESET			= 0x100,

	SS_I2H_REQUEST_RESET			= 0x2000,

	SS_MU_OPERATIONAL			= 0x80000000,

	STEX_CDB_LENGTH				= 16,
	STATUS_VAR_LEN				= 128,

	SG_CF_EOT				= 0x80,	/* end of table */
	SG_CF_64B				= 0x40,	/* 64 bit item */
	SG_CF_HOST				= 0x20,	/* sg in host memory */

	MSG_DATA_DIR_OUT			= 2,

	PASSTHRU_REQ_TYPE			= 0x00000001,
	PASSTHRU_REQ_NO_WAKEUP			= 0x00000100,
	ST_INTERNAL_TIMEOUT			= 180,

	/* vendor specific commands of Promise */
	SINBAND_MGT_CMD				= 0xd9,
	CONTROLLER_CMD				= 0xe1,
	DEBUGGING_CMD				= 0xe2,

	PASSTHRU_GET_ADAPTER			= 0x05,
	PASSTHRU_GET_DRVVER			= 0x10,

	CTLR_CONFIG_CMD				= 0x03,
	CTLR_SHUTDOWN				= 0x0d,

	CTLR_POWER_STATE_CHANGE			= 0x0e,
	CTLR_POWER_SAVING			= 0x01,

	PASSTHRU_SIGNATURE			= 0x4e415041,
	MGT_CMD_SIGNATURE			= 0xba,

	ST_ADDITIONAL_MEM			= 0x200000,
	ST_ADDITIONAL_MEM_MIN			= 0x80000,

	u8 ctrl;	/* SG_CF_xxx */

struct st_ss_sgitem {

struct st_msg_header {

struct handshake_frame {
	__le64 rb_phy;		/* request payload queue physical address */
	__le16 req_sz;		/* size of each request payload */
	__le16 req_cnt;		/* count of reqs the buffer can hold */
	__le16 status_sz;	/* size of each status payload */
	__le16 status_cnt;	/* count of status the buffer can hold */
	__le64 hosttime;	/* seconds from Jan 1, 1970 (GMT) */
	u8 partner_type;	/* who sends this frame */
	__le32 partner_ver_major;
	__le32 partner_ver_minor;
	__le32 partner_ver_oem;
	__le32 partner_ver_build;
	__le32 extra_offset;	/* NEW */
	__le32 extra_size;	/* NEW */

	u8 payload_sz;		/* payload size in 4-byte, not used */
	u8 cdb[STEX_CDB_LENGTH];

	u8 payload_sz;		/* payload size in 4-byte */
	u8 variable[STATUS_VAR_LEN];

	struct ver_info drv_ver;
	struct ver_info bios_ver;

	struct scsi_cmnd *cmd;

	unsigned int sense_bufflen;

	void __iomem *mmio_base;	/* iomapped PCI memory space */
	dma_addr_t dma_handle;

	struct Scsi_Host *host;
	struct pci_dev *pdev;

	struct req_msg * (*alloc_rq) (struct st_hba *);
	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
	void (*send) (struct st_hba *, struct req_msg *, u16);

	struct status_msg *status_buffer;
	void *copy_buffer; /* temp buffer for driver-handled commands */
	struct st_ccb *wait_ccb;

	char work_q_name[20];
	struct workqueue_struct *work_q;
	struct work_struct reset_work;
	wait_queue_head_t reset_waitq;
	unsigned int mu_status;
	unsigned int cardtype;

struct st_card_info {
	struct req_msg * (*alloc_rq) (struct st_hba *);
	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
	void (*send) (struct st_hba *, struct req_msg *, u16);
	unsigned int max_lun;
	unsigned int max_channel;

module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts(0=off, 1=on)");

static const char console_inq_page[] =
{
	0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
	0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20,	/* "Promise " */
	0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E,	/* "RAID Con" */
	0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20,	/* "sole    " */
	0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20,	/* "1.00    " */
	0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D,	/* "SX/RSAF-" */
	0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20,	/* "TE1.00  " */
	0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
};

MODULE_AUTHOR("Ed Lin");
MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
MODULE_LICENSE("GPL");
MODULE_VERSION(ST_DRIVER_VERSION);

static void stex_gettime(__le64 *time)
	do_gettimeofday(&tv);
	*time = cpu_to_le64(tv.tv_sec);

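/*
 * Status ring helper: completions are written by the firmware into the
 * status_buffer ring; the driver consumes entries at status_tail and wraps
 * the index modulo (sts_count + 1), matching the ring size advertised to
 * the firmware during handshake.
 */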
static struct status_msg *stex_get_status(struct st_hba *hba)
	struct status_msg *status = hba->status_buffer + hba->status_tail;

	hba->status_tail %= hba->sts_count+1;

static void stex_invalid_field(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *))
	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

	/* "Invalid field in cdb" */
	scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24,

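/*
 * Request ring helpers: request slots are carved out of the coherent
 * dma_mem block at req_head * rq_size, and req_head wraps modulo
 * (rq_count + 1).  On st_yel controllers each slot is prefixed by a
 * struct st_msg_header, so the usable req_msg starts right after it.
 */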
static struct req_msg *stex_alloc_req(struct st_hba *hba)
	struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size;

	hba->req_head %= hba->rq_count+1;

static struct req_msg *stex_ss_alloc_req(struct st_hba *hba)
	return (struct req_msg *)(hba->dma_mem +
		hba->req_head * hba->rq_size + sizeof(struct st_msg_header));

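/*
 * Scatter-gather mapping: both variants DMA-map the command's scatterlist
 * and describe it in a struct st_sgtable placed at the start of the
 * request's variable area.  stex_map_sg() emits 64-bit st_sgitem entries
 * tagged SG_CF_64B|SG_CF_HOST and marks the last one with SG_CF_EOT;
 * stex_ss_map_sg() stores each address as separate low/high 32-bit words.
 */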
static int stex_map_sg(struct st_hba *hba,
	struct req_msg *req, struct st_ccb *ccb)
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	struct st_sgtable *dst;
	struct st_sgitem *table;

	nseg = scsi_dma_map(cmd);
	dst = (struct st_sgtable *)req->variable;

	ccb->sg_count = nseg;
	dst->sg_count = cpu_to_le16((u16)nseg);
	dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
	dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));

	table = (struct st_sgitem *)(dst + 1);
	scsi_for_each_sg(cmd, sg, nseg, i) {
		table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
		table[i].addr = cpu_to_le64(sg_dma_address(sg));
		table[i].ctrl = SG_CF_64B | SG_CF_HOST;
	table[--i].ctrl |= SG_CF_EOT;

static int stex_ss_map_sg(struct st_hba *hba,
	struct req_msg *req, struct st_ccb *ccb)
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	struct st_sgtable *dst;
	struct st_ss_sgitem *table;

	nseg = scsi_dma_map(cmd);
	dst = (struct st_sgtable *)req->variable;

	ccb->sg_count = nseg;
	dst->sg_count = cpu_to_le16((u16)nseg);
	dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
	dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));

	table = (struct st_ss_sgitem *)(dst + 1);
	scsi_for_each_sg(cmd, sg, nseg, i) {
		table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
			cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
			cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);

static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
	size_t count = sizeof(struct st_frame);

	p = hba->copy_buffer;
	scsi_sg_copy_to_buffer(ccb->cmd, p, count);
	memset(p->base, 0, sizeof(u32)*6);
	*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);

	p->drv_ver.major = ST_VER_MAJOR;
	p->drv_ver.minor = ST_VER_MINOR;
	p->drv_ver.oem = ST_OEM;
	p->drv_ver.build = ST_BUILD_VER;

	p->bus = hba->pdev->bus->number;
	p->slot = hba->pdev->devfn;
	p->irq_vec = hba->pdev->irq;
	p->id = hba->pdev->vendor << 16 | hba->pdev->device;
		hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;

	scsi_sg_copy_from_buffer(ccb->cmd, p, count);

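/*
 * Classic MU request posting: the updated request head index is written
 * to IMR0 and the inbound doorbell is rung with REQHEADCHANGED; the
 * trailing readl() flushes the posted MMIO writes.
 */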
stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
	req->tag = cpu_to_le16(tag);

	hba->ccb[tag].req = req;

	writel(hba->req_head, hba->mmio_base + IMR0);
	writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
	readl(hba->mmio_base + IDBL); /* flush */

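/*
 * SS (st_yel) request posting: each request slot is preceded by a
 * struct st_msg_header carrying the channel, timeout and a 64-bit handle.
 * The handle is the slot's bus address plus low bits derived from the
 * sg count, and it is posted to the firmware through YH2I_REQ_HI/YH2I_REQ.
 */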
stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
	struct scsi_cmnd *cmd;
	struct st_msg_header *msg_h;

	req->tag = cpu_to_le16(tag);

	hba->ccb[tag].req = req;

	cmd = hba->ccb[tag].cmd;
	msg_h = (struct st_msg_header *)req - 1;

	msg_h->channel = (u8)cmd->device->channel;
	msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ);

	addr = hba->dma_handle + hba->req_head * hba->rq_size;
	addr += (hba->ccb[tag].sg_count+4)/11;
	msg_h->handle = cpu_to_le64(addr);

	hba->req_head %= hba->rq_count+1;

	writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
	readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
	writel(addr, hba->mmio_base + YH2I_REQ);
	readl(hba->mmio_base + YH2I_REQ); /* flush */

stex_slave_alloc(struct scsi_device *sdev)
	/* Cheat: usually extracted from Inquiry data */
	sdev->tagged_supported = 1;

	scsi_activate_tcq(sdev, sdev->host->can_queue);

stex_slave_config(struct scsi_device *sdev)
	sdev->use_10_for_rw = 1;
	sdev->use_10_for_ms = 1;
	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
	sdev->tagged_supported = 1;

stex_slave_destroy(struct scsi_device *sdev)
	scsi_deactivate_tcq(sdev, 1);

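/*
 * queuecommand: a few commands are completed inside the driver instead of
 * being sent to the firmware - MODE_SENSE(10) for the caching page,
 * REPORT_LUNS on st_shasta and on the console device, INQUIRY for the
 * virtual RAID console, and the PASSTHRU_GET_DRVVER query that returns the
 * driver version.  Everything else is copied into a request slot (CDB,
 * data direction, sg table) and posted to the controller.
 */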
stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
	struct Scsi_Host *host;
	unsigned int id, lun;

	host = cmd->device->host;
	id = cmd->device->id;
	lun = cmd->device->lun;
	hba = (struct st_hba *) &host->hostdata[0];

	if (unlikely(hba->mu_status == MU_STATE_RESETTING))
		return SCSI_MLQUEUE_HOST_BUSY;

	switch (cmd->cmnd[0]) {
		static char ms10_caching_page[12] =
			{ 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };

		page = cmd->cmnd[2] & 0x3f;
		if (page == 0x8 || page == 0x3f) {
			scsi_sg_copy_from_buffer(cmd, ms10_caching_page,
						 sizeof(ms10_caching_page));
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			stex_invalid_field(cmd, done);

		/*
		 * The shasta firmware does not report actual luns in the
		 * target, so fail the command to force sequential lun scan.
		 * Also, the console device does not support this command.
		 */
		if (hba->cardtype == st_shasta || id == host->max_id - 1) {
			stex_invalid_field(cmd, done);

	case TEST_UNIT_READY:
		if (id == host->max_id - 1) {
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;

		if (id != host->max_id - 1)

		if (!lun && !cmd->device->channel &&
			(cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
			scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page,
						 sizeof(console_inq_page));
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			stex_invalid_field(cmd, done);

		if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
			struct st_drvver ver;
			size_t cp_len = sizeof(ver);

			ver.major = ST_VER_MAJOR;
			ver.minor = ST_VER_MINOR;
			ver.build = ST_BUILD_VER;
			ver.signature[0] = PASSTHRU_SIGNATURE;
			ver.console_id = host->max_id - 1;
			ver.host_no = hba->host->host_no;
			cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
			cmd->result = sizeof(ver) == cp_len ?
				DID_OK << 16 | COMMAND_COMPLETE << 8 :
				DID_ERROR << 16 | COMMAND_COMPLETE << 8;

	cmd->scsi_done = done;

	tag = cmd->request->tag;

	if (unlikely(tag >= host->can_queue))
		return SCSI_MLQUEUE_HOST_BUSY;

	req = hba->alloc_rq(hba);

	memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		req->data_dir = MSG_DATA_DIR_IN;
	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
		req->data_dir = MSG_DATA_DIR_OUT;
	else
		req->data_dir = MSG_DATA_DIR_ND;

	hba->ccb[tag].cmd = cmd;
	hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	hba->ccb[tag].sense_buffer = cmd->sense_buffer;

	if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
		hba->ccb[tag].sg_count = 0;
		memset(&req->variable[0], 0, 8);

	hba->send(hba, req, tag);

static void stex_scsi_done(struct st_ccb *ccb)
	struct scsi_cmnd *cmd = ccb->cmd;

	if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
		result = ccb->scsi_status;
		switch (ccb->scsi_status) {
			result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
		case SAM_STAT_CHECK_CONDITION:
			result |= DRIVER_SENSE << 24;
			result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
			result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;

	else if (ccb->srb_status & SRB_SEE_SENSE)
		result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
	else switch (ccb->srb_status) {
	case SRB_STATUS_SELECTION_TIMEOUT:
		result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
	case SRB_STATUS_BUSY:
		result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
	case SRB_STATUS_INVALID_REQUEST:
	case SRB_STATUS_ERROR:
		result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;

	cmd->result = result;

static void stex_copy_data(struct st_ccb *ccb,
	struct status_msg *resp, unsigned int variable)
	if (resp->scsi_status != SAM_STAT_GOOD) {
		if (ccb->sense_buffer != NULL)
			memcpy(ccb->sense_buffer, resp->variable,
				min(variable, ccb->sense_bufflen));

	if (ccb->cmd == NULL)
	scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, variable);

static void stex_check_cmd(struct st_hba *hba,
	struct st_ccb *ccb, struct status_msg *resp)
	if (ccb->cmd->cmnd[0] == MGT_CMD &&
		resp->scsi_status != SAM_STAT_CHECK_CONDITION)
		scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
			le32_to_cpu(*(__le32 *)&resp->variable[0]));

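/*
 * Outbound completion handling for the classic MU: the firmware's status
 * head index is read from OMR1, every status_msg between the driver's tail
 * and that head is matched to its ccb by tag, sense/residual data is copied
 * back, and the consumed head is acknowledged through IMR1.
 */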
static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
	void __iomem *base = hba->mmio_base;
	struct status_msg *resp;

	if (unlikely(!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED)))

	/* status payloads */
	hba->status_head = readl(base + OMR1);
	if (unlikely(hba->status_head > hba->sts_count)) {
		printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
			pci_name(hba->pdev));

	/*
	 * it's not a valid status payload if:
	 * 1. there are no pending requests(e.g. during init stage)
	 * 2. there are some pending requests, but the controller is in
	 *    reset status, and its type is not st_yosemite
	 * firmware of st_yosemite in reset status will return pending requests
	 * to driver, so we allow it to pass
	 */
	if (unlikely(hba->out_req_cnt <= 0 ||
			(hba->mu_status == MU_STATE_RESETTING &&
			 hba->cardtype != st_yosemite))) {
		hba->status_tail = hba->status_head;

	while (hba->status_tail != hba->status_head) {
		resp = stex_get_status(hba);
		tag = le16_to_cpu(resp->tag);
		if (unlikely(tag >= hba->host->can_queue)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): invalid tag\n", pci_name(hba->pdev));

		ccb = &hba->ccb[tag];
		if (unlikely(hba->wait_ccb == ccb))
			hba->wait_ccb = NULL;
		if (unlikely(ccb->req == NULL)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): lagging req\n", pci_name(hba->pdev));

		size = resp->payload_sz * sizeof(u32); /* payload size */
		if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
			size > sizeof(*resp))) {
			printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
				pci_name(hba->pdev));
		size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */
			stex_copy_data(ccb, resp, size);

		ccb->srb_status = resp->srb_status;
		ccb->scsi_status = resp->scsi_status;

		if (likely(ccb->cmd != NULL)) {
			if (hba->cardtype == st_yosemite)
				stex_check_cmd(hba, ccb, resp);

			if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
				ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
				stex_controller_info(hba, ccb);

			scsi_dma_unmap(ccb->cmd);

	writel(hba->status_head, base + IMR1);
	readl(base + IMR1); /* flush */

static irqreturn_t stex_intr(int irq, void *__hba)
	struct st_hba *hba = __hba;
	void __iomem *base = hba->mmio_base;

	spin_lock_irqsave(hba->host->host_lock, flags);

	data = readl(base + ODBL);

	if (data && data != 0xffffffff) {
		/* clear the interrupt */
		writel(data, base + ODBL);
		readl(base + ODBL); /* flush */
		stex_mu_intr(hba, data);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET &&
			hba->cardtype == st_shasta))
			queue_work(hba->work_q, &hba->reset_work);

	spin_unlock_irqrestore(hba->host->host_lock, flags);

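/*
 * SS (st_yel) completion handling: the firmware posts one 32-bit word per
 * completion into the scratch array.  SS_STS_NORMAL marks a valid entry;
 * SS_STS_DONE means the command finished cleanly and the full status_msg
 * does not need to be parsed, otherwise srb/scsi status and sense data are
 * taken from the status ring entry as in the classic path.
 */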
static void stex_ss_mu_intr(struct st_hba *hba)
	struct status_msg *resp;

	if (unlikely(hba->out_req_cnt <= 0 ||
			hba->mu_status == MU_STATE_RESETTING))

	while (count < hba->sts_count) {
		scratch = hba->scratch + hba->status_tail;
		value = le32_to_cpu(*scratch);
		if (unlikely(!(value & SS_STS_NORMAL)))

		resp = hba->status_buffer + hba->status_tail;
		hba->status_tail %= hba->sts_count+1;

		if (unlikely(tag >= hba->host->can_queue)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): invalid tag\n", pci_name(hba->pdev));

		ccb = &hba->ccb[tag];
		if (unlikely(hba->wait_ccb == ccb))
			hba->wait_ccb = NULL;
		if (unlikely(ccb->req == NULL)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): lagging req\n", pci_name(hba->pdev));

		if (likely(value & SS_STS_DONE)) { /* normal case */
			ccb->srb_status = SRB_STATUS_SUCCESS;
			ccb->scsi_status = SAM_STAT_GOOD;
			ccb->srb_status = resp->srb_status;
			ccb->scsi_status = resp->scsi_status;
			size = resp->payload_sz * sizeof(u32);
			if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
				size > sizeof(*resp))) {
				printk(KERN_WARNING DRV_NAME
					"(%s): bad status size\n",
					pci_name(hba->pdev));
			size -= sizeof(*resp) - STATUS_VAR_LEN;
				stex_copy_data(ccb, resp, size);
			if (likely(ccb->cmd != NULL))
				stex_check_cmd(hba, ccb, resp);

		if (likely(ccb->cmd != NULL)) {
			scsi_dma_unmap(ccb->cmd);

static irqreturn_t stex_ss_intr(int irq, void *__hba)
	struct st_hba *hba = __hba;
	void __iomem *base = hba->mmio_base;

	spin_lock_irqsave(hba->host->host_lock, flags);

	data = readl(base + YI2H_INT);
	if (data && data != 0xffffffff) {
		/* clear the interrupt */
		writel(data, base + YI2H_INT_C);
		stex_ss_mu_intr(hba);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		if (unlikely(data & SS_I2H_REQUEST_RESET))
			queue_work(hba->work_q, &hba->reset_work);

	spin_unlock_irqrestore(hba->host->host_lock, flags);

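/*
 * Handshake with the firmware: the driver waits for MU_HANDSHAKE_SIGNATURE
 * in OMR0, fills a handshake_frame with the bus addresses and sizes of the
 * request/status rings (and any extra firmware memory), publishes it, rings
 * the inbound doorbell and waits for the signature again before clearing
 * the message registers.  stex_ss_handshake() is the st_yel variant built
 * around a st_msg_header posted through YH2I_REQ.
 */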
static int stex_common_handshake(struct st_hba *hba)
	void __iomem *base = hba->mmio_base;
	struct handshake_frame *h;
	dma_addr_t status_phys;
	unsigned long before;

	if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
		while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
				printk(KERN_ERR DRV_NAME
					"(%s): no handshake signature\n",
					pci_name(hba->pdev));

	data = readl(base + OMR1);
	if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
		if (hba->host->can_queue > data) {
			hba->host->can_queue = data;
			hba->host->cmd_per_lun = data;

	h = (struct handshake_frame *)hba->status_buffer;
	h->rb_phy = cpu_to_le64(hba->dma_handle);
	h->req_sz = cpu_to_le16(hba->rq_size);
	h->req_cnt = cpu_to_le16(hba->rq_count+1);
	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
	h->status_cnt = cpu_to_le16(hba->sts_count+1);
	stex_gettime(&h->hosttime);
	h->partner_type = HMU_PARTNER_TYPE;
	if (hba->extra_offset) {
		h->extra_offset = cpu_to_le32(hba->extra_offset);
		h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
		h->extra_offset = h->extra_size = 0;

	status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size;
	writel(status_phys, base + IMR0);
	writel((status_phys >> 16) >> 16, base + IMR1);
	writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */
	writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
	readl(base + IDBL); /* flush */

	while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): no signature after handshake frame\n",
				pci_name(hba->pdev));

	writel(0, base + IMR0);
	writel(0, base + OMR0);
	writel(0, base + IMR1);
	writel(0, base + OMR1);
	readl(base + OMR1); /* flush */

static int stex_ss_handshake(struct st_hba *hba)
	void __iomem *base = hba->mmio_base;
	struct st_msg_header *msg_h;
	struct handshake_frame *h;
	u32 data, scratch_size;
	unsigned long before;

	while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): firmware not operational\n",
				pci_name(hba->pdev));

	msg_h = (struct st_msg_header *)hba->dma_mem;
	msg_h->handle = cpu_to_le64(hba->dma_handle);
	msg_h->flag = SS_HEAD_HANDSHAKE;

	h = (struct handshake_frame *)(msg_h + 1);
	h->rb_phy = cpu_to_le64(hba->dma_handle);
	h->req_sz = cpu_to_le16(hba->rq_size);
	h->req_cnt = cpu_to_le16(hba->rq_count+1);
	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
	h->status_cnt = cpu_to_le16(hba->sts_count+1);
	stex_gettime(&h->hosttime);
	h->partner_type = HMU_PARTNER_TYPE;
	h->extra_offset = h->extra_size = 0;
	scratch_size = (hba->sts_count+1)*sizeof(u32);
	h->scratch_size = cpu_to_le32(scratch_size);

	data = readl(base + YINT_EN);
	writel(data, base + YINT_EN);
	writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
	readl(base + YH2I_REQ_HI);
	writel(hba->dma_handle, base + YH2I_REQ);
	readl(base + YH2I_REQ); /* flush */

	scratch = hba->scratch;
	while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): no signature after handshake frame\n",
				pci_name(hba->pdev));

	memset(scratch, 0, scratch_size);

static int stex_handshake(struct st_hba *hba)
	unsigned long flags;
	unsigned int mu_status;

	err = (hba->cardtype == st_yel) ?
		stex_ss_handshake(hba) : stex_common_handshake(hba);
	spin_lock_irqsave(hba->host->host_lock, flags);
	mu_status = hba->mu_status;
		hba->status_head = 0;
		hba->status_tail = 0;
		hba->out_req_cnt = 0;
		hba->mu_status = MU_STATE_STARTED;
		hba->mu_status = MU_STATE_FAILED;
	if (mu_status == MU_STATE_RESETTING)
		wake_up_all(&hba->reset_waitq);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

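/*
 * Error handling: stex_abort() tries to claim the outstanding ccb as
 * wait_ccb and then drains the outbound interrupt path by hand; if the
 * completion has already been consumed the interrupt handler clears
 * wait_ccb, otherwise the command is detached from its request so a late
 * firmware response is ignored.  Host reset goes through stex_do_reset(),
 * which serializes with the MU state machine under the host lock.
 */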
static int stex_abort(struct scsi_cmnd *cmd)
	struct Scsi_Host *host = cmd->device->host;
	struct st_hba *hba = (struct st_hba *)host->hostdata;
	u16 tag = cmd->request->tag;
	int result = SUCCESS;
	unsigned long flags;

	printk(KERN_INFO DRV_NAME
		"(%s): aborting command\n", pci_name(hba->pdev));
	scsi_print_command(cmd);

	base = hba->mmio_base;
	spin_lock_irqsave(host->host_lock, flags);
	if (tag < host->can_queue &&
		hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
		hba->wait_ccb = &hba->ccb[tag];

	if (hba->cardtype == st_yel) {
		data = readl(base + YI2H_INT);
		if (data == 0 || data == 0xffffffff)

		writel(data, base + YI2H_INT_C);
		stex_ss_mu_intr(hba);

		data = readl(base + ODBL);
		if (data == 0 || data == 0xffffffff)

		writel(data, base + ODBL);
		readl(base + ODBL); /* flush */

		stex_mu_intr(hba, data);

	if (hba->wait_ccb == NULL) {
		printk(KERN_WARNING DRV_NAME
			"(%s): lost interrupt\n", pci_name(hba->pdev));

	scsi_dma_unmap(cmd);
	hba->wait_ccb->req = NULL; /* nullify the req's future return */
	hba->wait_ccb = NULL;

	spin_unlock_irqrestore(host->host_lock, flags);

static void stex_hard_reset(struct st_hba *hba)
	struct pci_bus *bus;

	for (i = 0; i < 16; i++)
		pci_read_config_dword(hba->pdev, i * 4,
			&hba->pdev->saved_config_space[i]);

	/* Reset secondary bus. Our controller(MU/ATU) is the only device on
	   secondary bus. Consult Intel 80331/3 developer's manual for detail */
	bus = hba->pdev->bus;
	pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
	pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	/*
	 * 1 ms may be enough for 8-port controllers. But 16-port controllers
	 * require more time to finish bus reset. Use 100 ms here for safety
	 */
	pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
		pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))

	for (i = 0; i < 16; i++)
		pci_write_config_dword(hba->pdev, i * 4,
			hba->pdev->saved_config_space[i]);

static int stex_yos_reset(struct st_hba *hba)
	unsigned long flags, before;

	base = hba->mmio_base;
	writel(MU_INBOUND_DOORBELL_RESET, base + IDBL);
	readl(base + IDBL); /* flush */
	while (hba->out_req_cnt > 0) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): reset timeout\n", pci_name(hba->pdev));

	spin_lock_irqsave(hba->host->host_lock, flags);
		hba->mu_status = MU_STATE_FAILED;
		hba->mu_status = MU_STATE_STARTED;
	wake_up_all(&hba->reset_waitq);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

static void stex_ss_reset(struct st_hba *hba)
	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
	readl(hba->mmio_base + YH2I_INT);

static int stex_do_reset(struct st_hba *hba)
	unsigned long flags;
	unsigned int mu_status = MU_STATE_RESETTING;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->mu_status == MU_STATE_STARTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		printk(KERN_INFO DRV_NAME "(%s): request reset during init\n",
			pci_name(hba->pdev));

	while (hba->mu_status == MU_STATE_RESETTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		wait_event_timeout(hba->reset_waitq,
			hba->mu_status != MU_STATE_RESETTING,
		spin_lock_irqsave(hba->host->host_lock, flags);
		mu_status = hba->mu_status;

	if (mu_status != MU_STATE_RESETTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return (mu_status == MU_STATE_STARTED) ? 0 : -1;

	hba->mu_status = MU_STATE_RESETTING;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (hba->cardtype == st_yosemite)
		return stex_yos_reset(hba);

	if (hba->cardtype == st_shasta)
		stex_hard_reset(hba);
	else if (hba->cardtype == st_yel)

	spin_lock_irqsave(hba->host->host_lock, flags);
	for (tag = 0; tag < hba->host->can_queue; tag++) {
		ccb = &hba->ccb[tag];
		if (ccb->req == NULL)

		scsi_dma_unmap(ccb->cmd);
		ccb->cmd->result = DID_RESET << 16;
		ccb->cmd->scsi_done(ccb->cmd);

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (stex_handshake(hba) == 0)

	printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n",
		pci_name(hba->pdev));

static int stex_reset(struct scsi_cmnd *cmd)
	hba = (struct st_hba *) &cmd->device->host->hostdata[0];

	printk(KERN_INFO DRV_NAME
		"(%s): resetting host\n", pci_name(hba->pdev));
	scsi_print_command(cmd);

	return stex_do_reset(hba) ? FAILED : SUCCESS;

static void stex_reset_work(struct work_struct *work)
	struct st_hba *hba = container_of(work, struct st_hba, reset_work);

static int stex_biosparam(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int geom[])
	int heads = 255, sectors = 63;

	if (capacity < 0x200000) {

	sector_div(capacity, heads * sectors);

static struct scsi_host_template driver_template = {
	.module				= THIS_MODULE,
	.proc_name			= DRV_NAME,
	.bios_param			= stex_biosparam,
	.queuecommand			= stex_queuecommand,
	.slave_alloc			= stex_slave_alloc,
	.slave_configure		= stex_slave_config,
	.slave_destroy			= stex_slave_destroy,
	.eh_abort_handler		= stex_abort,
	.eh_host_reset_handler		= stex_reset,

static struct pci_device_id stex_pci_tbl[] = {
	{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
	{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX12350 */
	{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX4350 */
	{ 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX24350 */

	{ 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },

	{ 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite },

	{ 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq },

	{ 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel },
	{ 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel },
	{ }	/* terminate list */

static struct st_card_info stex_card_info[] = {
	.alloc_rq	= stex_alloc_req,
	.map_sg		= stex_map_sg,
	.send		= stex_send_cmd,

	.alloc_rq	= stex_alloc_req,
	.map_sg		= stex_map_sg,
	.send		= stex_send_cmd,

	.alloc_rq	= stex_alloc_req,
	.map_sg		= stex_map_sg,
	.send		= stex_send_cmd,

	.alloc_rq	= stex_alloc_req,
	.map_sg		= stex_map_sg,
	.send		= stex_send_cmd,

	.alloc_rq	= stex_ss_alloc_req,
	.map_sg		= stex_ss_map_sg,
	.send		= stex_ss_send_cmd,

static int stex_set_dma_mask(struct pci_dev * pdev)
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
		&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));

static int stex_request_irq(struct st_hba *hba)
	struct pci_dev *pdev = hba->pdev;

	status = pci_enable_msi(pdev);
		printk(KERN_ERR DRV_NAME
			"(%s): error %d setting up MSI\n",
			pci_name(pdev), status);
		hba->msi_enabled = 1;
		hba->msi_enabled = 0;

	status = request_irq(pdev->irq, hba->cardtype == st_yel ?
		stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);

		if (hba->msi_enabled)
			pci_disable_msi(pdev);

static void stex_free_irq(struct st_hba *hba)
	struct pci_dev *pdev = hba->pdev;

	free_irq(pdev->irq, hba);
	if (hba->msi_enabled)
		pci_disable_msi(pdev);

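/*
 * Probe: all rings live in one coherent DMA block laid out as
 *   [request slots][scratch words, st_yel only][status ring][copy buffer],
 * optionally followed by extra firmware memory for st_seq and some st_vsc
 * boards (with a smaller ST_ADDITIONAL_MEM_MIN fallback if the full
 * allocation fails).  The per-card limits come from stex_card_info[].
 */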
static int __devinit
stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	struct Scsi_Host *host;
	const struct st_card_info *ci = NULL;
	u32 sts_offset, cp_offset, scratch_offset;

	err = pci_enable_device(pdev);

	pci_set_master(pdev);

	host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));
		printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",

	hba = (struct st_hba *)host->hostdata;
	memset(hba, 0, sizeof(struct st_hba));

	err = pci_request_regions(pdev, DRV_NAME);
		printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",
		goto out_scsi_host_put;

	hba->mmio_base = pci_ioremap_bar(pdev, 0);
	if ( !hba->mmio_base) {
		printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
		goto out_release_regions;

	err = stex_set_dma_mask(pdev);
		printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",

	hba->cardtype = (unsigned int) id->driver_data;
	ci = &stex_card_info[hba->cardtype];
	sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
	if (hba->cardtype == st_yel)
		sts_offset += (ci->sts_count+1) * sizeof(u32);
	cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg);
	hba->dma_size = cp_offset + sizeof(struct st_frame);
	if (hba->cardtype == st_seq ||
		(hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
		hba->extra_offset = hba->dma_size;
		hba->dma_size += ST_ADDITIONAL_MEM;

	hba->dma_mem = dma_alloc_coherent(&pdev->dev,
		hba->dma_size, &hba->dma_handle, GFP_KERNEL);
	if (!hba->dma_mem) {
		/* Retry minimum coherent mapping for st_seq and st_vsc */
		if (hba->cardtype == st_seq ||
			(hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
			printk(KERN_WARNING DRV_NAME
				"(%s): allocating min buffer for controller\n",
			hba->dma_size = hba->extra_offset
				+ ST_ADDITIONAL_MEM_MIN;
			hba->dma_mem = dma_alloc_coherent(&pdev->dev,
				hba->dma_size, &hba->dma_handle, GFP_KERNEL);

		if (!hba->dma_mem) {
			printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",

	hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
		printk(KERN_ERR DRV_NAME "(%s): ccb alloc failed\n",

	if (hba->cardtype == st_yel)
		hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
	hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
	hba->copy_buffer = hba->dma_mem + cp_offset;
	hba->rq_count = ci->rq_count;
	hba->rq_size = ci->rq_size;
	hba->sts_count = ci->sts_count;
	hba->alloc_rq = ci->alloc_rq;
	hba->map_sg = ci->map_sg;
	hba->send = ci->send;
	hba->mu_status = MU_STATE_STARTING;

	if (hba->cardtype == st_yel)
		host->sg_tablesize = 38;
	else
		host->sg_tablesize = 32;
	host->can_queue = ci->rq_count;
	host->cmd_per_lun = ci->rq_count;
	host->max_id = ci->max_id;
	host->max_lun = ci->max_lun;
	host->max_channel = ci->max_channel;
	host->unique_id = host->host_no;
	host->max_cmd_len = STEX_CDB_LENGTH;

	init_waitqueue_head(&hba->reset_waitq);

	snprintf(hba->work_q_name, sizeof(hba->work_q_name),
		"stex_wq_%d", host->host_no);
	hba->work_q = create_singlethread_workqueue(hba->work_q_name);
		printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n",

	INIT_WORK(&hba->reset_work, stex_reset_work);

	err = stex_request_irq(hba);
		printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",

	err = stex_handshake(hba);

	err = scsi_init_shared_tag_map(host, host->can_queue);
		printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",

	pci_set_drvdata(pdev, hba);

	err = scsi_add_host(host, &pdev->dev);
		printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",

	scsi_scan_host(host);

	destroy_workqueue(hba->work_q);
	dma_free_coherent(&pdev->dev, hba->dma_size,
		hba->dma_mem, hba->dma_handle);
	iounmap(hba->mmio_base);
out_release_regions:
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);

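/*
 * stex_hba_stop(): tell the firmware to stop before the host goes away.
 * Yosemite/yel controllers get an MGT_CMD/CTLR_SHUTDOWN request, older
 * ones a CONTROLLER_CMD power-state change; completion is polled through
 * the ccb's req_type with an ST_INTERNAL_TIMEOUT bound.
 */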
static void stex_hba_stop(struct st_hba *hba)
	struct req_msg *req;
	struct st_msg_header *msg_h;
	unsigned long flags;
	unsigned long before;

	spin_lock_irqsave(hba->host->host_lock, flags);
	req = hba->alloc_rq(hba);
	if (hba->cardtype == st_yel) {
		msg_h = (struct st_msg_header *)req - 1;
		memset(msg_h, 0, hba->rq_size);
		memset(req, 0, hba->rq_size);

	if (hba->cardtype == st_yosemite || hba->cardtype == st_yel) {
		req->cdb[0] = MGT_CMD;
		req->cdb[1] = MGT_CMD_SIGNATURE;
		req->cdb[2] = CTLR_CONFIG_CMD;
		req->cdb[3] = CTLR_SHUTDOWN;
		req->cdb[0] = CONTROLLER_CMD;
		req->cdb[1] = CTLR_POWER_STATE_CHANGE;
		req->cdb[2] = CTLR_POWER_SAVING;

	hba->ccb[tag].cmd = NULL;
	hba->ccb[tag].sg_count = 0;
	hba->ccb[tag].sense_bufflen = 0;
	hba->ccb[tag].sense_buffer = NULL;
	hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;

	hba->send(hba, req, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
			hba->ccb[tag].req_type = 0;

static void stex_hba_free(struct st_hba *hba)
	destroy_workqueue(hba->work_q);

	iounmap(hba->mmio_base);

	pci_release_regions(hba->pdev);

	dma_free_coherent(&hba->pdev->dev, hba->dma_size,
		hba->dma_mem, hba->dma_handle);

static void stex_remove(struct pci_dev *pdev)
	struct st_hba *hba = pci_get_drvdata(pdev);

	scsi_remove_host(hba->host);

	pci_set_drvdata(pdev, NULL);

	scsi_host_put(hba->host);

	pci_disable_device(pdev);

static void stex_shutdown(struct pci_dev *pdev)
	struct st_hba *hba = pci_get_drvdata(pdev);

MODULE_DEVICE_TABLE(pci, stex_pci_tbl);

static struct pci_driver stex_pci_driver = {
	.id_table	= stex_pci_tbl,
	.probe		= stex_probe,
	.remove		= __devexit_p(stex_remove),
	.shutdown	= stex_shutdown,

static int __init stex_init(void)
	printk(KERN_INFO DRV_NAME
		": Promise SuperTrak EX Driver version: %s\n",

	return pci_register_driver(&stex_pci_driver);

static void __exit stex_exit(void)
	pci_unregister_driver(&stex_pci_driver);

module_init(stex_init);
module_exit(stex_exit);