/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP,     0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array", &SA5_access},
	{0x21BE103C, "Smart Array", &SA5_access},
	{0x21BF103C, "Smart Array", &SA5_access},
	{0x21C0103C, "Smart Array", &SA5_access},
	{0x21C1103C, "Smart Array", &SA5_access},
	{0x21C2103C, "Smart Array", &SA5_access},
	{0x21C3103C, "Smart Array", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array", &SA5_access},
	{0x21C6103C, "Smart Array", &SA5_access},
	{0x21C7103C, "Smart Array", &SA5_access},
	{0x21C8103C, "Smart Array", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array", &SA5_access},
	{0x21CB103C, "Smart Array", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart Array", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
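/*
 * Example (added comment, not in the original source): the P212 entry
 * above, 0x3241103C, packs the 16-bit PCI subsystem device ID (0x3241)
 * in the high half and the subsystem vendor ID (0x103C, Hewlett-Packard)
 * in the low half -- matching the {0x103C, 0x3241} subsystem pair in
 * hpsa_pci_device_id[] above.
 */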
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}
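/*
 * Note (added comment, not in the original source): the Scsi_Host
 * private area holds a single pointer-sized slot containing the
 * struct ctlr_info pointer, so both helpers above dereference
 * shost_priv() once to recover the controller structure.
 */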
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: LUN failure detected\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: report LUN data changed\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}
static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}
static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}
static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}
static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}
static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};
static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}
static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN	(ARRAY_SIZE(raid_label) - 1)
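/*
 * Illustration (added comment, not in the original source): a volume
 * reporting raid_level HPSA_RAID_5 (3) indexes raid_label[3] and shows
 * up in sysfs as "RAID 5"; any out-of-range value is clamped to
 * RAID_UNKNOWN and reported as "RAID UNKNOWN" (see raid_level_show
 * below).
 */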
static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}
static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
		host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};
static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * table entry.
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector > 0))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}
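/*
 * Worked example (added comment, not in the original source): with a
 * block fetch table entry of 2 for this command's SG count, the bits
 * OR'd into c->busaddr are 1 | (2 << 1) = 0x5 -- bit 0 set for
 * performant ("pull") mode and bits 1-3 holding the block fetch table
 * entry, per the bit layout described above.
 */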
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}
static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}
/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
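/*
 * Note (added comment, not in the original source): the atomic counter
 * lets overlapping flash commands coexist safely -- the sample interval
 * is only restored to 30s once atomic_dec_and_test() observes the last
 * outstanding flash command complete (count reaching zero).
 */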
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c);
		break;
	default:
		set_performant_mode(h, c);
	}
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	h->access.submit_command(h, c);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
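/*
 * Note (added comment, not in the original source): target numbers
 * already in use on this bus are marked in the lun_taken bitmap, so
 * find_first_zero_bit() yields the lowest free target; the new
 * physical device is then placed at that target with LUN 0.
 */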
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}
/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  if raid map data is changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* ensure raid map updated prior to ->offload_enabled */
	}
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
			new_entry->target, new_entry->lun);
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
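/*
 * Note (added comment, not in the original source): this macro is an
 * unrolled 8-byte equality test on the SCSI-3 address, equivalent to
 * memcmp(a, b, 8) == 0 but usable in expression context without a
 * function call.
 */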
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}
static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	if (dev1->queue_depth != dev2->queue_depth)
		return 1;
	return 0;
}
/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}
/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}
/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j]->devtype != TYPE_DISK)
				continue;
			if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded. In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present. And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case
		 */
		if (!logical_drive->phys_disk[i]) {
			logical_drive->offload_enabled = 0;
			logical_drive->queue_depth = h->nr_cmds;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}
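/*
 * Note (added comment, not in the original source): the logical drive's
 * queue depth accumulates the sum of its member physical disks' queue
 * depths, capped at h->nr_cmds -- e.g. three members with depth 31 each
 * yield min(nr_cmds, 93).
 */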
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i]->devtype != TYPE_DISK)
			continue;
		if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
			continue;
		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
				h->scsi_host->host_no,
				sd[i]->bus, sd[i]->target, sd[i]->lun);
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}
/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}
/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL) {
		sdev->hostdata = sd;
		if (sd->queue_depth)
			scsi_change_queue_depth(sdev, sd->queue_depth);
		atomic_set(&sd->ioaccel_cmds_out, 0);
	}
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}
static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list) {
		dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
		return -ENOMEM;
	}
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i]) {
			dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
			goto clean;
		}
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}
static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;
	u32 chain_len;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
	chain_len = sizeof(*chain_sg) *
		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
	chain_sg->Len = cpu_to_le32(chain_len);
	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr = cpu_to_le64(0);
		return -1;
	}
	chain_sg->Addr = cpu_to_le64(temp64);
	return 0;
}
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;

	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
			le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
}
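/*
 * Note (added comment, not in the original source): SG chaining works
 * by turning the last embedded descriptor in c->SG[] into a pointer
 * (marked HPSA_SG_CHAIN) to a separately allocated, DMA-mapped block
 * holding the descriptors that did not fit in the command itself; the
 * unmap above only runs when such a chain block was actually used.
 */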
/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
					struct CommandList *c,
					struct scsi_cmnd *cmd,
					struct io_accel2_cmd *c2)
{
	int data_len;
	int retry = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			dev_warn(&h->pdev->dev,
				"%s: task complete with check condition.\n",
				"HP SSD Smart Path");
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT) {
				memset(cmd->sense_buffer, 0,
					SCSI_SENSE_BUFFERSIZE);
				break;
			}
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			dev_warn(&h->pdev->dev,
				"%s: task complete with BUSY status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			dev_warn(&h->pdev->dev,
				"%s: task complete with reservation conflict.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			/* Make scsi midlayer do unlimited retries */
			cmd->result = DID_IMM_RETRY << 16;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			dev_warn(&h->pdev->dev,
				"%s: task complete with aborted status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		default:
			dev_warn(&h->pdev->dev,
				"%s: task complete with unrecognized status: 0x%02x\n",
				"HP SSD Smart Path", c2->error_data.status);
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		/* don't expect to get here. */
		dev_warn(&h->pdev->dev,
			"unexpected delivery or target failure, status = 0x%02x\n",
			c2->error_data.status);
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		dev_warn(&h->pdev->dev, "task management function rejected.\n");
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Unrecognized server response: 0x%02x\n",
			"HP SSD Smart Path",
			c2->error_data.serv_response);
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}
static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0)) {
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return;
	}

	/* Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
	 */
	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
			dev->offload_enabled = 0;
		goto retry_cmd;
	}

	if (handle_ioaccel_mode2_error(h, c, cmd, c2))
		goto retry_cmd;

	cmd_free(h, c);
	cmd->scsi_done(cmd);
	return;

retry_cmd:
	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}
static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;

	unsigned char sense_key;
	unsigned char asc;      /* additional sense code */
	unsigned char ascq;     /* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;
	dev = cmd->device->hostdata;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */

	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
		atomic_dec(&cp->phys_disk->ioaccel_cmds_out);

	if (cp->cmd_type == CMD_IOACCEL2)
		return process_ioaccel2_completion(h, cp, cmd, dev);

	cmd->result |= ei->ScsiStatus;

	scsi_set_resid(cmd, ei->ResidualCnt);
	if (ei->CommandStatus == 0) {
		if (cp->cmd_type == CMD_IOACCEL1)
			atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
		cmd_free(h, cp);
		cmd->scsi_done(cmd);
		return;
	}

	/* copy the sense data */
	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
		sense_data_size = SCSI_SENSE_BUFFERSIZE;
	else
		sense_data_size = sizeof(ei->SenseInfo);
	if (ei->SenseLen < sense_data_size)
		sense_data_size = ei->SenseLen;

	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);

	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
	 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];

		cp->Header.SGList = scsi_sg_count(cmd);
		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
			IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.tag = c->tag;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
		if (is_logical_dev_addr_mode(dev->scsi3addr)) {
			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
				dev->offload_enabled = 0;
			INIT_WORK(&cp->work, hpsa_command_resubmit_worker);
			queue_work_on(raw_smp_processor_id(),
					h->resubmit_wq, &cp->work);
			return;
		}
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			/* Get sense key */
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get additional sense code qualifier */
			ascq = ei->SenseInfo[13];
		}
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (sense_key == ABORTED_COMMAND) {
				cmd->result |= DID_SOFT_ERROR << 16;
				break;
			}
			break;
		}
		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else { /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it. We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't. We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */
			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev, "cp %p has"
			" completed with data overrun "
			"reported\n", cp);
		break;
	case CMD_INVALID:
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response). You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p has "
			"protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
			cp, ei->ScsiStatus);
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
			"abort\n", cp);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	case CMD_IOACCEL_DISABLED:
		/* This only handles the direct pass-through case since RAID
		 * offload is handled above. Just attempt a retry.
		 */
		cmd->result = DID_SOFT_ERROR << 16;
		dev_warn(&h->pdev->dev,
				"cp %p had HP SSD Smart Path error\n", cp);
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}
	cmd_free(h, cp);
	cmd->scsi_done(cmd);
}
static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;

	for (i = 0; i < sg_used; i++)
		pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
				le32_to_cpu(c->SG[i].Len),
				data_direction);
}
static int hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return 0;
	}

	addr64 = pci_map_single(pdev, buf, buflen, data_direction);
	if (dma_mapping_error(&pdev->dev, addr64)) {
		/* Prevent subsequent unmap of something never mapped */
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return -1;
	}
	cp->SG[0].Addr = cpu_to_le64(addr64);
	cp->SG[0].Len = cpu_to_le32(buflen);
	cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
	cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
	return 0;
}
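/*
 * hpsa_map_one() always produces at most one SG descriptor, which is why
 * the simple-command paths that use it unmap with sg_used == 1. The
 * HPSA_SG_LAST flag in SG[0].Ext tells the controller the list is not
 * chained.
 */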
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}
static u32 lockup_detected(struct ctlr_info *h)
{
	int cpu;
	u32 rc, *lockup_detected;

	cpu = get_cpu();
	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
	rc = *lockup_detected;
	put_cpu();
	return rc;
}
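/*
 * h->lockup_detected is a per-cpu variable so a controller lockup can be
 * flagged without a global lock; reading the current CPU's copy suffices
 * here because the lockup detector sets every CPU's copy when it declares
 * the controller dead.
 */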
static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
	struct CommandList *c)
{
	/* If controller lockup detected, fake a hardware error. */
	if (unlikely(lockup_detected(h)))
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
	else
		hpsa_scsi_do_simple_cmd_core(h, c);
}
#define MAX_DRIVER_CMD_RETRIES 25
static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
{
	int backoff_time = 10, retry_count = 0;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		hpsa_scsi_do_simple_cmd_core(h, c);
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	} while ((check_for_unit_attention(h, c) ||
			check_for_busy(h, c)) &&
			retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}
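/*
 * Retry pacing sketch for the loop above: the first three retries are
 * immediate; from the fourth attempt on, the sleep doubles each time
 * until it saturates, i.e. roughly 10, 20, 40, ... 640, 1280 ms, then
 * 1280 ms for every remaining attempt up to MAX_DRIVER_CMD_RETRIES.
 */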
static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
			struct CommandList *c)
{
	const u8 *cdb = c->Request.CDB;
	const u8 *lun = c->Header.LUN.LunAddrBytes;

	dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
	" CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		txt, lun[0], lun[1], lun[2], lun[3],
		lun[4], lun[5], lun[6], lun[7],
		cdb[0], cdb[1], cdb[2], cdb[3],
		cdb[4], cdb[5], cdb[6], cdb[7],
		cdb[8], cdb[9], cdb[10], cdb[11],
		cdb[12], cdb[13], cdb[14], cdb[15]);
}
static void hpsa_scsi_interpret_error(struct ctlr_info *h,
			struct CommandList *cp)
{
	const struct ErrorInfo *ei = cp->err_info;
	struct device *d = &cp->h->pdev->dev;
	const u8 *sd = ei->SenseInfo;

	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		hpsa_print_cmd(h, "SCSI status", cp);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
			dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
				sd[2] & 0x0f, sd[12], sd[13]);
		else
			dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero. "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		hpsa_print_cmd(h, "overrun condition", cp);
		break;
	case CMD_INVALID:
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		hpsa_print_cmd(h, "invalid command", cp);
		dev_warn(d, "probably means device no longer present\n");
		break;
	case CMD_PROTOCOL_ERR:
		hpsa_print_cmd(h, "protocol error", cp);
		break;
	case CMD_HARDWARE_ERR:
		hpsa_print_cmd(h, "hardware error", cp);
		break;
	case CMD_CONNECTION_LOST:
		hpsa_print_cmd(h, "connection lost", cp);
		break;
	case CMD_ABORTED:
		hpsa_print_cmd(h, "aborted", cp);
		break;
	case CMD_ABORT_FAILED:
		hpsa_print_cmd(h, "abort failed", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		hpsa_print_cmd(h, "unsolicited abort", cp);
		break;
	case CMD_TIMEOUT:
		hpsa_print_cmd(h, "timed out", cp);
		break;
	case CMD_UNABORTABLE:
		hpsa_print_cmd(h, "unabortable", cp);
		break;
	default:
		hpsa_print_cmd(h, "unknown status", cp);
		dev_warn(d, "Unknown command status %x\n",
				ei->CommandStatus);
	}
}
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			u16 page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	if (c == NULL) {
		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return -ENOMEM;
	}

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
		unsigned char *scsi3addr, unsigned char page,
		struct bmic_controller_parameters *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return -ENOMEM;
	}

	if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return -ENOMEM;
	}

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
			scsi3addr, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
	hpsa_scsi_do_simple_cmd_core(h, c);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
	cmd_free(h, c);
	return rc;
}
static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}
#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
				struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;

	dev_info(&h->pdev->dev, "structure_size = %u\n",
				le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
			le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
			map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
			map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
			le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
			le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
			le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
			le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
			le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
			le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = 0x%x\n",
			le16_to_cpu(map_buff->flags));
	dev_info(&h->pdev->dev, "encryption = %s\n",
			le16_to_cpu(map_buff->flags) &
			RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
	dev_info(&h->pdev->dev, "dekindex = %u\n",
			le16_to_cpu(map_buff->dekindex));
	map_cnt = le16_to_cpu(map_buff->layout_map_count);
	for (map = 0; map < map_cnt; map++) {
		dev_info(&h->pdev->dev, "Map%u:\n", map);
		row_cnt = le16_to_cpu(map_buff->row_cnt);
		for (row = 0; row < row_cnt; row++) {
			dev_info(&h->pdev->dev, "  Row%u:\n", row);
			disks_per_row =
				le16_to_cpu(map_buff->data_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    D%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
			disks_per_row =
				le16_to_cpu(map_buff->metadata_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    M%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
		}
	}
}
#else
static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
			__attribute__((unused)) int rc,
			__attribute__((unused)) struct raid_map_data *map_buff)
{
}
#endif
static int hpsa_get_raid_map(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc = 0;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	if (c == NULL) {
		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return -ENOMEM;
	}
	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
			sizeof(this_device->raid_map), 0,
			scsi3addr, TYPE_CMD)) {
		dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
		cmd_free(h, c);
		return -ENOMEM;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		cmd_free(h, c);
		return -1;
	}
	cmd_free(h, c);

	/* @todo in the future, dynamically allocate RAID map memory */
	if (le32_to_cpu(this_device->raid_map.structure_size) >
				sizeof(this_device->raid_map)) {
		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
		rc = -1;
	}
	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
	return rc;
}
static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
		unsigned char scsi3addr[], u16 bmic_device_index,
		struct bmic_identify_physical_device *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static int hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return 0;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return 0;
exit_supported:
	kfree(buf);
	return 1;
}
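/*
 * Supported-VPD-pages layout assumed above (SPC INQUIRY page 0x00):
 * buf[3] holds the number of supported page codes and the codes
 * themselves start at buf[4], which is why the scan reads buf[3 + i]
 * for i = 1..pages.
 */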
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
	this_device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
	if (this_device->offload_config) {
		this_device->offload_enabled =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		if (hpsa_get_raid_map(h, scsi3addr, this_device))
			this_device->offload_enabled = 0;
	}
out:
	kfree(buf);
	return;
}
/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}
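/*
 * The copy above starts at byte 8 of INQUIRY VPD page 0x83 (device
 * identification), i.e. past the 4-byte page header and the first
 * designator header, which is where this controller family places the
 * unique id bytes.
 */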
static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		void *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	if (c == NULL) {			/* trouble... */
		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return -1;
	}
	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	} else {
		struct ReportLUNdata *rld = buf;

		if (rld->extended_response_flag != extended_response) {
			dev_err(&h->pdev->dev,
				"report luns requested format %u, got %u\n",
				extended_response,
				rld->extended_response_flag);
			rc = -1;
		}
	}
out:
	cmd_free(h, c);
	return rc;
}
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportExtendedLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
			HPSA_REPORT_PHYS_EXTENDED);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}
static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
/* Use VPD inquiry to get details of volume status */
static int hpsa_get_volume_status(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	int rc;
	int status;
	int size;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

	/* Does controller have VPD for logical volume status? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
		goto exit_failed;

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}
/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 *  0xff (offline for unknown reasons)
 *  # (integer code indicating one of several NOT READY states
 *     describing why a volume is to be kept offline)
 */
static int hpsa_volume_offline(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense, sense_key, asc, ascq;
	int ldstat = 0;
	u16 cmd_status;
	u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);
	if (!c)
		return 0;
	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_core(h, c);
	sense = c->err_info->SenseInfo;
	sense_key = sense[2];
	asc = sense[12];
	ascq = sense[13];
	cmd_status = c->err_info->CommandStatus;
	scsi_status = c->err_info->ScsiStatus;
	cmd_free(h, c);
	/* Is the volume 'not ready'? */
	if (cmd_status != CMD_TARGET_STATUS ||
		scsi_status != SAM_STAT_CHECK_CONDITION ||
		sense_key != NOT_READY ||
		asc != ASC_LUN_NOT_READY) {
		return 0;
	}

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return 0;
}
static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr)) {
		int volume_offline;

		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
		volume_offline = hpsa_volume_offline(h, scsi3addr);
		if (volume_offline < 0 || volume_offline > 0xff)
			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
		this_device->volume_offline = volume_offline & 0xff;
	} else {
		this_device->raid_level = RAID_UNKNOWN;
		this_device->offload_config = 0;
		this_device->offload_enabled = 0;
		this_device->volume_offline = 0;
		this_device->queue_depth = h->nr_cmds;
	}

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}

	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}
static unsigned char *ext_target_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	"MSA 2040 SAS",
	NULL,
};

static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; ext_target_model[i]; i++)
		if (strncmp(device->model, ext_target_model[i],
			strlen(ext_target_model[i])) == 0)
			return 1;
	return 0;
}
/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-external target logical volumes on bus 0, external target logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes))
			hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
		else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device, 2, -1, -1);
		return;
	}
	/* It's a logical device */
	if (is_ext_target(h, device)) {
		/* external target way, put logicals on bus 1
		 * and match target/lun numbers box
		 * reports, other smart array, bus 0, target 0, match lunid
		 */
		hpsa_set_bus_target_lun(device,
			1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}
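/*
 * Worked example for the decode above (illustrative values): a plain
 * logical volume whose 4-byte LUN id decodes to 0x40000005 gets bus 0,
 * target 0, lun 5 (lunid & 0x3fff); on an external target box the same
 * path uses bits 16-29 for the target and the low byte for the lun, so
 * 0x40020105 would become bus 1, target 2, lun 5.
 */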
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the external targets (arrays), we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason. *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_ext_target_dev(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	unsigned long lunzerobits[], int *n_ext_target_devs)
{
	unsigned char scsi3addr[8];

	if (test_bit(tmpdevice->target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_ext_target(h, tmpdevice))
		return 0; /* Only external target devices have this problem. */

	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = tmpdevice->target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
		dev_warn(&h->pdev->dev, "Maximum number of external "
			"target devices exceeded. Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*n_ext_target_devs)++;
	hpsa_set_bus_target_lun(this_device,
				tmpdevice->bus, tmpdevice->target, 0);
	set_bit(tmpdevice->target, lunzerobits);
	return 1;
}
 * Get address of physical disk used for an ioaccel2 mode command:
 *	1. Extract ioaccel2 handle from the command.
 *	2. Find a matching ioaccel2 handle from list of physical disks.
 *	3. Return:
 *		1 and set scsi3addr to address of matching physical
 *		0 if no matching physical disk was found.
 */
static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
{
	struct ReportExtendedLUNdata *physicals = NULL;
	int responsesize = 24;	/* size of physical extended response */
	int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
	u32 nphysicals = 0;	/* number of reported physical devs */
	int found = 0;		/* found match (1) or not (0) */
	u32 find;		/* handle we need to match */
	int i;
	struct scsi_cmnd *scmd;	/* scsi command within request being aborted */
	struct hpsa_scsi_dev_t *d; /* device of request being aborted */
	struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
	__le32 it_nexus;	/* 4 byte device handle for the ioaccel2 cmd */
	__le32 scsi_nexus;	/* 4 byte device handle for the ioaccel2 cmd */

	if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
		return 0; /* no match */

	/* point to the ioaccel2 device handle */
	c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
	if (c2a == NULL)
		return 0; /* no match */

	scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
	if (scmd == NULL)
		return 0; /* no match */

	d = scmd->device->hostdata;
	if (d == NULL)
		return 0; /* no match */

	it_nexus = cpu_to_le32(d->ioaccel_handle);
	scsi_nexus = c2a->scsi_nexus;
	find = le32_to_cpu(c2a->scsi_nexus);

	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
			__func__, scsi_nexus,
			d->device_id[0], d->device_id[1], d->device_id[2],
			d->device_id[3], d->device_id[4], d->device_id[5],
			d->device_id[6], d->device_id[7], d->device_id[8],
			d->device_id[9], d->device_id[10], d->device_id[11],
			d->device_id[12], d->device_id[13], d->device_id[14],
			d->device_id[15]);

	/* Get the list of physical devices */
	physicals = kzalloc(reportsize, GFP_KERNEL);
	if (physicals == NULL)
		return 0;
	if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) {
		dev_err(&h->pdev->dev,
			"Can't lookup %s device handle: report physical LUNs failed.\n",
			"HP SSD Smart Path");
		kfree(physicals);
		return 0;
	}
	nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
							responsesize;

	/* find ioaccel2 handle in list of physicals: */
	for (i = 0; i < nphysicals; i++) {
		struct ext_report_lun_entry *entry = &physicals->LUN[i];

		/* handle is in bytes 28-31 of each lun */
		if (entry->ioaccel_handle != find)
			continue; /* didn't match */
		found = 1;
		memcpy(scsi3addr, entry->lunid, 8);
		if (h->raid_offload_debug > 0)
			dev_info(&h->pdev->dev,
				"%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
				__func__, find,
				entry->ioaccel_handle, scsi3addr);
		break; /* found it */
	}

	kfree(physicals);
	if (found)
		return 1;
	else
		return 0;
}
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
 * logdev. The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}
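/*
 * The divisors above reflect the response formats in use: extended
 * physical report entries are 24 bytes each and plain logical report
 * entries are 8 bytes, so LUNListLength (a big-endian byte count)
 * divided by the entry size yields the number of reported LUNs.
 */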
static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i -
				(raid_ctlr_position == 0)].lunid[0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}
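/*
 * Index layout assumed by figure_lunaddrbytes(): with the controller
 * first (raid_ctlr_position == 0, the SCSI rev 5 case), index 0 is the
 * RAID controller, indices 1..nphysicals are physical devices, and
 * logical volumes follow; otherwise physicals come first, then
 * logicals, with the controller in the final slot
 * (raid_ctlr_position == nphysicals + nlogicals).
 */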
static int hpsa_hba_mode_enabled(struct ctlr_info *h)
{
	int rc;
	int hba_mode_enabled;
	struct bmic_controller_parameters *ctlr_params;

	ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
		GFP_KERNEL);
	if (!ctlr_params)
		return -ENOMEM;
	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
		sizeof(struct bmic_controller_parameters));
	if (rc) {
		kfree(ctlr_params);
		return rc;
	}

	hba_mode_enabled =
		((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
	kfree(ctlr_params);
	return hba_mode_enabled;
}
/* get physical drive ioaccel handle and queue depth */
static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev,
		u8 *lunaddrbytes,
		struct bmic_identify_physical_device *id_phys)
{
	int rc;
	struct ext_report_lun_entry *rle =
		(struct ext_report_lun_entry *) lunaddrbytes;

	dev->ioaccel_handle = rle->ioaccel_handle;
	memset(id_phys, 0, sizeof(*id_phys));
	rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
			GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
			sizeof(*id_phys));
	if (!rc)
		/* Reserve space for FW operations */
#define DRIVE_CMDS_RESERVED_FOR_FW 2
#define DRIVE_QUEUE_DEPTH 7
		dev->queue_depth =
			le16_to_cpu(id_phys->current_queue_depth_limit) -
				DRIVE_CMDS_RESERVED_FOR_FW;
	else
		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
	atomic_set(&dev->ioaccel_cmds_out, 0);
}
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset. That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
	int rescan_hba_mode;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list ||
		!tmpdevice || !id_phys) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	rescan_hba_mode = hpsa_hba_mode_enabled(h);
	if (rescan_hba_mode < 0)
		goto out;

	if (!h->hba_mode_enabled && rescan_hba_mode)
		dev_warn(&h->pdev->dev, "HBA mode enabled\n");
	else if (h->hba_mode_enabled && !rescan_hba_mode)
		dev_warn(&h->pdev->dev, "HBA mode disabled\n");

	h->hba_mode_enabled = rescan_hba_mode;

	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
			logdev_list, &nlogicals))
		goto out;

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				" %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			goto out;
		}
		ndev_allocated++;
	}

	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);
		/* skip masked physical devices. */
		if (lunaddrbytes[3] & 0xC0 &&
			i < nphysicals + (raid_ctlr_position == 0))
			continue;

		/* Get device type, vendor, model, device id */
		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR))
			continue; /* skip it if we can't talk to it. */
		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		this_device = currentsd[ncurrent];

		/*
		 * For external target devices, we have to insert a LUN 0 which
		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
		 * is nonetheless an enclosure device there. We have to
		 * present that otherwise linux won't find anything if
		 * there is no lun 0.
		 */
		if (add_ext_target_dev(h, tmpdevice, this_device,
				lunaddrbytes, lunzerobits,
				&n_ext_target_devs)) {
			ncurrent++;
			this_device = currentsd[ncurrent];
		}

		*this_device = *tmpdevice;

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
			if (h->hba_mode_enabled) {
				/* never use raid mapper in HBA mode */
				this_device->offload_enabled = 0;
				ncurrent++;
				break;
			} else if (h->acciopath_status) {
				if (i >= nphysicals) {
					ncurrent++;
					break;
				}
			} else {
				if (i < nphysicals)
					break;
				ncurrent++;
				break;
			}
			if (h->transMethod & CFGTBL_Trans_io_accel1 ||
				h->transMethod & CFGTBL_Trans_io_accel2) {
				hpsa_get_ioaccel_drive_info(h, this_device,
							lunaddrbytes, id_phys);
				atomic_set(&this_device->ioaccel_cmds_out, 0);
				ncurrent++;
			}
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}
	hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent);
	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);
}
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	unsigned int len;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i, sg_index, chained;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	curr_sg = cp->SG;
	chained = 0;
	sg_index = 0;
	scsi_for_each_sg(cmd, sg, use_sg, i) {
		if (i == h->max_cmd_sg_entries - 1 &&
			use_sg > h->max_cmd_sg_entries) {
			chained = 1;
			curr_sg = h->cmd_sg_list[cp->cmdindex];
			sg_index = 0;
		}
		addr64 = (u64) sg_dma_address(sg);
		len = sg_dma_len(sg);
		curr_sg->Addr = cpu_to_le64(addr64);
		curr_sg->Len = cpu_to_le32(len);
		curr_sg->Ext = cpu_to_le32(0);
		curr_sg++;
	}
	(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
	return 0;
}
#define IO_ACCEL_INELIGIBLE (1)
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
	int is_write = 0;
	u32 block;
	u32 block_cnt;

	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_12:
		is_write = 1;
	case READ_6:
	case READ_12:
		if (*cdb_len == 6) {
			block = (((u32) cdb[2]) << 8) | cdb[3];
			block_cnt = cdb[4];
		} else {
			BUG_ON(*cdb_len != 12);
			block = (((u32) cdb[2]) << 24) |
				(((u32) cdb[3]) << 16) |
				(((u32) cdb[4]) << 8) |
				cdb[5];
			block_cnt =
				(((u32) cdb[6]) << 24) |
				(((u32) cdb[7]) << 16) |
				(((u32) cdb[8]) << 8) |
				cdb[9];
		}
		if (block_cnt > 0xffff)
			return IO_ACCEL_INELIGIBLE;

		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (block >> 24);
		cdb[3] = (u8) (block >> 16);
		cdb[4] = (u8) (block >> 8);
		cdb[5] = (u8) (block);
		cdb[6] = 0;
		cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}
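/*
 * Example of the rewrite above: a READ_12 for LBA 0x12345 and 32 blocks
 * (cdb[2..5] = 00 01 23 45, cdb[6..9] = 00 00 00 20) becomes a READ_10
 * with the same LBA in cdb[2..5] and the count in cdb[7..8]. Only
 * transfers of at most 0xffff blocks fit the 10-byte form; anything
 * larger is bounced back as IO_ACCEL_INELIGIBLE.
 */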
static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int len;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;

	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr = cpu_to_le64(addr64);
			curr_sg->Len = cpu_to_le32(len);
			curr_sg->Ext = cpu_to_le32(0);
			curr_sg++;
		}
		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}

	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
	cp->transfer_len = cpu_to_le32(total_len);
	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
	cp->control = cpu_to_le32(control);
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);
	/* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	c->phys_disk = dev;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}
 * Set encryption parameters for the ioaccel2 request
 */
static void set_encrypt_ioaccel2(struct ctlr_info *h,
	struct CommandList *c, struct io_accel2_cmd *cp)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	u64 first_block;

	/* Are we doing encryption on this device */
	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
		return;
	/* Set the data encryption key index. */
	cp->dekindex = map->dekindex;

	/* Set the encryption enable flag, encoded into direction field. */
	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;

	/* Set encryption tweak values based on logical block address
	 * If block size is 512, tweak value is LBA.
	 * For other block sizes, tweak is (LBA * block size)/ 512)
	 */
	switch (cmd->cmnd[0]) {
	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_6:
	case READ_6:
		first_block = get_unaligned_be16(&cmd->cmnd[2]);
		break;
	case WRITE_10:
	case READ_10:
	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_12:
	case READ_12:
		first_block = get_unaligned_be32(&cmd->cmnd[2]);
		break;
	case WRITE_16:
	case READ_16:
		first_block = get_unaligned_be64(&cmd->cmnd[2]);
		break;
	default:
		dev_err(&h->pdev->dev,
			"ERROR: %s: size (0x%x) not supported for encryption\n",
			__func__, cmd->cmnd[0]);
		BUG();
		break;
	}

	if (le32_to_cpu(map->volume_blk_size) != 512)
		first_block = first_block *
				le32_to_cpu(map->volume_blk_size)/512;

	cp->tweak_lower = cpu_to_le32(first_block);
	cp->tweak_upper = cpu_to_le32(first_block >> 32);
}
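/*
 * Tweak example for the scaling above: with a 4096-byte volume block
 * size, a request starting at LBA 100 gets tweak 100 * 4096 / 512 = 800,
 * split into tweak_lower/tweak_upper; with 512-byte blocks the tweak is
 * simply the LBA itself.
 */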
static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct ioaccel2_sg_element *curr_sg;
	int use_sg, i;
	struct scatterlist *sg;
	u64 addr64;
	u32 len;
	u32 total_len = 0;

	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL2;
	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(cp, 0, sizeof(*cp));
	cp->IU_type = IOACCEL2_IU_TYPE;

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
		curr_sg = cp->sg;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
		cp->direction |= IOACCEL2_DIR_NO_DATA;
	}

	/* Set encryption parameters, if necessary */
	set_encrypt_ioaccel2(h, c, cp);

	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	memcpy(cp->cdb, cdb, sizeof(cp->cdb));

	/* fill in sg elements */
	cp->sg_count = (u8) use_sg;

	cp->data_len = cpu_to_le32(total_len);
	cp->err_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	cp->err_len = cpu_to_le32(sizeof(cp->error_data));

	enqueue_cmd_and_start_io(h, c);
	return 0;
}
 * Queue a command to the correct I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	/* Try to honor the device's queue depth */
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
					phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
}
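/*
 * Rotation sketch for raid_map_helper() below: for a 3-way mirror
 * (layout_map_count == 3), successive requests carry offload_to_mirror
 * values 0, 1, 2, 0, ... and the loop walks *map_index forward one
 * data_disks_per_row stride per group until it lands in the requested
 * mirror group, spreading reads across all copies.
 */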
3625 static void raid_map_helper(struct raid_map_data
*map
,
3626 int offload_to_mirror
, u32
*map_index
, u32
*current_group
)
3628 if (offload_to_mirror
== 0) {
3629 /* use physical disk in the first mirrored group. */
3630 *map_index
%= le16_to_cpu(map
->data_disks_per_row
);
3634 /* determine mirror group that *map_index indicates */
3635 *current_group
= *map_index
/
3636 le16_to_cpu(map
->data_disks_per_row
);
3637 if (offload_to_mirror
== *current_group
)
3639 if (*current_group
< le16_to_cpu(map
->layout_map_count
) - 1) {
3640 /* select map index from next group */
3641 *map_index
+= le16_to_cpu(map
->data_disks_per_row
);
3644 /* select map index from first group */
3645 *map_index
%= le16_to_cpu(map
->data_disks_per_row
);
3648 } while (offload_to_mirror
!= *current_group
);
3652 * Attempt to perform offload RAID mapping for a logical volume I/O.
3654 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info
*h
,
3655 struct CommandList
*c
)
3657 struct scsi_cmnd
*cmd
= c
->scsi_cmd
;
3658 struct hpsa_scsi_dev_t
*dev
= cmd
->device
->hostdata
;
3659 struct raid_map_data
*map
= &dev
->raid_map
;
3660 struct raid_map_disk_data
*dd
= &map
->data
[0];
3663 u64 first_block
, last_block
;
3666 u64 first_row
, last_row
;
3667 u32 first_row_offset
, last_row_offset
;
3668 u32 first_column
, last_column
;
3669 u64 r0_first_row
, r0_last_row
;
3670 u32 r5or6_blocks_per_row
;
3671 u64 r5or6_first_row
, r5or6_last_row
;
3672 u32 r5or6_first_row_offset
, r5or6_last_row_offset
;
3673 u32 r5or6_first_column
, r5or6_last_column
;
3674 u32 total_disks_per_row
;
3676 u32 first_group
, last_group
, current_group
;
3684 #if BITS_PER_LONG == 32
3687 int offload_to_mirror
;
3689 /* check for valid opcode, get LBA and block count */
3690 switch (cmd
->cmnd
[0]) {
3695 (((u64
) cmd
->cmnd
[2]) << 8) |
3697 block_cnt
= cmd
->cmnd
[4];
3705 (((u64
) cmd
->cmnd
[2]) << 24) |
3706 (((u64
) cmd
->cmnd
[3]) << 16) |
3707 (((u64
) cmd
->cmnd
[4]) << 8) |
3710 (((u32
) cmd
->cmnd
[7]) << 8) |
3717 (((u64
) cmd
->cmnd
[2]) << 24) |
3718 (((u64
) cmd
->cmnd
[3]) << 16) |
3719 (((u64
) cmd
->cmnd
[4]) << 8) |
3722 (((u32
) cmd
->cmnd
[6]) << 24) |
3723 (((u32
) cmd
->cmnd
[7]) << 16) |
3724 (((u32
) cmd
->cmnd
[8]) << 8) |
3731 (((u64
) cmd
->cmnd
[2]) << 56) |
3732 (((u64
) cmd
->cmnd
[3]) << 48) |
3733 (((u64
) cmd
->cmnd
[4]) << 40) |
3734 (((u64
) cmd
->cmnd
[5]) << 32) |
3735 (((u64
) cmd
->cmnd
[6]) << 24) |
3736 (((u64
) cmd
->cmnd
[7]) << 16) |
3737 (((u64
) cmd
->cmnd
[8]) << 8) |
3740 (((u32
) cmd
->cmnd
[10]) << 24) |
3741 (((u32
) cmd
->cmnd
[11]) << 16) |
3742 (((u32
) cmd
->cmnd
[12]) << 8) |
3746 return IO_ACCEL_INELIGIBLE
; /* process via normal I/O path */
3748 last_block
= first_block
+ block_cnt
- 1;
3750 /* check for write to non-RAID-0 */
3751 if (is_write
&& dev
->raid_level
!= 0)
3752 return IO_ACCEL_INELIGIBLE
;
3754 /* check for invalid block or wraparound */
3755 if (last_block
>= le64_to_cpu(map
->volume_blk_cnt
) ||
3756 last_block
< first_block
)
3757 return IO_ACCEL_INELIGIBLE
;
3759 /* calculate stripe information for the request */
3760 blocks_per_row
= le16_to_cpu(map
->data_disks_per_row
) *
3761 le16_to_cpu(map
->strip_size
);
3762 strip_size
= le16_to_cpu(map
->strip_size
);
3763 #if BITS_PER_LONG == 32
3764 tmpdiv
= first_block
;
3765 (void) do_div(tmpdiv
, blocks_per_row
);
3767 tmpdiv
= last_block
;
3768 (void) do_div(tmpdiv
, blocks_per_row
);
3770 first_row_offset
= (u32
) (first_block
- (first_row
* blocks_per_row
));
3771 last_row_offset
= (u32
) (last_block
- (last_row
* blocks_per_row
));
3772 tmpdiv
= first_row_offset
;
3773 (void) do_div(tmpdiv
, strip_size
);
3774 first_column
= tmpdiv
;
3775 tmpdiv
= last_row_offset
;
3776 (void) do_div(tmpdiv
, strip_size
);
3777 last_column
= tmpdiv
;
3779 first_row
= first_block
/ blocks_per_row
;
3780 last_row
= last_block
/ blocks_per_row
;
3781 first_row_offset
= (u32
) (first_block
- (first_row
* blocks_per_row
));
3782 last_row_offset
= (u32
) (last_block
- (last_row
* blocks_per_row
));
3783 first_column
= first_row_offset
/ strip_size
;
3784 last_column
= last_row_offset
/ strip_size
;
3787 /* if this isn't a single row/column then give to the controller */
3788 if ((first_row
!= last_row
) || (first_column
!= last_column
))
3789 return IO_ACCEL_INELIGIBLE
;
3791 /* proceeding with driver mapping */
3792 total_disks_per_row
= le16_to_cpu(map
->data_disks_per_row
) +
3793 le16_to_cpu(map
->metadata_disks_per_row
);
3794 map_row
= ((u32
)(first_row
>> map
->parity_rotation_shift
)) %
3795 le16_to_cpu(map
->row_cnt
);
3796 map_index
= (map_row
* total_disks_per_row
) + first_column
;
3798 switch (dev
->raid_level
) {
3800 break; /* nothing special to do */
3802 /* Handles load balance across RAID 1 members.
3803 * (2-drive R1 and R10 with even # of drives.)
3804 * Appropriate for SSDs, not optimal for HDDs
3806 BUG_ON(le16_to_cpu(map
->layout_map_count
) != 2);
3807 if (dev
->offload_to_mirror
)
3808 map_index
+= le16_to_cpu(map
->data_disks_per_row
);
3809 dev
->offload_to_mirror
= !dev
->offload_to_mirror
;
3812 /* Handles N-way mirrors (R1-ADM)
3813 * and R10 with # of drives divisible by 3.)
3815 BUG_ON(le16_to_cpu(map
->layout_map_count
) != 3);
3817 offload_to_mirror
= dev
->offload_to_mirror
;
3818 raid_map_helper(map
, offload_to_mirror
,
3819 &map_index
, ¤t_group
);
3820 /* set mirror group to use next time */
3822 (offload_to_mirror
>=
3823 le16_to_cpu(map
->layout_map_count
) - 1)
3824 ? 0 : offload_to_mirror
+ 1;
3825 dev
->offload_to_mirror
= offload_to_mirror
;
3826 /* Avoid direct use of dev->offload_to_mirror within this
3827 * function since multiple threads might simultaneously
3828 * increment it beyond the range of dev->layout_map_count -1.
3833 if (le16_to_cpu(map
->layout_map_count
) <= 1)
3836 /* Verify first and last block are in same RAID group */
3837 r5or6_blocks_per_row
=
3838 le16_to_cpu(map
->strip_size
) *
3839 le16_to_cpu(map
->data_disks_per_row
);
3840 BUG_ON(r5or6_blocks_per_row
== 0);
3841 stripesize
= r5or6_blocks_per_row
*
3842 le16_to_cpu(map
->layout_map_count
);
3843 #if BITS_PER_LONG == 32
3844 tmpdiv
= first_block
;
3845 first_group
= do_div(tmpdiv
, stripesize
);
3846 tmpdiv
= first_group
;
3847 (void) do_div(tmpdiv
, r5or6_blocks_per_row
);
3848 first_group
= tmpdiv
;
3849 tmpdiv
= last_block
;
3850 last_group
= do_div(tmpdiv
, stripesize
);
3851 tmpdiv
= last_group
;
3852 (void) do_div(tmpdiv
, r5or6_blocks_per_row
);
3853 last_group
= tmpdiv
;
3855 first_group
= (first_block
% stripesize
) / r5or6_blocks_per_row
;
3856 last_group
= (last_block
% stripesize
) / r5or6_blocks_per_row
;
3858 if (first_group
!= last_group
)
3859 return IO_ACCEL_INELIGIBLE
;
3861 /* Verify request is in a single row of RAID 5/6 */
3862 #if BITS_PER_LONG == 32
3863 tmpdiv
= first_block
;
3864 (void) do_div(tmpdiv
, stripesize
);
3865 first_row
= r5or6_first_row
= r0_first_row
= tmpdiv
;
3866 tmpdiv
= last_block
;
3867 (void) do_div(tmpdiv
, stripesize
);
3868 r5or6_last_row
= r0_last_row
= tmpdiv
;
3870 first_row
= r5or6_first_row
= r0_first_row
=
3871 first_block
/ stripesize
;
3872 r5or6_last_row
= r0_last_row
= last_block
/ stripesize
;
3874 if (r5or6_first_row
!= r5or6_last_row
)
3875 return IO_ACCEL_INELIGIBLE
;
		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = r5or6_last_row_offset;
		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
		tmpdiv = r5or6_first_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		first_column = r5or6_first_column = tmpdiv;
		tmpdiv = r5or6_last_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		r5or6_last_column = tmpdiv;
#else
		first_row_offset = r5or6_first_row_offset =
			(u32)((first_block % stripesize) %
						r5or6_blocks_per_row);

		r5or6_last_row_offset =
			(u32)((last_block % stripesize) %
						r5or6_blocks_per_row);

		first_column = r5or6_first_column =
			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
		r5or6_last_column =
			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
#endif
		if (r5or6_first_column != r5or6_last_column)
			return IO_ACCEL_INELIGIBLE;
		/* Request is eligible */
		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
			le16_to_cpu(map->row_cnt);

		map_index = (first_group *
			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
			(map_row * total_disks_per_row) + first_column;
		break;
	default:
		return IO_ACCEL_INELIGIBLE;
	}

	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
		return IO_ACCEL_INELIGIBLE;
	c->phys_disk = dev->phys_disk[map_index];

	disk_handle = dd[map_index].ioaccel_handle;
	disk_block = le64_to_cpu(map->disk_starting_blk) +
			first_row * le16_to_cpu(map->strip_size) +
			(first_row_offset - first_column *
			le16_to_cpu(map->strip_size));
	disk_block_cnt = block_cnt;

	/* handle differing logical/physical block sizes */
	if (map->phys_blk_shift) {
		disk_block <<= map->phys_blk_shift;
		disk_block_cnt <<= map->phys_blk_shift;
	}
	BUG_ON(disk_block_cnt > 0xffff);
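	/*
	 * Illustrative example: with strip_size = 128 and
	 * disk_starting_blk = 0, a request in first_row = 2 with
	 * first_row_offset = 300 and first_column = 2 maps to
	 * disk_block = 2 * 128 + (300 - 2 * 128) = 300.
	 */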
	/* build the new CDB for the physical disk I/O */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 56);
		cdb[3] = (u8) (disk_block >> 48);
		cdb[4] = (u8) (disk_block >> 40);
		cdb[5] = (u8) (disk_block >> 32);
		cdb[6] = (u8) (disk_block >> 24);
		cdb[7] = (u8) (disk_block >> 16);
		cdb[8] = (u8) (disk_block >> 8);
		cdb[9] = (u8) (disk_block);
		cdb[10] = (u8) (disk_block_cnt >> 24);
		cdb[11] = (u8) (disk_block_cnt >> 16);
		cdb[12] = (u8) (disk_block_cnt >> 8);
		cdb[13] = (u8) (disk_block_cnt);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_len = 16;
	} else {
		/* 10-byte CDB */
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 24);
		cdb[3] = (u8) (disk_block >> 16);
		cdb[4] = (u8) (disk_block >> 8);
		cdb[5] = (u8) (disk_block);
		cdb[6] = 0;
		cdb[7] = (u8) (disk_block_cnt >> 8);
		cdb[8] = (u8) (disk_block_cnt);
		cdb[9] = 0;
		cdb_len = 10;
	}
	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
						dev->phys_disk[map_index]);
}
/* Submit commands down the "normal" RAID stack path */
static int hpsa_ciss_submit(struct ctlr_info *h,
	struct CommandList *c, struct scsi_cmnd *cmd,
	unsigned char scsi3addr[])
{
	cmd->host_scribble = (unsigned char *) c;
	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
		break;
	case DMA_FROM_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
		break;
	case DMA_NONE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */

		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly. Either way is acceptable for
		 * our purposes here.
		 */
		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		cmd_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}
static void hpsa_command_resubmit_worker(struct work_struct *work)
{
	struct scsi_cmnd *cmd;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *c =
			container_of(work, struct CommandList, work);

	cmd = c->scsi_cmd;
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return;
	}
	if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
		/*
		 * If we get here, it means dma mapping failed. Try
		 * again via scsi mid layer, which will then get
		 * SCSI_MLQUEUE_HOST_BUSY.
		 */
		cmd->result = DID_IMM_RETRY << 16;
		cmd->scsi_done(cmd);
	}
}
/* Running in struct Scsi_Host->host_lock less mode */
static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	int rc = 0;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}
	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	if (unlikely(lockup_detected(h))) {
		cmd->result = DID_ERROR << 16;
		cmd->scsi_done(cmd);
		return 0;
	}
	c = cmd_alloc(h);
	if (c == NULL) {			/* trouble... */
		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	if (unlikely(lockup_detected(h))) {
		cmd->result = DID_ERROR << 16;
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return 0;
	}

	/*
	 * Call alternate submit routine for I/O accelerated commands.
	 * Retries always go down the normal I/O path.
	 */
	if (likely(cmd->retries == 0 &&
		cmd->request->cmd_type == REQ_TYPE_FS &&
		h->acciopath_status)) {

		cmd->host_scribble = (unsigned char *) c;
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;

		if (dev->offload_enabled) {
			rc = hpsa_scsi_ioaccel_raid_map(h, c);
			if (rc == 0)
				return 0; /* Sent on ioaccel path */
			if (rc < 0) {	/* scsi_dma_map failed. */
				cmd_free(h, c);
				return SCSI_MLQUEUE_HOST_BUSY;
			}
		} else if (dev->ioaccel_handle) {
			rc = hpsa_scsi_ioaccel_direct_map(h, c);
			if (rc == 0)
				return 0; /* Sent on direct map path */
			if (rc < 0) {	/* scsi_dma_map failed. */
				cmd_free(h, c);
				return SCSI_MLQUEUE_HOST_BUSY;
			}
		}
	}
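	/*
	 * Falling through to here means rc was IO_ACCEL_INELIGIBLE (> 0):
	 * the request could not be offloaded, so it is sent down the
	 * normal RAID stack path instead.
	 */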
	return hpsa_ciss_submit(h, c, cmd, scsi3addr);
}
static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
{
	unsigned long flags;

	/*
	 * Don't let rescans be initiated on a controller known
	 * to be locked up. If the controller locks up *during*
	 * a rescan, that thread is probably hosed, but at least
	 * we can prevent new rescan threads from piling up on a
	 * locked up controller.
	 */
	if (unlikely(lockup_detected(h))) {
		spin_lock_irqsave(&h->scan_lock, flags);
		h->scan_finished = 1;
		wake_up_all(&h->scan_wait_queue);
		spin_unlock_irqrestore(&h->scan_lock, flags);
		return 1;
	}
	return 0;
}
static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	if (do_not_scan_if_controller_locked_up(h))
		return;

	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	spin_unlock_irqrestore(&h->scan_lock, flags);

	if (do_not_scan_if_controller_locked_up(h))
		return;

	hpsa_update_scsi_devices(h, h->scsi_host->host_no);

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1; /* mark scan as finished. */
	wake_up_all(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;

	if (!logical_drive)
		return -ENODEV;

	if (qdepth < 1)
		qdepth = 1;
	else if (qdepth > logical_drive->queue_depth)
		qdepth = logical_drive->queue_depth;

	return scsi_change_queue_depth(sdev, qdepth);
}
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}
static void hpsa_unregister_scsi(struct ctlr_info *h)
{
	/* we are being forcibly unloaded, and may not refuse. */
	scsi_remove_host(h->scsi_host);
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
}
static int hpsa_register_scsi(struct ctlr_info *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL)
		goto fail;

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds -
			HPSA_CMDS_RESERVED_FOR_ABORTS -
			HPSA_CMDS_RESERVED_FOR_DRIVER -
			HPSA_MAX_CONCURRENT_PASSTHRUS;
	sh->cmd_per_lun = sh->can_queue;
	sh->sg_tablesize = h->maxsgentries;
	h->scsi_host = sh;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[h->intr_mode];
	sh->unique_id = sh->irq;
	error = scsi_add_host(sh, &h->pdev->dev);
	if (error)
		goto fail_host_put;
	scsi_scan_host(sh);
	return 0;

fail_host_put:
	dev_err(&h->pdev->dev, "%s: scsi_add_host"
		" failed for controller %d\n", __func__, h->ctlr);
	scsi_host_put(sh);
	return error;
fail:
	dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
		" failed for controller %d\n", __func__, h->ctlr);
	return -ENOMEM;
}
static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[])
{
	int rc;
	int count = 0;
	int waittime = 1; /* seconds */
	struct CommandList *c;

	c = cmd_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"wait_for_device_to_become_ready.\n");
		return IO_ERROR;
	}

	/* Send test unit ready until device ready, or give up. */
	while (count < HPSA_TUR_RETRY_LIMIT) {

		/* Wait for a bit.  do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);
		count++;
		rc = 0; /* Device ready. */

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime = waittime * 2;
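		/* e.g. successive waits of 1, 2, 4, 8, ... seconds, the
		 * doubling capped once waittime reaches
		 * HPSA_MAX_WAIT_INTERVAL_SECS.
		 */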
		/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
		(void) fill_cmd(c, TEST_UNIT_READY, h,
				NULL, 0, 0, lunaddr, TYPE_CMD);
		hpsa_scsi_do_simple_cmd_core(h, c);
		/* no unmap needed here because no data xfer. */

		if (c->err_info->CommandStatus == CMD_SUCCESS)
			break;

		if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			c->err_info->SenseInfo[2] == UNIT_ATTENTION))
			break;

		dev_warn(&h->pdev->dev, "waiting %d secs "
			"for device to become ready.\n", waittime);
		rc = 1; /* device not ready. */
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_free(h, c);
	return rc;
}
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining.  Doing a host- or bus-reset can't do anything good here.
 */
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;

	/* find the controller to which the command to be aborted was sent */
	h = sdev_to_hba(scsicmd->device);
	if (h == NULL) /* paranoia */
		return FAILED;
	dev = scsicmd->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
			"device lookup failed.\n");
		return FAILED;
	}
	dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
	/* send a reset to the SCSI LUN which the command was sent to */
	rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
		return SUCCESS;

	dev_warn(&h->pdev->dev, "resetting device failed.\n");
	return FAILED;
}
static void swizzle_abort_tag(u8 *tag)
{
	u8 original_tag[8];

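	/* Byte-reverse each 4-byte half of the 8-byte tag; e.g. bytes
	 * 00 01 02 03 04 05 06 07 become 03 02 01 00 07 06 05 04.
	 */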
	memcpy(original_tag, tag, 8);
	tag[0] = original_tag[3];
	tag[1] = original_tag[2];
	tag[2] = original_tag[1];
	tag[3] = original_tag[0];
	tag[4] = original_tag[7];
	tag[5] = original_tag[6];
	tag[6] = original_tag[5];
	tag[7] = original_tag[4];
}
static void hpsa_get_tag(struct ctlr_info *h,
	struct CommandList *c, __le32 *taglower, __le32 *tagupper)
{
	u64 tag;

	if (c->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
			&h->ioaccel_cmd_pool[c->cmdindex];
		tag = le64_to_cpu(cm1->tag);
		*tagupper = cpu_to_le32(tag >> 32);
		*taglower = cpu_to_le32(tag);
		return;
	}
	if (c->cmd_type == CMD_IOACCEL2) {
		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
			&h->ioaccel2_cmd_pool[c->cmdindex];
		/* upper tag not used in ioaccel2 mode */
		memset(tagupper, 0, sizeof(*tagupper));
		*taglower = cm2->Tag;
		return;
	}
	tag = le64_to_cpu(c->Header.tag);
	*tagupper = cpu_to_le32(tag >> 32);
	*taglower = cpu_to_le32(tag);
}
static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
	struct CommandList *abort, int swizzle)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;
	__le32 tagupper, taglower;

	c = cmd_alloc(h);
	if (c == NULL) {	/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return -ENOMEM;
	}

	/* fill_cmd can't fail here, no buffer to map */
	(void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
		0, 0, scsi3addr, TYPE_MSG);
	if (swizzle)
		swizzle_abort_tag(&c->Request.CDB[4]);
	hpsa_scsi_do_simple_cmd_core(h, c);
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_SUCCESS:
		break;
	case CMD_UNABORTABLE: /* Very common, don't make noise. */
		rc = -1;
		break;
	default:
		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
			__func__, tagupper, taglower);
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		break;
	}
	cmd_free(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
		__func__, tagupper, taglower);
	return rc;
}
/* ioaccel2 path firmware cannot handle abort task requests.
 * Change abort requests to physical target reset, and send to the
 * address of the physical disk used for the ioaccel 2 command.
 * Return 0 on success (IO_OK)
 */

static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort)
{
	int rc = IO_OK;
	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
	unsigned char *psa = &phys_scsi3addr[0];

	/* Get a pointer to the hpsa logical device. */
	scmd = (struct scsi_cmnd *) abort->scsi_cmd;
	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
	if (dev == NULL) {
		dev_warn(&h->pdev->dev,
			"Cannot abort: no device pointer for command.\n");
		return -1; /* not abortable */
	}

	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
			scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);

	if (!dev->offload_enabled) {
		dev_warn(&h->pdev->dev,
			"Can't abort: device is not operating in HP SSD Smart Path mode.\n");
		return -1; /* not abortable */
	}

	/* Incoming scsi3addr is logical addr. We need physical disk addr. */
	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
		return -1; /* not abortable */
	}

	/* send the reset */
	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
	rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
	if (rc != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return rc; /* failed to reset */
	}

	/* wait for device to recover */
	if (wait_for_device_to_become_ready(h, psa) != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return -1; /* failed to recover */
	}

	/* device recovered */
	dev_info(&h->pdev->dev,
		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		psa[0], psa[1], psa[2], psa[3],
		psa[4], psa[5], psa[6], psa[7]);

	return rc; /* success */
}
/* Some Smart Arrays need the abort tag swizzled, and some don't.  It's hard to
 * tell which kind we're dealing with, so we send the abort both ways.  There
 * shouldn't be any collisions between swizzled and unswizzled tags due to the
 * way we construct our tags but we check anyway in case the assumptions which
 * make this true someday become false.
 */
static int hpsa_send_abort_both_ways(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort)
{
	/* ioaccelerator mode 2 commands should be aborted via the
	 * accelerated path, since the RAID path is unaware of these commands,
	 * but underlying firmware can't handle abort TMF.
	 * Change abort to physical device reset.
	 */
	if (abort->cmd_type == CMD_IOACCEL2)
		return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);

	return hpsa_send_abort(h, scsi3addr, abort, 0) &&
			hpsa_send_abort(h, scsi3addr, abort, 1);
}
/* Send an abort for the specified command.
 *	If the device and controller support it,
 *		send a task abort request.
 */
static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
{
	int i, rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *abort; /* pointer to command to be aborted */
	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
	char msg[256];		/* For debug messaging. */
	int ml = 0;
	__le32 tagupper, taglower;
	int refcount;

	/* Find the controller of the command to be aborted */
	h = sdev_to_hba(sc->device);
	if (WARN(h == NULL,
			"ABORT REQUEST FAILED, Controller lookup failed.\n"))
		return FAILED;

	/* Check that controller supports some kind of task abort */
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		return FAILED;

	memset(msg, 0, sizeof(msg));
	ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ",
		h->scsi_host->host_no, sc->device->channel,
		sc->device->id, sc->device->lun);

	/* Find the device of the command to be aborted */
	dev = sc->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
				msg);
		return FAILED;
	}

	/* Get SCSI command to be aborted */
	abort = (struct CommandList *) sc->host_scribble;
	if (abort == NULL) {
		/* This can happen if the command already completed. */
		return SUCCESS;
	}
	refcount = atomic_inc_return(&abort->refcount);
	if (refcount == 1) { /* Command is done already. */
		cmd_free(h, abort);
		return SUCCESS;
	}
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
	as = (struct scsi_cmnd *) abort->scsi_cmd;
	if (as != NULL)
		ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
			as->cmnd[0], as->serial_number);
	dev_dbg(&h->pdev->dev, "%s\n", msg);
	dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
	/*
	 * Command is in flight, or possibly already completed
	 * by the firmware (but not to the scsi mid layer) but we can't
	 * distinguish which.  Send the abort down.
	 */
	rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
	if (rc != 0) {
		dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
		dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
			h->scsi_host->host_no,
			dev->bus, dev->target, dev->lun);
		cmd_free(h, abort);
		return FAILED;
	}
	dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);

	/* If the abort(s) above completed and actually aborted the
	 * command, then the command to be aborted should already be
	 * completed.  If not, wait around a bit more to see if they
	 * manage to complete normally.
	 */
#define ABORT_COMPLETE_WAIT_SECS 30
	for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
		refcount = atomic_read(&abort->refcount);
		if (refcount < 2) {
			cmd_free(h, abort);
			return SUCCESS;
		} else {
			msleep(100);
		}
	}
	dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
		msg, ABORT_COMPLETE_WAIT_SECS);
	cmd_free(h, abort);
	return FAILED;
}
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  Lock must be held when calling this.
 * cmd_free() is the complement.
 */

static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	int i;
	union u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;
	int refcount;
	unsigned long offset;

	/*
	 * There is some *extremely* small but non-zero chance that that
	 * multiple threads could get in here, and one thread could
	 * be scanning through the list of bits looking for a free
	 * one, but the free ones are always behind him, and other
	 * threads sneak in behind him and eat them before he can
	 * get to them, so that while there is always a free one, a
	 * very unlucky thread might be starved anyway, never able to
	 * beat the other threads.  In reality, this happens so
	 * infrequently as to be indistinguishable from never.
	 */

	offset = h->last_allocation; /* benignly racy */
	for (;;) {
		i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset);
		if (unlikely(i == h->nr_cmds)) {
			offset = 0;
			continue;
		}
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (unlikely(refcount > 1)) {
			cmd_free(h, c); /* already in use */
			offset = (i + 1) % h->nr_cmds;
			continue;
		}
		set_bit(i & (BITS_PER_LONG - 1),
			h->cmd_pool_bits + (i / BITS_PER_LONG));
		break; /* it's ours now. */
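		/* e.g. i = 70 with BITS_PER_LONG == 64 sets bit
		 * 70 & 63 == 6 in bitmap word 70 / 64 == 1.
		 */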
	}
	h->last_allocation = i; /* benignly racy */
	/* Zero out all of commandlist except the last field, refcount */
	memset(c, 0, offsetof(struct CommandList, refcount));
	c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT));
	cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c);
	c->err_info = h->errinfo_pool + i;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + i * sizeof(*c->err_info);

	c->cmdindex = i;

	c->busaddr = (u32) cmd_dma_handle;
	temp64.val = (u64) err_dma_handle;
	c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
	c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));

	c->h = h;
	return c;
}
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
	if (atomic_dec_and_test(&c->refcount)) {
		int i;

		i = c - h->cmd_pool;
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
	}
}
#ifdef CONFIG_COMPAT

static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
	void __user *arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	int cmd, void __user *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return hpsa_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}
static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev, "driver version string '%s' "
			"unrecognized.", HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
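	/* e.g. driver version "3.4.4" packs to 0x030404 */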
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}
static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	struct CommandList *c;
	char *buff = NULL;
	u64 temp64;
	int rc = 0;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -EFAULT;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out_kfree;
			}
		} else {
			memset(buff, 0, iocommand.buf_size);
		}
	}
	c = cmd_alloc(h);
	if (c == NULL) {
		rc = -ENOMEM;
		goto out_kfree;
	}
	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else { /* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));

	/* Fill in Request block */
	memcpy(&c->Request, &iocommand.Request,
		sizeof(c->Request));

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64 = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
			c->SG[0].Addr = cpu_to_le64(0);
			c->SG[0].Len = cpu_to_le32(0);
			rc = -ENOMEM;
			goto out;
		}
		c->SG[0].Addr = cpu_to_le64(temp64);
		c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
	}
	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
	if (iocommand.buf_size > 0)
		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);

	/* Copy the error information out */
	memcpy(&iocommand.error_info, c->err_info,
		sizeof(iocommand.error_info));
	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}
	if ((iocommand.Request.Type.Direction & XFER_READ) &&
		iocommand.buf_size > 0) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			rc = -EFAULT;
			goto out;
		}
	}
out:
	cmd_free(h, c);
out_kfree:
	kfree(buff);
	return rc;
}
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	struct CommandList *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	u64 temp64;
	BYTE sg_used = 0;
	int status = 0;
	u32 left;
	u32 sz;
	int i;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = (BIG_IOCTL_Command_struct *)
	    kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_alloc(h);
	if (c == NULL) {
		status = -ENOMEM;
		goto cleanup1;
	}
	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = (u8) sg_used;
	c->Header.SGTotal = cpu_to_le16(sg_used);
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {
		for (i = 0; i < sg_used; i++) {
			temp64 = pci_map_single(h->pdev, buff[i],
				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
			if (dma_mapping_error(&h->pdev->dev,
						(dma_addr_t) temp64)) {
				c->SG[i].Addr = cpu_to_le64(0);
				c->SG[i].Len = cpu_to_le32(0);
				hpsa_pci_unmap(h->pdev, c, i,
					PCI_DMA_BIDIRECTIONAL);
				status = -ENOMEM;
				goto cleanup0;
			}
			c->SG[i].Addr = cpu_to_le64(temp64);
			c->SG[i].Len = cpu_to_le32(buff_size[i]);
			c->SG[i].Ext = cpu_to_le32(0);
		}
		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
	}
	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup0;
	}
	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;

		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				status = -EFAULT;
				goto cleanup0;
			}
			ptr += buff_size[i];
		}
	}
	status = 0;
cleanup0:
	cmd_free(h, c);
cleanup1:
	if (buff) {
		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;
	int rc;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		/* trigger a rescan */
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	case CCISS_BIG_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_big_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	default:
		return -ENOTTY;
	}
}
static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type)
{
	struct CommandList *c;

	c = cmd_alloc(h);
	if (!c)
		return -ENOMEM;
	/* fill_cmd can't fail here, no data buffer to map */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
		RAID_CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
	c->waiting = NULL;
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
	return 0;
}
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	int pci_dir = XFER_NONE;
	struct CommandList *a; /* for commands to be aborted */

	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code & VPD_PAGE) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = (page_code & 0xff);
			}
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to controller so It's a physical command
			   mode = 00 target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
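			/* e.g. size = 0x00010000 is encoded big-endian as
			 * CDB[6..9] = 00 01 00 00.
			 */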
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			c->Request.CDB[7] = (size >> 8) & 0xFF;
			c->Request.CDB[8] = size & 0xFF;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0;
			break;
		case HPSA_GET_RAID_MAP:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_CISS_READ;
			c->Request.CDB[1] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case BMIC_SENSE_CONTROLLER_PARAMETERS:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_IDENTIFY_PHYSICAL_DEVICE:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
			BUG();
			return -1;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {

		case HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
			/* If bytes 4-7 are zero, it means reset the */
			/* LunID device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		case HPSA_ABORT_MSG:
			a = buff; /* point to command to be aborted */
			dev_dbg(&h->pdev->dev,
				"Abort Tag:0x%016llx request Tag:0x%016llx",
				a->Header.tag, c->Header.tag);
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0; /* Don't time out */
			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
			c->Request.CDB[2] = 0x00; /* reserved */
			c->Request.CDB[3] = 0x00; /* reserved */
			/* Tag to abort goes in CDB[4]-CDB[11] */
			memcpy(&c->Request.CDB[4], &a->Header.tag,
				sizeof(a->Header.tag));
			c->Request.CDB[12] = 0x00; /* reserved */
			c->Request.CDB[13] = 0x00; /* reserved */
			c->Request.CDB[14] = 0x00; /* reserved */
			c->Request.CDB[15] = 0x00; /* reserved */
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (GET_DIR(c->Request.type_attr_dir)) {
	case XFER_READ:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case XFER_WRITE:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case XFER_NONE:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
	}
	if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
		return -1;
	return 0;
}
/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
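	/* e.g. base 0xfebf1250 with 4 KiB pages: page_base = 0xfebf1000,
	 * page_offs = 0x250, and the offset is added back after mapping.
	 */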
	void __iomem *page_remapped = ioremap_nocache(page_base,
		page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}
static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
	return h->access.command_completed(h, q);
}

static inline bool interrupt_pending(struct ctlr_info *h)
{
	return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(struct ctlr_info *h)
{
	return (h->access.intr_pending(h) == 0) ||
		(h->interrupts_enabled == 0);
}
static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
	u32 raw_tag)
{
	if (unlikely(tag_index >= h->nr_cmds)) {
		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
		return 1;
	}
	return 0;
}
static inline void finish_cmd(struct CommandList *c)
{
	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
			|| c->cmd_type == CMD_IOACCEL2))
		complete_scsi_command(c);
	else if (c->cmd_type == CMD_IOCTL_PEND)
		complete(c->waiting);
}
static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
{
#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
#define HPSA_SIMPLE_ERROR_BITS 0x03
	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return tag & ~HPSA_SIMPLE_ERROR_BITS;
	return tag & ~HPSA_PERF_ERROR_BITS;
}
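/* In performant mode the low DIRECT_LOOKUP_SHIFT bits of a completed
 * tag carry error/status flags; masking (above) or shifting (below)
 * recovers the command pool index.
 */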
/* process completion of an indexed ("direct lookup") command */
static inline void process_indexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag_index;
	struct CommandList *c;

	tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
	if (!bad_tag(h, tag_index, raw_tag)) {
		c = h->cmd_pool + tag_index;
		finish_cmd(c);
	}
}
/* Some controllers, like p400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * Only need to check for this in the hpsa_xxx_discard_completions
 * functions.
 */
static int ignore_bogus_interrupt(struct ctlr_info *h)
{
	if (likely(!reset_devices))
		return 0;

	if (likely(h->interrupts_enabled))
		return 0;

	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
		"(known firmware bug.)  Ignoring.\n");

	return 1;
}
/*
 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 * Relies on (h-q[x] == x) being true for x such that
 * 0 <= x < MAX_REPLY_QUEUES.
 */
static struct ctlr_info *queue_to_hba(u8 *queue)
{
	return container_of((queue - *queue), struct ctlr_info, q[0]);
}
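/* e.g. if queue points at h->q[2], then *queue == 2 and
 * queue - *queue == &h->q[0], from which container_of() recovers h.
 */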
static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u8 q = *(u8 *) queue;
	u32 raw_tag;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY)
			raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}

static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY)
		raw_tag = next_command(h, q);
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba((u8 *) queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY) {
			process_indexed_cmd(h, raw_tag);
			raw_tag = next_command(h, q);
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY) {
		process_indexed_cmd(h, raw_tag);
		raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}
/* Send a message CDB to the firmware. Careful, this only works
 * in simple mode, not performant mode due to the tag lookup.
 * We only ever use this immediately after a controller reset.
 */
static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
			unsigned char type)
{
	struct Command {
		struct CommandListHeader CommandHeader;
		struct RequestBlock Request;
		struct ErrDescriptor ErrorDescriptor;
	};
	struct Command *cmd;
	static const size_t cmd_sz = sizeof(*cmd) +
					sizeof(cmd->ErrorDescriptor);
	dma_addr_t paddr64;
	__le32 paddr32;
	u32 tag;
	void __iomem *vaddr;
	int i, err;

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL)
		return -ENOMEM;

	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
	 * CCISS commands, so they must be allocated from the lower 4GiB of
	 * memory.
	 */
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		iounmap(vaddr);
		return err;
	}

	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
	if (cmd == NULL) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
	 * although there's no guarantee, we assume that the address is at
	 * least 4-byte aligned (most likely, it's page-aligned).
	 */
	paddr32 = cpu_to_le32(paddr64);

	cmd->CommandHeader.ReplyQueue = 0;
	cmd->CommandHeader.SGList = 0;
	cmd->CommandHeader.SGTotal = cpu_to_le16(0);
	cmd->CommandHeader.tag = cpu_to_le64(paddr64);
	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);

	cmd->Request.CDBLen = 16;
	cmd->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
	cmd->Request.Timeout = 0; /* Don't time out */
	cmd->Request.CDB[0] = opcode;
	cmd->Request.CDB[1] = type;
	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
	cmd->ErrorDescriptor.Addr =
			cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
	cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));

	writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);

	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
			break;
		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
	}
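	/* The controller posts the command's bus address back in the reply
	 * FIFO; the low HPSA_SIMPLE_ERROR_BITS carry completion status,
	 * hence the mask before comparing against paddr64.
	 */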
	iounmap(vaddr);

	/* we leak the DMA buffer here ... no choice since the controller could
	 * still complete the command.
	 */
	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
			opcode, type);
		return -ETIMEDOUT;
	}

	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);

	if (tag & HPSA_ERROR_BIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
			opcode, type);
		return -EIO;
	}

	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
		opcode, type);
	return 0;
}

#define hpsa_noop(p) hpsa_message(p, 3, 0)
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
	void __iomem *vaddr, u32 use_doorbell)
{

	if (use_doorbell) {
		/* For everything after the P600, the PCI power state method
		 * of resetting the controller doesn't work, so we have this
		 * other way using the doorbell register.
		 */
		dev_info(&pdev->dev, "using doorbell to reset controller\n");
		writel(use_doorbell, vaddr + SA5_DOORBELL);

		/* PMC hardware guys tell us we need a 10 second delay after
		 * doorbell reset and before any attempt to talk to the board
		 * at all to ensure that this actually works and doesn't fall
		 * over in some weird corner cases.
		 */
		msleep(10000);
	} else { /* Try to do it the PCI power state way */

		/* Quoting from the Open CISS Specification: "The Power
		 * Management Control/Status Register (CSR) controls the power
		 * state of the device.  The normal operating state is D0,
		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
		 * the controller, place the interface device in D3 then to D0,
		 * this causes a secondary PCI reset which will reset the
		 * controller."
		 */
		int rc = 0;

		dev_info(&pdev->dev, "using PCI PM to reset controller\n");

		/* enter the D3hot power management state */
		rc = pci_set_power_state(pdev, PCI_D3hot);
		if (rc)
			return rc;

		msleep(500);

		/* enter the D0 power management state */
		rc = pci_set_power_state(pdev, PCI_D0);
		if (rc)
			return rc;

		/*
		 * The P600 requires a small delay when changing states.
		 * Otherwise we may think the board did not reset and we bail.
		 * This for kdump only and is particular to the P600.
		 */
		msleep(500);
	}
	return 0;
}
static void init_driver_version(char *driver_version, int len)
{
	memset(driver_version, 0, len);
	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}

static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
{
	char *driver_version;
	int i, size = sizeof(cfgtable->driver_version);

	driver_version = kmalloc(size, GFP_KERNEL);
	if (!driver_version)
		return -ENOMEM;

	init_driver_version(driver_version, size);
	for (i = 0; i < size; i++)
		writeb(driver_version[i], &cfgtable->driver_version[i]);
	kfree(driver_version);
	return 0;
}

static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
					  unsigned char *driver_ver)
{
	int i;

	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
		driver_ver[i] = readb(&cfgtable->driver_version[i]);
}

static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
{
	char *driver_ver, *old_driver_ver;
	int rc, size = sizeof(cfgtable->driver_version);

	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
	if (!old_driver_ver)
		return -ENOMEM;
	driver_ver = old_driver_ver + size;

	/* After a reset, the 32 bytes of "driver version" in the cfgtable
	 * should have been changed, otherwise we know the reset failed.
	 */
	init_driver_version(old_driver_ver, size);
	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
	rc = !memcmp(driver_ver, old_driver_ver, size);
	kfree(old_driver_ver);
	return rc;
}
/* This does a hard reset of the controller using PCI power management
 * states or the using the doorbell register.
 */
static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	void __iomem *vaddr;
	unsigned long paddr;
	u32 misc_fw_support;
	int rc;
	struct CfgTable __iomem *cfgtable;
	u32 use_doorbell;
	u32 board_id;
	u16 command_register;

	/* For controllers as old as the P600, this is very nearly
	 * the same thing as
	 *
	 * pci_save_state(pci_dev);
	 * pci_set_power_state(pci_dev, PCI_D3hot);
	 * pci_set_power_state(pci_dev, PCI_D0);
	 * pci_restore_state(pci_dev);
	 *
	 * For controllers newer than the P600, the pci power state
	 * method of resetting doesn't work so we have another way
	 * using the doorbell register.
	 */

	rc = hpsa_lookup_board_id(pdev, &board_id);
	if (rc < 0) {
		dev_warn(&pdev->dev, "Board ID not found\n");
		return rc;
	}
	if (!ctlr_is_resettable(board_id)) {
		dev_warn(&pdev->dev, "Controller not resettable\n");
		return -ENODEV;
	}

	/* if controller is soft- but not hard resettable... */
	if (!ctlr_is_hard_resettable(board_id))
		return -ENOTSUPP; /* try soft reset later. */

	/* Save the PCI command register */
	pci_read_config_word(pdev, 4, &command_register);
	pci_save_state(pdev);

	/* find the first memory BAR, so we can find the cfg table */
	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
	if (rc)
		return rc;
	vaddr = remap_pci_mem(paddr, 0x250);
	if (!vaddr)
		return -ENOMEM;

	/* find cfgtable in order to check if reset via doorbell is supported */
	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
					&cfg_base_addr_index, &cfg_offset);
	if (rc)
		goto unmap_vaddr;
	cfgtable = remap_pci_mem(pci_resource_start(pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
	if (!cfgtable) {
		rc = -ENOMEM;
		goto unmap_vaddr;
	}
	rc = write_driver_ver_to_cfgtable(cfgtable);
	if (rc)
		goto unmap_cfgtable;

	/* If reset via doorbell register is supported, use that.
	 * There are two such methods.  Favor the newest method.
	 */
	misc_fw_support = readl(&cfgtable->misc_fw_support);
	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
	if (use_doorbell) {
		use_doorbell = DOORBELL_CTLR_RESET2;
	} else {
		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
		if (use_doorbell) {
			dev_warn(&pdev->dev,
				"Soft reset not supported. Firmware update is required.\n");
			rc = -ENOTSUPP; /* try soft reset */
			goto unmap_cfgtable;
		}
	}

	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		goto unmap_cfgtable;

	pci_restore_state(pdev);
	pci_write_config_word(pdev, 4, command_register);

	/* Some devices (notably the HP Smart Array 5i Controller)
	   need a little pause here */
	msleep(HPSA_POST_RESET_PAUSE_MSECS);

	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"Failed waiting for board to become ready after hard reset\n");
		goto unmap_cfgtable;
	}

	rc = controller_reset_failed(vaddr);
	if (rc < 0)
		goto unmap_cfgtable;
	if (rc) {
		dev_warn(&pdev->dev, "Unable to successfully reset "
			"controller. Will try soft reset.\n");
		rc = -ENOTSUPP;
	} else {
		dev_info(&pdev->dev, "board ready after hard reset.\n");
	}

unmap_cfgtable:
	iounmap(cfgtable);

unmap_vaddr:
	iounmap(vaddr);
	return rc;
}
/*
 *  We cannot read the structure directly, for portability we must use
 *  the io functions.
 *  This is for debug only.
 */
static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
{
#ifdef HPSA_DEBUG
	int i;
	char temp_name[17];

	dev_info(dev, "Controller Configuration information\n");
	dev_info(dev, "------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	dev_info(dev, "   Signature = %s\n", temp_name);
	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
	dev_info(dev, "   Transport methods supported = 0x%x\n",
	       readl(&(tb->TransportSupport)));
	dev_info(dev, "   Transport methods active = 0x%x\n",
	       readl(&(tb->TransportActive)));
	dev_info(dev, "   Requested transport Method = 0x%x\n",
	       readl(&(tb->HostWrite.TransportRequest)));
	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntDelay)));
	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntCount)));
	dev_info(dev, "   Max outstanding commands = %d\n",
	       readl(&(tb->CmdsOutMax)));
	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	dev_info(dev, "   Server Name = %s\n", temp_name);
	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
		readl(&(tb->HeartBeat)));
#endif				/* HPSA_DEBUG */
}
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
				       "base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use legacy INTx mode.
 */
static void hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
	int err, i;
	struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];

	for (i = 0; i < MAX_REPLY_QUEUES; i++) {
		hpsa_msix_entries[i].vector = 0;
		hpsa_msix_entries[i].entry = i;
	}

	/* Some boards advertise MSI but don't really support it */
	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
		goto default_int_mode;
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
		dev_info(&h->pdev->dev, "MSI-X capable controller\n");
		h->msix_vector = MAX_REPLY_QUEUES;
		if (h->msix_vector > num_online_cpus())
			h->msix_vector = num_online_cpus();
		err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
					    1, h->msix_vector);
		if (err < 0) {
			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
			h->msix_vector = 0;
			goto single_msi_mode;
		} else if (err < h->msix_vector) {
			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
			       "available\n", err);
		}
		h->msix_vector = err;
		for (i = 0; i < h->msix_vector; i++)
			h->intr[i] = hpsa_msix_entries[i].vector;
		return;
	}
single_msi_mode:
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
		dev_info(&h->pdev->dev, "MSI capable controller\n");
		if (!pci_enable_msi(h->pdev))
			h->msi_vector = 1;
		else
			dev_warn(&h->pdev->dev, "MSI init failed\n");
	}
default_int_mode:
#endif				/* CONFIG_PCI_MSI */
	/* if we get here we're going to use the default interrupt mode */
	h->intr[h->intr_mode] = h->pdev->irq;
}

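/*
 * Worked example (illustrative): on a 4-CPU system (assuming
 * MAX_REPLY_QUEUES > 4), the code above clamps h->msix_vector to 4 and asks
 * pci_enable_msix_range() for between 1 and 4 vectors.  A positive return
 * smaller than the request (say 2) is accepted and simply means fewer reply
 * queues; only a negative return falls back to single MSI and then INTx.
 */
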
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id)
			return i;

	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
		!hpsa_allow_any) {
		dev_warn(&pdev->dev, "unrecognized board ID: "
			"0x%08x, ignoring.\n", *board_id);
			return -ENODEV;
	}
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}

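/*
 * Worked example (values taken from the hpsa_pci_device_id table above): a
 * board with subsystem vendor 0x103C and subsystem device 0x3241 yields
 *
 *	*board_id = (0x3241 << 16) | 0x103C = 0x3241103C
 *
 * which is the 32-bit form that products[] and the quirk checks in this
 * file (e.g. the P600 test against 0x3225103C below) match on.
 */
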
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar)
{
	int i;

	*memory_bar = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}

static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}

static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}

static int hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable) {
		dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
		return -ENOMEM;
	}
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
				cfg_base_addr_index)+cfg_offset+trans_offset,
				sizeof(*h->transtable));
	if (!h->transtable)
		return -ENOMEM;
	return 0;
}

static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < 16) {
		dev_warn(&h->pdev->dev, "Controller reports "
			"max supported commands of %d, an obvious lie. "
			"Using 16.  Ensure that firmware is up to date.\n",
			h->max_commands);
		h->max_commands = 16;
	}
}

/* If the controller reports that the total max sg entries is greater than 512,
 * then we know that chained SG blocks work.  (Original smart arrays did not
 * support chained SG blocks and would return zero for max sg entries.)
 */
static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
{
	return h->maxsgentries > 512;
}

/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands;
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
	if (hpsa_supports_chained_sg_blocks(h)) {
		/* Limit in-command s/g elements to 32 to save dma'able
		 * memory.
		 */
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		/*
		 * Original smart arrays supported at most 31 s/g entries
		 * embedded inline in the command (trying to use more
		 * would lock up the controller)
		 */
		h->max_cmd_sg_entries = 31;
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}

	/* Find out what task management functions are supported and cache */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
}

static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_err(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
	u32 driver_support;

	driver_support = readl(&(h->cfgtable->driver_support));
	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
#ifdef CONFIG_X86
	driver_support |= ENABLE_SCSI_PREFETCH;
#endif
	driver_support |= ENABLE_UNIT_ATTN;
	writel(driver_support, &(h->cfgtable->driver_support));
}

/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}

static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;
	/* wait until the clear_event_notify bit 6 is cleared by controller. */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
			break;
		/* delay and try again */
		msleep(20);
	}
}

static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take awhile.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			break;
		/* delay and try again */
		usleep_range(10000, 20000);
	}
}

static int hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));

	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
		goto error;
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
error:
	dev_err(&h->pdev->dev, "failed to enter simple mode\n");
	return -ENODEV;
}

static int hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return -ENODEV;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(h->pdev, HPSA);
	if (err) {
		dev_err(&h->pdev->dev,
			"cannot obtain PCI resources, aborting\n");
		return err;
	}

	pci_set_master(h->pdev);

	hpsa_interrupt_mode(h);
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto err_out_free_res;
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto err_out_free_res;
	err = hpsa_find_cfgtables(h);
	if (err)
		goto err_out_free_res;
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto err_out_free_res;
	}
	hpsa_set_driver_support_bits(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto err_out_free_res;
	return 0;

err_out_free_res:
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	if (h->vaddr)
		iounmap(h->vaddr);
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	return err;
}

static void hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}

static int hpsa_init_reset_devices(struct pci_dev *pdev)
{
	int rc, i;
	void __iomem *vaddr;

	if (!reset_devices)
		return 0;

	/* kdump kernel is loading, we don't know in which state is
	 * the pci interface. The dev->enable_cnt is equal zero
	 * so we call enable+disable, wait a while and switch it on.
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
		return -ENODEV;
	}
	pci_disable_device(pdev);
	msleep(260);			/* a randomly chosen number */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		return -ENODEV;
	}

	pci_set_master(pdev);

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL) {
		rc = -ENOMEM;
		goto out_disable;
	}
	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	iounmap(vaddr);

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc)
		goto out_disable;

	/* Now try to get the controller to respond to a no-op */
	dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
					(i < 11 ? "; re-trying" : ""));
	}

out_disable:

	pci_disable_device(pdev);
	return rc;
}

static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		goto clean_up;
	}
	return 0;
clean_up:
	hpsa_free_cmd_pool(h);
	return -ENOMEM;
}

static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	if (h->cmd_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct CommandList),
			    h->cmd_pool, h->cmd_pool_dhandle);
	if (h->ioaccel2_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	if (h->errinfo_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct ErrorInfo),
			    h->errinfo_pool,
			    h->errinfo_pool_dhandle);
	if (h->ioaccel_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(struct io_accel1_cmd),
			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
}

static void hpsa_irq_affinity_hints(struct ctlr_info *h)
{
	int i, cpu;

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < h->msix_vector; i++) {
		irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}
}

/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
static void hpsa_free_irqs(struct ctlr_info *h)
{
	int i;

	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
		/* Single reply queue, only one irq to free */
		i = h->intr_mode;
		irq_set_affinity_hint(h->intr[i], NULL);
		free_irq(h->intr[i], &h->q[i]);
		return;
	}

	for (i = 0; i < h->msix_vector; i++) {
		irq_set_affinity_hint(h->intr[i], NULL);
		free_irq(h->intr[i], &h->q[i]);
	}
	for (; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = 0;
}

/* returns 0 on success; cleans up and returns -Enn on error */
static int hpsa_request_irqs(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc, i;

	/*
	 * initialize h->q[x] = x so that interrupt handlers know which
	 * queue to process.
	 */
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < h->msix_vector; i++) {
			rc = request_irq(h->intr[i], msixhandler,
					0, h->devname,
					&h->q[i]);
			if (rc) {
				int j;

				dev_err(&h->pdev->dev,
					"failed to get irq %d for %s\n",
				       h->intr[i], h->devname);
				for (j = 0; j < i; j++) {
					free_irq(h->intr[j], &h->q[j]);
					h->q[j] = 0;
				}
				for (; j < MAX_REPLY_QUEUES; j++)
					h->q[j] = 0;
				return rc;
			}
		}
		hpsa_irq_affinity_hints(h);
	} else {
		/* Use single reply pool */
		if (h->msix_vector > 0 || h->msi_vector) {
			rc = request_irq(h->intr[h->intr_mode],
				msixhandler, 0, h->devname,
				&h->q[h->intr_mode]);
		} else {
			rc = request_irq(h->intr[h->intr_mode],
				intxhandler, IRQF_SHARED, h->devname,
				&h->q[h->intr_mode]);
		}
	}
	if (rc) {
		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
		       h->intr[h->intr_mode], h->devname);
		return -ENODEV;
	}
	return 0;
}

static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
		HPSA_RESET_TYPE_CONTROLLER)) {
		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
		return -EIO;
	}

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return -1;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
		dev_warn(&h->pdev->dev, "Board failed to become ready "
			"after soft reset.\n");
		return -1;
	}

	return 0;
}

static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
{
	hpsa_free_irqs(h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector) {
		if (h->pdev->msix_enabled)
			pci_disable_msix(h->pdev);
	} else if (h->msi_vector) {
		if (h->pdev->msi_enabled)
			pci_disable_msi(h->pdev);
	}
#endif /* CONFIG_PCI_MSI */
}

static void hpsa_free_reply_queues(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nreply_queues; i++) {
		if (!h->reply_queue[i].head)
			continue;
		pci_free_consistent(h->pdev, h->reply_queue_size,
			h->reply_queue[i].head, h->reply_queue[i].busaddr);
		h->reply_queue[i].head = NULL;
		h->reply_queue[i].busaddr = 0;
	}
}

static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_irqs_and_disable_msix(h);
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	kfree(h->ioaccel1_blockFetchTable);
	kfree(h->blockFetchTable);
	hpsa_free_reply_queues(h);
	if (h->vaddr)
		iounmap(h->vaddr);
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	kfree(h);
}

/* Called when controller lockup detected. */
static void fail_all_outstanding_cmds(struct ctlr_info *h)
{
	int i, refcount;
	struct CommandList *c;

	flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
	for (i = 0; i < h->nr_cmds; i++) {
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (refcount > 1) {
			c->err_info->CommandStatus = CMD_HARDWARE_ERR;
			finish_cmd(c);
		}
		cmd_free(h, c);
	}
}

static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
{
	int i, cpu;

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < num_online_cpus(); i++) {
		u32 *lockup_detected;
		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
		*lockup_detected = value;
		cpu = cpumask_next(cpu, cpu_online_mask);
	}
	wmb(); /* be sure the per-cpu variables are out to memory */
}

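/*
 * Reader side, a minimal sketch for contrast (the real lockup_detected()
 * helper is defined earlier in this file; this #if 0 copy only illustrates
 * how the per-cpu value written above is consumed):
 */
#if 0
static u32 lockup_detected(struct ctlr_info *h)
{
	int cpu;
	u32 rc, *lockup_detected;

	cpu = get_cpu();
	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
	rc = *lockup_detected;
	put_cpu();
	return rc;
}
#endif
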
static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;
	u32 lockup_detected;

	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	if (!lockup_detected) {
		/* no heartbeat, but controller gave us a zero. */
		dev_warn(&h->pdev->dev,
			"lockup detected but scratchpad register is zero\n");
		lockup_detected = 0xffffffff;
	}
	set_lockup_detected_for_all_cpus(h, lockup_detected);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
			lockup_detected);
	pci_disable_device(h->pdev);
	fail_all_outstanding_cmds(h);
}

static void detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
}

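/*
 * Worked example (illustrative): assuming h->heartbeat_sample_interval is
 * 30 seconds' worth of jiffies, the monitor worker reads HeartBeat at t=0
 * and caches it.  Healthy firmware keeps incrementing the counter, so the
 * t=30s sample differs and only last_heartbeat/last_heartbeat_timestamp are
 * updated.  If the firmware hangs, the t=30s sample equals the cached value
 * and controller_lockup_detected() fails all outstanding commands.
 */
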
static void hpsa_ack_ctlr_events(struct ctlr_info *h)
{
	int i;
	char *event_type;

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return;

	/* Ask the controller to clear the events we're handling. */
	if ((h->transMethod & (CFGTBL_Trans_io_accel1
			| CFGTBL_Trans_io_accel2)) &&
		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {

		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
			event_type = "state change";
		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
			event_type = "configuration change";
		/* Stop sending new RAID offload reqs via the IO accelerator */
		scsi_block_requests(h->scsi_host);
		for (i = 0; i < h->ndevices; i++)
			h->dev[i]->offload_enabled = 0;
		hpsa_drain_accel_commands(h);
		/* Set 'accelerator path config change' bit */
		dev_warn(&h->pdev->dev,
			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
			h->events, event_type);
		writel(h->events, &(h->cfgtable->clear_event_notify));
		/* Set the "clear event notify field update" bit 6 */
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		/* Wait until ctlr clears 'clear event notify field', bit 6 */
		hpsa_wait_for_clear_event_notify_ack(h);
		scsi_unblock_requests(h->scsi_host);
	} else {
		/* Acknowledge controller notification events. */
		writel(h->events, &(h->cfgtable->clear_event_notify));
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_clear_event_notify_ack(h);
#if 0
		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_mode_change_ack(h);
#endif
	}
	return;
}

/* Check a register on the controller to see if there are configuration
 * changes (added/changed/removed logical drives, etc.) which mean that
 * we should rescan the controller for devices.
 * Also check flag for driver-initiated rescan.
 */
static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
{
	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return 0;

	h->events = readl(&(h->cfgtable->event_notify));
	return h->events & RESCAN_REQUIRED_EVENT_BITS;
}

/*
 * Check if any of the offline devices have become ready
 */
static int hpsa_offline_devices_ready(struct ctlr_info *h)
{
	unsigned long flags;
	struct offline_device_entry *d;
	struct list_head *this, *tmp;

	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_safe(this, tmp, &h->offline_device_list) {
		d = list_entry(this, struct offline_device_entry,
				offline_list);
		spin_unlock_irqrestore(&h->offline_device_lock, flags);
		if (!hpsa_volume_offline(h, d->scsi3addr)) {
			spin_lock_irqsave(&h->offline_device_lock, flags);
			list_del(&d->offline_list);
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return 1;
		}
		spin_lock_irqsave(&h->offline_device_lock, flags);
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
	return 0;
}

static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);
	detect_controller_lockup(h);
	if (lockup_detected(h))
		return;

	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
		scsi_host_get(h->scsi_host);
		hpsa_ack_ctlr_events(h);
		hpsa_scan_start(h->scsi_host);
		scsi_host_put(h->scsi_host);
	}

	spin_lock_irqsave(&h->lock, flags);
	if (h->remove_in_progress) {
		spin_unlock_irqrestore(&h->lock, flags);
		return;
	}
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}

static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_init_reset_devices(pdev);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware. and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->pdev = pdev;
	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->offline_device_list);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->offline_device_lock);
	spin_lock_init(&h->scan_lock);
	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);

	h->resubmit_wq = alloc_workqueue("hpsa", WQ_MEM_RECLAIM, 0);
	if (!h->resubmit_wq) {
		dev_err(&h->pdev->dev, "Failed to allocate work queue\n");
		rc = -ENOMEM;
		goto clean1;
	}
	/* Allocate and clear per-cpu variable lockup_detected */
	h->lockup_detected = alloc_percpu(u32);
	if (!h->lockup_detected) {
		rc = -ENOMEM;
		goto clean1;
	}
	set_lockup_detected_for_all_cpus(h, 0);

	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, HPSA "%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean1;
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
		goto clean2;
	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
	       h->devname, pdev->device,
	       h->intr[h->intr_mode], dac ? "" : " not");
	rc = hpsa_allocate_cmd_pool(h);
	if (rc != 0)
		goto clean2_and_free_irqs;
	if (hpsa_allocate_sg_chain_blocks(h))
		goto clean4;
	init_waitqueue_head(&h->scan_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;
	h->hba_mode_enabled = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
	hpsa_put_ctlr_into_performant_mode(h);

	/* At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_free_irqs(h);
		rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Failed to request_irq after soft reset.\n");
			goto clean4;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean4;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do it
		 * all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't go to clean4, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Enable Accelerated IO path at driver layer */
	h->acciopath_status = 1;


	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);
	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */

	/* Monitor the controller for firmware lockups */
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	return 0;

clean4:
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
clean2_and_free_irqs:
	hpsa_free_irqs(h);
clean2:
clean1:
	if (h->resubmit_wq)
		destroy_workqueue(h->resubmit_wq);
	if (h->lockup_detected)
		free_percpu(h->lockup_detected);
	kfree(h);
	return rc;
}

static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;

	/* Don't bother trying to flush the cache if locked up */
	if (unlikely(lockup_detected(h)))
		return;
	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		goto out_of_memory;
	}
	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD)) {
		goto out;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
	if (c->err_info->CommandStatus != 0)
out:
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_free(h, c);
out_of_memory:
	kfree(flush_buf);
}

static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off  and send the flush cache command
	 * sendcmd will turn off interrupt, and send the flush...
	 * To write all data in the battery backed cache to disks
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs_and_disable_msix(h);
}

static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++)
		kfree(h->dev[i]);
}

static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	cancel_delayed_work(&h->monitor_ctlr_work);
	spin_unlock_irqrestore(&h->lock, flags);
	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	destroy_workqueue(h->resubmit_wq);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	hpsa_free_device_info(h);
	hpsa_free_sg_chain_blocks(h);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	hpsa_free_reply_queues(h);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->ioaccel1_blockFetchTable);
	kfree(h->ioaccel2_blockFetchTable);
	kfree(h->hba_inquiry_data);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	free_percpu(h->lockup_detected);
	kfree(h);
}

static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}

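/*
 * Worked example (illustrative): with bucket[] = {5, 6, 8, 10, 12, 20, 28,
 * 36} and min_blocks = 4, a command with i = 3 SG entries needs
 * size = 3 + 4 = 7 sixteen-byte blocks; the smallest bucket >= 7 is
 * bucket[2] = 8, so bucket_map[3] = 2 and the controller fetches
 * 8 * 16 = 128 bytes for that command instead of the worst-case size.
 */
#if 0
	int bucket[8] = {5, 6, 8, 10, 12, 20, 28, 36};
	u32 bucket_map[33];		/* nsgs + 1 entries */

	calc_bucket_map(bucket, 8, 32, 4, bucket_map);
	/* bucket_map[3] == 2 per the arithmetic above */
#endif
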
static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.   bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
				 16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/*
	 * enable outbound interrupt coalescing in accelerator mode;
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			access = SA5_ioaccel_mode2_access;
			writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
			writel(4, &h->cfgtable->HostWrite.CoalIntCount);
		}
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
			&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
}

static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
	kfree(h->ioaccel1_blockFetchTable);
	return 1;
}

static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
{
	/* Allocate ioaccel2 mode command blocks and block fetch table */

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel2_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	kfree(h->ioaccel2_blockFetchTable);
	return 1;
}

static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		if (hpsa_alloc_ioaccel_cmd_and_bft(h))
			goto clean_up;
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			transMethod |= CFGTBL_Trans_io_accel2 |
					CFGTBL_Trans_enable_directed_msix;
			if (ioaccel2_alloc_cmds_and_bft(h))
				goto clean_up;
		}
	}

	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head)
			goto clean_up;
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable)
		goto clean_up;

	hpsa_enter_performant_mode(h, trans_support);
	return;

clean_up:
	hpsa_free_reply_queues(h);
	kfree(h->blockFetchTable);
}

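/*
 * Sizing example (illustrative, values assumed): a controller reporting
 * max_commands == 1024 gets h->reply_queue_size = 1024 * sizeof(u64) = 8 KB
 * per reply queue; with 8 MSI-X vectors that is 8 reply queues and 64 KB of
 * DMA-coherent reply memory in total.
 */
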
static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}

/*
 *  This is it.  Register the PCI driver information for the cards we control
 *  the OS will call our registered routines when it finds one of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48 */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3 */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4  */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);