/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00040,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
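/*
 * Editor's note (illustrative, not part of the original source): the module
 * parameters declared above are set at load time, for example:
 *
 *	modprobe ipr max_speed=2 log_level=4 fastfail=1
 *
 * Parameters registered with S_IRUGO | S_IWUSR (fastfail, debug) can also be
 * changed at runtime through /sys/module/ipr/parameters/<name>.
 */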

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd: ipr command struct
 * @type: trace type
 * @add_data: additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg: ioa config struct
 * @clr_ints: interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd: ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd: ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg: ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd: ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

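/*
 * Editor's note (illustrative): assuming the ipr_ioadl64_desc declared in
 * ipr.h is 16 bytes, the check above selects the 512-byte IOARCB format
 * (bit 0x4) once a command carries more than eight scatter/gather
 * descriptors; smaller commands keep the default 256-byte format signalled
 * by bit 0x1.
 */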
/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd: ipr command struct
 * @done: done function
 * @timeout_func: timeout function
 * @timeout: timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd: ipr command struct
 * @dma_addr: dma address
 * @len: transfer length
 * @flags: ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd: ipr command struct
 * @timeout_func: function to invoke if command times out
 * @timeout: timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg: ioa config struct
 * @type: HCAM type
 * @hostrcb: hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res: resource entry struct
 * @proto: cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch(proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	};
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res: resource entry struct
 * @cfgtew: config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}

			memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
				sizeof(res->dev_lun.scsi_lun));
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res: resource entry struct
 * @cfgtew: config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @res_path: resource path
 * @buf: buffer
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

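/*
 * Editor's note (illustrative): given a resource path stored as the bytes
 * { 0x00, 0x02, 0x0F, 0xFF, ... }, ipr_format_res_path() above renders the
 * string "00-02-0F"; the 0xFF byte terminates the walk down the path.
 */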
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res: resource entry struct
 * @cfgtew: config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->res_path, buffer,
							sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * 			  for the resource.
 * @res: resource entry struct
 * @cfgtew: config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i: index into buffer
 * @buf: string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}

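/*
 * Editor's note (worked example): strip_and_pad_whitespace(7, buf) with
 * buf = "IBM     " backs up over the trailing blanks, leaves "IBM " plus a
 * terminating NUL in place, and returns the new length of 4.
 */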
/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix: string to print at start of printk
 * @hostrcb: hostrcb pointer
 * @vpd: vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd: vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix: string to print at start of printk
 * @hostrcb: hostrcb pointer
 * @vpd: vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd: vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_sis64_config_error - Log a device error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_err("Device %d : %s", i + 1,
			 ipr_format_res_path(dev_entry->res_path, buffer,
					     sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

LT
1595/**
1596 * ipr_log_config_error - Log a configuration error.
1597 * @ioa_cfg: ioa config struct
1598 * @hostrcb: hostrcb struct
1599 *
1600 * Return value:
1601 * none
1602 **/
1603static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1604 struct ipr_hostrcb *hostrcb)
1605{
1606 int errors_logged, i;
1607 struct ipr_hostrcb_device_data_entry *dev_entry;
1608 struct ipr_hostrcb_type_03_error *error;
1609
1610 error = &hostrcb->hcam.u.error.u.type_03_error;
1611 errors_logged = be32_to_cpu(error->errors_logged);
1612
1613 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1614 be32_to_cpu(error->errors_detected), errors_logged);
1615
cfc32139 1616 dev_entry = error->dev;
1da177e4
LT
1617
1618 for (i = 0; i < errors_logged; i++, dev_entry++) {
1619 ipr_err_separator;
1620
fa15b1f6 1621 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1622 ipr_log_vpd(&dev_entry->vpd);
1da177e4
LT
1623
1624 ipr_err("-----New Device Information-----\n");
cfc32139 1625 ipr_log_vpd(&dev_entry->new_vpd);
1da177e4
LT
1626
1627 ipr_err("Cache Directory Card Information:\n");
cfc32139 1628 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1da177e4
LT
1629
1630 ipr_err("Adapter Card Information:\n");
cfc32139 1631 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1da177e4
LT
1632
1633 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1634 be32_to_cpu(dev_entry->ioa_data[0]),
1635 be32_to_cpu(dev_entry->ioa_data[1]),
1636 be32_to_cpu(dev_entry->ioa_data[2]),
1637 be32_to_cpu(dev_entry->ioa_data[3]),
1638 be32_to_cpu(dev_entry->ioa_data[4]));
1639 }
1640}
1641
ee0f05b8 1642/**
1643 * ipr_log_enhanced_array_error - Log an array configuration error.
1644 * @ioa_cfg: ioa config struct
1645 * @hostrcb: hostrcb struct
1646 *
1647 * Return value:
1648 * none
1649 **/
1650static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1651 struct ipr_hostrcb *hostrcb)
1652{
1653 int i, num_entries;
1654 struct ipr_hostrcb_type_14_error *error;
1655 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1656 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1657
1658 error = &hostrcb->hcam.u.error.u.type_14_error;
1659
1660 ipr_err_separator;
1661
1662 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1663 error->protection_level,
1664 ioa_cfg->host->host_no,
1665 error->last_func_vset_res_addr.bus,
1666 error->last_func_vset_res_addr.target,
1667 error->last_func_vset_res_addr.lun);
1668
1669 ipr_err_separator;
1670
1671 array_entry = error->array_member;
1672 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
 1673 ARRAY_SIZE(error->array_member));
1674
1675 for (i = 0; i < num_entries; i++, array_entry++) {
1676 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1677 continue;
1678
1679 if (be32_to_cpu(error->exposed_mode_adn) == i)
1680 ipr_err("Exposed Array Member %d:\n", i);
1681 else
1682 ipr_err("Array Member %d:\n", i);
1683
1684 ipr_log_ext_vpd(&array_entry->vpd);
1685 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1686 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1687 "Expected Location");
1688
1689 ipr_err_separator;
1690 }
1691}
1692
1da177e4
LT
1693/**
1694 * ipr_log_array_error - Log an array configuration error.
1695 * @ioa_cfg: ioa config struct
1696 * @hostrcb: hostrcb struct
1697 *
1698 * Return value:
1699 * none
1700 **/
1701static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1702 struct ipr_hostrcb *hostrcb)
1703{
1704 int i;
1705 struct ipr_hostrcb_type_04_error *error;
1706 struct ipr_hostrcb_array_data_entry *array_entry;
1707 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1708
1709 error = &hostrcb->hcam.u.error.u.type_04_error;
1710
1711 ipr_err_separator;
1712
1713 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1714 error->protection_level,
1715 ioa_cfg->host->host_no,
1716 error->last_func_vset_res_addr.bus,
1717 error->last_func_vset_res_addr.target,
1718 error->last_func_vset_res_addr.lun);
1719
1720 ipr_err_separator;
1721
1722 array_entry = error->array_member;
1723
1724 for (i = 0; i < 18; i++) {
cfc32139 1725 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1da177e4
LT
1726 continue;
1727
fa15b1f6 1728 if (be32_to_cpu(error->exposed_mode_adn) == i)
1da177e4 1729 ipr_err("Exposed Array Member %d:\n", i);
fa15b1f6 1730 else
1da177e4 1731 ipr_err("Array Member %d:\n", i);
1da177e4 1732
cfc32139 1733 ipr_log_vpd(&array_entry->vpd);
1da177e4 1734
fa15b1f6 1735 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1736 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1737 "Expected Location");
1da177e4
LT
1738
1739 ipr_err_separator;
1740
1741 if (i == 9)
1742 array_entry = error->array_member2;
1743 else
1744 array_entry++;
1745 }
1746}
1747
1748/**
b0df54bb 1749 * ipr_log_hex_data - Log additional hex IOA error data.
ac719aba 1750 * @ioa_cfg: ioa config struct
b0df54bb 1751 * @data: IOA error data
1752 * @len: data length
1da177e4
LT
1753 *
1754 * Return value:
1755 * none
1756 **/
ac719aba 1757static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1da177e4
LT
1758{
1759 int i;
1da177e4 1760
b0df54bb 1761 if (len == 0)
1da177e4
LT
1762 return;
1763
ac719aba
BK
1764 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1765 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1766
b0df54bb 1767 for (i = 0; i < len / 4; i += 4) {
1da177e4 1768 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
b0df54bb 1769 be32_to_cpu(data[i]),
1770 be32_to_cpu(data[i+1]),
1771 be32_to_cpu(data[i+2]),
1772 be32_to_cpu(data[i+3]));
1da177e4
LT
1773 }
1774}
1775
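/*
 * Descriptive note on ipr_log_hex_data(): each loop pass above prints one
 * line of four big-endian 32-bit words, prefixed by the byte offset of the
 * first word on the line, along the lines of (values purely illustrative):
 *
 *	00000010: DEADBEEF 00000001 00000002 00000003
 *
 * When the adapter's log level is at or below IPR_DEFAULT_LOG_LEVEL, the
 * dump is truncated to IPR_DEFAULT_MAX_ERROR_DUMP bytes.
 */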
ee0f05b8 1776/**
1777 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1778 * @ioa_cfg: ioa config struct
1779 * @hostrcb: hostrcb struct
1780 *
1781 * Return value:
1782 * none
1783 **/
1784static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1785 struct ipr_hostrcb *hostrcb)
1786{
1787 struct ipr_hostrcb_type_17_error *error;
1788
4565e370
WB
1789 if (ioa_cfg->sis64)
1790 error = &hostrcb->hcam.u.error64.u.type_17_error;
1791 else
1792 error = &hostrcb->hcam.u.error.u.type_17_error;
1793
ee0f05b8 1794 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1795 strim(error->failure_reason);
ee0f05b8 1796
8cf093e2
BK
1797 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1798 be32_to_cpu(hostrcb->hcam.u.error.prc));
1799 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1800 ipr_log_hex_data(ioa_cfg, error->data,
ee0f05b8 1801 be32_to_cpu(hostrcb->hcam.length) -
1802 (offsetof(struct ipr_hostrcb_error, u) +
1803 offsetof(struct ipr_hostrcb_type_17_error, data)));
1804}
1805
b0df54bb 1806/**
1807 * ipr_log_dual_ioa_error - Log a dual adapter error.
1808 * @ioa_cfg: ioa config struct
1809 * @hostrcb: hostrcb struct
1810 *
1811 * Return value:
1812 * none
1813 **/
1814static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1815 struct ipr_hostrcb *hostrcb)
1816{
1817 struct ipr_hostrcb_type_07_error *error;
1818
1819 error = &hostrcb->hcam.u.error.u.type_07_error;
1820 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1821 strim(error->failure_reason);
b0df54bb 1822
8cf093e2
BK
1823 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1824 be32_to_cpu(hostrcb->hcam.u.error.prc));
1825 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1826 ipr_log_hex_data(ioa_cfg, error->data,
b0df54bb 1827 be32_to_cpu(hostrcb->hcam.length) -
1828 (offsetof(struct ipr_hostrcb_error, u) +
1829 offsetof(struct ipr_hostrcb_type_07_error, data)));
1830}
1831
49dc6a18
BK
1832static const struct {
1833 u8 active;
1834 char *desc;
1835} path_active_desc[] = {
1836 { IPR_PATH_NO_INFO, "Path" },
1837 { IPR_PATH_ACTIVE, "Active path" },
1838 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1839};
1840
1841static const struct {
1842 u8 state;
1843 char *desc;
1844} path_state_desc[] = {
1845 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1846 { IPR_PATH_HEALTHY, "is healthy" },
1847 { IPR_PATH_DEGRADED, "is degraded" },
1848 { IPR_PATH_FAILED, "is failed" }
1849};
1850
1851/**
1852 * ipr_log_fabric_path - Log a fabric path error
1853 * @hostrcb: hostrcb struct
1854 * @fabric: fabric descriptor
1855 *
1856 * Return value:
1857 * none
1858 **/
1859static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1860 struct ipr_hostrcb_fabric_desc *fabric)
1861{
1862 int i, j;
1863 u8 path_state = fabric->path_state;
1864 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1865 u8 state = path_state & IPR_PATH_STATE_MASK;
1866
1867 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1868 if (path_active_desc[i].active != active)
1869 continue;
1870
1871 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1872 if (path_state_desc[j].state != state)
1873 continue;
1874
1875 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1876 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1877 path_active_desc[i].desc, path_state_desc[j].desc,
1878 fabric->ioa_port);
1879 } else if (fabric->cascaded_expander == 0xff) {
1880 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1881 path_active_desc[i].desc, path_state_desc[j].desc,
1882 fabric->ioa_port, fabric->phy);
1883 } else if (fabric->phy == 0xff) {
1884 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1885 path_active_desc[i].desc, path_state_desc[j].desc,
1886 fabric->ioa_port, fabric->cascaded_expander);
1887 } else {
1888 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1889 path_active_desc[i].desc, path_state_desc[j].desc,
1890 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1891 }
1892 return;
1893 }
1894 }
1895
1896 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1897 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1898}
1899
4565e370
WB
1900/**
1901 * ipr_log64_fabric_path - Log a fabric path error
1902 * @hostrcb: hostrcb struct
1903 * @fabric: fabric descriptor
1904 *
1905 * Return value:
1906 * none
1907 **/
1908static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1909 struct ipr_hostrcb64_fabric_desc *fabric)
1910{
1911 int i, j;
1912 u8 path_state = fabric->path_state;
1913 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1914 u8 state = path_state & IPR_PATH_STATE_MASK;
1915 char buffer[IPR_MAX_RES_PATH_LENGTH];
1916
1917 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1918 if (path_active_desc[i].active != active)
1919 continue;
1920
1921 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1922 if (path_state_desc[j].state != state)
1923 continue;
1924
1925 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1926 path_active_desc[i].desc, path_state_desc[j].desc,
5adcbeb3
WB
1927 ipr_format_res_path(fabric->res_path, buffer,
1928 sizeof(buffer)));
4565e370
WB
1929 return;
1930 }
1931 }
1932
1933 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
5adcbeb3 1934 ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
4565e370
WB
1935}
1936
49dc6a18
BK
1937static const struct {
1938 u8 type;
1939 char *desc;
1940} path_type_desc[] = {
1941 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1942 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1943 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1944 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1945};
1946
1947static const struct {
1948 u8 status;
1949 char *desc;
1950} path_status_desc[] = {
1951 { IPR_PATH_CFG_NO_PROB, "Functional" },
1952 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1953 { IPR_PATH_CFG_FAILED, "Failed" },
1954 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1955 { IPR_PATH_NOT_DETECTED, "Missing" },
1956 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1957};
1958
1959static const char *link_rate[] = {
1960 "unknown",
1961 "disabled",
1962 "phy reset problem",
1963 "spinup hold",
1964 "port selector",
1965 "unknown",
1966 "unknown",
1967 "unknown",
1968 "1.5Gbps",
1969 "3.0Gbps",
1970 "unknown",
1971 "unknown",
1972 "unknown",
1973 "unknown",
1974 "unknown",
1975 "unknown"
1976};
1977
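/*
 * Descriptive note: link_rate[] is indexed with
 * (cfg->link_rate & IPR_PHY_LINK_RATE_MASK) in the path element loggers
 * below. The 16 entries appear to mirror the SAS negotiated link-rate
 * encoding: codes 1-4 are special PHY states, 8 maps to 1.5Gbps, 9 maps
 * to 3.0Gbps, and unassigned codes simply read "unknown".
 */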
1978/**
1979 * ipr_log_path_elem - Log a fabric path element.
1980 * @hostrcb: hostrcb struct
1981 * @cfg: fabric path element struct
1982 *
1983 * Return value:
1984 * none
1985 **/
1986static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1987 struct ipr_hostrcb_config_element *cfg)
1988{
1989 int i, j;
1990 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1991 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1992
1993 if (type == IPR_PATH_CFG_NOT_EXIST)
1994 return;
1995
1996 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1997 if (path_type_desc[i].type != type)
1998 continue;
1999
2000 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2001 if (path_status_desc[j].status != status)
2002 continue;
2003
2004 if (type == IPR_PATH_CFG_IOA_PORT) {
2005 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2006 path_status_desc[j].desc, path_type_desc[i].desc,
2007 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2008 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2009 } else {
2010 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2011 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2012 path_status_desc[j].desc, path_type_desc[i].desc,
2013 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2014 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2015 } else if (cfg->cascaded_expander == 0xff) {
2016 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2017 "WWN=%08X%08X\n", path_status_desc[j].desc,
2018 path_type_desc[i].desc, cfg->phy,
2019 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2020 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2021 } else if (cfg->phy == 0xff) {
2022 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2023 "WWN=%08X%08X\n", path_status_desc[j].desc,
2024 path_type_desc[i].desc, cfg->cascaded_expander,
2025 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2026 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2027 } else {
2028 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2029 "WWN=%08X%08X\n", path_status_desc[j].desc,
2030 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2031 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2032 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2033 }
2034 }
2035 return;
2036 }
2037 }
2038
2039 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2040 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2041 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2042 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2043}
2044
4565e370
WB
2045/**
2046 * ipr_log64_path_elem - Log a fabric path element.
2047 * @hostrcb: hostrcb struct
2048 * @cfg: fabric path element struct
2049 *
2050 * Return value:
2051 * none
2052 **/
2053static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2054 struct ipr_hostrcb64_config_element *cfg)
2055{
2056 int i, j;
2057 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2058 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2059 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2060 char buffer[IPR_MAX_RES_PATH_LENGTH];
2061
2062 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2063 return;
2064
2065 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2066 if (path_type_desc[i].type != type)
2067 continue;
2068
2069 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2070 if (path_status_desc[j].status != status)
2071 continue;
2072
2073 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2074 path_status_desc[j].desc, path_type_desc[i].desc,
5adcbeb3
WB
2075 ipr_format_res_path(cfg->res_path, buffer,
2076 sizeof(buffer)),
4565e370
WB
2077 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2078 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2079 return;
2080 }
2081 }
2082 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2083 "WWN=%08X%08X\n", cfg->type_status,
5adcbeb3 2084 ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
4565e370
WB
2085 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2086 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2087}
2088
49dc6a18
BK
2089/**
2090 * ipr_log_fabric_error - Log a fabric error.
2091 * @ioa_cfg: ioa config struct
2092 * @hostrcb: hostrcb struct
2093 *
2094 * Return value:
2095 * none
2096 **/
2097static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2098 struct ipr_hostrcb *hostrcb)
2099{
2100 struct ipr_hostrcb_type_20_error *error;
2101 struct ipr_hostrcb_fabric_desc *fabric;
2102 struct ipr_hostrcb_config_element *cfg;
2103 int i, add_len;
2104
2105 error = &hostrcb->hcam.u.error.u.type_20_error;
2106 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2107 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2108
2109 add_len = be32_to_cpu(hostrcb->hcam.length) -
2110 (offsetof(struct ipr_hostrcb_error, u) +
2111 offsetof(struct ipr_hostrcb_type_20_error, desc));
2112
2113 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2114 ipr_log_fabric_path(hostrcb, fabric);
2115 for_each_fabric_cfg(fabric, cfg)
2116 ipr_log_path_elem(hostrcb, cfg);
2117
2118 add_len -= be16_to_cpu(fabric->length);
2119 fabric = (struct ipr_hostrcb_fabric_desc *)
2120 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2121 }
2122
ac719aba 2123 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
49dc6a18
BK
2124}
2125
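/*
 * Descriptive note on the walk above: add_len starts as the HCAM length
 * minus the fixed error-header offsets, each fabric descriptor is advanced
 * by its own big-endian length field, and whatever trailing bytes remain
 * after the last descriptor are hex dumped as raw data.
 */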
4565e370
WB
2126/**
2127 * ipr_log_sis64_array_error - Log a sis64 array error.
2128 * @ioa_cfg: ioa config struct
2129 * @hostrcb: hostrcb struct
2130 *
2131 * Return value:
2132 * none
2133 **/
2134static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2135 struct ipr_hostrcb *hostrcb)
2136{
2137 int i, num_entries;
2138 struct ipr_hostrcb_type_24_error *error;
2139 struct ipr_hostrcb64_array_data_entry *array_entry;
2140 char buffer[IPR_MAX_RES_PATH_LENGTH];
2141 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2142
2143 error = &hostrcb->hcam.u.error64.u.type_24_error;
2144
2145 ipr_err_separator;
2146
2147 ipr_err("RAID %s Array Configuration: %s\n",
2148 error->protection_level,
5adcbeb3 2149 ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
4565e370
WB
2150
2151 ipr_err_separator;
2152
2153 array_entry = error->array_member;
2154 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
 2155 ARRAY_SIZE(error->array_member));
2156
2157 for (i = 0; i < num_entries; i++, array_entry++) {
2158
2159 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2160 continue;
2161
2162 if (error->exposed_mode_adn == i)
2163 ipr_err("Exposed Array Member %d:\n", i);
2164 else
2165 ipr_err("Array Member %d:\n", i);
2166
2168 ipr_log_ext_vpd(&array_entry->vpd);
2169 ipr_err("Current Location: %s",
5adcbeb3
WB
2170 ipr_format_res_path(array_entry->res_path, buffer,
2171 sizeof(buffer)));
4565e370 2172 ipr_err("Expected Location: %s",
5adcbeb3
WB
2173 ipr_format_res_path(array_entry->expected_res_path,
2174 buffer, sizeof(buffer)));
4565e370
WB
2175
2176 ipr_err_separator;
2177 }
2178}
2179
2180/**
2181 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2182 * @ioa_cfg: ioa config struct
2183 * @hostrcb: hostrcb struct
2184 *
2185 * Return value:
2186 * none
2187 **/
2188static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2189 struct ipr_hostrcb *hostrcb)
2190{
2191 struct ipr_hostrcb_type_30_error *error;
2192 struct ipr_hostrcb64_fabric_desc *fabric;
2193 struct ipr_hostrcb64_config_element *cfg;
2194 int i, add_len;
2195
2196 error = &hostrcb->hcam.u.error64.u.type_30_error;
2197
2198 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2199 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2200
2201 add_len = be32_to_cpu(hostrcb->hcam.length) -
2202 (offsetof(struct ipr_hostrcb64_error, u) +
2203 offsetof(struct ipr_hostrcb_type_30_error, desc));
2204
2205 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2206 ipr_log64_fabric_path(hostrcb, fabric);
2207 for_each_fabric_cfg(fabric, cfg)
2208 ipr_log64_path_elem(hostrcb, cfg);
2209
2210 add_len -= be16_to_cpu(fabric->length);
2211 fabric = (struct ipr_hostrcb64_fabric_desc *)
2212 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2213 }
2214
2215 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2216}
2217
b0df54bb 2218/**
2219 * ipr_log_generic_error - Log an adapter error.
2220 * @ioa_cfg: ioa config struct
2221 * @hostrcb: hostrcb struct
2222 *
2223 * Return value:
2224 * none
2225 **/
2226static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2227 struct ipr_hostrcb *hostrcb)
2228{
ac719aba 2229 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
b0df54bb 2230 be32_to_cpu(hostrcb->hcam.length));
2231}
2232
1da177e4
LT
2233/**
 2234 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2235 * @ioasc: IOASC
2236 *
 2237 * This function will return the index into the ipr_error_table
2238 * for the specified IOASC. If the IOASC is not in the table,
2239 * 0 will be returned, which points to the entry used for unknown errors.
2240 *
2241 * Return value:
2242 * index into the ipr_error_table
2243 **/
2244static u32 ipr_get_error(u32 ioasc)
2245{
2246 int i;
2247
2248 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
35a39691 2249 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1da177e4
LT
2250 return i;
2251
2252 return 0;
2253}
2254
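/*
 * Illustrative usage of ipr_get_error() (mirrors the caller further below):
 *
 *	error_index = ipr_get_error(ioasc);
 *	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 *
 * The IOASC is masked with IPR_IOASC_IOASC_MASK before comparison, so
 * closely related IOASC qualifiers resolve to a single table entry, and
 * index 0 is the catch-all "unknown error" entry.
 */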
2255/**
2256 * ipr_handle_log_data - Log an adapter error.
2257 * @ioa_cfg: ioa config struct
2258 * @hostrcb: hostrcb struct
2259 *
2260 * This function logs an adapter error to the system.
2261 *
2262 * Return value:
2263 * none
2264 **/
2265static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2266 struct ipr_hostrcb *hostrcb)
2267{
2268 u32 ioasc;
2269 int error_index;
2270
2271 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2272 return;
2273
2274 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2275 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2276
4565e370
WB
2277 if (ioa_cfg->sis64)
2278 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2279 else
2280 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4 2281
4565e370
WB
2282 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2283 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
1da177e4
LT
2284 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2285 scsi_report_bus_reset(ioa_cfg->host,
4565e370 2286 hostrcb->hcam.u.error.fd_res_addr.bus);
1da177e4
LT
2287 }
2288
2289 error_index = ipr_get_error(ioasc);
2290
2291 if (!ipr_error_table[error_index].log_hcam)
2292 return;
2293
49dc6a18 2294 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
2295
2296 /* Set indication we have logged an error */
2297 ioa_cfg->errors_logged++;
2298
933916f3 2299 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1da177e4 2300 return;
cf852037 2301 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2302 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1da177e4
LT
2303
2304 switch (hostrcb->hcam.overlay_id) {
1da177e4
LT
2305 case IPR_HOST_RCB_OVERLAY_ID_2:
2306 ipr_log_cache_error(ioa_cfg, hostrcb);
2307 break;
2308 case IPR_HOST_RCB_OVERLAY_ID_3:
2309 ipr_log_config_error(ioa_cfg, hostrcb);
2310 break;
2311 case IPR_HOST_RCB_OVERLAY_ID_4:
2312 case IPR_HOST_RCB_OVERLAY_ID_6:
2313 ipr_log_array_error(ioa_cfg, hostrcb);
2314 break;
b0df54bb 2315 case IPR_HOST_RCB_OVERLAY_ID_7:
2316 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2317 break;
ee0f05b8 2318 case IPR_HOST_RCB_OVERLAY_ID_12:
2319 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2320 break;
2321 case IPR_HOST_RCB_OVERLAY_ID_13:
2322 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2323 break;
2324 case IPR_HOST_RCB_OVERLAY_ID_14:
2325 case IPR_HOST_RCB_OVERLAY_ID_16:
2326 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2327 break;
2328 case IPR_HOST_RCB_OVERLAY_ID_17:
2329 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2330 break;
49dc6a18
BK
2331 case IPR_HOST_RCB_OVERLAY_ID_20:
2332 ipr_log_fabric_error(ioa_cfg, hostrcb);
2333 break;
4565e370
WB
2334 case IPR_HOST_RCB_OVERLAY_ID_23:
2335 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2336 break;
2337 case IPR_HOST_RCB_OVERLAY_ID_24:
2338 case IPR_HOST_RCB_OVERLAY_ID_26:
2339 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2340 break;
2341 case IPR_HOST_RCB_OVERLAY_ID_30:
2342 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2343 break;
cf852037 2344 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 2345 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 2346 default:
a9cfca96 2347 ipr_log_generic_error(ioa_cfg, hostrcb);
1da177e4
LT
2348 break;
2349 }
2350}
2351
2352/**
2353 * ipr_process_error - Op done function for an adapter error log.
2354 * @ipr_cmd: ipr command struct
2355 *
2356 * This function is the op done function for an error log host
 2357 * controlled async message (HCAM) from the adapter. It will log the error and
2358 * send the HCAM back to the adapter.
2359 *
2360 * Return value:
2361 * none
2362 **/
2363static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2364{
2365 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2366 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 2367 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4565e370
WB
2368 u32 fd_ioasc;
2369
2370 if (ioa_cfg->sis64)
2371 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2372 else
2373 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4
LT
2374
2375 list_del(&hostrcb->queue);
2376 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2377
2378 if (!ioasc) {
2379 ipr_handle_log_data(ioa_cfg, hostrcb);
65f56475
BK
2380 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2381 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
1da177e4
LT
2382 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2383 dev_err(&ioa_cfg->pdev->dev,
2384 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2385 }
2386
2387 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2388}
2389
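/*
 * Descriptive note on the HCAM lifecycle above: the hostrcb is removed from
 * its queue and the command block is returned to free_q; on a zero IOASC the
 * entry is logged (and an abbreviated IOA reset is started if fd_ioasc
 * reports IPR_IOASC_NR_IOA_RESET_REQUIRED); finally the HCAM buffer is
 * handed back to the adapter with ipr_send_hcam() so further errors can be
 * reported.
 */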
2390/**
2391 * ipr_timeout - An internally generated op has timed out.
2392 * @ipr_cmd: ipr command struct
2393 *
2394 * This function blocks host requests and initiates an
2395 * adapter reset.
2396 *
2397 * Return value:
2398 * none
2399 **/
2400static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2401{
2402 unsigned long lock_flags = 0;
2403 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2404
2405 ENTER;
2406 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2407
2408 ioa_cfg->errors_logged++;
2409 dev_err(&ioa_cfg->pdev->dev,
2410 "Adapter being reset due to command timeout.\n");
2411
2412 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2413 ioa_cfg->sdt_state = GET_DUMP;
2414
2415 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2416 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2417
2418 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2419 LEAVE;
2420}
2421
2422/**
2423 * ipr_oper_timeout - Adapter timed out transitioning to operational
2424 * @ipr_cmd: ipr command struct
2425 *
2426 * This function blocks host requests and initiates an
2427 * adapter reset.
2428 *
2429 * Return value:
2430 * none
2431 **/
2432static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2433{
2434 unsigned long lock_flags = 0;
2435 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2436
2437 ENTER;
2438 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2439
2440 ioa_cfg->errors_logged++;
2441 dev_err(&ioa_cfg->pdev->dev,
2442 "Adapter timed out transitioning to operational.\n");
2443
2444 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2445 ioa_cfg->sdt_state = GET_DUMP;
2446
2447 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2448 if (ipr_fastfail)
2449 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2450 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2451 }
2452
2453 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2454 LEAVE;
2455}
2456
2457/**
2458 * ipr_reset_reload - Reset/Reload the IOA
2459 * @ioa_cfg: ioa config struct
2460 * @shutdown_type: shutdown type
2461 *
2462 * This function resets the adapter and re-initializes it.
2463 * This function assumes that all new host commands have been stopped.
2464 * Return value:
2465 * SUCCESS / FAILED
2466 **/
2467static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2468 enum ipr_shutdown_type shutdown_type)
2469{
2470 if (!ioa_cfg->in_reset_reload)
2471 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2472
2473 spin_unlock_irq(ioa_cfg->host->host_lock);
2474 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2475 spin_lock_irq(ioa_cfg->host->host_lock);
2476
 2477 /* If we got hit with a host reset while we were already resetting
 2478 the adapter for some reason, and that reset failed, fail the host reset. */
2479 if (ioa_cfg->ioa_is_dead) {
2480 ipr_trace;
2481 return FAILED;
2482 }
2483
2484 return SUCCESS;
2485}
2486
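/*
 * Illustrative caller sketch for ipr_reset_reload() (assumes the usual
 * error-handler context; not taken verbatim from this file):
 *
 *	spin_lock_irq(ioa_cfg->host->host_lock);
 *	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irq(ioa_cfg->host->host_lock);
 *
 * The host lock must be held on entry; it is dropped while sleeping on
 * reset_wait_q and reacquired before returning.
 */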
2487/**
2488 * ipr_find_ses_entry - Find matching SES in SES table
2489 * @res: resource entry struct of SES
2490 *
2491 * Return value:
2492 * pointer to SES table entry / NULL on failure
2493 **/
2494static const struct ipr_ses_table_entry *
2495ipr_find_ses_entry(struct ipr_resource_entry *res)
2496{
2497 int i, j, matches;
3e7ebdfa 2498 struct ipr_std_inq_vpids *vpids;
1da177e4
LT
2499 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2500
2501 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2502 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2503 if (ste->compare_product_id_byte[j] == 'X') {
3e7ebdfa
WB
2504 vpids = &res->std_inq_data.vpids;
2505 if (vpids->product_id[j] == ste->product_id[j])
1da177e4
LT
2506 matches++;
2507 else
2508 break;
2509 } else
2510 matches++;
2511 }
2512
2513 if (matches == IPR_PROD_ID_LEN)
2514 return ste;
2515 }
2516
2517 return NULL;
2518}
2519
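/*
 * Descriptive note: in the match loop above, only product-id positions
 * marked 'X' in compare_product_id_byte[] are actually compared against the
 * device's inquiry data; every other position is treated as a don't-care
 * and counted as a match.
 */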
2520/**
2521 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2522 * @ioa_cfg: ioa config struct
2523 * @bus: SCSI bus
2524 * @bus_width: bus width
2525 *
2526 * Return value:
2527 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2528 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2529 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2530 * max 160MHz = max 320MB/sec).
2531 **/
2532static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2533{
2534 struct ipr_resource_entry *res;
2535 const struct ipr_ses_table_entry *ste;
2536 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2537
2538 /* Loop through each config table entry in the config table buffer */
2539 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 2540 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
1da177e4
LT
2541 continue;
2542
3e7ebdfa 2543 if (bus != res->bus)
1da177e4
LT
2544 continue;
2545
2546 if (!(ste = ipr_find_ses_entry(res)))
2547 continue;
2548
2549 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2550 }
2551
2552 return max_xfer_rate;
2553}
2554
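/*
 * Worked example for the conversion above (hypothetical values, and assuming
 * max_bus_speed_limit is expressed in MB/sec as the arithmetic suggests): an
 * SES entry limiting a 16-bit wide bus to 160 MB/sec gives
 * (160 * 10) / (16 / 8) = 800, i.e. 80 MHz in the 100 kHz units described
 * in the function header.
 */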
2555/**
2556 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2557 * @ioa_cfg: ioa config struct
2558 * @max_delay: max delay in micro-seconds to wait
2559 *
2560 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2561 *
2562 * Return value:
2563 * 0 on success / other on failure
2564 **/
2565static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2566{
2567 volatile u32 pcii_reg;
2568 int delay = 1;
2569
2570 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2571 while (delay < max_delay) {
2572 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2573
2574 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2575 return 0;
2576
2577 /* udelay cannot be used if delay is more than a few milliseconds */
2578 if ((delay / 1000) > MAX_UDELAY_MS)
2579 mdelay(delay / 1000);
2580 else
2581 udelay(delay);
2582
2583 delay += delay;
2584 }
2585 return -EIO;
2586}
2587
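/*
 * Descriptive note: the busy-wait above doubles its delay on each pass
 * (delay += delay), starting at 1 microsecond, and switches from udelay()
 * to mdelay() once the interval exceeds MAX_UDELAY_MS milliseconds; -EIO is
 * returned only if the acknowledge bit never appears before the delay
 * reaches max_delay.
 */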
dcbad00e
WB
2588/**
2589 * ipr_get_sis64_dump_data_section - Dump IOA memory
2590 * @ioa_cfg: ioa config struct
2591 * @start_addr: adapter address to dump
2592 * @dest: destination kernel buffer
2593 * @length_in_words: length to dump in 4 byte words
2594 *
2595 * Return value:
2596 * 0 on success
2597 **/
2598static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2599 u32 start_addr,
2600 __be32 *dest, u32 length_in_words)
2601{
2602 int i;
2603
2604 for (i = 0; i < length_in_words; i++) {
2605 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2606 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2607 dest++;
2608 }
2609
2610 return 0;
2611}
2612
1da177e4
LT
2613/**
2614 * ipr_get_ldump_data_section - Dump IOA memory
2615 * @ioa_cfg: ioa config struct
2616 * @start_addr: adapter address to dump
2617 * @dest: destination kernel buffer
2618 * @length_in_words: length to dump in 4 byte words
2619 *
2620 * Return value:
2621 * 0 on success / -EIO on failure
2622 **/
2623static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2624 u32 start_addr,
2625 __be32 *dest, u32 length_in_words)
2626{
2627 volatile u32 temp_pcii_reg;
2628 int i, delay = 0;
2629
dcbad00e
WB
2630 if (ioa_cfg->sis64)
2631 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2632 dest, length_in_words);
2633
1da177e4
LT
2634 /* Write IOA interrupt reg starting LDUMP state */
2635 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
214777ba 2636 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2637
2638 /* Wait for IO debug acknowledge */
2639 if (ipr_wait_iodbg_ack(ioa_cfg,
2640 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2641 dev_err(&ioa_cfg->pdev->dev,
2642 "IOA dump long data transfer timeout\n");
2643 return -EIO;
2644 }
2645
2646 /* Signal LDUMP interlocked - clear IO debug ack */
2647 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2648 ioa_cfg->regs.clr_interrupt_reg);
2649
2650 /* Write Mailbox with starting address */
2651 writel(start_addr, ioa_cfg->ioa_mailbox);
2652
2653 /* Signal address valid - clear IOA Reset alert */
2654 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2655 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2656
2657 for (i = 0; i < length_in_words; i++) {
2658 /* Wait for IO debug acknowledge */
2659 if (ipr_wait_iodbg_ack(ioa_cfg,
2660 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2661 dev_err(&ioa_cfg->pdev->dev,
2662 "IOA dump short data transfer timeout\n");
2663 return -EIO;
2664 }
2665
2666 /* Read data from mailbox and increment destination pointer */
2667 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2668 dest++;
2669
2670 /* For all but the last word of data, signal data received */
2671 if (i < (length_in_words - 1)) {
2672 /* Signal dump data received - Clear IO debug Ack */
2673 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2674 ioa_cfg->regs.clr_interrupt_reg);
2675 }
2676 }
2677
2678 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2679 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2680 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2681
2682 writel(IPR_UPROCI_IO_DEBUG_ALERT,
214777ba 2683 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2684
2685 /* Signal dump data received - Clear IO debug Ack */
2686 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2687 ioa_cfg->regs.clr_interrupt_reg);
2688
2689 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2690 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2691 temp_pcii_reg =
214777ba 2692 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
1da177e4
LT
2693
2694 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2695 return 0;
2696
2697 udelay(10);
2698 delay += 10;
2699 }
2700
2701 return 0;
2702}
2703
2704#ifdef CONFIG_SCSI_IPR_DUMP
2705/**
2706 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2707 * @ioa_cfg: ioa config struct
2708 * @pci_address: adapter address
2709 * @length: length of data to copy
2710 *
2711 * Copy data from PCI adapter to kernel buffer.
2712 * Note: length MUST be a 4 byte multiple
2713 * Return value:
2714 * 0 on success / other on failure
2715 **/
2716static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2717 unsigned long pci_address, u32 length)
2718{
2719 int bytes_copied = 0;
2720 int cur_len, rc, rem_len, rem_page_len;
2721 __be32 *page;
2722 unsigned long lock_flags = 0;
2723 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2724
2725 while (bytes_copied < length &&
2726 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2727 if (ioa_dump->page_offset >= PAGE_SIZE ||
2728 ioa_dump->page_offset == 0) {
2729 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2730
2731 if (!page) {
2732 ipr_trace;
2733 return bytes_copied;
2734 }
2735
2736 ioa_dump->page_offset = 0;
2737 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2738 ioa_dump->next_page_index++;
2739 } else
2740 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2741
2742 rem_len = length - bytes_copied;
2743 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2744 cur_len = min(rem_len, rem_page_len);
2745
2746 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2747 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2748 rc = -EIO;
2749 } else {
2750 rc = ipr_get_ldump_data_section(ioa_cfg,
2751 pci_address + bytes_copied,
2752 &page[ioa_dump->page_offset / 4],
2753 (cur_len / sizeof(u32)));
2754 }
2755 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2756
2757 if (!rc) {
2758 ioa_dump->page_offset += cur_len;
2759 bytes_copied += cur_len;
2760 } else {
2761 ipr_trace;
2762 break;
2763 }
2764 schedule();
2765 }
2766
2767 return bytes_copied;
2768}
2769
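/*
 * Descriptive note on ipr_sdt_copy(): dump data is gathered one GFP_ATOMIC
 * page at a time, with the page pointers recorded in ioa_dump->ioa_data[]
 * (freed later in ipr_release_dump()); the host lock is held only around
 * each ipr_get_ldump_data_section() call, and schedule() is called between
 * chunks so the copy does not monopolize the CPU. The copy stops early if
 * the dump is aborted or the total reaches IPR_MAX_IOA_DUMP_SIZE.
 */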
2770/**
2771 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2772 * @hdr: dump entry header struct
2773 *
2774 * Return value:
2775 * nothing
2776 **/
2777static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2778{
2779 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2780 hdr->num_elems = 1;
2781 hdr->offset = sizeof(*hdr);
2782 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2783}
2784
2785/**
2786 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2787 * @ioa_cfg: ioa config struct
2788 * @driver_dump: driver dump struct
2789 *
2790 * Return value:
2791 * nothing
2792 **/
2793static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2794 struct ipr_driver_dump *driver_dump)
2795{
2796 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2797
2798 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2799 driver_dump->ioa_type_entry.hdr.len =
2800 sizeof(struct ipr_dump_ioa_type_entry) -
2801 sizeof(struct ipr_dump_entry_header);
2802 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2803 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2804 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2805 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2806 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2807 ucode_vpd->minor_release[1];
2808 driver_dump->hdr.num_entries++;
2809}
2810
2811/**
2812 * ipr_dump_version_data - Fill in the driver version in the dump.
2813 * @ioa_cfg: ioa config struct
2814 * @driver_dump: driver dump struct
2815 *
2816 * Return value:
2817 * nothing
2818 **/
2819static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2820 struct ipr_driver_dump *driver_dump)
2821{
2822 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2823 driver_dump->version_entry.hdr.len =
2824 sizeof(struct ipr_dump_version_entry) -
2825 sizeof(struct ipr_dump_entry_header);
2826 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2827 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2828 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2829 driver_dump->hdr.num_entries++;
2830}
2831
2832/**
2833 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2834 * @ioa_cfg: ioa config struct
2835 * @driver_dump: driver dump struct
2836 *
2837 * Return value:
2838 * nothing
2839 **/
2840static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2841 struct ipr_driver_dump *driver_dump)
2842{
2843 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2844 driver_dump->trace_entry.hdr.len =
2845 sizeof(struct ipr_dump_trace_entry) -
2846 sizeof(struct ipr_dump_entry_header);
2847 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2848 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2849 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2850 driver_dump->hdr.num_entries++;
2851}
2852
2853/**
2854 * ipr_dump_location_data - Fill in the IOA location in the dump.
2855 * @ioa_cfg: ioa config struct
2856 * @driver_dump: driver dump struct
2857 *
2858 * Return value:
2859 * nothing
2860 **/
2861static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2862 struct ipr_driver_dump *driver_dump)
2863{
2864 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2865 driver_dump->location_entry.hdr.len =
2866 sizeof(struct ipr_dump_location_entry) -
2867 sizeof(struct ipr_dump_entry_header);
2868 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2869 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
71610f55 2870 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
1da177e4
LT
2871 driver_dump->hdr.num_entries++;
2872}
2873
2874/**
2875 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2876 * @ioa_cfg: ioa config struct
2877 * @dump: dump struct
2878 *
2879 * Return value:
2880 * nothing
2881 **/
2882static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2883{
2884 unsigned long start_addr, sdt_word;
2885 unsigned long lock_flags = 0;
2886 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2887 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2888 u32 num_entries, start_off, end_off;
2889 u32 bytes_to_copy, bytes_copied, rc;
2890 struct ipr_sdt *sdt;
dcbad00e 2891 int valid = 1;
1da177e4
LT
2892 int i;
2893
2894 ENTER;
2895
2896 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2897
2898 if (ioa_cfg->sdt_state != GET_DUMP) {
2899 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2900 return;
2901 }
2902
2903 start_addr = readl(ioa_cfg->ioa_mailbox);
2904
dcbad00e 2905 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
1da177e4
LT
2906 dev_err(&ioa_cfg->pdev->dev,
2907 "Invalid dump table format: %lx\n", start_addr);
2908 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2909 return;
2910 }
2911
2912 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2913
2914 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2915
2916 /* Initialize the overall dump header */
2917 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2918 driver_dump->hdr.num_entries = 1;
2919 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2920 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2921 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2922 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2923
2924 ipr_dump_version_data(ioa_cfg, driver_dump);
2925 ipr_dump_location_data(ioa_cfg, driver_dump);
2926 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2927 ipr_dump_trace_data(ioa_cfg, driver_dump);
2928
2929 /* Update dump_header */
2930 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2931
2932 /* IOA Dump entry */
2933 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1da177e4
LT
2934 ioa_dump->hdr.len = 0;
2935 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2936 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2937
2938 /* First entries in sdt are actually a list of dump addresses and
2939 lengths to gather the real dump data. sdt represents the pointer
2940 to the ioa generated dump table. Dump data will be extracted based
2941 on entries in this table */
2942 sdt = &ioa_dump->sdt;
2943
2944 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2945 sizeof(struct ipr_sdt) / sizeof(__be32));
2946
2947 /* Smart Dump table is ready to use and the first entry is valid */
dcbad00e
WB
2948 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2949 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
2950 dev_err(&ioa_cfg->pdev->dev,
2951 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2952 rc, be32_to_cpu(sdt->hdr.state));
2953 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2954 ioa_cfg->sdt_state = DUMP_OBTAINED;
2955 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2956 return;
2957 }
2958
2959 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2960
2961 if (num_entries > IPR_NUM_SDT_ENTRIES)
2962 num_entries = IPR_NUM_SDT_ENTRIES;
2963
2964 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2965
2966 for (i = 0; i < num_entries; i++) {
2967 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2968 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2969 break;
2970 }
2971
2972 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
dcbad00e
WB
2973 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
2974 if (ioa_cfg->sis64)
2975 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2976 else {
2977 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2978 end_off = be32_to_cpu(sdt->entry[i].end_token);
2979
2980 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2981 bytes_to_copy = end_off - start_off;
2982 else
2983 valid = 0;
2984 }
2985 if (valid) {
1da177e4
LT
2986 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2987 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2988 continue;
2989 }
2990
2991 /* Copy data from adapter to driver buffers */
2992 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2993 bytes_to_copy);
2994
2995 ioa_dump->hdr.len += bytes_copied;
2996
2997 if (bytes_copied != bytes_to_copy) {
2998 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2999 break;
3000 }
3001 }
3002 }
3003 }
3004
3005 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3006
3007 /* Update dump_header */
3008 driver_dump->hdr.len += ioa_dump->hdr.len;
3009 wmb();
3010 ioa_cfg->sdt_state = DUMP_OBTAINED;
3011 LEAVE;
3012}
3013
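/*
 * Descriptive summary of the dump flow above: the mailbox register supplies
 * the smart dump table (SDT) address, the SDT is fetched with
 * ipr_get_ldump_data_section() and must report a READY_TO_USE state, and
 * then each valid entry (up to IPR_NUM_SDT_ENTRIES) is copied with
 * ipr_sdt_copy(), with the overall size capped at IPR_MAX_IOA_DUMP_SIZE
 * before sdt_state is marked DUMP_OBTAINED.
 */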
3014#else
3015#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
3016#endif
3017
3018/**
3019 * ipr_release_dump - Free adapter dump memory
3020 * @kref: kref struct
3021 *
3022 * Return value:
3023 * nothing
3024 **/
3025static void ipr_release_dump(struct kref *kref)
3026{
3027 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
3028 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3029 unsigned long lock_flags = 0;
3030 int i;
3031
3032 ENTER;
3033 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3034 ioa_cfg->dump = NULL;
3035 ioa_cfg->sdt_state = INACTIVE;
3036 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3037
3038 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3039 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3040
3041 kfree(dump);
3042 LEAVE;
3043}
3044
3045/**
3046 * ipr_worker_thread - Worker thread
c4028958 3047 * @work: ioa config struct
1da177e4
LT
3048 *
3049 * Called at task level from a work thread. This function takes care
 3050 * of adding and removing devices from the mid-layer as configuration
3051 * changes are detected by the adapter.
3052 *
3053 * Return value:
3054 * nothing
3055 **/
c4028958 3056static void ipr_worker_thread(struct work_struct *work)
1da177e4
LT
3057{
3058 unsigned long lock_flags;
3059 struct ipr_resource_entry *res;
3060 struct scsi_device *sdev;
3061 struct ipr_dump *dump;
c4028958
DH
3062 struct ipr_ioa_cfg *ioa_cfg =
3063 container_of(work, struct ipr_ioa_cfg, work_q);
1da177e4
LT
3064 u8 bus, target, lun;
3065 int did_work;
3066
3067 ENTER;
3068 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3069
3070 if (ioa_cfg->sdt_state == GET_DUMP) {
3071 dump = ioa_cfg->dump;
3072 if (!dump) {
3073 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3074 return;
3075 }
3076 kref_get(&dump->kref);
3077 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3078 ipr_get_ioa_dump(ioa_cfg, dump);
3079 kref_put(&dump->kref, ipr_release_dump);
3080
3081 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3082 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
3083 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3084 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3085 return;
3086 }
3087
3088restart:
3089 do {
3090 did_work = 0;
3091 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3092 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3093 return;
3094 }
3095
3096 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3097 if (res->del_from_ml && res->sdev) {
3098 did_work = 1;
3099 sdev = res->sdev;
3100 if (!scsi_device_get(sdev)) {
1da177e4
LT
3101 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3102 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3103 scsi_remove_device(sdev);
3104 scsi_device_put(sdev);
3105 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3106 }
3107 break;
3108 }
3109 }
3110 } while(did_work);
3111
3112 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3113 if (res->add_to_ml) {
3e7ebdfa
WB
3114 bus = res->bus;
3115 target = res->target;
3116 lun = res->lun;
1121b794 3117 res->add_to_ml = 0;
1da177e4
LT
3118 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3119 scsi_add_device(ioa_cfg->host, bus, target, lun);
3120 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3121 goto restart;
3122 }
3123 }
3124
3125 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ee959b00 3126 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
1da177e4
LT
3127 LEAVE;
3128}
3129
3130#ifdef CONFIG_SCSI_IPR_TRACE
3131/**
3132 * ipr_read_trace - Dump the adapter trace
2c3c8bea 3133 * @filp: open sysfs file
1da177e4 3134 * @kobj: kobject struct
91a69029 3135 * @bin_attr: bin_attribute struct
1da177e4
LT
3136 * @buf: buffer
3137 * @off: offset
3138 * @count: buffer size
3139 *
3140 * Return value:
3141 * number of bytes printed to buffer
3142 **/
2c3c8bea 3143static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
91a69029
ZR
3144 struct bin_attribute *bin_attr,
3145 char *buf, loff_t off, size_t count)
1da177e4 3146{
ee959b00
TJ
3147 struct device *dev = container_of(kobj, struct device, kobj);
3148 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3149 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3150 unsigned long lock_flags = 0;
d777aaf3 3151 ssize_t ret;
1da177e4
LT
3152
3153 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3154 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3155 IPR_TRACE_SIZE);
1da177e4 3156 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3157
3158 return ret;
1da177e4
LT
3159}
3160
3161static struct bin_attribute ipr_trace_attr = {
3162 .attr = {
3163 .name = "trace",
3164 .mode = S_IRUGO,
3165 },
3166 .size = 0,
3167 .read = ipr_read_trace,
3168};
3169#endif
3170
3171/**
3172 * ipr_show_fw_version - Show the firmware version
ee959b00
TJ
3173 * @dev: class device struct
3174 * @buf: buffer
1da177e4
LT
3175 *
3176 * Return value:
3177 * number of bytes printed to buffer
3178 **/
ee959b00
TJ
3179static ssize_t ipr_show_fw_version(struct device *dev,
3180 struct device_attribute *attr, char *buf)
1da177e4 3181{
ee959b00 3182 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3183 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3184 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3185 unsigned long lock_flags = 0;
3186 int len;
3187
3188 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3189 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3190 ucode_vpd->major_release, ucode_vpd->card_type,
3191 ucode_vpd->minor_release[0],
3192 ucode_vpd->minor_release[1]);
3193 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3194 return len;
3195}
3196
ee959b00 3197static struct device_attribute ipr_fw_version_attr = {
1da177e4
LT
3198 .attr = {
3199 .name = "fw_version",
3200 .mode = S_IRUGO,
3201 },
3202 .show = ipr_show_fw_version,
3203};
3204
3205/**
3206 * ipr_show_log_level - Show the adapter's error logging level
ee959b00
TJ
3207 * @dev: class device struct
3208 * @buf: buffer
1da177e4
LT
3209 *
3210 * Return value:
3211 * number of bytes printed to buffer
3212 **/
ee959b00
TJ
3213static ssize_t ipr_show_log_level(struct device *dev,
3214 struct device_attribute *attr, char *buf)
1da177e4 3215{
ee959b00 3216 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3217 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3218 unsigned long lock_flags = 0;
3219 int len;
3220
3221 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3222 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3223 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3224 return len;
3225}
3226
3227/**
3228 * ipr_store_log_level - Change the adapter's error logging level
ee959b00
TJ
3229 * @dev: class device struct
3230 * @buf: buffer
1da177e4
LT
3231 *
3232 * Return value:
 3233 * number of bytes consumed from the buffer
3234 **/
ee959b00
TJ
3235static ssize_t ipr_store_log_level(struct device *dev,
3236 struct device_attribute *attr,
1da177e4
LT
3237 const char *buf, size_t count)
3238{
ee959b00 3239 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3240 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3241 unsigned long lock_flags = 0;
3242
3243 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3244 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3245 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3246 return strlen(buf);
3247}
3248
ee959b00 3249static struct device_attribute ipr_log_level_attr = {
1da177e4
LT
3250 .attr = {
3251 .name = "log_level",
3252 .mode = S_IRUGO | S_IWUSR,
3253 },
3254 .show = ipr_show_log_level,
3255 .store = ipr_store_log_level
3256};
3257
3258/**
3259 * ipr_store_diagnostics - IOA Diagnostics interface
ee959b00
TJ
3260 * @dev: device struct
3261 * @buf: buffer
3262 * @count: buffer size
1da177e4
LT
3263 *
3264 * This function will reset the adapter and wait a reasonable
3265 * amount of time for any errors that the adapter might log.
3266 *
3267 * Return value:
3268 * count on success / other on failure
3269 **/
ee959b00
TJ
3270static ssize_t ipr_store_diagnostics(struct device *dev,
3271 struct device_attribute *attr,
1da177e4
LT
3272 const char *buf, size_t count)
3273{
ee959b00 3274 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3275 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3276 unsigned long lock_flags = 0;
3277 int rc = count;
3278
3279 if (!capable(CAP_SYS_ADMIN))
3280 return -EACCES;
3281
1da177e4 3282 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3283 while(ioa_cfg->in_reset_reload) {
3284 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3285 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3286 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3287 }
3288
3289 ioa_cfg->errors_logged = 0;
3290 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3291
3292 if (ioa_cfg->in_reset_reload) {
3293 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3294 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3295
3296 /* Wait for a second for any errors to be logged */
3297 msleep(1000);
3298 } else {
3299 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3300 return -EIO;
3301 }
3302
3303 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3304 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3305 rc = -EIO;
3306 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3307
3308 return rc;
3309}
3310
ee959b00 3311static struct device_attribute ipr_diagnostics_attr = {
3312 .attr = {
3313 .name = "run_diagnostics",
3314 .mode = S_IWUSR,
3315 },
3316 .store = ipr_store_diagnostics
3317};
3318
f37eb54b 3319/**
3320 * ipr_show_adapter_state - Show the adapter's state
 3321 * @dev: device struct
3322 * @buf: buffer
f37eb54b 3323 *
3324 * Return value:
3325 * number of bytes printed to buffer
3326 **/
3327static ssize_t ipr_show_adapter_state(struct device *dev,
3328 struct device_attribute *attr, char *buf)
f37eb54b 3329{
ee959b00 3330 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b 3331 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3332 unsigned long lock_flags = 0;
3333 int len;
3334
3335 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3336 if (ioa_cfg->ioa_is_dead)
3337 len = snprintf(buf, PAGE_SIZE, "offline\n");
3338 else
3339 len = snprintf(buf, PAGE_SIZE, "online\n");
3340 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3341 return len;
3342}
3343
3344/**
3345 * ipr_store_adapter_state - Change adapter state
3346 * @dev: device struct
3347 * @buf: buffer
3348 * @count: buffer size
f37eb54b 3349 *
3350 * This function will change the adapter's state.
3351 *
3352 * Return value:
3353 * count on success / other on failure
3354 **/
3355static ssize_t ipr_store_adapter_state(struct device *dev,
3356 struct device_attribute *attr,
f37eb54b 3357 const char *buf, size_t count)
3358{
ee959b00 3359 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b 3360 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3361 unsigned long lock_flags;
3362 int result = count;
3363
3364 if (!capable(CAP_SYS_ADMIN))
3365 return -EACCES;
3366
3367 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3368 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3369 ioa_cfg->ioa_is_dead = 0;
3370 ioa_cfg->reset_retries = 0;
3371 ioa_cfg->in_ioa_bringdown = 0;
3372 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3373 }
3374 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3375 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3376
3377 return result;
3378}
3379
ee959b00 3380static struct device_attribute ipr_ioa_state_attr = {
f37eb54b 3381 .attr = {
49dd0961 3382 .name = "online_state",
f37eb54b 3383 .mode = S_IRUGO | S_IWUSR,
3384 },
3385 .show = ipr_show_adapter_state,
3386 .store = ipr_store_adapter_state
3387};
3388
3389/**
3390 * ipr_store_reset_adapter - Reset the adapter
3391 * @dev: device struct
3392 * @buf: buffer
3393 * @count: buffer size
3394 *
3395 * This function will reset the adapter.
3396 *
3397 * Return value:
3398 * count on success / other on failure
3399 **/
3400static ssize_t ipr_store_reset_adapter(struct device *dev,
3401 struct device_attribute *attr,
3402 const char *buf, size_t count)
3403{
ee959b00 3404 struct Scsi_Host *shost = class_to_shost(dev);
3405 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3406 unsigned long lock_flags;
3407 int result = count;
3408
3409 if (!capable(CAP_SYS_ADMIN))
3410 return -EACCES;
3411
3412 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3413 if (!ioa_cfg->in_reset_reload)
3414 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3415 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3416 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3417
3418 return result;
3419}
3420
ee959b00 3421static struct device_attribute ipr_ioa_reset_attr = {
3422 .attr = {
3423 .name = "reset_host",
3424 .mode = S_IWUSR,
3425 },
3426 .store = ipr_store_reset_adapter
3427};
3428
3429/**
3430 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3431 * @buf_len: buffer length
3432 *
3433 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3434 * list to use for microcode download
3435 *
3436 * Return value:
3437 * pointer to sglist / NULL on failure
3438 **/
3439static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3440{
3441 int sg_size, order, bsize_elem, num_elem, i, j;
3442 struct ipr_sglist *sglist;
3443 struct scatterlist *scatterlist;
3444 struct page *page;
3445
3446 /* Get the minimum size per scatter/gather element */
3447 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3448
3449 /* Get the actual size per element */
3450 order = get_order(sg_size);
3451
3452 /* Determine the actual number of bytes per element */
3453 bsize_elem = PAGE_SIZE * (1 << order);
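	/* sg_size spreads the image over at most IPR_MAX_SGLIST - 1 entries;
	 * get_order() rounds that up to a power-of-two number of pages, so
	 * bsize_elem is the number of usable bytes in each sg element. */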
3454
3455 /* Determine the actual number of sg entries needed */
3456 if (buf_len % bsize_elem)
3457 num_elem = (buf_len / bsize_elem) + 1;
3458 else
3459 num_elem = buf_len / bsize_elem;
3460
3461 /* Allocate a scatter/gather list for the DMA */
0bc42e35 3462 sglist = kzalloc(sizeof(struct ipr_sglist) +
3463 (sizeof(struct scatterlist) * (num_elem - 1)),
3464 GFP_KERNEL);
3465
3466 if (sglist == NULL) {
3467 ipr_trace;
3468 return NULL;
3469 }
3470
1da177e4 3471 scatterlist = sglist->scatterlist;
45711f1a 3472 sg_init_table(scatterlist, num_elem);
3473
3474 sglist->order = order;
3475 sglist->num_sg = num_elem;
3476
3477 /* Allocate a bunch of sg elements */
3478 for (i = 0; i < num_elem; i++) {
3479 page = alloc_pages(GFP_KERNEL, order);
3480 if (!page) {
3481 ipr_trace;
3482
3483 /* Free up what we already allocated */
3484 for (j = i - 1; j >= 0; j--)
45711f1a 3485 __free_pages(sg_page(&scatterlist[j]), order);
3486 kfree(sglist);
3487 return NULL;
3488 }
3489
642f1490 3490 sg_set_page(&scatterlist[i], page, 0, 0);
3491 }
3492
3493 return sglist;
3494}
3495
3496/**
3497 * ipr_free_ucode_buffer - Frees a microcode download buffer
 3498 * @sglist: scatter/gather list pointer
3499 *
3500 * Free a DMA'able ucode download buffer previously allocated with
3501 * ipr_alloc_ucode_buffer
3502 *
3503 * Return value:
3504 * nothing
3505 **/
3506static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3507{
3508 int i;
3509
3510 for (i = 0; i < sglist->num_sg; i++)
45711f1a 3511 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3512
3513 kfree(sglist);
3514}
3515
3516/**
3517 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3518 * @sglist: scatter/gather list pointer
3519 * @buffer: buffer pointer
3520 * @len: buffer length
3521 *
3522 * Copy a microcode image from a user buffer into a buffer allocated by
3523 * ipr_alloc_ucode_buffer
3524 *
3525 * Return value:
3526 * 0 on success / other on failure
3527 **/
3528static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3529 u8 *buffer, u32 len)
3530{
3531 int bsize_elem, i, result = 0;
3532 struct scatterlist *scatterlist;
3533 void *kaddr;
3534
3535 /* Determine the actual number of bytes per element */
3536 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3537
3538 scatterlist = sglist->scatterlist;
3539
3540 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3541 struct page *page = sg_page(&scatterlist[i]);
3542
3543 kaddr = kmap(page);
1da177e4 3544 memcpy(kaddr, buffer, bsize_elem);
45711f1a 3545 kunmap(page);
3546
3547 scatterlist[i].length = bsize_elem;
3548
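	/* Nothing in the kmap/memcpy path above sets result, so the check
	 * below only matters if the copy step is ever changed to one that
	 * can fail. */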
3549 if (result != 0) {
3550 ipr_trace;
3551 return result;
3552 }
3553 }
3554
3555 if (len % bsize_elem) {
3556 struct page *page = sg_page(&scatterlist[i]);
3557
3558 kaddr = kmap(page);
1da177e4 3559 memcpy(kaddr, buffer, len % bsize_elem);
45711f1a 3560 kunmap(page);
3561
3562 scatterlist[i].length = len % bsize_elem;
3563 }
3564
3565 sglist->buffer_len = len;
3566 return result;
3567}
3568
3569/**
3570 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3571 * @ipr_cmd: ipr command struct
3572 * @sglist: scatter/gather list
3573 *
3574 * Builds a microcode download IOA data list (IOADL).
3575 *
3576 **/
3577static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3578 struct ipr_sglist *sglist)
3579{
3580 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3581 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3582 struct scatterlist *scatterlist = sglist->scatterlist;
3583 int i;
3584
3585 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3586 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3587 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3588
3589 ioarcb->ioadl_len =
3590 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3591 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3592 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3593 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3594 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3595 }
3596
3597 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3598}
3599
1da177e4 3600/**
12baa420 3601 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3602 * @ipr_cmd: ipr command struct
3603 * @sglist: scatter/gather list
1da177e4 3604 *
12baa420 3605 * Builds a microcode download IOA data list (IOADL).
1da177e4 3606 *
1da177e4 3607 **/
12baa420 3608static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3609 struct ipr_sglist *sglist)
1da177e4 3610{
1da177e4 3611 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 3612 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3613 struct scatterlist *scatterlist = sglist->scatterlist;
3614 int i;
3615
12baa420 3616 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 3617 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3618 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3619
3620 ioarcb->ioadl_len =
3621 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3622
3623 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3624 ioadl[i].flags_and_data_len =
3625 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3626 ioadl[i].address =
3627 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3628 }
3629
12baa420 3630 ioadl[i-1].flags_and_data_len |=
3631 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3632}
3633
3634/**
3635 * ipr_update_ioa_ucode - Update IOA's microcode
3636 * @ioa_cfg: ioa config struct
3637 * @sglist: scatter/gather list
3638 *
3639 * Initiate an adapter reset to update the IOA's microcode
3640 *
3641 * Return value:
3642 * 0 on success / -EIO on failure
3643 **/
3644static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3645 struct ipr_sglist *sglist)
3646{
3647 unsigned long lock_flags;
3648
3649 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3650 while(ioa_cfg->in_reset_reload) {
3651 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3652 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3653 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3654 }
12baa420 3655
3656 if (ioa_cfg->ucode_sglist) {
3657 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3658 dev_err(&ioa_cfg->pdev->dev,
3659 "Microcode download already in progress\n");
3660 return -EIO;
1da177e4 3661 }
12baa420 3662
3663 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3664 sglist->num_sg, DMA_TO_DEVICE);
3665
3666 if (!sglist->num_dma_sg) {
3667 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3668 dev_err(&ioa_cfg->pdev->dev,
3669 "Failed to map microcode download buffer!\n");
3670 return -EIO;
3671 }
3672
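	/* The download itself is carried out by the adapter reset job: with
	 * ucode_sglist set, the reset sequence initiated below writes the
	 * mapped image to the IOA before bringing it back online. */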
12baa420 3673 ioa_cfg->ucode_sglist = sglist;
3674 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3675 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3676 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3677
3678 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3679 ioa_cfg->ucode_sglist = NULL;
3680 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3681 return 0;
3682}
3683
3684/**
3685 * ipr_store_update_fw - Update the firmware on the adapter
 3686 * @dev: device struct
3687 * @buf: buffer
3688 * @count: buffer size
3689 *
3690 * This function will update the firmware on the adapter.
3691 *
3692 * Return value:
3693 * count on success / other on failure
3694 **/
3695static ssize_t ipr_store_update_fw(struct device *dev,
3696 struct device_attribute *attr,
3697 const char *buf, size_t count)
1da177e4 3698{
ee959b00 3699 struct Scsi_Host *shost = class_to_shost(dev);
3700 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3701 struct ipr_ucode_image_header *image_hdr;
3702 const struct firmware *fw_entry;
3703 struct ipr_sglist *sglist;
3704 char fname[100];
3705 char *src;
3706 int len, result, dnld_size;
3707
3708 if (!capable(CAP_SYS_ADMIN))
3709 return -EACCES;
3710
3711 len = snprintf(fname, 99, "%s", buf);
3712 fname[len-1] = '\0';
3713
3714 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3715 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3716 return -EIO;
3717 }
3718
3719 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3720
3721 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3722 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3723 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3724 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3725 release_firmware(fw_entry);
3726 return -EINVAL;
3727 }
3728
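	/* The firmware file begins with an ipr_ucode_image_header; only the
	 * payload following header_length bytes is downloaded to the IOA. */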
3729 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3730 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3731 sglist = ipr_alloc_ucode_buffer(dnld_size);
3732
3733 if (!sglist) {
3734 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3735 release_firmware(fw_entry);
3736 return -ENOMEM;
3737 }
3738
3739 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3740
3741 if (result) {
3742 dev_err(&ioa_cfg->pdev->dev,
3743 "Microcode buffer copy to DMA buffer failed\n");
12baa420 3744 goto out;
3745 }
3746
12baa420 3747 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 3748
12baa420 3749 if (!result)
3750 result = count;
3751out:
3752 ipr_free_ucode_buffer(sglist);
3753 release_firmware(fw_entry);
12baa420 3754 return result;
3755}
3756
ee959b00 3757static struct device_attribute ipr_update_fw_attr = {
3758 .attr = {
3759 .name = "update_fw",
3760 .mode = S_IWUSR,
3761 },
3762 .store = ipr_store_update_fw
3763};
3764
ee959b00 3765static struct device_attribute *ipr_ioa_attrs[] = {
3766 &ipr_fw_version_attr,
3767 &ipr_log_level_attr,
3768 &ipr_diagnostics_attr,
f37eb54b 3769 &ipr_ioa_state_attr,
3770 &ipr_ioa_reset_attr,
3771 &ipr_update_fw_attr,
3772 NULL,
3773};
3774
3775#ifdef CONFIG_SCSI_IPR_DUMP
3776/**
3777 * ipr_read_dump - Dump the adapter
2c3c8bea 3778 * @filp: open sysfs file
1da177e4 3779 * @kobj: kobject struct
91a69029 3780 * @bin_attr: bin_attribute struct
3781 * @buf: buffer
3782 * @off: offset
3783 * @count: buffer size
3784 *
3785 * Return value:
 3786 *	number of bytes read from the dump / other on failure
3787 **/
2c3c8bea 3788static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
3789 struct bin_attribute *bin_attr,
3790 char *buf, loff_t off, size_t count)
1da177e4 3791{
ee959b00 3792 struct device *cdev = container_of(kobj, struct device, kobj);
3793 struct Scsi_Host *shost = class_to_shost(cdev);
3794 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3795 struct ipr_dump *dump;
3796 unsigned long lock_flags = 0;
3797 char *src;
3798 int len;
3799 size_t rc = count;
3800
3801 if (!capable(CAP_SYS_ADMIN))
3802 return -EACCES;
3803
3804 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3805 dump = ioa_cfg->dump;
3806
3807 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3808 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3809 return 0;
3810 }
3811 kref_get(&dump->kref);
3812 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3813
3814 if (off > dump->driver_dump.hdr.len) {
3815 kref_put(&dump->kref, ipr_release_dump);
3816 return 0;
3817 }
3818
3819 if (off + count > dump->driver_dump.hdr.len) {
3820 count = dump->driver_dump.hdr.len - off;
3821 rc = count;
3822 }
3823
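	/* The dump is exposed as three consecutive regions: the driver_dump
	 * header, the ioa_dump header up to ioa_data, and finally the paged
	 * IOA data. off is rebased after each region so the copies below
	 * always index into the correct one. */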
3824 if (count && off < sizeof(dump->driver_dump)) {
3825 if (off + count > sizeof(dump->driver_dump))
3826 len = sizeof(dump->driver_dump) - off;
3827 else
3828 len = count;
3829 src = (u8 *)&dump->driver_dump + off;
3830 memcpy(buf, src, len);
3831 buf += len;
3832 off += len;
3833 count -= len;
3834 }
3835
3836 off -= sizeof(dump->driver_dump);
3837
3838 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3839 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3840 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3841 else
3842 len = count;
3843 src = (u8 *)&dump->ioa_dump + off;
3844 memcpy(buf, src, len);
3845 buf += len;
3846 off += len;
3847 count -= len;
3848 }
3849
3850 off -= offsetof(struct ipr_ioa_dump, ioa_data);
3851
3852 while (count) {
3853 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3854 len = PAGE_ALIGN(off) - off;
3855 else
3856 len = count;
3857 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3858 src += off & ~PAGE_MASK;
3859 memcpy(buf, src, len);
3860 buf += len;
3861 off += len;
3862 count -= len;
3863 }
3864
3865 kref_put(&dump->kref, ipr_release_dump);
3866 return rc;
3867}
3868
3869/**
3870 * ipr_alloc_dump - Prepare for adapter dump
3871 * @ioa_cfg: ioa config struct
3872 *
3873 * Return value:
3874 * 0 on success / other on failure
3875 **/
3876static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3877{
3878 struct ipr_dump *dump;
3879 unsigned long lock_flags = 0;
3880
0bc42e35 3881 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3882
3883 if (!dump) {
3884 ipr_err("Dump memory allocation failed\n");
3885 return -ENOMEM;
3886 }
3887
3888 kref_init(&dump->kref);
3889 dump->ioa_cfg = ioa_cfg;
3890
3891 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3892
3893 if (INACTIVE != ioa_cfg->sdt_state) {
3894 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3895 kfree(dump);
3896 return 0;
3897 }
3898
3899 ioa_cfg->dump = dump;
3900 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3901 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3902 ioa_cfg->dump_taken = 1;
3903 schedule_work(&ioa_cfg->work_q);
3904 }
3905 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3906
3907 return 0;
3908}
3909
3910/**
3911 * ipr_free_dump - Free adapter dump memory
3912 * @ioa_cfg: ioa config struct
3913 *
3914 * Return value:
3915 * 0 on success / other on failure
3916 **/
3917static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3918{
3919 struct ipr_dump *dump;
3920 unsigned long lock_flags = 0;
3921
3922 ENTER;
3923
3924 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3925 dump = ioa_cfg->dump;
3926 if (!dump) {
3927 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3928 return 0;
3929 }
3930
3931 ioa_cfg->dump = NULL;
3932 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3933
3934 kref_put(&dump->kref, ipr_release_dump);
3935
3936 LEAVE;
3937 return 0;
3938}
3939
3940/**
3941 * ipr_write_dump - Setup dump state of adapter
2c3c8bea 3942 * @filp: open sysfs file
1da177e4 3943 * @kobj: kobject struct
91a69029 3944 * @bin_attr: bin_attribute struct
3945 * @buf: buffer
3946 * @off: offset
3947 * @count: buffer size
3948 *
3949 * Return value:
 3950 *	count on success / other on failure
3951 **/
2c3c8bea 3952static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
3953 struct bin_attribute *bin_attr,
3954 char *buf, loff_t off, size_t count)
1da177e4 3955{
ee959b00 3956 struct device *cdev = container_of(kobj, struct device, kobj);
3957 struct Scsi_Host *shost = class_to_shost(cdev);
3958 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3959 int rc;
3960
3961 if (!capable(CAP_SYS_ADMIN))
3962 return -EACCES;
3963
3964 if (buf[0] == '1')
3965 rc = ipr_alloc_dump(ioa_cfg);
3966 else if (buf[0] == '0')
3967 rc = ipr_free_dump(ioa_cfg);
3968 else
3969 return -EINVAL;
3970
3971 if (rc)
3972 return rc;
3973 else
3974 return count;
3975}
3976
3977static struct bin_attribute ipr_dump_attr = {
3978 .attr = {
3979 .name = "dump",
3980 .mode = S_IRUSR | S_IWUSR,
3981 },
3982 .size = 0,
3983 .read = ipr_read_dump,
3984 .write = ipr_write_dump
3985};
3986#else
3987static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3988#endif
3989
3990/**
3991 * ipr_change_queue_depth - Change the device's queue depth
3992 * @sdev: scsi device struct
3993 * @qdepth: depth to set
e881a172 3994 * @reason: calling context
3995 *
3996 * Return value:
3997 * actual depth set
3998 **/
3999static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4000 int reason)
1da177e4 4001{
4002 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4003 struct ipr_resource_entry *res;
4004 unsigned long lock_flags = 0;
4005
4006 if (reason != SCSI_QDEPTH_DEFAULT)
4007 return -EOPNOTSUPP;
4008
4009 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4010 res = (struct ipr_resource_entry *)sdev->hostdata;
4011
4012 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4013 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4014 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4015
4016 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4017 return sdev->queue_depth;
4018}
4019
4020/**
4021 * ipr_change_queue_type - Change the device's queue type
 4022 * @sdev: scsi device struct
4023 * @tag_type: type of tags to use
4024 *
4025 * Return value:
4026 * actual queue type set
4027 **/
4028static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4029{
4030 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4031 struct ipr_resource_entry *res;
4032 unsigned long lock_flags = 0;
4033
4034 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4035 res = (struct ipr_resource_entry *)sdev->hostdata;
4036
4037 if (res) {
4038 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4039 /*
4040 * We don't bother quiescing the device here since the
4041 * adapter firmware does it for us.
4042 */
4043 scsi_set_tag_type(sdev, tag_type);
4044
4045 if (tag_type)
4046 scsi_activate_tcq(sdev, sdev->queue_depth);
4047 else
4048 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4049 } else
4050 tag_type = 0;
4051 } else
4052 tag_type = 0;
4053
4054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4055 return tag_type;
4056}
4057
4058/**
4059 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4060 * @dev: device struct
4061 * @buf: buffer
4062 *
4063 * Return value:
4064 * number of bytes printed to buffer
4065 **/
10523b3b 4066static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4067{
4068 struct scsi_device *sdev = to_scsi_device(dev);
4069 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4070 struct ipr_resource_entry *res;
4071 unsigned long lock_flags = 0;
4072 ssize_t len = -ENXIO;
4073
4074 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4075 res = (struct ipr_resource_entry *)sdev->hostdata;
4076 if (res)
3e7ebdfa 4077 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4078 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4079 return len;
4080}
4081
4082static struct device_attribute ipr_adapter_handle_attr = {
4083 .attr = {
4084 .name = "adapter_handle",
4085 .mode = S_IRUSR,
4086 },
4087 .show = ipr_show_adapter_handle
4088};
4089
3e7ebdfa 4090/**
4091 * ipr_show_resource_path - Show the resource path or the resource address for
4092 * this device.
4093 * @dev: device struct
4094 * @buf: buffer
4095 *
4096 * Return value:
4097 * number of bytes printed to buffer
4098 **/
4099static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4100{
4101 struct scsi_device *sdev = to_scsi_device(dev);
4102 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4103 struct ipr_resource_entry *res;
4104 unsigned long lock_flags = 0;
4105 ssize_t len = -ENXIO;
4106 char buffer[IPR_MAX_RES_PATH_LENGTH];
4107
4108 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4109 res = (struct ipr_resource_entry *)sdev->hostdata;
5adcbeb3 4110 if (res && ioa_cfg->sis64)
3e7ebdfa 4111 len = snprintf(buf, PAGE_SIZE, "%s\n",
4112 ipr_format_res_path(res->res_path, buffer,
4113 sizeof(buffer)));
4114 else if (res)
4115 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4116 res->bus, res->target, res->lun);
4117
4118 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4119 return len;
4120}
4121
4122static struct device_attribute ipr_resource_path_attr = {
4123 .attr = {
4124 .name = "resource_path",
4125 .mode = S_IRUSR,
4126 },
4127 .show = ipr_show_resource_path
4128};
4129
4130static struct device_attribute *ipr_dev_attrs[] = {
4131 &ipr_adapter_handle_attr,
3e7ebdfa 4132 &ipr_resource_path_attr,
4133 NULL,
4134};
4135
4136/**
4137 * ipr_biosparam - Return the HSC mapping
4138 * @sdev: scsi device struct
4139 * @block_device: block device pointer
4140 * @capacity: capacity of the device
4141 * @parm: Array containing returned HSC values.
4142 *
4143 * This function generates the HSC parms that fdisk uses.
4144 * We want to make sure we return something that places partitions
4145 * on 4k boundaries for best performance with the IOA.
4146 *
4147 * Return value:
4148 * 0 on success
4149 **/
4150static int ipr_biosparam(struct scsi_device *sdev,
4151 struct block_device *block_device,
4152 sector_t capacity, int *parm)
4153{
4154 int heads, sectors;
4155 sector_t cylinders;
4156
4157 heads = 128;
4158 sectors = 32;
4159
4160 cylinders = capacity;
4161 sector_div(cylinders, (128 * 32));
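	/* 128 heads * 32 sectors = 4096 sectors per cylinder (2MB with
	 * 512 byte sectors), so cylinder-aligned partitions are also 4k
	 * aligned; e.g. an 8388608 sector disk reports 2048 cylinders. */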
4162
4163 /* return result */
4164 parm[0] = heads;
4165 parm[1] = sectors;
4166 parm[2] = cylinders;
4167
4168 return 0;
4169}
4170
4171/**
4172 * ipr_find_starget - Find target based on bus/target.
4173 * @starget: scsi target struct
4174 *
4175 * Return value:
4176 * resource entry pointer if found / NULL if not found
4177 **/
4178static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4179{
4180 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4181 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4182 struct ipr_resource_entry *res;
4183
4184 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4185 if ((res->bus == starget->channel) &&
4186 (res->target == starget->id) &&
4187 (res->lun == 0)) {
4188 return res;
4189 }
4190 }
4191
4192 return NULL;
4193}
4194
4195static struct ata_port_info sata_port_info;
4196
4197/**
4198 * ipr_target_alloc - Prepare for commands to a SCSI target
4199 * @starget: scsi target struct
4200 *
4201 * If the device is a SATA device, this function allocates an
4202 * ATA port with libata, else it does nothing.
4203 *
4204 * Return value:
4205 * 0 on success / non-0 on failure
4206 **/
4207static int ipr_target_alloc(struct scsi_target *starget)
4208{
4209 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4210 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4211 struct ipr_sata_port *sata_port;
4212 struct ata_port *ap;
4213 struct ipr_resource_entry *res;
4214 unsigned long lock_flags;
4215
4216 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4217 res = ipr_find_starget(starget);
4218 starget->hostdata = NULL;
4219
4220 if (res && ipr_is_gata(res)) {
4221 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4222 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4223 if (!sata_port)
4224 return -ENOMEM;
4225
4226 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4227 if (ap) {
4228 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4229 sata_port->ioa_cfg = ioa_cfg;
4230 sata_port->ap = ap;
4231 sata_port->res = res;
4232
4233 res->sata_port = sata_port;
4234 ap->private_data = sata_port;
4235 starget->hostdata = sata_port;
4236 } else {
4237 kfree(sata_port);
4238 return -ENOMEM;
4239 }
4240 }
4241 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4242
4243 return 0;
4244}
4245
4246/**
4247 * ipr_target_destroy - Destroy a SCSI target
4248 * @starget: scsi target struct
4249 *
4250 * If the device was a SATA device, this function frees the libata
4251 * ATA port, else it does nothing.
4252 *
4253 **/
4254static void ipr_target_destroy(struct scsi_target *starget)
4255{
4256 struct ipr_sata_port *sata_port = starget->hostdata;
4257 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4258 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4259
4260 if (ioa_cfg->sis64) {
4261 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4262 clear_bit(starget->id, ioa_cfg->array_ids);
4263 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4264 clear_bit(starget->id, ioa_cfg->vset_ids);
4265 else if (starget->channel == 0)
4266 clear_bit(starget->id, ioa_cfg->target_ids);
4267 }
4268
4269 if (sata_port) {
4270 starget->hostdata = NULL;
4271 ata_sas_port_destroy(sata_port->ap);
4272 kfree(sata_port);
4273 }
4274}
4275
4276/**
4277 * ipr_find_sdev - Find device based on bus/target/lun.
4278 * @sdev: scsi device struct
4279 *
4280 * Return value:
4281 * resource entry pointer if found / NULL if not found
4282 **/
4283static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4284{
4285 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4286 struct ipr_resource_entry *res;
4287
4288 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4289 if ((res->bus == sdev->channel) &&
4290 (res->target == sdev->id) &&
4291 (res->lun == sdev->lun))
4292 return res;
4293 }
4294
4295 return NULL;
4296}
4297
4298/**
4299 * ipr_slave_destroy - Unconfigure a SCSI device
4300 * @sdev: scsi device struct
4301 *
4302 * Return value:
4303 * nothing
4304 **/
4305static void ipr_slave_destroy(struct scsi_device *sdev)
4306{
4307 struct ipr_resource_entry *res;
4308 struct ipr_ioa_cfg *ioa_cfg;
4309 unsigned long lock_flags = 0;
4310
4311 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4312
4313 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4314 res = (struct ipr_resource_entry *) sdev->hostdata;
4315 if (res) {
35a39691 4316 if (res->sata_port)
3e4ec344 4317 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4318 sdev->hostdata = NULL;
4319 res->sdev = NULL;
35a39691 4320 res->sata_port = NULL;
4321 }
4322 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4323}
4324
4325/**
4326 * ipr_slave_configure - Configure a SCSI device
4327 * @sdev: scsi device struct
4328 *
4329 * This function configures the specified scsi device.
4330 *
4331 * Return value:
4332 * 0 on success
4333 **/
4334static int ipr_slave_configure(struct scsi_device *sdev)
4335{
4336 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4337 struct ipr_resource_entry *res;
dd406ef8 4338 struct ata_port *ap = NULL;
1da177e4 4339 unsigned long lock_flags = 0;
3e7ebdfa 4340 char buffer[IPR_MAX_RES_PATH_LENGTH];
4341
4342 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4343 res = sdev->hostdata;
4344 if (res) {
4345 if (ipr_is_af_dasd_device(res))
4346 sdev->type = TYPE_RAID;
0726ce26 4347 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 4348 sdev->scsi_level = 4;
0726ce26 4349 sdev->no_uld_attach = 1;
4350 }
1da177e4 4351 if (ipr_is_vset_device(res)) {
4352 blk_queue_rq_timeout(sdev->request_queue,
4353 IPR_VSET_RW_TIMEOUT);
086fa5ff 4354 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
1da177e4 4355 }
4356 if (ipr_is_gata(res) && res->sata_port)
4357 ap = res->sata_port->ap;
4358 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4359
4360 if (ap) {
35a39691 4361 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4362 ata_sas_slave_configure(sdev, ap);
4363 } else
35a39691 4364 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4365 if (ioa_cfg->sis64)
4366 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4367 ipr_format_res_path(res->res_path, buffer,
4368 sizeof(buffer)));
dd406ef8 4369 return 0;
4370 }
4371 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4372 return 0;
4373}
4374
4375/**
4376 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4377 * @sdev: scsi device struct
4378 *
4379 * This function initializes an ATA port so that future commands
4380 * sent through queuecommand will work.
4381 *
4382 * Return value:
4383 * 0 on success
4384 **/
4385static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4386{
4387 struct ipr_sata_port *sata_port = NULL;
4388 int rc = -ENXIO;
4389
4390 ENTER;
4391 if (sdev->sdev_target)
4392 sata_port = sdev->sdev_target->hostdata;
4393 if (sata_port)
4394 rc = ata_sas_port_init(sata_port->ap);
4395 if (rc)
4396 ipr_slave_destroy(sdev);
4397
4398 LEAVE;
4399 return rc;
4400}
4401
4402/**
4403 * ipr_slave_alloc - Prepare for commands to a device.
4404 * @sdev: scsi device struct
4405 *
4406 * This function saves a pointer to the resource entry
4407 * in the scsi device struct if the device exists. We
4408 * can then use this pointer in ipr_queuecommand when
4409 * handling new commands.
4410 *
4411 * Return value:
692aebfc 4412 * 0 on success / -ENXIO if device does not exist
4413 **/
4414static int ipr_slave_alloc(struct scsi_device *sdev)
4415{
4416 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4417 struct ipr_resource_entry *res;
4418 unsigned long lock_flags;
692aebfc 4419 int rc = -ENXIO;
4420
4421 sdev->hostdata = NULL;
4422
4423 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4424
4425 res = ipr_find_sdev(sdev);
4426 if (res) {
4427 res->sdev = sdev;
4428 res->add_to_ml = 0;
4429 res->in_erp = 0;
4430 sdev->hostdata = res;
4431 if (!ipr_is_naca_model(res))
4432 res->needs_sync_complete = 1;
4433 rc = 0;
4434 if (ipr_is_gata(res)) {
4435 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4436 return ipr_ata_slave_alloc(sdev);
4437 }
4438 }
4439
4440 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4441
692aebfc 4442 return rc;
4443}
4444
4445/**
4446 * ipr_eh_host_reset - Reset the host adapter
4447 * @scsi_cmd: scsi command struct
4448 *
4449 * Return value:
4450 * SUCCESS / FAILED
4451 **/
df0ae249 4452static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
4453{
4454 struct ipr_ioa_cfg *ioa_cfg;
4455 int rc;
4456
4457 ENTER;
4458 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4459
4460 dev_err(&ioa_cfg->pdev->dev,
4461 "Adapter being reset as a result of error recovery.\n");
4462
4463 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4464 ioa_cfg->sdt_state = GET_DUMP;
4465
4466 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4467
4468 LEAVE;
4469 return rc;
4470}
4471
4472static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4473{
4474 int rc;
4475
4476 spin_lock_irq(cmd->device->host->host_lock);
4477 rc = __ipr_eh_host_reset(cmd);
4478 spin_unlock_irq(cmd->device->host->host_lock);
4479
4480 return rc;
4481}
4482
4483/**
4484 * ipr_device_reset - Reset the device
4485 * @ioa_cfg: ioa config struct
4486 * @res: resource entry struct
4487 *
4488 * This function issues a device reset to the affected device.
4489 * If the device is a SCSI device, a LUN reset will be sent
4490 * to the device first. If that does not work, a target reset
4491 * will be sent. If the device is a SATA device, a PHY reset will
4492 * be sent.
4493 *
4494 * Return value:
4495 * 0 on success / non-zero on failure
4496 **/
4497static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4498 struct ipr_resource_entry *res)
4499{
4500 struct ipr_cmnd *ipr_cmd;
4501 struct ipr_ioarcb *ioarcb;
4502 struct ipr_cmd_pkt *cmd_pkt;
35a39691 4503 struct ipr_ioarcb_ata_regs *regs;
4504 u32 ioasc;
4505
4506 ENTER;
4507 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4508 ioarcb = &ipr_cmd->ioarcb;
4509 cmd_pkt = &ioarcb->cmd_pkt;
4510
4511 if (ipr_cmd->ioa_cfg->sis64) {
4512 regs = &ipr_cmd->i.ata_ioadl.regs;
4513 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4514 } else
4515 regs = &ioarcb->u.add_data.u.regs;
c6513096 4516
3e7ebdfa 4517 ioarcb->res_handle = res->res_handle;
4518 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4519 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4520 if (ipr_is_gata(res)) {
4521 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
a32c055f 4522 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4523 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4524 }
4525
4526 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
96d21f00 4527 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
c6513096 4528 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4529 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4530 if (ipr_cmd->ioa_cfg->sis64)
4531 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4532 sizeof(struct ipr_ioasa_gata));
4533 else
4534 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4535 sizeof(struct ipr_ioasa_gata));
4536 }
4537
4538 LEAVE;
4539 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4540}
4541
4542/**
4543 * ipr_sata_reset - Reset the SATA port
cc0680a5 4544 * @link: SATA link to reset
4545 * @classes: class of the attached device
4546 *
cc0680a5 4547 * This function issues a SATA phy reset to the affected ATA link.
4548 *
4549 * Return value:
4550 * 0 on success / non-zero on failure
4551 **/
cc0680a5 4552static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
120bda35 4553 unsigned long deadline)
35a39691 4554{
cc0680a5 4555 struct ipr_sata_port *sata_port = link->ap->private_data;
4556 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4557 struct ipr_resource_entry *res;
4558 unsigned long lock_flags = 0;
4559 int rc = -ENXIO;
4560
4561 ENTER;
4562 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4563 while(ioa_cfg->in_reset_reload) {
4564 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4565 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4566 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4567 }
4568
4569 res = sata_port->res;
4570 if (res) {
4571 rc = ipr_device_reset(ioa_cfg, res);
3e7ebdfa 4572 *classes = res->ata_class;
4573 }
4574
4575 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4576 LEAVE;
4577 return rc;
4578}
4579
4580/**
4581 * ipr_eh_dev_reset - Reset the device
4582 * @scsi_cmd: scsi command struct
4583 *
4584 * This function issues a device reset to the affected device.
4585 * A LUN reset will be sent to the device first. If that does
4586 * not work, a target reset will be sent.
4587 *
4588 * Return value:
4589 * SUCCESS / FAILED
4590 **/
94d0e7b8 4591static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
4592{
4593 struct ipr_cmnd *ipr_cmd;
4594 struct ipr_ioa_cfg *ioa_cfg;
4595 struct ipr_resource_entry *res;
4596 struct ata_port *ap;
4597 int rc = 0;
4598
4599 ENTER;
4600 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4601 res = scsi_cmd->device->hostdata;
4602
eeb88307 4603 if (!res)
4604 return FAILED;
4605
4606 /*
4607 * If we are currently going through reset/reload, return failed. This will force the
4608 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4609 * reset to complete
4610 */
4611 if (ioa_cfg->in_reset_reload)
4612 return FAILED;
4613 if (ioa_cfg->ioa_is_dead)
4614 return FAILED;
4615
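	/* Redirect the done handlers of any commands still outstanding to
	 * this device so completions arriving during the reset take the
	 * error handling paths; outstanding ATA qcs are additionally
	 * flagged as failed with a timeout error. */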
4616 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3e7ebdfa 4617 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4618 if (ipr_cmd->scsi_cmd)
4619 ipr_cmd->done = ipr_scsi_eh_done;
4620 if (ipr_cmd->qc)
4621 ipr_cmd->done = ipr_sata_eh_done;
4622 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4623 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4624 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4625 }
4626 }
4627 }
4628
4629 res->resetting_device = 1;
fb3ed3cb 4630 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4631
4632 if (ipr_is_gata(res) && res->sata_port) {
4633 ap = res->sata_port->ap;
4634 spin_unlock_irq(scsi_cmd->device->host->host_lock);
a1efdaba 4635 ata_std_error_handler(ap);
35a39691 4636 spin_lock_irq(scsi_cmd->device->host->host_lock);
4637
4638 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3e7ebdfa 4639 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4640 rc = -EIO;
4641 break;
4642 }
4643 }
4644 } else
4645 rc = ipr_device_reset(ioa_cfg, res);
4646 res->resetting_device = 0;
4647
1da177e4 4648 LEAVE;
c6513096 4649 return (rc ? FAILED : SUCCESS);
4650}
4651
4652static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4653{
4654 int rc;
4655
4656 spin_lock_irq(cmd->device->host->host_lock);
4657 rc = __ipr_eh_dev_reset(cmd);
4658 spin_unlock_irq(cmd->device->host->host_lock);
4659
4660 return rc;
4661}
4662
4663/**
4664 * ipr_bus_reset_done - Op done function for bus reset.
4665 * @ipr_cmd: ipr command struct
4666 *
4667 * This function is the op done function for a bus reset
4668 *
4669 * Return value:
4670 * none
4671 **/
4672static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4673{
4674 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4675 struct ipr_resource_entry *res;
4676
4677 ENTER;
4678 if (!ioa_cfg->sis64)
4679 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4680 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4681 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4682 break;
4683 }
1da177e4 4684 }
4685
4686 /*
4687 * If abort has not completed, indicate the reset has, else call the
4688 * abort's done function to wake the sleeping eh thread
4689 */
4690 if (ipr_cmd->sibling->sibling)
4691 ipr_cmd->sibling->sibling = NULL;
4692 else
4693 ipr_cmd->sibling->done(ipr_cmd->sibling);
4694
4695 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4696 LEAVE;
4697}
4698
4699/**
4700 * ipr_abort_timeout - An abort task has timed out
4701 * @ipr_cmd: ipr command struct
4702 *
4703 * This function handles when an abort task times out. If this
4704 * happens we issue a bus reset since we have resources tied
4705 * up that must be freed before returning to the midlayer.
4706 *
4707 * Return value:
4708 * none
4709 **/
4710static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4711{
4712 struct ipr_cmnd *reset_cmd;
4713 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4714 struct ipr_cmd_pkt *cmd_pkt;
4715 unsigned long lock_flags = 0;
4716
4717 ENTER;
4718 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4719 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4720 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4721 return;
4722 }
4723
fb3ed3cb 4724 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4725 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4726 ipr_cmd->sibling = reset_cmd;
4727 reset_cmd->sibling = ipr_cmd;
4728 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4729 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4730 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4731 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4732 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4733
4734 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4735 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4736 LEAVE;
4737}
4738
4739/**
4740 * ipr_cancel_op - Cancel specified op
4741 * @scsi_cmd: scsi command struct
4742 *
4743 * This function cancels specified op.
4744 *
4745 * Return value:
4746 * SUCCESS / FAILED
4747 **/
4748static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4749{
4750 struct ipr_cmnd *ipr_cmd;
4751 struct ipr_ioa_cfg *ioa_cfg;
4752 struct ipr_resource_entry *res;
4753 struct ipr_cmd_pkt *cmd_pkt;
4754 u32 ioasc;
4755 int op_found = 0;
4756
4757 ENTER;
4758 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4759 res = scsi_cmd->device->hostdata;
4760
4761 /* If we are currently going through reset/reload, return failed.
4762 * This will force the mid-layer to call ipr_eh_host_reset,
4763 * which will then go to sleep and wait for the reset to complete
4764 */
4765 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4766 return FAILED;
04d9768f 4767 if (!res || !ipr_is_gscsi(res))
4768 return FAILED;
4769
4770 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4771 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4772 ipr_cmd->done = ipr_scsi_eh_done;
4773 op_found = 1;
4774 break;
4775 }
4776 }
4777
4778 if (!op_found)
4779 return SUCCESS;
4780
4781 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3e7ebdfa 4782 ipr_cmd->ioarcb.res_handle = res->res_handle;
4783 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4784 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4785 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4786 ipr_cmd->u.sdev = scsi_cmd->device;
4787
4788 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4789 scsi_cmd->cmnd[0]);
1da177e4 4790 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
96d21f00 4791 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4792
4793 /*
4794 * If the abort task timed out and we sent a bus reset, we will get
 4795	 * one of the following responses to the abort
4796 */
4797 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4798 ioasc = 0;
4799 ipr_trace;
4800 }
4801
4802 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
ee0a90fa 4803 if (!ipr_is_naca_model(res))
4804 res->needs_sync_complete = 1;
4805
4806 LEAVE;
4807 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4808}
4809
4810/**
4811 * ipr_eh_abort - Abort a single op
4812 * @scsi_cmd: scsi command struct
4813 *
4814 * Return value:
4815 * SUCCESS / FAILED
4816 **/
4817static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4818{
4819 unsigned long flags;
4820 int rc;
4821
4822 ENTER;
1da177e4 4823
4824 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4825 rc = ipr_cancel_op(scsi_cmd);
4826 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4827
4828 LEAVE;
8fa728a2 4829 return rc;
4830}
4831
4832/**
4833 * ipr_handle_other_interrupt - Handle "other" interrupts
4834 * @ioa_cfg: ioa config struct
4835 *
4836 * Return value:
4837 * IRQ_NONE / IRQ_HANDLED
4838 **/
64ffdb76 4839static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg)
4840{
4841 irqreturn_t rc = IRQ_HANDLED;
4842 volatile u32 int_reg, int_mask_reg;
4843
4844 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4845 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4846
4847 /* If an interrupt on the adapter did not occur, ignore it.
4848 * Or in the case of SIS 64, check for a stage change interrupt.
4849 */
4850 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
4851 if (ioa_cfg->sis64) {
4852 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4853 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4854 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4855
4856 /* clear stage change */
4857 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4858 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4859 list_del(&ioa_cfg->reset_cmd->queue);
4860 del_timer(&ioa_cfg->reset_cmd->timer);
4861 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4862 return IRQ_HANDLED;
4863 }
4864 }
4865
4866 return IRQ_NONE;
4867 }
4868
4869 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4870 /* Mask the interrupt */
4871 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4872
4873 /* Clear the interrupt */
4874 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4875 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4876
4877 list_del(&ioa_cfg->reset_cmd->queue);
4878 del_timer(&ioa_cfg->reset_cmd->timer);
4879 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4880 } else {
4881 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4882 ioa_cfg->ioa_unit_checked = 1;
4883 else
4884 dev_err(&ioa_cfg->pdev->dev,
4885 "Permanent IOA failure. 0x%08X\n", int_reg);
4886
4887 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4888 ioa_cfg->sdt_state = GET_DUMP;
4889
4890 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4891 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4892 }
4893
4894 return rc;
4895}
4896
4897/**
4898 * ipr_isr_eh - Interrupt service routine error handler
4899 * @ioa_cfg: ioa config struct
4900 * @msg: message to log
4901 *
4902 * Return value:
4903 * none
4904 **/
4905static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4906{
4907 ioa_cfg->errors_logged++;
4908 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4909
4910 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4911 ioa_cfg->sdt_state = GET_DUMP;
4912
4913 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4914}
4915
4916/**
4917 * ipr_isr - Interrupt service routine
4918 * @irq: irq number
4919 * @devp: pointer to ioa config struct
4920 *
4921 * Return value:
4922 * IRQ_NONE / IRQ_HANDLED
4923 **/
7d12e780 4924static irqreturn_t ipr_isr(int irq, void *devp)
4925{
4926 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4927 unsigned long lock_flags = 0;
64ffdb76 4928 volatile u32 int_reg;
4929 u32 ioasc;
4930 u16 cmd_index;
3feeb89d 4931 int num_hrrq = 0;
4932 struct ipr_cmnd *ipr_cmd;
4933 irqreturn_t rc = IRQ_NONE;
4934
4935 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4936
4937 /* If interrupts are disabled, ignore the interrupt */
4938 if (!ioa_cfg->allow_interrupts) {
4939 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4940 return IRQ_NONE;
4941 }
4942
4943 while (1) {
4944 ipr_cmd = NULL;
4945
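		/* Host RRQ entries are consumed while their toggle bit matches
		 * ioa_cfg->toggle_bit; when the queue wraps back to hrrq_start
		 * the expected toggle value is flipped so entries left from the
		 * previous pass are not mistaken for new responses. */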
4946 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4947 ioa_cfg->toggle_bit) {
4948
4949 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4950 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4951
4952 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3feeb89d 4953 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
4954 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4955 return IRQ_HANDLED;
4956 }
4957
4958 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4959
96d21f00 4960 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4961
4962 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4963
4964 list_del(&ipr_cmd->queue);
4965 del_timer(&ipr_cmd->timer);
4966 ipr_cmd->done(ipr_cmd);
4967
4968 rc = IRQ_HANDLED;
4969
4970 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4971 ioa_cfg->hrrq_curr++;
4972 } else {
4973 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4974 ioa_cfg->toggle_bit ^= 1u;
4975 }
4976 }
4977
4978 if (ipr_cmd != NULL) {
4979 /* Clear the PCI interrupt */
3feeb89d 4980 do {
214777ba 4981 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
64ffdb76 4982 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
4983 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4984 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4985
4986 if (int_reg & IPR_PCII_HRRQ_UPDATED) {
4987 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
4988 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4989 return IRQ_HANDLED;
4990 }
4991
4992 } else
4993 break;
4994 }
4995
4996 if (unlikely(rc == IRQ_NONE))
64ffdb76 4997 rc = ipr_handle_other_interrupt(ioa_cfg);
4998
4999 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5000 return rc;
5001}
5002
5003/**
5004 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5005 * @ioa_cfg: ioa config struct
5006 * @ipr_cmd: ipr command struct
5007 *
5008 * Return value:
5009 * 0 on success / -1 on failure
5010 **/
5011static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5012 struct ipr_cmnd *ipr_cmd)
5013{
5014 int i, nseg;
5015 struct scatterlist *sg;
5016 u32 length;
5017 u32 ioadl_flags = 0;
5018 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5019 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5020 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5021
5022 length = scsi_bufflen(scsi_cmd);
5023 if (!length)
5024 return 0;
5025
5026 nseg = scsi_dma_map(scsi_cmd);
5027 if (nseg < 0) {
5028 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5029 return -1;
5030 }
5031
5032 ipr_cmd->dma_use_sg = nseg;
5033
438b0331 5034 ioarcb->data_transfer_length = cpu_to_be32(length);
5035 ioarcb->ioadl_len =
5036 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
438b0331 5037
5038 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5039 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5040 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5041 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5042 ioadl_flags = IPR_IOADL_FLAGS_READ;
5043
5044 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5045 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5046 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5047 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5048 }
5049
5050 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5051 return 0;
5052}
5053
5054/**
5055 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5056 * @ioa_cfg: ioa config struct
5057 * @ipr_cmd: ipr command struct
5058 *
5059 * Return value:
5060 * 0 on success / -1 on failure
5061 **/
5062static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5063 struct ipr_cmnd *ipr_cmd)
5064{
5065 int i, nseg;
5066 struct scatterlist *sg;
5067 u32 length;
5068 u32 ioadl_flags = 0;
5069 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5070 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5071 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4 5072
5073 length = scsi_bufflen(scsi_cmd);
5074 if (!length)
5075 return 0;
5076
5077 nseg = scsi_dma_map(scsi_cmd);
5078 if (nseg < 0) {
5079 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5080 return -1;
5081 }
51b1c7e1 5082
5083 ipr_cmd->dma_use_sg = nseg;
5084
5085 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5086 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5087 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5088 ioarcb->data_transfer_length = cpu_to_be32(length);
5089 ioarcb->ioadl_len =
5090 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5091 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5092 ioadl_flags = IPR_IOADL_FLAGS_READ;
5093 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5094 ioarcb->read_ioadl_len =
5095 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5096 }
1da177e4 5097
a32c055f
WB
5098 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5099 ioadl = ioarcb->u.add_data.u.ioadl;
5100 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5101 offsetof(struct ipr_ioarcb, u.add_data));
63015bc9
FT
5102 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5103 }
1da177e4 5104
63015bc9
FT
5105 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5106 ioadl[i].flags_and_data_len =
5107 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5108 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
1da177e4
LT
5109 }
5110
63015bc9
FT
5111 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5112 return 0;
1da177e4
LT
5113}
5114
5115/**
5116 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5117 * @scsi_cmd: scsi command struct
5118 *
5119 * Return value:
5120 * task attributes
5121 **/
5122static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5123{
5124 u8 tag[2];
5125 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5126
5127 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5128 switch (tag[0]) {
5129 case MSG_SIMPLE_TAG:
5130 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5131 break;
5132 case MSG_HEAD_TAG:
5133 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5134 break;
5135 case MSG_ORDERED_TAG:
5136 rc = IPR_FLAGS_LO_ORDERED_TASK;
5137 break;
 5138 }
5139 }
5140
5141 return rc;
5142}
5143
5144/**
5145 * ipr_erp_done - Process completion of ERP for a device
5146 * @ipr_cmd: ipr command struct
5147 *
 5148 * This function copies the sense buffer into the scsi_cmd
 5149 * struct and invokes the scsi_done callback.
5150 *
5151 * Return value:
5152 * nothing
5153 **/
5154static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5155{
5156 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5157 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5158 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 5159 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5160
5161 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5162 scsi_cmd->result |= (DID_ERROR << 16);
fb3ed3cb
BK
5163 scmd_printk(KERN_ERR, scsi_cmd,
5164 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
1da177e4
LT
5165 } else {
5166 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5167 SCSI_SENSE_BUFFERSIZE);
5168 }
5169
5170 if (res) {
ee0a90fa 5171 if (!ipr_is_naca_model(res))
5172 res->needs_sync_complete = 1;
1da177e4
LT
5173 res->in_erp = 0;
5174 }
63015bc9 5175 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
5176 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5177 scsi_cmd->scsi_done(scsi_cmd);
5178}
5179
5180/**
5181 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5182 * @ipr_cmd: ipr command struct
5183 *
5184 * Return value:
5185 * none
5186 **/
5187static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5188{
51b1c7e1 5189 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
96d21f00 5190 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
a32c055f 5191 dma_addr_t dma_addr = ipr_cmd->dma_addr;
1da177e4
LT
5192
5193 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 5194 ioarcb->data_transfer_length = 0;
1da177e4 5195 ioarcb->read_data_transfer_length = 0;
a32c055f 5196 ioarcb->ioadl_len = 0;
1da177e4 5197 ioarcb->read_ioadl_len = 0;
96d21f00
WB
5198 ioasa->hdr.ioasc = 0;
5199 ioasa->hdr.residual_data_len = 0;
a32c055f
WB
5200
5201 if (ipr_cmd->ioa_cfg->sis64)
5202 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5203 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5204 else {
5205 ioarcb->write_ioadl_addr =
5206 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5207 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5208 }
1da177e4
LT
5209}
5210
5211/**
5212 * ipr_erp_request_sense - Send request sense to a device
5213 * @ipr_cmd: ipr command struct
5214 *
5215 * This function sends a request sense to a device as a result
5216 * of a check condition.
5217 *
5218 * Return value:
5219 * nothing
5220 **/
5221static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5222{
5223 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
96d21f00 5224 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5225
5226 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5227 ipr_erp_done(ipr_cmd);
5228 return;
5229 }
5230
5231 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5232
5233 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5234 cmd_pkt->cdb[0] = REQUEST_SENSE;
5235 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5236 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5237 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5238 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5239
a32c055f
WB
5240 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5241 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
5242
5243 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5244 IPR_REQUEST_SENSE_TIMEOUT * 2);
5245}
5246
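/*
 * Illustrative sketch (not part of the driver): the REQUEST SENSE CDB that
 * ipr_erp_request_sense() above builds into cmd_pkt->cdb is the standard
 * 6-byte form -- opcode 0x03 in byte 0 and the allocation length in byte 4,
 * which the driver sets to SCSI_SENSE_BUFFERSIZE. The helper name below is
 * hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>

static void build_request_sense_cdb(uint8_t cdb[6], uint8_t alloc_len)
{
	memset(cdb, 0, 6);
	cdb[0] = 0x03;		/* REQUEST SENSE opcode */
	cdb[4] = alloc_len;	/* how much sense data the device may return */
}
#endif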
5247/**
5248 * ipr_erp_cancel_all - Send cancel all to a device
5249 * @ipr_cmd: ipr command struct
5250 *
5251 * This function sends a cancel all to a device to clear the
5252 * queue. If we are running TCQ on the device, QERR is set to 1,
5253 * which means all outstanding ops have been dropped on the floor.
5254 * Cancel all will return them to us.
5255 *
5256 * Return value:
5257 * nothing
5258 **/
5259static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5260{
5261 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5262 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5263 struct ipr_cmd_pkt *cmd_pkt;
5264
5265 res->in_erp = 1;
5266
5267 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5268
5269 if (!scsi_get_tag_type(scsi_cmd->device)) {
5270 ipr_erp_request_sense(ipr_cmd);
5271 return;
5272 }
5273
5274 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5275 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5276 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5277
5278 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5279 IPR_CANCEL_ALL_TIMEOUT);
5280}
5281
5282/**
5283 * ipr_dump_ioasa - Dump contents of IOASA
5284 * @ioa_cfg: ioa config struct
5285 * @ipr_cmd: ipr command struct
fe964d0a 5286 * @res: resource entry struct
1da177e4
LT
5287 *
5288 * This function is invoked by the interrupt handler when ops
5289 * fail. It will log the IOASA if appropriate. Only called
5290 * for GPDD ops.
5291 *
5292 * Return value:
5293 * none
5294 **/
5295static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 5296 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
1da177e4
LT
5297{
5298 int i;
5299 u16 data_len;
b0692dd4 5300 u32 ioasc, fd_ioasc;
96d21f00 5301 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
1da177e4
LT
5302 __be32 *ioasa_data = (__be32 *)ioasa;
5303 int error_index;
5304
96d21f00
WB
5305 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5306 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5307
5308 if (0 == ioasc)
5309 return;
5310
5311 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5312 return;
5313
b0692dd4
BK
5314 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5315 error_index = ipr_get_error(fd_ioasc);
5316 else
5317 error_index = ipr_get_error(ioasc);
1da177e4
LT
5318
5319 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5320 /* Don't log an error if the IOA already logged one */
96d21f00 5321 if (ioasa->hdr.ilid != 0)
1da177e4
LT
5322 return;
5323
cc9bd5d4
BK
5324 if (!ipr_is_gscsi(res))
5325 return;
5326
1da177e4
LT
5327 if (ipr_error_table[error_index].log_ioasa == 0)
5328 return;
5329 }
5330
fe964d0a 5331 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4 5332
96d21f00
WB
5333 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5334 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5335 data_len = sizeof(struct ipr_ioasa64);
5336 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
1da177e4 5337 data_len = sizeof(struct ipr_ioasa);
1da177e4
LT
5338
5339 ipr_err("IOASA Dump:\n");
5340
5341 for (i = 0; i < data_len / 4; i += 4) {
5342 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5343 be32_to_cpu(ioasa_data[i]),
5344 be32_to_cpu(ioasa_data[i+1]),
5345 be32_to_cpu(ioasa_data[i+2]),
5346 be32_to_cpu(ioasa_data[i+3]));
5347 }
5348}
5349
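/*
 * Illustrative sketch (not part of the driver): the dump loop in
 * ipr_dump_ioasa() above prints four 32-bit words per line, prefixed with
 * the byte offset of the first word. The sketch assumes native-endian data
 * and a length that is a multiple of 16 bytes (the driver additionally
 * byte-swaps each word with be32_to_cpu()); the function name is hypothetical.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void dump_words(const uint32_t *data, size_t len_bytes)
{
	size_t i;

	for (i = 0; i < len_bytes / 4; i += 4)
		printf("%08zX: %08X %08X %08X %08X\n", i * 4,
		       data[i], data[i + 1], data[i + 2], data[i + 3]);
}
#endif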
5350/**
5351 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 5352 * @ipr_cmd: ipr command struct (provides the IOASA and the target
 5353 * scsi_cmd sense buffer)
5354 *
5355 * Return value:
5356 * none
5357 **/
5358static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5359{
5360 u32 failing_lba;
5361 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5362 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
96d21f00
WB
5363 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5364 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
1da177e4
LT
5365
5366 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5367
5368 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5369 return;
5370
5371 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5372
5373 if (ipr_is_vset_device(res) &&
5374 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5375 ioasa->u.vset.failing_lba_hi != 0) {
5376 sense_buf[0] = 0x72;
5377 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5378 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5379 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5380
5381 sense_buf[7] = 12;
5382 sense_buf[8] = 0;
5383 sense_buf[9] = 0x0A;
5384 sense_buf[10] = 0x80;
5385
5386 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5387
5388 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5389 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5390 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5391 sense_buf[15] = failing_lba & 0x000000ff;
5392
5393 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5394
5395 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5396 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5397 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5398 sense_buf[19] = failing_lba & 0x000000ff;
5399 } else {
5400 sense_buf[0] = 0x70;
5401 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5402 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5403 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5404
5405 /* Illegal request */
5406 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
96d21f00 5407 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
1da177e4
LT
5408 sense_buf[7] = 10; /* additional length */
5409
5410 /* IOARCB was in error */
5411 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5412 sense_buf[15] = 0xC0;
5413 else /* Parameter data was invalid */
5414 sense_buf[15] = 0x80;
5415
5416 sense_buf[16] =
5417 ((IPR_FIELD_POINTER_MASK &
96d21f00 5418 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
1da177e4
LT
5419 sense_buf[17] =
5420 (IPR_FIELD_POINTER_MASK &
96d21f00 5421 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
1da177e4
LT
5422 } else {
5423 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5424 if (ipr_is_vset_device(res))
5425 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5426 else
5427 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5428
5429 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5430 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5431 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5432 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5433 sense_buf[6] = failing_lba & 0x000000ff;
5434 }
5435
5436 sense_buf[7] = 6; /* additional length */
5437 }
5438 }
5439}
5440
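/*
 * Illustrative sketch (not part of the driver): the fixed-format (0x70)
 * sense data layout that ipr_gen_sense() above produces for the
 * "do not reallocate" case -- sense key in byte 2, the failing LBA in the
 * big-endian Information field (bytes 3-6) with the Valid bit set in byte 0,
 * and ASC/ASCQ in bytes 12-13. The helper below targets an 18-byte buffer
 * and its name is hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>

static void fill_fixed_sense(uint8_t sense[18], uint8_t key, uint8_t asc,
			     uint8_t ascq, uint32_t failing_lba)
{
	memset(sense, 0, 18);
	sense[0] = 0x70 | 0x80;			/* fixed format, Valid bit */
	sense[2] = key;				/* sense key */
	sense[3] = (failing_lba >> 24) & 0xff;	/* Information field: */
	sense[4] = (failing_lba >> 16) & 0xff;	/* failing LBA, big endian */
	sense[5] = (failing_lba >> 8) & 0xff;
	sense[6] = failing_lba & 0xff;
	sense[7] = 10;				/* additional sense length */
	sense[12] = asc;			/* additional sense code */
	sense[13] = ascq;			/* additional sense code qualifier */
}
#endif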
ee0a90fa 5441/**
5442 * ipr_get_autosense - Copy autosense data to sense buffer
5443 * @ipr_cmd: ipr command struct
5444 *
5445 * This function copies the autosense buffer to the buffer
5446 * in the scsi_cmd, if there is autosense available.
5447 *
5448 * Return value:
5449 * 1 if autosense was available / 0 if not
5450 **/
5451static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5452{
96d21f00
WB
5453 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5454 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
ee0a90fa 5455
96d21f00 5456 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
ee0a90fa 5457 return 0;
5458
96d21f00
WB
5459 if (ipr_cmd->ioa_cfg->sis64)
5460 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5461 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5462 SCSI_SENSE_BUFFERSIZE));
5463 else
5464 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5465 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5466 SCSI_SENSE_BUFFERSIZE));
ee0a90fa 5467 return 1;
5468}
5469
1da177e4
LT
5470/**
5471 * ipr_erp_start - Process an error response for a SCSI op
5472 * @ioa_cfg: ioa config struct
5473 * @ipr_cmd: ipr command struct
5474 *
5475 * This function determines whether or not to initiate ERP
5476 * on the affected device.
5477 *
5478 * Return value:
5479 * nothing
5480 **/
5481static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5482 struct ipr_cmnd *ipr_cmd)
5483{
5484 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5485 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 5486 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8a048994 5487 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5488
5489 if (!res) {
5490 ipr_scsi_eh_done(ipr_cmd);
5491 return;
5492 }
5493
8a048994 5494 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
1da177e4
LT
5495 ipr_gen_sense(ipr_cmd);
5496
cc9bd5d4
BK
5497 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5498
8a048994 5499 switch (masked_ioasc) {
1da177e4 5500 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa 5501 if (ipr_is_naca_model(res))
5502 scsi_cmd->result |= (DID_ABORT << 16);
5503 else
5504 scsi_cmd->result |= (DID_IMM_RETRY << 16);
1da177e4
LT
5505 break;
5506 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 5507 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
1da177e4
LT
5508 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5509 break;
5510 case IPR_IOASC_HW_SEL_TIMEOUT:
5511 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa 5512 if (!ipr_is_naca_model(res))
5513 res->needs_sync_complete = 1;
1da177e4
LT
5514 break;
5515 case IPR_IOASC_SYNC_REQUIRED:
5516 if (!res->in_erp)
5517 res->needs_sync_complete = 1;
5518 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5519 break;
5520 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 5521 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
1da177e4
LT
5522 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5523 break;
5524 case IPR_IOASC_BUS_WAS_RESET:
5525 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5526 /*
5527 * Report the bus reset and ask for a retry. The device
5528 * will give CC/UA the next command.
5529 */
5530 if (!res->resetting_device)
5531 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5532 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 5533 if (!ipr_is_naca_model(res))
5534 res->needs_sync_complete = 1;
1da177e4
LT
5535 break;
5536 case IPR_IOASC_HW_DEV_BUS_STATUS:
5537 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5538 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa 5539 if (!ipr_get_autosense(ipr_cmd)) {
5540 if (!ipr_is_naca_model(res)) {
5541 ipr_erp_cancel_all(ipr_cmd);
5542 return;
5543 }
5544 }
1da177e4 5545 }
ee0a90fa 5546 if (!ipr_is_naca_model(res))
5547 res->needs_sync_complete = 1;
1da177e4
LT
5548 break;
5549 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5550 break;
5551 default:
5b7304fb
BK
5552 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5553 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 5554 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
1da177e4
LT
5555 res->needs_sync_complete = 1;
5556 break;
5557 }
5558
63015bc9 5559 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
5560 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5561 scsi_cmd->scsi_done(scsi_cmd);
5562}
5563
5564/**
5565 * ipr_scsi_done - mid-layer done function
5566 * @ipr_cmd: ipr command struct
5567 *
5568 * This function is invoked by the interrupt handler for
5569 * ops generated by the SCSI mid-layer
5570 *
5571 * Return value:
5572 * none
5573 **/
5574static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5575{
5576 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5577 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
96d21f00 5578 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4 5579
96d21f00 5580 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
1da177e4
LT
5581
5582 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
63015bc9 5583 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
5584 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5585 scsi_cmd->scsi_done(scsi_cmd);
5586 } else
5587 ipr_erp_start(ioa_cfg, ipr_cmd);
5588}
5589
1da177e4
LT
5590/**
5591 * ipr_queuecommand - Queue a mid-layer request
5592 * @scsi_cmd: scsi command struct
5593 * @done: done function
5594 *
5595 * This function queues a request generated by the mid-layer.
5596 *
5597 * Return value:
5598 * 0 on success
5599 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5600 * SCSI_MLQUEUE_HOST_BUSY if host is busy
5601 **/
5602static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5603 void (*done) (struct scsi_cmnd *))
5604{
5605 struct ipr_ioa_cfg *ioa_cfg;
5606 struct ipr_resource_entry *res;
5607 struct ipr_ioarcb *ioarcb;
5608 struct ipr_cmnd *ipr_cmd;
5609 int rc = 0;
5610
5611 scsi_cmd->scsi_done = done;
5612 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5613 res = scsi_cmd->device->hostdata;
5614 scsi_cmd->result = (DID_OK << 16);
5615
5616 /*
 5617 * We are currently blocking all devices due to a host reset.
 5618 * We have told the host to stop giving us new requests, but
5619 * ERP ops don't count. FIXME
5620 */
5621 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5622 return SCSI_MLQUEUE_HOST_BUSY;
5623
5624 /*
5625 * FIXME - Create scsi_set_host_offline interface
5626 * and the ioa_is_dead check can be removed
5627 */
5628 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5629 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5630 scsi_cmd->result = (DID_NO_CONNECT << 16);
5631 scsi_cmd->scsi_done(scsi_cmd);
5632 return 0;
5633 }
5634
35a39691
BK
5635 if (ipr_is_gata(res) && res->sata_port)
5636 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5637
1da177e4
LT
5638 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5639 ioarcb = &ipr_cmd->ioarcb;
5640 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5641
5642 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5643 ipr_cmd->scsi_cmd = scsi_cmd;
3e7ebdfa 5644 ioarcb->res_handle = res->res_handle;
1da177e4 5645 ipr_cmd->done = ipr_scsi_done;
3e7ebdfa 5646 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
1da177e4
LT
5647
5648 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5649 if (scsi_cmd->underflow == 0)
5650 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5651
5652 if (res->needs_sync_complete) {
5653 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5654 res->needs_sync_complete = 0;
5655 }
5656
5657 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5658 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5659 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5660 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5661 }
5662
5663 if (scsi_cmd->cmnd[0] >= 0xC0 &&
5664 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5665 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5666
a32c055f
WB
5667 if (likely(rc == 0)) {
5668 if (ioa_cfg->sis64)
5669 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5670 else
5671 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5672 }
1da177e4
LT
5673
5674 if (likely(rc == 0)) {
5675 mb();
a32c055f 5676 ipr_send_command(ipr_cmd);
1da177e4
LT
5677 } else {
5678 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5679 return SCSI_MLQUEUE_HOST_BUSY;
5680 }
5681
5682 return 0;
5683}
5684
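/*
 * Illustrative sketch (not part of the driver): the mb() issued just before
 * ipr_send_command() in ipr_queuecommand() above orders the IOARCB/IOADL
 * stores in DMA-coherent memory ahead of the MMIO write that hands the
 * command to the adapter. A generic version of that pattern is shown below;
 * the struct, field, and register names are hypothetical, only mb(),
 * writel() and lower_32_bits() are real kernel interfaces.
 */
#if 0
static void post_command(struct my_adapter *adap, struct my_desc *desc,
			 dma_addr_t desc_dma)
{
	/* 1. Fill in the descriptor in DMA-coherent memory. */
	desc->opcode = MY_OP_SCSI_CDB;

	/*
	 * 2. Ensure the descriptor contents are globally visible before the
	 *    doorbell write lets the adapter start fetching them.
	 */
	mb();

	/* 3. MMIO write telling the adapter where the new command lives. */
	writel(lower_32_bits(desc_dma), adap->regs + MY_DOORBELL_REG);
}
#endif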
35a39691
BK
5685/**
5686 * ipr_ioctl - IOCTL handler
5687 * @sdev: scsi device struct
5688 * @cmd: IOCTL cmd
5689 * @arg: IOCTL arg
5690 *
5691 * Return value:
5692 * 0 on success / other on failure
5693 **/
bd705f2d 5694static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
35a39691
BK
5695{
5696 struct ipr_resource_entry *res;
5697
5698 res = (struct ipr_resource_entry *)sdev->hostdata;
0ce3a7e5
BK
5699 if (res && ipr_is_gata(res)) {
5700 if (cmd == HDIO_GET_IDENTITY)
5701 return -ENOTTY;
94be9a58 5702 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
0ce3a7e5 5703 }
35a39691
BK
5704
5705 return -EINVAL;
5706}
5707
1da177e4
LT
5708/**
 5709 * ipr_ioa_info - Get information about the card/driver
 5710 * @host: scsi host struct
5711 *
5712 * Return value:
5713 * pointer to buffer with description string
5714 **/
 5715 static const char *ipr_ioa_info(struct Scsi_Host *host)
5716{
5717 static char buffer[512];
5718 struct ipr_ioa_cfg *ioa_cfg;
5719 unsigned long lock_flags = 0;
5720
5721 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5722
5723 spin_lock_irqsave(host->host_lock, lock_flags);
5724 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5725 spin_unlock_irqrestore(host->host_lock, lock_flags);
5726
5727 return buffer;
5728}
5729
5730static struct scsi_host_template driver_template = {
5731 .module = THIS_MODULE,
5732 .name = "IPR",
5733 .info = ipr_ioa_info,
35a39691 5734 .ioctl = ipr_ioctl,
1da177e4
LT
5735 .queuecommand = ipr_queuecommand,
5736 .eh_abort_handler = ipr_eh_abort,
5737 .eh_device_reset_handler = ipr_eh_dev_reset,
5738 .eh_host_reset_handler = ipr_eh_host_reset,
5739 .slave_alloc = ipr_slave_alloc,
5740 .slave_configure = ipr_slave_configure,
5741 .slave_destroy = ipr_slave_destroy,
35a39691
BK
5742 .target_alloc = ipr_target_alloc,
5743 .target_destroy = ipr_target_destroy,
1da177e4
LT
5744 .change_queue_depth = ipr_change_queue_depth,
5745 .change_queue_type = ipr_change_queue_type,
5746 .bios_param = ipr_biosparam,
5747 .can_queue = IPR_MAX_COMMANDS,
5748 .this_id = -1,
5749 .sg_tablesize = IPR_MAX_SGLIST,
5750 .max_sectors = IPR_IOA_MAX_SECTORS,
5751 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5752 .use_clustering = ENABLE_CLUSTERING,
5753 .shost_attrs = ipr_ioa_attrs,
5754 .sdev_attrs = ipr_dev_attrs,
5755 .proc_name = IPR_NAME
5756};
5757
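/*
 * Illustrative sketch (not part of the driver): how a scsi_host_template
 * such as driver_template above is typically brought to life at probe time.
 * scsi_host_alloc() sizes the per-host private area, scsi_add_host()
 * registers the host with the SCSI midlayer, and scsi_scan_host() kicks off
 * device discovery. The driver's real probe path (later in this file) does
 * considerably more setup; error handling is trimmed and the function name
 * below is hypothetical.
 */
#if 0
static int example_probe(struct pci_dev *pdev)
{
	struct Scsi_Host *host;

	host = scsi_host_alloc(&driver_template, sizeof(struct ipr_ioa_cfg));
	if (!host)
		return -ENOMEM;

	if (scsi_add_host(host, &pdev->dev)) {
		scsi_host_put(host);
		return -ENODEV;
	}

	scsi_scan_host(host);
	return 0;
}
#endif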
35a39691
BK
5758/**
5759 * ipr_ata_phy_reset - libata phy_reset handler
5760 * @ap: ata port to reset
5761 *
5762 **/
5763static void ipr_ata_phy_reset(struct ata_port *ap)
5764{
5765 unsigned long flags;
5766 struct ipr_sata_port *sata_port = ap->private_data;
5767 struct ipr_resource_entry *res = sata_port->res;
5768 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5769 int rc;
5770
5771 ENTER;
5772 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 5773 while (ioa_cfg->in_reset_reload) {
5774 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5775 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5776 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5777 }
5778
5779 if (!ioa_cfg->allow_cmds)
5780 goto out_unlock;
5781
5782 rc = ipr_device_reset(ioa_cfg, res);
5783
5784 if (rc) {
3e4ec344 5785 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
5786 goto out_unlock;
5787 }
5788
3e7ebdfa
WB
5789 ap->link.device[0].class = res->ata_class;
5790 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
3e4ec344 5791 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
5792
5793out_unlock:
5794 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5795 LEAVE;
5796}
5797
5798/**
5799 * ipr_ata_post_internal - Cleanup after an internal command
5800 * @qc: ATA queued command
5801 *
5802 * Return value:
5803 * none
5804 **/
5805static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5806{
5807 struct ipr_sata_port *sata_port = qc->ap->private_data;
5808 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5809 struct ipr_cmnd *ipr_cmd;
5810 unsigned long flags;
5811
5812 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
73d98ff0
BK
 5813 while (ioa_cfg->in_reset_reload) {
5814 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5815 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5816 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5817 }
5818
35a39691
BK
5819 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5820 if (ipr_cmd->qc == qc) {
5821 ipr_device_reset(ioa_cfg, sata_port->res);
5822 break;
5823 }
5824 }
5825 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5826}
5827
35a39691
BK
5828/**
5829 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5830 * @regs: destination
5831 * @tf: source ATA taskfile
5832 *
5833 * Return value:
5834 * none
5835 **/
5836static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5837 struct ata_taskfile *tf)
5838{
5839 regs->feature = tf->feature;
5840 regs->nsect = tf->nsect;
5841 regs->lbal = tf->lbal;
5842 regs->lbam = tf->lbam;
5843 regs->lbah = tf->lbah;
5844 regs->device = tf->device;
5845 regs->command = tf->command;
5846 regs->hob_feature = tf->hob_feature;
5847 regs->hob_nsect = tf->hob_nsect;
5848 regs->hob_lbal = tf->hob_lbal;
5849 regs->hob_lbam = tf->hob_lbam;
5850 regs->hob_lbah = tf->hob_lbah;
5851 regs->ctl = tf->ctl;
5852}
5853
5854/**
5855 * ipr_sata_done - done function for SATA commands
5856 * @ipr_cmd: ipr command struct
5857 *
5858 * This function is invoked by the interrupt handler for
5859 * ops generated by the SCSI mid-layer to SATA devices
5860 *
5861 * Return value:
5862 * none
5863 **/
5864static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5865{
5866 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5867 struct ata_queued_cmd *qc = ipr_cmd->qc;
5868 struct ipr_sata_port *sata_port = qc->ap->private_data;
5869 struct ipr_resource_entry *res = sata_port->res;
96d21f00 5870 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
35a39691 5871
96d21f00
WB
5872 if (ipr_cmd->ioa_cfg->sis64)
5873 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5874 sizeof(struct ipr_ioasa_gata));
5875 else
5876 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5877 sizeof(struct ipr_ioasa_gata));
35a39691
BK
5878 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5879
96d21f00 5880 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
3e7ebdfa 5881 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
35a39691
BK
5882
5883 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
96d21f00 5884 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
35a39691 5885 else
96d21f00 5886 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
35a39691
BK
5887 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5888 ata_qc_complete(qc);
5889}
5890
a32c055f
WB
5891/**
5892 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5893 * @ipr_cmd: ipr command struct
5894 * @qc: ATA queued command
5895 *
5896 **/
5897static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5898 struct ata_queued_cmd *qc)
5899{
5900 u32 ioadl_flags = 0;
5901 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5902 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5903 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5904 int len = qc->nbytes;
5905 struct scatterlist *sg;
5906 unsigned int si;
5907 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5908
5909 if (len == 0)
5910 return;
5911
5912 if (qc->dma_dir == DMA_TO_DEVICE) {
5913 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5914 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5915 } else if (qc->dma_dir == DMA_FROM_DEVICE)
5916 ioadl_flags = IPR_IOADL_FLAGS_READ;
5917
5918 ioarcb->data_transfer_length = cpu_to_be32(len);
5919 ioarcb->ioadl_len =
5920 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5921 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5922 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5923
5924 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5925 ioadl64->flags = cpu_to_be32(ioadl_flags);
5926 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5927 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5928
5929 last_ioadl64 = ioadl64;
5930 ioadl64++;
5931 }
5932
5933 if (likely(last_ioadl64))
5934 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5935}
5936
35a39691
BK
5937/**
5938 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5939 * @ipr_cmd: ipr command struct
5940 * @qc: ATA queued command
5941 *
5942 **/
5943static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5944 struct ata_queued_cmd *qc)
5945{
5946 u32 ioadl_flags = 0;
5947 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5948 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3be6cbd7 5949 struct ipr_ioadl_desc *last_ioadl = NULL;
dde20207 5950 int len = qc->nbytes;
35a39691 5951 struct scatterlist *sg;
ff2aeb1e 5952 unsigned int si;
35a39691
BK
5953
5954 if (len == 0)
5955 return;
5956
5957 if (qc->dma_dir == DMA_TO_DEVICE) {
5958 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5959 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
5960 ioarcb->data_transfer_length = cpu_to_be32(len);
5961 ioarcb->ioadl_len =
35a39691
BK
5962 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5963 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5964 ioadl_flags = IPR_IOADL_FLAGS_READ;
5965 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5966 ioarcb->read_ioadl_len =
5967 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5968 }
5969
ff2aeb1e 5970 for_each_sg(qc->sg, sg, qc->n_elem, si) {
35a39691
BK
5971 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5972 ioadl->address = cpu_to_be32(sg_dma_address(sg));
3be6cbd7
JG
5973
5974 last_ioadl = ioadl;
5975 ioadl++;
35a39691 5976 }
3be6cbd7
JG
5977
5978 if (likely(last_ioadl))
5979 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
35a39691
BK
5980}
5981
5982/**
5983 * ipr_qc_issue - Issue a SATA qc to a device
5984 * @qc: queued command
5985 *
5986 * Return value:
 5987 * 0 on success / AC_ERR_* value on failure
5988 **/
5989static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5990{
5991 struct ata_port *ap = qc->ap;
5992 struct ipr_sata_port *sata_port = ap->private_data;
5993 struct ipr_resource_entry *res = sata_port->res;
5994 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5995 struct ipr_cmnd *ipr_cmd;
5996 struct ipr_ioarcb *ioarcb;
5997 struct ipr_ioarcb_ata_regs *regs;
5998
5999 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
0feeed82 6000 return AC_ERR_SYSTEM;
35a39691
BK
6001
6002 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6003 ioarcb = &ipr_cmd->ioarcb;
35a39691 6004
a32c055f
WB
6005 if (ioa_cfg->sis64) {
6006 regs = &ipr_cmd->i.ata_ioadl.regs;
6007 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6008 } else
6009 regs = &ioarcb->u.add_data.u.regs;
6010
6011 memset(regs, 0, sizeof(*regs));
6012 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
35a39691
BK
6013
6014 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6015 ipr_cmd->qc = qc;
6016 ipr_cmd->done = ipr_sata_done;
3e7ebdfa 6017 ipr_cmd->ioarcb.res_handle = res->res_handle;
35a39691
BK
6018 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6019 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6020 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
dde20207 6021 ipr_cmd->dma_use_sg = qc->n_elem;
35a39691 6022
a32c055f
WB
6023 if (ioa_cfg->sis64)
6024 ipr_build_ata_ioadl64(ipr_cmd, qc);
6025 else
6026 ipr_build_ata_ioadl(ipr_cmd, qc);
6027
35a39691
BK
6028 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6029 ipr_copy_sata_tf(regs, &qc->tf);
6030 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
3e7ebdfa 6031 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
35a39691
BK
6032
6033 switch (qc->tf.protocol) {
6034 case ATA_PROT_NODATA:
6035 case ATA_PROT_PIO:
6036 break;
6037
6038 case ATA_PROT_DMA:
6039 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6040 break;
6041
0dc36888
TH
6042 case ATAPI_PROT_PIO:
6043 case ATAPI_PROT_NODATA:
35a39691
BK
6044 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6045 break;
6046
0dc36888 6047 case ATAPI_PROT_DMA:
35a39691
BK
6048 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6049 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6050 break;
6051
6052 default:
6053 WARN_ON(1);
0feeed82 6054 return AC_ERR_INVALID;
35a39691
BK
6055 }
6056
6057 mb();
a32c055f
WB
6058
6059 ipr_send_command(ipr_cmd);
6060
35a39691
BK
6061 return 0;
6062}
6063
4c9bf4e7
TH
6064/**
6065 * ipr_qc_fill_rtf - Read result TF
6066 * @qc: ATA queued command
6067 *
6068 * Return value:
6069 * true
6070 **/
6071static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6072{
6073 struct ipr_sata_port *sata_port = qc->ap->private_data;
6074 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6075 struct ata_taskfile *tf = &qc->result_tf;
6076
6077 tf->feature = g->error;
6078 tf->nsect = g->nsect;
6079 tf->lbal = g->lbal;
6080 tf->lbam = g->lbam;
6081 tf->lbah = g->lbah;
6082 tf->device = g->device;
6083 tf->command = g->status;
6084 tf->hob_nsect = g->hob_nsect;
6085 tf->hob_lbal = g->hob_lbal;
6086 tf->hob_lbam = g->hob_lbam;
6087 tf->hob_lbah = g->hob_lbah;
6088 tf->ctl = g->alt_status;
6089
6090 return true;
6091}
6092
35a39691 6093static struct ata_port_operations ipr_sata_ops = {
35a39691 6094 .phy_reset = ipr_ata_phy_reset,
a1efdaba 6095 .hardreset = ipr_sata_reset,
35a39691 6096 .post_internal_cmd = ipr_ata_post_internal,
35a39691
BK
6097 .qc_prep = ata_noop_qc_prep,
6098 .qc_issue = ipr_qc_issue,
4c9bf4e7 6099 .qc_fill_rtf = ipr_qc_fill_rtf,
35a39691
BK
6100 .port_start = ata_sas_port_start,
6101 .port_stop = ata_sas_port_stop
6102};
6103
6104static struct ata_port_info sata_port_info = {
6105 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
6106 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
6107 .pio_mask = 0x10, /* pio4 */
6108 .mwdma_mask = 0x07,
6109 .udma_mask = 0x7f, /* udma0-6 */
6110 .port_ops = &ipr_sata_ops
6111};
6112
1da177e4
LT
6113#ifdef CONFIG_PPC_PSERIES
6114static const u16 ipr_blocked_processors[] = {
6115 PV_NORTHSTAR,
6116 PV_PULSAR,
6117 PV_POWER4,
6118 PV_ICESTAR,
6119 PV_SSTAR,
6120 PV_POWER4p,
6121 PV_630,
6122 PV_630p
6123};
6124
6125/**
6126 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6127 * @ioa_cfg: ioa cfg struct
6128 *
6129 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6130 * certain pSeries hardware. This function determines if the given
 6131 * adapter is in one of these configurations or not.
6132 *
6133 * Return value:
6134 * 1 if adapter is not supported / 0 if adapter is supported
6135 **/
6136static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6137{
1da177e4
LT
6138 int i;
6139
44c10138
AK
6140 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6141 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
6142 if (__is_processor(ipr_blocked_processors[i]))
6143 return 1;
1da177e4
LT
6144 }
6145 }
6146 return 0;
6147}
6148#else
6149#define ipr_invalid_adapter(ioa_cfg) 0
6150#endif
6151
6152/**
6153 * ipr_ioa_bringdown_done - IOA bring down completion.
6154 * @ipr_cmd: ipr command struct
6155 *
6156 * This function processes the completion of an adapter bring down.
6157 * It wakes any reset sleepers.
6158 *
6159 * Return value:
6160 * IPR_RC_JOB_RETURN
6161 **/
6162static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6163{
6164 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6165
6166 ENTER;
6167 ioa_cfg->in_reset_reload = 0;
6168 ioa_cfg->reset_retries = 0;
6169 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6170 wake_up_all(&ioa_cfg->reset_wait_q);
6171
6172 spin_unlock_irq(ioa_cfg->host->host_lock);
6173 scsi_unblock_requests(ioa_cfg->host);
6174 spin_lock_irq(ioa_cfg->host->host_lock);
6175 LEAVE;
6176
6177 return IPR_RC_JOB_RETURN;
6178}
6179
6180/**
6181 * ipr_ioa_reset_done - IOA reset completion.
6182 * @ipr_cmd: ipr command struct
6183 *
6184 * This function processes the completion of an adapter reset.
6185 * It schedules any necessary mid-layer add/removes and
6186 * wakes any reset sleepers.
6187 *
6188 * Return value:
6189 * IPR_RC_JOB_RETURN
6190 **/
6191static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6192{
6193 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6194 struct ipr_resource_entry *res;
6195 struct ipr_hostrcb *hostrcb, *temp;
6196 int i = 0;
6197
6198 ENTER;
6199 ioa_cfg->in_reset_reload = 0;
6200 ioa_cfg->allow_cmds = 1;
6201 ioa_cfg->reset_cmd = NULL;
3d1d0da6 6202 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
6203
6204 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6205 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6206 ipr_trace;
6207 break;
6208 }
6209 }
6210 schedule_work(&ioa_cfg->work_q);
6211
6212 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6213 list_del(&hostrcb->queue);
6214 if (i++ < IPR_NUM_LOG_HCAMS)
6215 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6216 else
6217 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6218 }
6219
6bb04170 6220 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
1da177e4
LT
6221 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6222
6223 ioa_cfg->reset_retries = 0;
6224 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6225 wake_up_all(&ioa_cfg->reset_wait_q);
6226
30237853 6227 spin_unlock(ioa_cfg->host->host_lock);
1da177e4 6228 scsi_unblock_requests(ioa_cfg->host);
30237853 6229 spin_lock(ioa_cfg->host->host_lock);
1da177e4
LT
6230
6231 if (!ioa_cfg->allow_cmds)
6232 scsi_block_requests(ioa_cfg->host);
6233
6234 LEAVE;
6235 return IPR_RC_JOB_RETURN;
6236}
6237
6238/**
6239 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6240 * @supported_dev: supported device struct
6241 * @vpids: vendor product id struct
6242 *
6243 * Return value:
6244 * none
6245 **/
6246static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6247 struct ipr_std_inq_vpids *vpids)
6248{
6249 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6250 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6251 supported_dev->num_records = 1;
6252 supported_dev->data_length =
6253 cpu_to_be16(sizeof(struct ipr_supported_device));
6254 supported_dev->reserved = 0;
6255}
6256
6257/**
6258 * ipr_set_supported_devs - Send Set Supported Devices for a device
6259 * @ipr_cmd: ipr command struct
6260 *
a32c055f 6261 * This function sends a Set Supported Devices command to the adapter.
1da177e4
LT
6262 *
6263 * Return value:
6264 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6265 **/
6266static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6267{
6268 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6269 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
1da177e4
LT
6270 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6271 struct ipr_resource_entry *res = ipr_cmd->u.res;
6272
6273 ipr_cmd->job_step = ipr_ioa_reset_done;
6274
6275 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 6276 if (!ipr_is_scsi_disk(res))
1da177e4
LT
6277 continue;
6278
6279 ipr_cmd->u.res = res;
3e7ebdfa 6280 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
1da177e4
LT
6281
6282 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6283 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6284 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6285
6286 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 6287 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
1da177e4
LT
6288 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6289 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6290
a32c055f
WB
6291 ipr_init_ioadl(ipr_cmd,
6292 ioa_cfg->vpd_cbs_dma +
6293 offsetof(struct ipr_misc_cbs, supp_dev),
6294 sizeof(struct ipr_supported_device),
6295 IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
6296
6297 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6298 IPR_SET_SUP_DEVICE_TIMEOUT);
6299
3e7ebdfa
WB
6300 if (!ioa_cfg->sis64)
6301 ipr_cmd->job_step = ipr_set_supported_devs;
1da177e4
LT
6302 return IPR_RC_JOB_RETURN;
6303 }
6304
6305 return IPR_RC_JOB_CONTINUE;
6306}
6307
6308/**
6309 * ipr_get_mode_page - Locate specified mode page
6310 * @mode_pages: mode page buffer
6311 * @page_code: page code to find
6312 * @len: minimum required length for mode page
6313 *
6314 * Return value:
6315 * pointer to mode page / NULL on failure
6316 **/
6317static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6318 u32 page_code, u32 len)
6319{
6320 struct ipr_mode_page_hdr *mode_hdr;
6321 u32 page_length;
6322 u32 length;
6323
6324 if (!mode_pages || (mode_pages->hdr.length == 0))
6325 return NULL;
6326
6327 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6328 mode_hdr = (struct ipr_mode_page_hdr *)
6329 (mode_pages->data + mode_pages->hdr.block_desc_len);
6330
6331 while (length) {
6332 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6333 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6334 return mode_hdr;
6335 break;
6336 } else {
6337 page_length = (sizeof(struct ipr_mode_page_hdr) +
6338 mode_hdr->page_length);
6339 length -= page_length;
6340 mode_hdr = (struct ipr_mode_page_hdr *)
6341 ((unsigned long)mode_hdr + page_length);
6342 }
6343 }
6344 return NULL;
6345}
6346
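/*
 * Illustrative sketch (not part of the driver): the MODE SENSE(6) buffer
 * layout that ipr_get_mode_page() above walks -- a 4-byte parameter header
 * whose length byte excludes itself, an optional run of block descriptors,
 * then a chain of page headers where byte 0 carries the page code and
 * byte 1 the number of bytes that follow it. Plain C with hypothetical
 * names, simplified from the driver's struct-based version.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static const uint8_t *find_mode_page(const uint8_t *buf, size_t buf_len,
				     uint8_t wanted_page)
{
	size_t data_len = (size_t)buf[0] + 1;	/* mode data length + itself */
	size_t off = 4 + buf[3];		/* skip header and block descriptors */

	if (data_len > buf_len)
		data_len = buf_len;

	while (off + 2 <= data_len) {
		uint8_t page_code = buf[off] & 0x3f;
		uint8_t page_len = buf[off + 1];	/* bytes after this byte */

		if (page_code == wanted_page)
			return &buf[off];

		off += 2 + page_len;
	}

	return NULL;
}
#endif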
6347/**
6348 * ipr_check_term_power - Check for term power errors
6349 * @ioa_cfg: ioa config struct
6350 * @mode_pages: IOAFP mode pages buffer
6351 *
6352 * Check the IOAFP's mode page 28 for term power errors
6353 *
6354 * Return value:
6355 * nothing
6356 **/
6357static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6358 struct ipr_mode_pages *mode_pages)
6359{
6360 int i;
6361 int entry_length;
6362 struct ipr_dev_bus_entry *bus;
6363 struct ipr_mode_page28 *mode_page;
6364
6365 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6366 sizeof(struct ipr_mode_page28));
6367
6368 entry_length = mode_page->entry_length;
6369
6370 bus = mode_page->bus;
6371
6372 for (i = 0; i < mode_page->num_entries; i++) {
6373 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6374 dev_err(&ioa_cfg->pdev->dev,
6375 "Term power is absent on scsi bus %d\n",
6376 bus->res_addr.bus);
6377 }
6378
6379 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6380 }
6381}
6382
6383/**
6384 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6385 * @ioa_cfg: ioa config struct
6386 *
 6387 * Looks through the config table checking for SES devices. If
 6388 * an SES device is listed in the SES table with a maximum SCSI
 6389 * bus speed, the speed of that bus is limited accordingly.
6390 *
6391 * Return value:
6392 * none
6393 **/
6394static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6395{
6396 u32 max_xfer_rate;
6397 int i;
6398
6399 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6400 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6401 ioa_cfg->bus_attr[i].bus_width);
6402
6403 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6404 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6405 }
6406}
6407
6408/**
6409 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6410 * @ioa_cfg: ioa config struct
6411 * @mode_pages: mode page 28 buffer
6412 *
6413 * Updates mode page 28 based on driver configuration
6414 *
6415 * Return value:
6416 * none
6417 **/
6418static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6419 struct ipr_mode_pages *mode_pages)
6420{
6421 int i, entry_length;
6422 struct ipr_dev_bus_entry *bus;
6423 struct ipr_bus_attributes *bus_attr;
6424 struct ipr_mode_page28 *mode_page;
6425
6426 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6427 sizeof(struct ipr_mode_page28));
6428
6429 entry_length = mode_page->entry_length;
6430
6431 /* Loop for each device bus entry */
6432 for (i = 0, bus = mode_page->bus;
6433 i < mode_page->num_entries;
6434 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6435 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6436 dev_err(&ioa_cfg->pdev->dev,
6437 "Invalid resource address reported: 0x%08X\n",
6438 IPR_GET_PHYS_LOC(bus->res_addr));
6439 continue;
6440 }
6441
6442 bus_attr = &ioa_cfg->bus_attr[i];
6443 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6444 bus->bus_width = bus_attr->bus_width;
6445 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6446 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6447 if (bus_attr->qas_enabled)
6448 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6449 else
6450 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6451 }
6452}
6453
6454/**
6455 * ipr_build_mode_select - Build a mode select command
6456 * @ipr_cmd: ipr command struct
6457 * @res_handle: resource handle to send command to
 6458 * @parm: Byte 1 of the Mode Select command
6459 * @dma_addr: DMA buffer address
6460 * @xfer_len: data transfer length
6461 *
6462 * Return value:
6463 * none
6464 **/
6465static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
a32c055f
WB
6466 __be32 res_handle, u8 parm,
6467 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 6468{
1da177e4
LT
6469 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6470
6471 ioarcb->res_handle = res_handle;
6472 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6473 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6474 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6475 ioarcb->cmd_pkt.cdb[1] = parm;
6476 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6477
a32c055f 6478 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
6479}
6480
6481/**
6482 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6483 * @ipr_cmd: ipr command struct
6484 *
6485 * This function sets up the SCSI bus attributes and sends
6486 * a Mode Select for Page 28 to activate them.
6487 *
6488 * Return value:
6489 * IPR_RC_JOB_RETURN
6490 **/
6491static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6492{
6493 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6494 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6495 int length;
6496
6497 ENTER;
4733804c
BK
6498 ipr_scsi_bus_speed_limit(ioa_cfg);
6499 ipr_check_term_power(ioa_cfg, mode_pages);
6500 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6501 length = mode_pages->hdr.length + 1;
6502 mode_pages->hdr.length = 0;
1da177e4
LT
6503
6504 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6505 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6506 length);
6507
f72919ec
WB
6508 ipr_cmd->job_step = ipr_set_supported_devs;
6509 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6510 struct ipr_resource_entry, queue);
1da177e4
LT
6511 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6512
6513 LEAVE;
6514 return IPR_RC_JOB_RETURN;
6515}
6516
6517/**
6518 * ipr_build_mode_sense - Builds a mode sense command
6519 * @ipr_cmd: ipr command struct
6520 * @res: resource entry struct
6521 * @parm: Byte 2 of mode sense command
6522 * @dma_addr: DMA address of mode sense buffer
6523 * @xfer_len: Size of DMA buffer
6524 *
6525 * Return value:
6526 * none
6527 **/
6528static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6529 __be32 res_handle,
a32c055f 6530 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 6531{
1da177e4
LT
6532 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6533
6534 ioarcb->res_handle = res_handle;
6535 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6536 ioarcb->cmd_pkt.cdb[2] = parm;
6537 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6538 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6539
a32c055f 6540 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
6541}
6542
dfed823e 6543/**
6544 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6545 * @ipr_cmd: ipr command struct
6546 *
6547 * This function handles the failure of an IOA bringup command.
6548 *
6549 * Return value:
6550 * IPR_RC_JOB_RETURN
6551 **/
6552static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6553{
6554 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 6555 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e 6556
6557 dev_err(&ioa_cfg->pdev->dev,
6558 "0x%02X failed with IOASC: 0x%08X\n",
6559 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6560
6561 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6562 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6563 return IPR_RC_JOB_RETURN;
6564}
6565
6566/**
6567 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6568 * @ipr_cmd: ipr command struct
6569 *
6570 * This function handles the failure of a Mode Sense to the IOAFP.
6571 * Some adapters do not handle all mode pages.
6572 *
6573 * Return value:
6574 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6575 **/
6576static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6577{
f72919ec 6578 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 6579 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e 6580
6581 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
f72919ec
WB
6582 ipr_cmd->job_step = ipr_set_supported_devs;
6583 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6584 struct ipr_resource_entry, queue);
dfed823e 6585 return IPR_RC_JOB_CONTINUE;
6586 }
6587
6588 return ipr_reset_cmd_failed(ipr_cmd);
6589}
6590
1da177e4
LT
6591/**
6592 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6593 * @ipr_cmd: ipr command struct
6594 *
 6595 * This function sends a Page 28 mode sense to the IOA to
6596 * retrieve SCSI bus attributes.
6597 *
6598 * Return value:
6599 * IPR_RC_JOB_RETURN
6600 **/
6601static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6602{
6603 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6604
6605 ENTER;
6606 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6607 0x28, ioa_cfg->vpd_cbs_dma +
6608 offsetof(struct ipr_misc_cbs, mode_pages),
6609 sizeof(struct ipr_mode_pages));
6610
6611 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 6612 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
6613
6614 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6615
6616 LEAVE;
6617 return IPR_RC_JOB_RETURN;
6618}
6619
ac09c349
BK
6620/**
6621 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6622 * @ipr_cmd: ipr command struct
6623 *
6624 * This function enables dual IOA RAID support if possible.
6625 *
6626 * Return value:
6627 * IPR_RC_JOB_RETURN
6628 **/
6629static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6630{
6631 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6632 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6633 struct ipr_mode_page24 *mode_page;
6634 int length;
6635
6636 ENTER;
6637 mode_page = ipr_get_mode_page(mode_pages, 0x24,
6638 sizeof(struct ipr_mode_page24));
6639
6640 if (mode_page)
6641 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6642
6643 length = mode_pages->hdr.length + 1;
6644 mode_pages->hdr.length = 0;
6645
6646 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6647 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6648 length);
6649
6650 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6651 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6652
6653 LEAVE;
6654 return IPR_RC_JOB_RETURN;
6655}
6656
6657/**
6658 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6659 * @ipr_cmd: ipr command struct
6660 *
6661 * This function handles the failure of a Mode Sense to the IOAFP.
6662 * Some adapters do not handle all mode pages.
6663 *
6664 * Return value:
6665 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6666 **/
6667static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6668{
96d21f00 6669 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
ac09c349
BK
6670
6671 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6672 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6673 return IPR_RC_JOB_CONTINUE;
6674 }
6675
6676 return ipr_reset_cmd_failed(ipr_cmd);
6677}
6678
6679/**
6680 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6681 * @ipr_cmd: ipr command struct
6682 *
 6683 * This function sends a mode sense to the IOA to retrieve
6684 * the IOA Advanced Function Control mode page.
6685 *
6686 * Return value:
6687 * IPR_RC_JOB_RETURN
6688 **/
6689static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6690{
6691 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6692
6693 ENTER;
6694 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6695 0x24, ioa_cfg->vpd_cbs_dma +
6696 offsetof(struct ipr_misc_cbs, mode_pages),
6697 sizeof(struct ipr_mode_pages));
6698
6699 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6700 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6701
6702 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6703
6704 LEAVE;
6705 return IPR_RC_JOB_RETURN;
6706}
6707
1da177e4
LT
6708/**
6709 * ipr_init_res_table - Initialize the resource table
6710 * @ipr_cmd: ipr command struct
6711 *
6712 * This function looks through the existing resource table, comparing
6713 * it with the config table. This function will take care of old/new
6714 * devices and schedule adding/removing them from the mid-layer
6715 * as appropriate.
6716 *
6717 * Return value:
6718 * IPR_RC_JOB_CONTINUE
6719 **/
6720static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6721{
6722 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6723 struct ipr_resource_entry *res, *temp;
3e7ebdfa
WB
6724 struct ipr_config_table_entry_wrapper cfgtew;
6725 int entries, found, flag, i;
1da177e4
LT
6726 LIST_HEAD(old_res);
6727
6728 ENTER;
3e7ebdfa
WB
6729 if (ioa_cfg->sis64)
6730 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6731 else
6732 flag = ioa_cfg->u.cfg_table->hdr.flags;
6733
6734 if (flag & IPR_UCODE_DOWNLOAD_REQ)
1da177e4
LT
6735 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6736
6737 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6738 list_move_tail(&res->queue, &old_res);
6739
3e7ebdfa 6740 if (ioa_cfg->sis64)
438b0331 6741 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
3e7ebdfa
WB
6742 else
6743 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6744
6745 for (i = 0; i < entries; i++) {
6746 if (ioa_cfg->sis64)
6747 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6748 else
6749 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
1da177e4
LT
6750 found = 0;
6751
6752 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 6753 if (ipr_is_same_device(res, &cfgtew)) {
1da177e4
LT
6754 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6755 found = 1;
6756 break;
6757 }
6758 }
6759
6760 if (!found) {
6761 if (list_empty(&ioa_cfg->free_res_q)) {
6762 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6763 break;
6764 }
6765
6766 found = 1;
6767 res = list_entry(ioa_cfg->free_res_q.next,
6768 struct ipr_resource_entry, queue);
6769 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 6770 ipr_init_res_entry(res, &cfgtew);
1da177e4 6771 res->add_to_ml = 1;
56115598
WB
6772 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
6773 res->sdev->allow_restart = 1;
1da177e4
LT
6774
6775 if (found)
3e7ebdfa 6776 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
6777 }
6778
6779 list_for_each_entry_safe(res, temp, &old_res, queue) {
6780 if (res->sdev) {
6781 res->del_from_ml = 1;
3e7ebdfa 6782 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 6783 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
1da177e4
LT
6784 }
6785 }
6786
3e7ebdfa
WB
6787 list_for_each_entry_safe(res, temp, &old_res, queue) {
6788 ipr_clear_res_target(res);
6789 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6790 }
6791
ac09c349
BK
6792 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6793 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6794 else
6795 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
1da177e4
LT
6796
6797 LEAVE;
6798 return IPR_RC_JOB_CONTINUE;
6799}
6800
6801/**
6802 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6803 * @ipr_cmd: ipr command struct
6804 *
6805 * This function sends a Query IOA Configuration command
6806 * to the adapter to retrieve the IOA configuration table.
6807 *
6808 * Return value:
6809 * IPR_RC_JOB_RETURN
6810 **/
6811static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6812{
6813 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6814 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4 6815 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 6816 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
1da177e4
LT
6817
6818 ENTER;
ac09c349
BK
6819 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6820 ioa_cfg->dual_raid = 1;
1da177e4
LT
6821 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6822 ucode_vpd->major_release, ucode_vpd->card_type,
6823 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6824 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6825 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6826
6827 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
438b0331 6828 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
3e7ebdfa
WB
6829 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6830 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
1da177e4 6831
3e7ebdfa 6832 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
a32c055f 6833 IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
6834
6835 ipr_cmd->job_step = ipr_init_res_table;
6836
6837 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6838
6839 LEAVE;
6840 return IPR_RC_JOB_RETURN;
6841}
6842
6843/**
6844 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6845 * @ipr_cmd: ipr command struct
 * @flags: inquiry CDB flags byte (e.g. the EVPD bit)
 * @page: VPD page code
 * @dma_addr: DMA address of the inquiry response buffer
 * @xfer_len: size of the inquiry response buffer in bytes
6846 *
6847 * This utility function sends an inquiry to the adapter.
6848 *
6849 * Return value:
6850 * none
6851 **/
6852static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
a32c055f 6853 dma_addr_t dma_addr, u8 xfer_len)
1da177e4
LT
6854{
6855 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
6856
6857 ENTER;
6858 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6859 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6860
6861 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6862 ioarcb->cmd_pkt.cdb[1] = flags;
6863 ioarcb->cmd_pkt.cdb[2] = page;
6864 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6865
a32c055f 6866 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
6867
6868 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6869 LEAVE;
6870}
6871
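/*
 * Illustrative note (not part of the original source): ipr_ioafp_inquiry()
 * only fills in the CDB and data descriptor; the resulting SCSI INQUIRY is
 * built entirely from its arguments. For example, the capabilities fetch
 * later in this file would produce a CDB of roughly:
 *
 *	{ INQUIRY (0x12), 0x01 (EVPD), 0xD0 (page), 0x00,
 *	  sizeof(struct ipr_inquiry_cap), 0x00 }
 *
 * with the response DMA'd into ioa_cfg->vpd_cbs->cap. The byte values above
 * are a sketch derived from the assignments in this helper, not taken from
 * adapter documentation.
 */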
62275040 6872/**
6873 * ipr_inquiry_page_supported - Is the given inquiry page supported
6874 * @page0: inquiry page 0 buffer
6875 * @page: page code.
6876 *
6877 * This function determines if the specified inquiry page is supported.
6878 *
6879 * Return value:
6880 * 1 if page is supported / 0 if not
6881 **/
6882static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6883{
6884 int i;
6885
6886 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6887 if (page0->page[i] == page)
6888 return 1;
6889
6890 return 0;
6891}
6892
ac09c349
BK
6893/**
6894 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6895 * @ipr_cmd: ipr command struct
6896 *
6897 * This function sends a Page 0xD0 inquiry to the adapter
6898 * to retrieve adapter capabilities.
6899 *
6900 * Return value:
6901 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6902 **/
6903static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6904{
6905 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6906 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6907 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6908
6909 ENTER;
6910 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6911 memset(cap, 0, sizeof(*cap));
6912
6913 if (ipr_inquiry_page_supported(page0, 0xD0)) {
6914 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6915 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6916 sizeof(struct ipr_inquiry_cap));
6917 return IPR_RC_JOB_RETURN;
6918 }
6919
6920 LEAVE;
6921 return IPR_RC_JOB_CONTINUE;
6922}
6923
1da177e4
LT
6924/**
6925 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6926 * @ipr_cmd: ipr command struct
6927 *
6928 * This function sends a Page 3 inquiry to the adapter
6929 * to retrieve software VPD information.
6930 *
6931 * Return value:
6932 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6933 **/
6934static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040 6935{
6936 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
62275040 6937
6938 ENTER;
6939
ac09c349 6940 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
62275040 6941
6942 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6943 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6944 sizeof(struct ipr_inquiry_page3));
6945
6946 LEAVE;
6947 return IPR_RC_JOB_RETURN;
6948}
6949
6950/**
6951 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6952 * @ipr_cmd: ipr command struct
6953 *
6954 * This function sends a Page 0 inquiry to the adapter
6955 * to retrieve supported inquiry pages.
6956 *
6957 * Return value:
6958 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6959 **/
6960static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
6961{
6962 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6963 char type[5];
6964
6965 ENTER;
6966
6967 /* Grab the type out of the VPD and store it away */
6968 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6969 type[4] = '\0';
6970 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6971
62275040 6972 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 6973
62275040 6974 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6975 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6976 sizeof(struct ipr_inquiry_page0));
1da177e4
LT
6977
6978 LEAVE;
6979 return IPR_RC_JOB_RETURN;
6980}
6981
6982/**
6983 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6984 * @ipr_cmd: ipr command struct
6985 *
6986 * This function sends a standard inquiry to the adapter.
6987 *
6988 * Return value:
6989 * IPR_RC_JOB_RETURN
6990 **/
6991static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6992{
6993 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6994
6995 ENTER;
62275040 6996 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
1da177e4
LT
6997
6998 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6999 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7000 sizeof(struct ipr_ioa_vpd));
7001
7002 LEAVE;
7003 return IPR_RC_JOB_RETURN;
7004}
7005
7006/**
214777ba 7007 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
1da177e4
LT
7008 * @ipr_cmd: ipr command struct
7009 *
7010 * This function sends an Identify Host Request Response Queue
7011 * command to establish the HRRQ with the adapter.
7012 *
7013 * Return value:
7014 * IPR_RC_JOB_RETURN
7015 **/
214777ba 7016static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
7017{
7018 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7019 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7020
7021 ENTER;
7022 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7023
7024 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7025 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7026
7027 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
214777ba
WB
7028 if (ioa_cfg->sis64)
7029 ioarcb->cmd_pkt.cdb[1] = 0x1;
1da177e4 7030 ioarcb->cmd_pkt.cdb[2] =
214777ba 7031 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
1da177e4 7032 ioarcb->cmd_pkt.cdb[3] =
214777ba 7033 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
1da177e4 7034 ioarcb->cmd_pkt.cdb[4] =
214777ba 7035 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
1da177e4 7036 ioarcb->cmd_pkt.cdb[5] =
214777ba 7037 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
1da177e4
LT
7038 ioarcb->cmd_pkt.cdb[7] =
7039 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7040 ioarcb->cmd_pkt.cdb[8] =
7041 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7042
214777ba
WB
7043 if (ioa_cfg->sis64) {
7044 ioarcb->cmd_pkt.cdb[10] =
7045 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7046 ioarcb->cmd_pkt.cdb[11] =
7047 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7048 ioarcb->cmd_pkt.cdb[12] =
7049 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7050 ioarcb->cmd_pkt.cdb[13] =
7051 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7052 }
7053
1da177e4
LT
7054 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7055
7056 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7057
7058 LEAVE;
7059 return IPR_RC_JOB_RETURN;
7060}
7061
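/*
 * Illustrative sketch (assumption, not original code): the Identify Host RRQ
 * CDB above carries the host RRQ bus address split into big-endian bytes.
 * For a hypothetical host_rrq_dma of 0x0000000123456780 on a SIS64 adapter:
 *
 *	cdb[2..5]   = 0x23 0x45 0x67 0x80   (bits 31:0)
 *	cdb[10..13] = 0x00 0x00 0x00 0x01   (bits 63:32, SIS64 only)
 *	cdb[7..8]   = RRQ length in bytes (sizeof(u32) * IPR_NUM_CMD_BLKS)
 *
 * Non-SIS64 adapters never set bytes 10-13, which therefore stay zero.
 */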
7062/**
7063 * ipr_reset_timer_done - Adapter reset timer function
7064 * @ipr_cmd: ipr command struct
7065 *
7066 * Description: This function is used in adapter reset processing
7067 * for timing events. If the reset_cmd pointer in the IOA
7068 * config struct is not this adapter's, we are doing nested
7069 * resets and fail_all_ops will take care of freeing the
7070 * command block.
7071 *
7072 * Return value:
7073 * none
7074 **/
7075static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7076{
7077 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7078 unsigned long lock_flags = 0;
7079
7080 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7081
7082 if (ioa_cfg->reset_cmd == ipr_cmd) {
7083 list_del(&ipr_cmd->queue);
7084 ipr_cmd->done(ipr_cmd);
7085 }
7086
7087 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7088}
7089
7090/**
7091 * ipr_reset_start_timer - Start a timer for adapter reset job
7092 * @ipr_cmd: ipr command struct
7093 * @timeout: timeout value
7094 *
7095 * Description: This function is used in adapter reset processing
7096 * for timing events. If the reset_cmd pointer in the IOA
7097 * config struct is not this adapter's, we are doing nested
7098 * resets and fail_all_ops will take care of freeing the
7099 * command block.
7100 *
7101 * Return value:
7102 * none
7103 **/
7104static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7105 unsigned long timeout)
7106{
7107 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7108 ipr_cmd->done = ipr_reset_ioa_job;
7109
7110 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7111 ipr_cmd->timer.expires = jiffies + timeout;
7112 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7113 add_timer(&ipr_cmd->timer);
7114}
7115
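/*
 * Usage sketch (illustrative only): a reset job step that needs to wait
 * typically records the step to resume at, arms this timer, and yields:
 *
 *	ipr_cmd->job_step = ipr_reset_bist_done;
 *	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
 *	return IPR_RC_JOB_RETURN;
 *
 * When the timer fires, ipr_reset_timer_done() re-enters the reset job via
 * ipr_cmd->done (ipr_reset_ioa_job), which then calls the saved job_step.
 */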
7116/**
7117 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7118 * @ioa_cfg: ioa cfg struct
7119 *
7120 * Return value:
7121 * nothing
7122 **/
7123static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7124{
7125 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7126
7127 /* Initialize Host RRQ pointers */
7128 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7129 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7130 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7131 ioa_cfg->toggle_bit = 1;
7132
7133 /* Zero out config table */
3e7ebdfa 7134 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
1da177e4
LT
7135}
7136
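/*
 * Background sketch (assumption about the interrupt path, which lives
 * elsewhere in this file): the toggle_bit initialized above lets the ISR
 * tell fresh HRRQ entries from stale ones. Each time the consumer pointer
 * wraps, the expected toggle value flips, roughly:
 *
 *	if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
 *		ioa_cfg->hrrq_curr++;
 *	} else {
 *		ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
 *		ioa_cfg->toggle_bit ^= 1u;
 *	}
 */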
214777ba
WB
7137/**
7138 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7139 * @ipr_cmd: ipr command struct
7140 *
7141 * Return value:
7142 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7143 **/
7144static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7145{
7146 unsigned long stage, stage_time;
7147 u32 feedback;
7148 volatile u32 int_reg;
7149 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7150 u64 maskval = 0;
7151
7152 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7153 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7154 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7155
7156 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7157
7158 /* sanity check the stage_time value */
438b0331
WB
7159 if (stage_time == 0)
7160 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7161 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
214777ba
WB
7162 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7163 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7164 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7165
7166 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7167 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7168 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7169 stage_time = ioa_cfg->transop_timeout;
7170 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7171 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7172 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7173 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7174 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7175 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7176 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7177 return IPR_RC_JOB_CONTINUE;
7178 }
7179
7180 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7181 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7182 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7183 ipr_cmd->done = ipr_reset_ioa_job;
7184 add_timer(&ipr_cmd->timer);
7185 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7186
7187 return IPR_RC_JOB_RETURN;
7188}
7189
1da177e4
LT
7190/**
7191 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7192 * @ipr_cmd: ipr command struct
7193 *
7194 * This function reinitializes some control blocks and
7195 * enables destructive diagnostics on the adapter.
7196 *
7197 * Return value:
7198 * IPR_RC_JOB_RETURN
7199 **/
7200static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7201{
7202 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7203 volatile u32 int_reg;
7be96900 7204 volatile u64 maskval;
1da177e4
LT
7205
7206 ENTER;
214777ba 7207 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
1da177e4
LT
7208 ipr_init_ioa_mem(ioa_cfg);
7209
7210 ioa_cfg->allow_interrupts = 1;
8701f185
WB
7211 if (ioa_cfg->sis64) {
7212 /* Set the adapter to the correct endian mode. */
7213 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7214 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7215 }
7216
7be96900 7217 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
1da177e4
LT
7218
7219 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7220 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
214777ba 7221 ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4
LT
7222 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7223 return IPR_RC_JOB_CONTINUE;
7224 }
7225
7226 /* Enable destructive diagnostics on IOA */
214777ba
WB
7227 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7228
7be96900
WB
7229 if (ioa_cfg->sis64) {
7230 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7231 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7232 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7233 } else
7234 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4 7235
1da177e4
LT
7236 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7237
7238 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7239
214777ba
WB
7240 if (ioa_cfg->sis64) {
7241 ipr_cmd->job_step = ipr_reset_next_stage;
7242 return IPR_RC_JOB_CONTINUE;
7243 }
7244
1da177e4 7245 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5469cb5b 7246 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
1da177e4
LT
7247 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7248 ipr_cmd->done = ipr_reset_ioa_job;
7249 add_timer(&ipr_cmd->timer);
7250 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7251
7252 LEAVE;
7253 return IPR_RC_JOB_RETURN;
7254}
7255
7256/**
7257 * ipr_reset_wait_for_dump - Wait for a dump to time out.
7258 * @ipr_cmd: ipr command struct
7259 *
7260 * This function is invoked when an adapter dump has run out
7261 * of processing time.
7262 *
7263 * Return value:
7264 * IPR_RC_JOB_CONTINUE
7265 **/
7266static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7267{
7268 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7269
7270 if (ioa_cfg->sdt_state == GET_DUMP)
7271 ioa_cfg->sdt_state = ABORT_DUMP;
7272
7273 ipr_cmd->job_step = ipr_reset_alert;
7274
7275 return IPR_RC_JOB_CONTINUE;
7276}
7277
7278/**
7279 * ipr_unit_check_no_data - Log a unit check/no data error
7280 * @ioa_cfg: ioa config struct
7281 *
7282 * Logs an error indicating the adapter unit checked, but for some
7283 * reason, we were unable to fetch the unit check buffer.
7284 *
7285 * Return value:
7286 * nothing
7287 **/
7288static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7289{
7290 ioa_cfg->errors_logged++;
7291 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7292}
7293
7294/**
7295 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7296 * @ioa_cfg: ioa config struct
7297 *
7298 * Fetches the unit check buffer from the adapter by clocking the data
7299 * through the mailbox register.
7300 *
7301 * Return value:
7302 * nothing
7303 **/
7304static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7305{
7306 unsigned long mailbox;
7307 struct ipr_hostrcb *hostrcb;
7308 struct ipr_uc_sdt sdt;
7309 int rc, length;
65f56475 7310 u32 ioasc;
1da177e4
LT
7311
7312 mailbox = readl(ioa_cfg->ioa_mailbox);
7313
dcbad00e 7314 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
1da177e4
LT
7315 ipr_unit_check_no_data(ioa_cfg);
7316 return;
7317 }
7318
7319 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7320 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7321 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7322
dcbad00e
WB
7323 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7324 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7325 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
7326 ipr_unit_check_no_data(ioa_cfg);
7327 return;
7328 }
7329
7330 /* Find length of the first sdt entry (UC buffer) */
dcbad00e
WB
7331 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7332 length = be32_to_cpu(sdt.entry[0].end_token);
7333 else
7334 length = (be32_to_cpu(sdt.entry[0].end_token) -
7335 be32_to_cpu(sdt.entry[0].start_token)) &
7336 IPR_FMT2_MBX_ADDR_MASK;
1da177e4
LT
7337
7338 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7339 struct ipr_hostrcb, queue);
7340 list_del(&hostrcb->queue);
7341 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7342
7343 rc = ipr_get_ldump_data_section(ioa_cfg,
dcbad00e 7344 be32_to_cpu(sdt.entry[0].start_token),
1da177e4
LT
7345 (__be32 *)&hostrcb->hcam,
7346 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7347
65f56475 7348 if (!rc) {
1da177e4 7349 ipr_handle_log_data(ioa_cfg, hostrcb);
4565e370 7350 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
65f56475
BK
7351 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7352 ioa_cfg->sdt_state == GET_DUMP)
7353 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7354 } else
1da177e4
LT
7355 ipr_unit_check_no_data(ioa_cfg);
7356
7357 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7358}
7359
7360/**
7361 * ipr_reset_restore_cfg_space - Restore PCI config space.
7362 * @ipr_cmd: ipr command struct
7363 *
7364 * Description: This function restores the saved PCI config space of
7365 * the adapter, fails all outstanding ops back to the callers, and
7366 * fetches the dump/unit check if applicable to this reset.
7367 *
7368 * Return value:
7369 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7370 **/
7371static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7372{
7373 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8701f185 7374 volatile u32 int_reg;
1da177e4
LT
7375 int rc;
7376
7377 ENTER;
99c965dd 7378 ioa_cfg->pdev->state_saved = true;
1da177e4
LT
7379 rc = pci_restore_state(ioa_cfg->pdev);
7380
7381 if (rc != PCIBIOS_SUCCESSFUL) {
96d21f00 7382 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
1da177e4
LT
7383 return IPR_RC_JOB_CONTINUE;
7384 }
7385
7386 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
96d21f00 7387 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
1da177e4
LT
7388 return IPR_RC_JOB_CONTINUE;
7389 }
7390
7391 ipr_fail_all_ops(ioa_cfg);
7392
8701f185
WB
7393 if (ioa_cfg->sis64) {
7394 /* Set the adapter to the correct endian mode. */
7395 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7396 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7397 }
7398
1da177e4
LT
7399 if (ioa_cfg->ioa_unit_checked) {
7400 ioa_cfg->ioa_unit_checked = 0;
7401 ipr_get_unit_check_buffer(ioa_cfg);
7402 ipr_cmd->job_step = ipr_reset_alert;
7403 ipr_reset_start_timer(ipr_cmd, 0);
7404 return IPR_RC_JOB_RETURN;
7405 }
7406
7407 if (ioa_cfg->in_ioa_bringdown) {
7408 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7409 } else {
7410 ipr_cmd->job_step = ipr_reset_enable_ioa;
7411
7412 if (GET_DUMP == ioa_cfg->sdt_state) {
7413 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7414 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7415 schedule_work(&ioa_cfg->work_q);
7416 return IPR_RC_JOB_RETURN;
7417 }
7418 }
7419
438b0331 7420 LEAVE;
1da177e4
LT
7421 return IPR_RC_JOB_CONTINUE;
7422}
7423
e619e1a7
BK
7424/**
7425 * ipr_reset_bist_done - BIST has completed on the adapter.
7426 * @ipr_cmd: ipr command struct
7427 *
7428 * Description: Unblock config space and resume the reset process.
7429 *
7430 * Return value:
7431 * IPR_RC_JOB_CONTINUE
7432 **/
7433static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7434{
7435 ENTER;
7436 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7437 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7438 LEAVE;
7439 return IPR_RC_JOB_CONTINUE;
7440}
7441
1da177e4
LT
7442/**
7443 * ipr_reset_start_bist - Run BIST on the adapter.
7444 * @ipr_cmd: ipr command struct
7445 *
7446 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7447 *
7448 * Return value:
7449 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7450 **/
7451static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7452{
7453 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
cb237ef7 7454 int rc = PCIBIOS_SUCCESSFUL;
1da177e4
LT
7455
7456 ENTER;
b30197d2 7457 pci_block_user_cfg_access(ioa_cfg->pdev);
1da177e4 7458
cb237ef7
WB
7459 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7460 writel(IPR_UPROCI_SIS64_START_BIST,
7461 ioa_cfg->regs.set_uproc_interrupt_reg32);
7462 else
7463 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7464
7465 if (rc == PCIBIOS_SUCCESSFUL) {
e619e1a7 7466 ipr_cmd->job_step = ipr_reset_bist_done;
1da177e4
LT
7467 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7468 rc = IPR_RC_JOB_RETURN;
cb237ef7
WB
7469 } else {
7470 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7471 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7472 rc = IPR_RC_JOB_CONTINUE;
1da177e4
LT
7473 }
7474
7475 LEAVE;
7476 return rc;
7477}
7478
463fc696
BK
7479/**
7480 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7481 * @ipr_cmd: ipr command struct
7482 *
7483 * Description: This clears PCI reset to the adapter and delays two seconds.
7484 *
7485 * Return value:
7486 * IPR_RC_JOB_RETURN
7487 **/
7488static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7489{
7490 ENTER;
7491 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7492 ipr_cmd->job_step = ipr_reset_bist_done;
7493 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7494 LEAVE;
7495 return IPR_RC_JOB_RETURN;
7496}
7497
7498/**
7499 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7500 * @ipr_cmd: ipr command struct
7501 *
7502 * Description: This asserts PCI reset to the adapter.
7503 *
7504 * Return value:
7505 * IPR_RC_JOB_RETURN
7506 **/
7507static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7508{
7509 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7510 struct pci_dev *pdev = ioa_cfg->pdev;
7511
7512 ENTER;
7513 pci_block_user_cfg_access(pdev);
7514 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7515 ipr_cmd->job_step = ipr_reset_slot_reset_done;
7516 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7517 LEAVE;
7518 return IPR_RC_JOB_RETURN;
7519}
7520
1da177e4
LT
7521/**
7522 * ipr_reset_allowed - Query whether or not IOA can be reset
7523 * @ioa_cfg: ioa config struct
7524 *
7525 * Return value:
7526 * 0 if reset not allowed / non-zero if reset is allowed
7527 **/
7528static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7529{
7530 volatile u32 temp_reg;
7531
7532 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7533 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7534}
7535
7536/**
7537 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7538 * @ipr_cmd: ipr command struct
7539 *
7540 * Description: This function waits for adapter permission to run BIST,
7541 * then runs BIST. If the adapter does not give permission after a
7542 * reasonable time, we will reset the adapter anyway. The impact of
7543 * resetting the adapter without warning it is the risk of
7544 * losing the persistent error log on the adapter. If the adapter is
7545 * reset while it is writing to the flash on the adapter, the flash
7546 * segment will have bad ECC and be zeroed.
7547 *
7548 * Return value:
7549 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7550 **/
7551static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7552{
7553 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7554 int rc = IPR_RC_JOB_RETURN;
7555
7556 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7557 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7558 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7559 } else {
463fc696 7560 ipr_cmd->job_step = ioa_cfg->reset;
1da177e4
LT
7561 rc = IPR_RC_JOB_CONTINUE;
7562 }
7563
7564 return rc;
7565}
7566
7567/**
8701f185 7568 * ipr_reset_alert - Alert the adapter of a pending reset
1da177e4
LT
7569 * @ipr_cmd: ipr command struct
7570 *
7571 * Description: This function alerts the adapter that it will be reset.
7572 * If memory space is not currently enabled, proceed directly
7573 * to running BIST on the adapter. The timer must always be started
7574 * so we guarantee we do not run BIST from ipr_isr.
7575 *
7576 * Return value:
7577 * IPR_RC_JOB_RETURN
7578 **/
7579static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7580{
7581 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7582 u16 cmd_reg;
7583 int rc;
7584
7585 ENTER;
7586 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7587
7588 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7589 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
214777ba 7590 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
7591 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7592 } else {
463fc696 7593 ipr_cmd->job_step = ioa_cfg->reset;
1da177e4
LT
7594 }
7595
7596 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7597 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7598
7599 LEAVE;
7600 return IPR_RC_JOB_RETURN;
7601}
7602
7603/**
7604 * ipr_reset_ucode_download_done - Microcode download completion
7605 * @ipr_cmd: ipr command struct
7606 *
7607 * Description: This function unmaps the microcode download buffer.
7608 *
7609 * Return value:
7610 * IPR_RC_JOB_CONTINUE
7611 **/
7612static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7613{
7614 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7615 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7616
7617 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7618 sglist->num_sg, DMA_TO_DEVICE);
7619
7620 ipr_cmd->job_step = ipr_reset_alert;
7621 return IPR_RC_JOB_CONTINUE;
7622}
7623
7624/**
7625 * ipr_reset_ucode_download - Download microcode to the adapter
7626 * @ipr_cmd: ipr command struct
7627 *
7628 * Description: This function checks to see if there is microcode
7629 * to download to the adapter. If there is, a download is performed.
7630 *
7631 * Return value:
7632 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7633 **/
7634static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7635{
7636 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7637 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7638
7639 ENTER;
7640 ipr_cmd->job_step = ipr_reset_alert;
7641
7642 if (!sglist)
7643 return IPR_RC_JOB_CONTINUE;
7644
7645 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7646 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7647 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7648 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7649 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7650 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7651 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7652
a32c055f
WB
7653 if (ioa_cfg->sis64)
7654 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7655 else
7656 ipr_build_ucode_ioadl(ipr_cmd, sglist);
1da177e4
LT
7657 ipr_cmd->job_step = ipr_reset_ucode_download_done;
7658
7659 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7660 IPR_WRITE_BUFFER_TIMEOUT);
7661
7662 LEAVE;
7663 return IPR_RC_JOB_RETURN;
7664}
7665
7666/**
7667 * ipr_reset_shutdown_ioa - Shutdown the adapter
7668 * @ipr_cmd: ipr command struct
7669 *
7670 * Description: This function issues an adapter shutdown of the
7671 * specified type to the specified adapter as part of the
7672 * adapter reset job.
7673 *
7674 * Return value:
7675 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7676 **/
7677static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7678{
7679 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7680 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7681 unsigned long timeout;
7682 int rc = IPR_RC_JOB_CONTINUE;
7683
7684 ENTER;
7685 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7686 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7687 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7688 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7689 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7690
ac09c349
BK
7691 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7692 timeout = IPR_SHUTDOWN_TIMEOUT;
1da177e4
LT
7693 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7694 timeout = IPR_INTERNAL_TIMEOUT;
ac09c349
BK
7695 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7696 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
1da177e4 7697 else
ac09c349 7698 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
1da177e4
LT
7699
7700 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7701
7702 rc = IPR_RC_JOB_RETURN;
7703 ipr_cmd->job_step = ipr_reset_ucode_download;
7704 } else
7705 ipr_cmd->job_step = ipr_reset_alert;
7706
7707 LEAVE;
7708 return rc;
7709}
7710
7711/**
7712 * ipr_reset_ioa_job - Adapter reset job
7713 * @ipr_cmd: ipr command struct
7714 *
7715 * Description: This function is the job router for the adapter reset job.
7716 *
7717 * Return value:
7718 * none
7719 **/
7720static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7721{
7722 u32 rc, ioasc;
1da177e4
LT
7723 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7724
7725 do {
96d21f00 7726 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
7727
7728 if (ioa_cfg->reset_cmd != ipr_cmd) {
7729 /*
7730 * We are doing nested adapter resets and this is
7731 * not the current reset job.
7732 */
7733 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7734 return;
7735 }
7736
7737 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e 7738 rc = ipr_cmd->job_step_failed(ipr_cmd);
7739 if (rc == IPR_RC_JOB_RETURN)
7740 return;
1da177e4
LT
7741 }
7742
7743 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 7744 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
1da177e4
LT
7745 rc = ipr_cmd->job_step(ipr_cmd);
7746 } while(rc == IPR_RC_JOB_CONTINUE);
7747}
7748
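/*
 * Illustrative sketch (not original code): each job_step either finishes its
 * work synchronously and returns IPR_RC_JOB_CONTINUE, letting the loop above
 * invoke the next step, or it starts an asynchronous operation and returns
 * IPR_RC_JOB_RETURN. A hypothetical step that issues an adapter command
 * would look like:
 *
 *	static int example_job_step(struct ipr_cmnd *ipr_cmd)
 *	{
 *		ipr_cmd->job_step = following_step;	/* resumed on completion */
 *		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
 *			   IPR_INTERNAL_TIMEOUT);
 *		return IPR_RC_JOB_RETURN;
 *	}
 *
 * example_job_step and following_step are placeholders, not real symbols.
 */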
7749/**
7750 * _ipr_initiate_ioa_reset - Initiate an adapter reset
7751 * @ioa_cfg: ioa config struct
7752 * @job_step: first job step of reset job
7753 * @shutdown_type: shutdown type
7754 *
7755 * Description: This function will initiate the reset of the given adapter
7756 * starting at the selected job step.
7757 * If the caller needs to wait on the completion of the reset,
7758 * the caller must sleep on the reset_wait_q.
7759 *
7760 * Return value:
7761 * none
7762 **/
7763static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7764 int (*job_step) (struct ipr_cmnd *),
7765 enum ipr_shutdown_type shutdown_type)
7766{
7767 struct ipr_cmnd *ipr_cmd;
7768
7769 ioa_cfg->in_reset_reload = 1;
7770 ioa_cfg->allow_cmds = 0;
7771 scsi_block_requests(ioa_cfg->host);
7772
7773 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7774 ioa_cfg->reset_cmd = ipr_cmd;
7775 ipr_cmd->job_step = job_step;
7776 ipr_cmd->u.shutdown_type = shutdown_type;
7777
7778 ipr_reset_ioa_job(ipr_cmd);
7779}
7780
7781/**
7782 * ipr_initiate_ioa_reset - Initiate an adapter reset
7783 * @ioa_cfg: ioa config struct
7784 * @shutdown_type: shutdown type
7785 *
7786 * Description: This function will initiate the reset of the given adapter.
7787 * If the caller needs to wait on the completion of the reset,
7788 * the caller must sleep on the reset_wait_q.
7789 *
7790 * Return value:
7791 * none
7792 **/
7793static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7794 enum ipr_shutdown_type shutdown_type)
7795{
7796 if (ioa_cfg->ioa_is_dead)
7797 return;
7798
7799 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7800 ioa_cfg->sdt_state = ABORT_DUMP;
7801
7802 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7803 dev_err(&ioa_cfg->pdev->dev,
7804 "IOA taken offline - error recovery failed\n");
7805
7806 ioa_cfg->reset_retries = 0;
7807 ioa_cfg->ioa_is_dead = 1;
7808
7809 if (ioa_cfg->in_ioa_bringdown) {
7810 ioa_cfg->reset_cmd = NULL;
7811 ioa_cfg->in_reset_reload = 0;
7812 ipr_fail_all_ops(ioa_cfg);
7813 wake_up_all(&ioa_cfg->reset_wait_q);
7814
7815 spin_unlock_irq(ioa_cfg->host->host_lock);
7816 scsi_unblock_requests(ioa_cfg->host);
7817 spin_lock_irq(ioa_cfg->host->host_lock);
7818 return;
7819 } else {
7820 ioa_cfg->in_ioa_bringdown = 1;
7821 shutdown_type = IPR_SHUTDOWN_NONE;
7822 }
7823 }
7824
7825 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7826 shutdown_type);
7827}
7828
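/*
 * Usage sketch (illustrative, mirroring how other paths in the driver use
 * reset_wait_q): a caller that must block until the reset finishes holds the
 * host lock only around the initiation, then sleeps:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */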
f8a88b19
LV
7829/**
7830 * ipr_reset_freeze - Hold off all I/O activity
7831 * @ipr_cmd: ipr command struct
7832 *
7833 * Description: If the PCI slot is frozen, hold off all I/O
7834 * activity; then, as soon as the slot is available again,
7835 * initiate an adapter reset.
7836 */
7837static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7838{
7839 /* Disallow new interrupts, avoid loop */
7840 ipr_cmd->ioa_cfg->allow_interrupts = 0;
7841 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7842 ipr_cmd->done = ipr_reset_ioa_job;
7843 return IPR_RC_JOB_RETURN;
7844}
7845
7846/**
7847 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7848 * @pdev: PCI device struct
7849 *
7850 * Description: This routine is called to tell us that the PCI bus
7851 * is down. Can't do anything here, except put the device driver
7852 * into a holding pattern, waiting for the PCI bus to come back.
7853 */
7854static void ipr_pci_frozen(struct pci_dev *pdev)
7855{
7856 unsigned long flags = 0;
7857 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7858
7859 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7860 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7861 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7862}
7863
7864/**
7865 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7866 * @pdev: PCI device struct
7867 *
7868 * Description: This routine is called by the pci error recovery
7869 * code after the PCI slot has been reset, just before we
7870 * should resume normal operations.
7871 */
7872static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7873{
7874 unsigned long flags = 0;
7875 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7876
7877 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
463fc696
BK
7878 if (ioa_cfg->needs_warm_reset)
7879 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7880 else
7881 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7882 IPR_SHUTDOWN_NONE);
f8a88b19
LV
7883 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7884 return PCI_ERS_RESULT_RECOVERED;
7885}
7886
7887/**
7888 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7889 * @pdev: PCI device struct
7890 *
7891 * Description: This routine is called when the PCI bus has
7892 * permanently failed.
7893 */
7894static void ipr_pci_perm_failure(struct pci_dev *pdev)
7895{
7896 unsigned long flags = 0;
7897 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7898
7899 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7900 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7901 ioa_cfg->sdt_state = ABORT_DUMP;
7902 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7903 ioa_cfg->in_ioa_bringdown = 1;
6ff63896 7904 ioa_cfg->allow_cmds = 0;
f8a88b19
LV
7905 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7906 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7907}
7908
7909/**
7910 * ipr_pci_error_detected - Called when a PCI error is detected.
7911 * @pdev: PCI device struct
7912 * @state: PCI channel state
7913 *
7914 * Description: Called when a PCI error is detected.
7915 *
7916 * Return value:
7917 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7918 */
7919static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7920 pci_channel_state_t state)
7921{
7922 switch (state) {
7923 case pci_channel_io_frozen:
7924 ipr_pci_frozen(pdev);
7925 return PCI_ERS_RESULT_NEED_RESET;
7926 case pci_channel_io_perm_failure:
7927 ipr_pci_perm_failure(pdev);
7928 return PCI_ERS_RESULT_DISCONNECT;
7929 break;
7930 default:
7931 break;
7932 }
7933 return PCI_ERS_RESULT_NEED_RESET;
7934}
7935
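/*
 * Wiring sketch (assumption: the table name and registration point are not
 * shown in this section): these callbacks only take effect once hooked into
 * the PCI core through a pci_error_handlers table referenced by the driver's
 * struct pci_driver, e.g.:
 *
 *	static struct pci_error_handlers ipr_err_handler = {
 *		.error_detected = ipr_pci_error_detected,
 *		.slot_reset = ipr_pci_slot_reset,
 *	};
 */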
1da177e4
LT
7936/**
7937 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7938 * @ioa_cfg: ioa cfg struct
7939 *
7940 * Description: This is the second phase of adapter initialization.
7941 * This function takes care of initializing the adapter to the point
7942 * where it can accept new commands.
7943 *
7944 * Return value:
b1c11812 7945 * 0 on success / -EIO on failure
1da177e4
LT
7946 **/
7947static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7948{
7949 int rc = 0;
7950 unsigned long host_lock_flags = 0;
7951
7952 ENTER;
7953 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7954 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
ce155cce 7955 if (ioa_cfg->needs_hard_reset) {
7956 ioa_cfg->needs_hard_reset = 0;
7957 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7958 } else
7959 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7960 IPR_SHUTDOWN_NONE);
1da177e4
LT
7961
7962 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7963 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7964 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7965
7966 if (ioa_cfg->ioa_is_dead) {
7967 rc = -EIO;
7968 } else if (ipr_invalid_adapter(ioa_cfg)) {
7969 if (!ipr_testmode)
7970 rc = -EIO;
7971
7972 dev_err(&ioa_cfg->pdev->dev,
7973 "Adapter not supported in this hardware configuration.\n");
7974 }
7975
7976 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7977
7978 LEAVE;
7979 return rc;
7980}
7981
7982/**
7983 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7984 * @ioa_cfg: ioa config struct
7985 *
7986 * Return value:
7987 * none
7988 **/
7989static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7990{
7991 int i;
7992
7993 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7994 if (ioa_cfg->ipr_cmnd_list[i])
7995 pci_pool_free(ioa_cfg->ipr_cmd_pool,
7996 ioa_cfg->ipr_cmnd_list[i],
7997 ioa_cfg->ipr_cmnd_list_dma[i]);
7998
7999 ioa_cfg->ipr_cmnd_list[i] = NULL;
8000 }
8001
8002 if (ioa_cfg->ipr_cmd_pool)
8003 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
8004
8005 ioa_cfg->ipr_cmd_pool = NULL;
8006}
8007
8008/**
8009 * ipr_free_mem - Frees memory allocated for an adapter
8010 * @ioa_cfg: ioa cfg struct
8011 *
8012 * Return value:
8013 * nothing
8014 **/
8015static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8016{
8017 int i;
8018
8019 kfree(ioa_cfg->res_entries);
8020 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8021 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8022 ipr_free_cmd_blks(ioa_cfg);
8023 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8024 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
3e7ebdfa
WB
8025 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8026 ioa_cfg->u.cfg_table,
1da177e4
LT
8027 ioa_cfg->cfg_table_dma);
8028
8029 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8030 pci_free_consistent(ioa_cfg->pdev,
8031 sizeof(struct ipr_hostrcb),
8032 ioa_cfg->hostrcb[i],
8033 ioa_cfg->hostrcb_dma[i]);
8034 }
8035
8036 ipr_free_dump(ioa_cfg);
1da177e4
LT
8037 kfree(ioa_cfg->trace);
8038}
8039
8040/**
8041 * ipr_free_all_resources - Free all allocated resources for an adapter.
8042 * @ioa_cfg: ioa config struct
8043 *
8044 * This function frees all allocated resources for the
8045 * specified adapter.
8046 *
8047 * Return value:
8048 * none
8049 **/
8050static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8051{
8052 struct pci_dev *pdev = ioa_cfg->pdev;
8053
8054 ENTER;
8055 free_irq(pdev->irq, ioa_cfg);
5a9ef25b 8056 pci_disable_msi(pdev);
1da177e4
LT
8057 iounmap(ioa_cfg->hdw_dma_regs);
8058 pci_release_regions(pdev);
8059 ipr_free_mem(ioa_cfg);
8060 scsi_host_put(ioa_cfg->host);
8061 pci_disable_device(pdev);
8062 LEAVE;
8063}
8064
8065/**
8066 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8067 * @ioa_cfg: ioa config struct
8068 *
8069 * Return value:
8070 * 0 on success / -ENOMEM on allocation failure
8071 **/
8072static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8073{
8074 struct ipr_cmnd *ipr_cmd;
8075 struct ipr_ioarcb *ioarcb;
8076 dma_addr_t dma_addr;
8077 int i;
8078
8079 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
a32c055f 8080 sizeof(struct ipr_cmnd), 16, 0);
1da177e4
LT
8081
8082 if (!ioa_cfg->ipr_cmd_pool)
8083 return -ENOMEM;
8084
8085 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
e94b1766 8086 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
1da177e4
LT
8087
8088 if (!ipr_cmd) {
8089 ipr_free_cmd_blks(ioa_cfg);
8090 return -ENOMEM;
8091 }
8092
8093 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8094 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8095 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8096
8097 ioarcb = &ipr_cmd->ioarcb;
a32c055f
WB
8098 ipr_cmd->dma_addr = dma_addr;
8099 if (ioa_cfg->sis64)
8100 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8101 else
8102 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8103
1da177e4 8104 ioarcb->host_response_handle = cpu_to_be32(i << 2);
a32c055f
WB
8105 if (ioa_cfg->sis64) {
8106 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8107 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8108 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
96d21f00 8109 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
a32c055f
WB
8110 } else {
8111 ioarcb->write_ioadl_addr =
8112 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8113 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8114 ioarcb->ioasa_host_pci_addr =
96d21f00 8115 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
a32c055f 8116 }
1da177e4
LT
8117 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8118 ipr_cmd->cmd_index = i;
8119 ipr_cmd->ioa_cfg = ioa_cfg;
8120 ipr_cmd->sense_buffer_dma = dma_addr +
8121 offsetof(struct ipr_cmnd, sense_buffer);
8122
8123 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8124 }
8125
8126 return 0;
8127}
8128
8129/**
8130 * ipr_alloc_mem - Allocate memory for an adapter
8131 * @ioa_cfg: ioa config struct
8132 *
8133 * Return value:
8134 * 0 on success / non-zero for error
8135 **/
8136static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8137{
8138 struct pci_dev *pdev = ioa_cfg->pdev;
8139 int i, rc = -ENOMEM;
8140
8141 ENTER;
0bc42e35 8142 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
3e7ebdfa 8143 ioa_cfg->max_devs_supported, GFP_KERNEL);
1da177e4
LT
8144
8145 if (!ioa_cfg->res_entries)
8146 goto out;
8147
3e7ebdfa
WB
8148 if (ioa_cfg->sis64) {
8149 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8150 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8151 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8152 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8153 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8154 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8155 }
8156
8157 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
1da177e4 8158 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
3e7ebdfa
WB
8159 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8160 }
1da177e4
LT
8161
8162 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8163 sizeof(struct ipr_misc_cbs),
8164 &ioa_cfg->vpd_cbs_dma);
8165
8166 if (!ioa_cfg->vpd_cbs)
8167 goto out_free_res_entries;
8168
8169 if (ipr_alloc_cmd_blks(ioa_cfg))
8170 goto out_free_vpd_cbs;
8171
8172 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8173 sizeof(u32) * IPR_NUM_CMD_BLKS,
8174 &ioa_cfg->host_rrq_dma);
8175
8176 if (!ioa_cfg->host_rrq)
8177 goto out_ipr_free_cmd_blocks;
8178
3e7ebdfa
WB
8179 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8180 ioa_cfg->cfg_table_size,
8181 &ioa_cfg->cfg_table_dma);
1da177e4 8182
3e7ebdfa 8183 if (!ioa_cfg->u.cfg_table)
1da177e4
LT
8184 goto out_free_host_rrq;
8185
8186 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8187 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8188 sizeof(struct ipr_hostrcb),
8189 &ioa_cfg->hostrcb_dma[i]);
8190
8191 if (!ioa_cfg->hostrcb[i])
8192 goto out_free_hostrcb_dma;
8193
8194 ioa_cfg->hostrcb[i]->hostrcb_dma =
8195 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
49dc6a18 8196 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
1da177e4
LT
8197 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8198 }
8199
0bc42e35 8200 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
1da177e4
LT
8201 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8202
8203 if (!ioa_cfg->trace)
8204 goto out_free_hostrcb_dma;
8205
1da177e4
LT
8206 rc = 0;
8207out:
8208 LEAVE;
8209 return rc;
8210
8211out_free_hostrcb_dma:
8212 while (i-- > 0) {
8213 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8214 ioa_cfg->hostrcb[i],
8215 ioa_cfg->hostrcb_dma[i]);
8216 }
3e7ebdfa
WB
8217 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8218 ioa_cfg->u.cfg_table,
8219 ioa_cfg->cfg_table_dma);
1da177e4
LT
8220out_free_host_rrq:
8221 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8222 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8223out_ipr_free_cmd_blocks:
8224 ipr_free_cmd_blks(ioa_cfg);
8225out_free_vpd_cbs:
8226 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8227 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8228out_free_res_entries:
8229 kfree(ioa_cfg->res_entries);
8230 goto out;
8231}
8232
8233/**
8234 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8235 * @ioa_cfg: ioa config struct
8236 *
8237 * Return value:
8238 * none
8239 **/
8240static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8241{
8242 int i;
8243
8244 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8245 ioa_cfg->bus_attr[i].bus = i;
8246 ioa_cfg->bus_attr[i].qas_enabled = 0;
8247 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8248 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8249 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8250 else
8251 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8252 }
8253}
8254
8255/**
8256 * ipr_init_ioa_cfg - Initialize IOA config struct
8257 * @ioa_cfg: ioa config struct
8258 * @host: scsi host struct
8259 * @pdev: PCI dev struct
8260 *
8261 * Return value:
8262 * none
8263 **/
8264static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8265 struct Scsi_Host *host, struct pci_dev *pdev)
8266{
8267 const struct ipr_interrupt_offsets *p;
8268 struct ipr_interrupts *t;
8269 void __iomem *base;
8270
8271 ioa_cfg->host = host;
8272 ioa_cfg->pdev = pdev;
8273 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 8274 ioa_cfg->doorbell = IPR_DOORBELL;
1da177e4
LT
8275 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8276 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8277 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8278 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8279 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8280 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8281 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8282 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8283
8284 INIT_LIST_HEAD(&ioa_cfg->free_q);
8285 INIT_LIST_HEAD(&ioa_cfg->pending_q);
8286 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8287 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8288 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8289 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
c4028958 8290 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
1da177e4 8291 init_waitqueue_head(&ioa_cfg->reset_wait_q);
95fecd90 8292 init_waitqueue_head(&ioa_cfg->msi_wait_q);
1da177e4
LT
8293 ioa_cfg->sdt_state = INACTIVE;
8294
8295 ipr_initialize_bus_attr(ioa_cfg);
3e7ebdfa 8296 ioa_cfg->max_devs_supported = ipr_max_devs;
1da177e4 8297
3e7ebdfa
WB
8298 if (ioa_cfg->sis64) {
8299 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8300 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8301 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8302 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8303 } else {
8304 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8305 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8306 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8307 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8308 }
1da177e4
LT
8309 host->max_channel = IPR_MAX_BUS_TO_SCAN;
8310 host->unique_id = host->host_no;
8311 host->max_cmd_len = IPR_MAX_CDB_LEN;
8312 pci_set_drvdata(pdev, ioa_cfg);
8313
8314 p = &ioa_cfg->chip_cfg->regs;
8315 t = &ioa_cfg->regs;
8316 base = ioa_cfg->hdw_dma_regs;
8317
8318 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8319 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
214777ba 8320 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
1da177e4 8321 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
214777ba 8322 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
1da177e4 8323 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
214777ba 8324 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
1da177e4 8325 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
214777ba 8326 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
1da177e4
LT
8327 t->ioarrin_reg = base + p->ioarrin_reg;
8328 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
214777ba 8329 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
1da177e4 8330 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
214777ba 8331 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
1da177e4 8332 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
214777ba 8333 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
dcbad00e
WB
8334
8335 if (ioa_cfg->sis64) {
214777ba 8336 t->init_feedback_reg = base + p->init_feedback_reg;
dcbad00e
WB
8337 t->dump_addr_reg = base + p->dump_addr_reg;
8338 t->dump_data_reg = base + p->dump_data_reg;
8701f185 8339 t->endian_swap_reg = base + p->endian_swap_reg;
dcbad00e 8340 }
1da177e4
LT
8341}
8342
8343/**
1be7bd82 8344 * ipr_get_chip_info - Find adapter chip information
1da177e4
LT
8345 * @dev_id: PCI device id struct
8346 *
8347 * Return value:
1be7bd82 8348 * ptr to chip information on success / NULL on failure
1da177e4 8349 **/
1be7bd82
WB
8350static const struct ipr_chip_t * __devinit
8351ipr_get_chip_info(const struct pci_device_id *dev_id)
1da177e4
LT
8352{
8353 int i;
8354
1da177e4
LT
8355 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8356 if (ipr_chip[i].vendor == dev_id->vendor &&
8357 ipr_chip[i].device == dev_id->device)
1be7bd82 8358 return &ipr_chip[i];
1da177e4
LT
8359 return NULL;
8360}
8361
95fecd90
WB
8362/**
8363 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8364 * @irq: interrupt number
 * @devp: pointer to the ipr_ioa_cfg struct
8365 *
8366 * Description: Simply sets the msi_received flag to 1, indicating that
8367 * Message Signaled Interrupts are supported.
8368 *
8369 * Return value:
8370 * IRQ_HANDLED
8371 **/
8372static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8373{
8374 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8375 unsigned long lock_flags = 0;
8376 irqreturn_t rc = IRQ_HANDLED;
8377
8378 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8379
8380 ioa_cfg->msi_received = 1;
8381 wake_up(&ioa_cfg->msi_wait_q);
8382
8383 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8384 return rc;
8385}
8386
8387/**
8388 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8389 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
8390 *
8391 * Description: The return value from pci_enable_msi() cannot always be
8392 * trusted. This routine sets up and initiates a test interrupt to determine
8393 * if the interrupt is received via the ipr_test_intr() service routine.
8394 * If the test fails, the driver will fall back to LSI.
8395 *
8396 * Return value:
8397 * 0 on success / non-zero on failure
8398 **/
8399static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8400 struct pci_dev *pdev)
8401{
8402 int rc;
8403 volatile u32 int_reg;
8404 unsigned long lock_flags = 0;
8405
8406 ENTER;
8407
8408 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8409 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8410 ioa_cfg->msi_received = 0;
8411 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
214777ba 8412 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
95fecd90
WB
8413 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8414 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8415
8416 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8417 if (rc) {
8418 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8419 return rc;
8420 } else if (ipr_debug)
8421 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
8422
214777ba 8423 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
95fecd90
WB
8424 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8425 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8426 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8427
8428 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8429 if (!ioa_cfg->msi_received) {
8430 /* MSI test failed */
8431 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
8432 rc = -EOPNOTSUPP;
8433 } else if (ipr_debug)
8434 dev_info(&pdev->dev, "MSI test succeeded.\n");
8435
8436 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8437
8438 free_irq(pdev->irq, ioa_cfg);
8439
8440 LEAVE;
8441
8442 return rc;
8443}
8444
1da177e4
LT
8445/**
8446 * ipr_probe_ioa - Allocates memory and does first stage of initialization
8447 * @pdev: PCI device struct
8448 * @dev_id: PCI device id struct
8449 *
8450 * Return value:
8451 * 0 on success / non-zero on failure
8452 **/
8453static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8454 const struct pci_device_id *dev_id)
8455{
8456 struct ipr_ioa_cfg *ioa_cfg;
8457 struct Scsi_Host *host;
8458 unsigned long ipr_regs_pci;
8459 void __iomem *ipr_regs;
a2a65a3e 8460 int rc = PCIBIOS_SUCCESSFUL;
473b1e8e 8461 volatile u32 mask, uproc, interrupts;
1da177e4
LT
8462
8463 ENTER;
8464
8465 if ((rc = pci_enable_device(pdev))) {
8466 dev_err(&pdev->dev, "Cannot enable adapter\n");
8467 goto out;
8468 }
8469
8470 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8471
8472 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8473
8474 if (!host) {
8475 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8476 rc = -ENOMEM;
8477 goto out_disable;
8478 }
8479
8480 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8481 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
35a39691
BK
8482 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8483 sata_port_info.flags, &ipr_sata_ops);
1da177e4 8484
1be7bd82 8485 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
1da177e4 8486
1be7bd82 8487 if (!ioa_cfg->ipr_chip) {
1da177e4
LT
8488 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8489 dev_id->vendor, dev_id->device);
8490 goto out_scsi_host_put;
8491 }
8492
a32c055f
WB
8493 /* set SIS 32 or SIS 64 */
8494 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
1be7bd82
WB
8495 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8496
5469cb5b
BK
8497 if (ipr_transop_timeout)
8498 ioa_cfg->transop_timeout = ipr_transop_timeout;
8499 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8500 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8501 else
8502 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8503
44c10138 8504 ioa_cfg->revid = pdev->revision;
463fc696 8505
1da177e4
LT
8506 ipr_regs_pci = pci_resource_start(pdev, 0);
8507
8508 rc = pci_request_regions(pdev, IPR_NAME);
8509 if (rc < 0) {
8510 dev_err(&pdev->dev,
8511 "Couldn't register memory range of registers\n");
8512 goto out_scsi_host_put;
8513 }
8514
25729a7f 8515 ipr_regs = pci_ioremap_bar(pdev, 0);
1da177e4
LT
8516
8517 if (!ipr_regs) {
8518 dev_err(&pdev->dev,
8519 "Couldn't map memory range of registers\n");
8520 rc = -ENOMEM;
8521 goto out_release_regions;
8522 }
8523
8524 ioa_cfg->hdw_dma_regs = ipr_regs;
8525 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8526 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8527
8528 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8529
8530 pci_set_master(pdev);
8531
	if (ioa_cfg->sis64) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		}
	} else
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Enable MSI style interrupts if they are supported. */
	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP)
			pci_disable_msi(pdev);
		else if (rc)
			goto out_msi_disable;
		else
			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
	} else if (ipr_debug)
		dev_info(&pdev->dev, "Cannot enable MSI.\n");

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

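	/* Size the config table for the larger SIS-64 header and entry formats when running on a SIS-64 adapter. */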
	if (ioa_cfg->sis64)
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
				+ ((sizeof(struct ipr_config_table_entry64)
				* ioa_cfg->max_devs_supported)));
	else
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
				+ ((sizeof(struct ipr_config_table_entry)
				* ioa_cfg->max_devs_supported)));

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
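	/* MSI vectors are not shared with other devices, so only request a shared IRQ when running with legacy interrupts. */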
	rc = request_irq(pdev->irq, ipr_isr,
			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
			 IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

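	/* Adapters flagged for PCI warm reset (and revision 0 Obsidian-E cards) are recovered via slot reset rather than BIST. */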
	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
cleanup_nomem:
	iounmap(ipr_regs);
out_msi_disable:
	pci_disable_msi(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 * 	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
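	/* Wait for any reset/reload already in progress to finish before starting the bringdown. */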
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __devexit ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

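	/* Scan the SCSI bus, add the sparse VSET LUNs and the IOA's own resource, then let the work queue finish any remaining device additions. */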
	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = __devexit_p(ipr_remove),
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock(&ipr_driver_lock);

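	/* Walk every registered adapter and issue a shutdown prepare to each one that is still accepting commands. */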
	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

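		/* Build an IOA SHUTDOWN (prepare for normal) command and send it; completion just returns the command block to the free queue. */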
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);