[SCSI] ipr: add support for multiple stages of initialization
drivers/scsi/ipr.c
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 * Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00040,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
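
/*
 * Illustrative usage of the module parameters declared above; the
 * parameter names come from the module_param_named() calls, but the
 * values here are arbitrary examples, not recommendations:
 *
 *	modprobe ipr max_speed=0 log_level=4 transop_timeout=360
 */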

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 * Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd: ipr command struct
 * @type: trace type
 * @add_data: additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
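
/*
 * Illustrative use of the trace hook (mirrors the calls in ipr_do_req()
 * and ipr_send_hcam() below); it compiles away when
 * CONFIG_SCSI_IPR_TRACE is not set:
 *
 *	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
 */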

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;
	ioasa->u.gata.status = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg: ioa config struct
 * @clr_ints: interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd: ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd: ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg: ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd: ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
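
/*
 * Worked example for the sis64 path above (illustrative, assuming the
 * 16 byte struct ipr_ioadl64_desc from ipr.h): with 9 descriptors,
 * 9 * 16 = 144 bytes of IOADLs exceed the 128 bytes that fit in the
 * default 256 byte IOARCB, so bit 0x4 is also ORed into the address
 * written to the IOARRIN register to request a 512 byte IOARCB fetch.
 */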

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd: ipr command struct
 * @done: done function
 * @timeout_func: timeout function
 * @timeout: timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd: ipr command struct
 * @dma_addr: dma address
 * @len: transfer length
 * @flags: ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}
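
/*
 * Illustrative caller (mirrors ipr_send_hcam() below): mapping a single
 * DMA-coherent buffer the adapter will write into:
 *
 *	ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
 *		       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
 */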

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd: ipr command struct
 * @timeout_func: function to invoke if command times out
 * @timeout: timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
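
/*
 * Note: callers of ipr_send_blocking_cmd() must hold
 * ioa_cfg->host->host_lock; as shown above, the lock is dropped across
 * wait_for_completion() and reacquired before returning.
 */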

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg: ioa config struct
 * @type: HCAM type
 * @hostrcb: hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res: resource entry struct
 * @proto: cfgte device bus protocol value
 *
 * Return value:
 *	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res: resource entry struct
 * @cfgtew: config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type & 0x0f;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
		       sizeof(res->res_path));

		res->bus = 0;
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}

			memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			       sizeof(res->dev_lun.scsi_lun));
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res: resource entry struct
 * @cfgtew: config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * ipr_format_resource_path - Format the resource path for printing.
 * @res_path: resource path
 * @buf: buffer
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_resource_path(u8 *res_path, char *buffer)
{
	int i;

	sprintf(buffer, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff; i++)
		sprintf(buffer, "%s-%02X", buffer, res_path[i]);

	return buffer;
}
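
/*
 * Example (illustrative): a res_path of { 0x00, 0x02, 0x04, 0xFF, ... }
 * is formatted as "00-02-04"; the 0xFF element terminates the path.
 */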

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res: resource entry struct
 * @cfgtew: config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			       sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_resource_path(&res->res_path[0], &buffer[0]));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *			  for the resource.
 * @res: resource entry struct
 *
 * Return value:
 *	none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb
 *
 * Return value:
 *	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i: index into buffer
 * @buf: string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 *	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
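
/*
 * Example (illustrative): for buf = "IBM     " with i indexing the last
 * character, the trailing blanks are stripped back to "IBM", a single
 * space and NUL terminator are appended ("IBM "), and the new length, 4,
 * is returned.
 */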
1322
1323/**
1324 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1325 * @prefix: string to print at start of printk
1326 * @hostrcb: hostrcb pointer
1327 * @vpd: vendor/product id/sn struct
1328 *
1329 * Return value:
1330 * none
1331 **/
1332static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1333 struct ipr_vpd *vpd)
1334{
1335 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1336 int i = 0;
1337
1338 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1339 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1340
1341 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1342 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1343
1344 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1345 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1346
1347 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1348}
1349
1da177e4
LT
1350/**
1351 * ipr_log_vpd - Log the passed VPD to the error log.
cfc32139 1352 * @vpd: vendor/product id/sn struct
1da177e4
LT
1353 *
1354 * Return value:
1355 * none
1356 **/
cfc32139 1357static void ipr_log_vpd(struct ipr_vpd *vpd)
1da177e4
LT
1358{
1359 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1360 + IPR_SERIAL_NUM_LEN];
1361
cfc32139 1362 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1363 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1da177e4
LT
1364 IPR_PROD_ID_LEN);
1365 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1366 ipr_err("Vendor/Product ID: %s\n", buffer);
1367
cfc32139 1368 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1da177e4
LT
1369 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1370 ipr_err(" Serial Number: %s\n", buffer);
1371}
1372
8cf093e2
BK
1373/**
1374 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1375 * @prefix: string to print at start of printk
1376 * @hostrcb: hostrcb pointer
1377 * @vpd: vendor/product id/sn/wwn struct
1378 *
1379 * Return value:
1380 * none
1381 **/
1382static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1383 struct ipr_ext_vpd *vpd)
1384{
1385 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1386 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1387 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1388}
1389
ee0f05b8 1390/**
1391 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1392 * @vpd: vendor/product id/sn/wwn struct
1393 *
1394 * Return value:
1395 * none
1396 **/
1397static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1398{
1399 ipr_log_vpd(&vpd->vpd);
1400 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1401 be32_to_cpu(vpd->wwid[1]));
1402}
1403
1404/**
1405 * ipr_log_enhanced_cache_error - Log a cache error.
1406 * @ioa_cfg: ioa config struct
1407 * @hostrcb: hostrcb struct
1408 *
1409 * Return value:
1410 * none
1411 **/
1412static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1413 struct ipr_hostrcb *hostrcb)
1414{
4565e370
WB
1415 struct ipr_hostrcb_type_12_error *error;
1416
1417 if (ioa_cfg->sis64)
1418 error = &hostrcb->hcam.u.error64.u.type_12_error;
1419 else
1420 error = &hostrcb->hcam.u.error.u.type_12_error;
ee0f05b8 1421
1422 ipr_err("-----Current Configuration-----\n");
1423 ipr_err("Cache Directory Card Information:\n");
1424 ipr_log_ext_vpd(&error->ioa_vpd);
1425 ipr_err("Adapter Card Information:\n");
1426 ipr_log_ext_vpd(&error->cfc_vpd);
1427
1428 ipr_err("-----Expected Configuration-----\n");
1429 ipr_err("Cache Directory Card Information:\n");
1430 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1431 ipr_err("Adapter Card Information:\n");
1432 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1433
1434 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1435 be32_to_cpu(error->ioa_data[0]),
1436 be32_to_cpu(error->ioa_data[1]),
1437 be32_to_cpu(error->ioa_data[2]));
1438}
1439
1da177e4
LT
1440/**
1441 * ipr_log_cache_error - Log a cache error.
1442 * @ioa_cfg: ioa config struct
1443 * @hostrcb: hostrcb struct
1444 *
1445 * Return value:
1446 * none
1447 **/
1448static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1449 struct ipr_hostrcb *hostrcb)
1450{
1451 struct ipr_hostrcb_type_02_error *error =
1452 &hostrcb->hcam.u.error.u.type_02_error;
1453
1454 ipr_err("-----Current Configuration-----\n");
1455 ipr_err("Cache Directory Card Information:\n");
cfc32139 1456 ipr_log_vpd(&error->ioa_vpd);
1da177e4 1457 ipr_err("Adapter Card Information:\n");
cfc32139 1458 ipr_log_vpd(&error->cfc_vpd);
1da177e4
LT
1459
1460 ipr_err("-----Expected Configuration-----\n");
1461 ipr_err("Cache Directory Card Information:\n");
cfc32139 1462 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1da177e4 1463 ipr_err("Adapter Card Information:\n");
cfc32139 1464 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1da177e4
LT
1465
1466 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1467 be32_to_cpu(error->ioa_data[0]),
1468 be32_to_cpu(error->ioa_data[1]),
1469 be32_to_cpu(error->ioa_data[2]));
1470}
1471
ee0f05b8 1472/**
1473 * ipr_log_enhanced_config_error - Log a configuration error.
1474 * @ioa_cfg: ioa config struct
1475 * @hostrcb: hostrcb struct
1476 *
1477 * Return value:
1478 * none
1479 **/
1480static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1481 struct ipr_hostrcb *hostrcb)
1482{
1483 int errors_logged, i;
1484 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1485 struct ipr_hostrcb_type_13_error *error;
1486
1487 error = &hostrcb->hcam.u.error.u.type_13_error;
1488 errors_logged = be32_to_cpu(error->errors_logged);
1489
1490 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1491 be32_to_cpu(error->errors_detected), errors_logged);
1492
1493 dev_entry = error->dev;
1494
1495 for (i = 0; i < errors_logged; i++, dev_entry++) {
1496 ipr_err_separator;
1497
1498 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1499 ipr_log_ext_vpd(&dev_entry->vpd);
1500
1501 ipr_err("-----New Device Information-----\n");
1502 ipr_log_ext_vpd(&dev_entry->new_vpd);
1503
1504 ipr_err("Cache Directory Card Information:\n");
1505 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1506
1507 ipr_err("Adapter Card Information:\n");
1508 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1509 }
1510}
1511
4565e370
WB
1512/**
1513 * ipr_log_sis64_config_error - Log a device error.
1514 * @ioa_cfg: ioa config struct
1515 * @hostrcb: hostrcb struct
1516 *
1517 * Return value:
1518 * none
1519 **/
1520static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1521 struct ipr_hostrcb *hostrcb)
1522{
1523 int errors_logged, i;
1524 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1525 struct ipr_hostrcb_type_23_error *error;
1526 char buffer[IPR_MAX_RES_PATH_LENGTH];
1527
1528 error = &hostrcb->hcam.u.error64.u.type_23_error;
1529 errors_logged = be32_to_cpu(error->errors_logged);
1530
1531 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1532 be32_to_cpu(error->errors_detected), errors_logged);
1533
1534 dev_entry = error->dev;
1535
1536 for (i = 0; i < errors_logged; i++, dev_entry++) {
1537 ipr_err_separator;
1538
1539 ipr_err("Device %d : %s", i + 1,
1540 ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
1541 ipr_log_ext_vpd(&dev_entry->vpd);
1542
1543 ipr_err("-----New Device Information-----\n");
1544 ipr_log_ext_vpd(&dev_entry->new_vpd);
1545
1546 ipr_err("Cache Directory Card Information:\n");
1547 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1548
1549 ipr_err("Adapter Card Information:\n");
1550 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1551 }
1552}
1553
1da177e4
LT
1554/**
1555 * ipr_log_config_error - Log a configuration error.
1556 * @ioa_cfg: ioa config struct
1557 * @hostrcb: hostrcb struct
1558 *
1559 * Return value:
1560 * none
1561 **/
1562static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1563 struct ipr_hostrcb *hostrcb)
1564{
1565 int errors_logged, i;
1566 struct ipr_hostrcb_device_data_entry *dev_entry;
1567 struct ipr_hostrcb_type_03_error *error;
1568
1569 error = &hostrcb->hcam.u.error.u.type_03_error;
1570 errors_logged = be32_to_cpu(error->errors_logged);
1571
1572 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1573 be32_to_cpu(error->errors_detected), errors_logged);
1574
cfc32139 1575 dev_entry = error->dev;
1da177e4
LT
1576
1577 for (i = 0; i < errors_logged; i++, dev_entry++) {
1578 ipr_err_separator;
1579
fa15b1f6 1580 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1581 ipr_log_vpd(&dev_entry->vpd);
1da177e4
LT
1582
1583 ipr_err("-----New Device Information-----\n");
cfc32139 1584 ipr_log_vpd(&dev_entry->new_vpd);
1da177e4
LT
1585
1586 ipr_err("Cache Directory Card Information:\n");
cfc32139 1587 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1da177e4
LT
1588
1589 ipr_err("Adapter Card Information:\n");
cfc32139 1590 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1da177e4
LT
1591
1592 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1593 be32_to_cpu(dev_entry->ioa_data[0]),
1594 be32_to_cpu(dev_entry->ioa_data[1]),
1595 be32_to_cpu(dev_entry->ioa_data[2]),
1596 be32_to_cpu(dev_entry->ioa_data[3]),
1597 be32_to_cpu(dev_entry->ioa_data[4]));
1598 }
1599}
1600
ee0f05b8 1601/**
1602 * ipr_log_enhanced_array_error - Log an array configuration error.
1603 * @ioa_cfg: ioa config struct
1604 * @hostrcb: hostrcb struct
1605 *
1606 * Return value:
1607 * none
1608 **/
1609static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1610 struct ipr_hostrcb *hostrcb)
1611{
1612 int i, num_entries;
1613 struct ipr_hostrcb_type_14_error *error;
1614 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1615 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1616
1617 error = &hostrcb->hcam.u.error.u.type_14_error;
1618
1619 ipr_err_separator;
1620
1621 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1622 error->protection_level,
1623 ioa_cfg->host->host_no,
1624 error->last_func_vset_res_addr.bus,
1625 error->last_func_vset_res_addr.target,
1626 error->last_func_vset_res_addr.lun);
1627
1628 ipr_err_separator;
1629
1630 array_entry = error->array_member;
1631 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
 1632 ARRAY_SIZE(error->array_member));
1633
1634 for (i = 0; i < num_entries; i++, array_entry++) {
1635 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1636 continue;
1637
1638 if (be32_to_cpu(error->exposed_mode_adn) == i)
1639 ipr_err("Exposed Array Member %d:\n", i);
1640 else
1641 ipr_err("Array Member %d:\n", i);
1642
1643 ipr_log_ext_vpd(&array_entry->vpd);
1644 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1645 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1646 "Expected Location");
1647
1648 ipr_err_separator;
1649 }
1650}
1651
1da177e4
LT
1652/**
1653 * ipr_log_array_error - Log an array configuration error.
1654 * @ioa_cfg: ioa config struct
1655 * @hostrcb: hostrcb struct
1656 *
1657 * Return value:
1658 * none
1659 **/
1660static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1661 struct ipr_hostrcb *hostrcb)
1662{
1663 int i;
1664 struct ipr_hostrcb_type_04_error *error;
1665 struct ipr_hostrcb_array_data_entry *array_entry;
1666 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1667
1668 error = &hostrcb->hcam.u.error.u.type_04_error;
1669
1670 ipr_err_separator;
1671
1672 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1673 error->protection_level,
1674 ioa_cfg->host->host_no,
1675 error->last_func_vset_res_addr.bus,
1676 error->last_func_vset_res_addr.target,
1677 error->last_func_vset_res_addr.lun);
1678
1679 ipr_err_separator;
1680
1681 array_entry = error->array_member;
1682
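	/* Walk all 18 possible members: the first ten live in array_member,
	   the remainder in array_member2 (hence the switch at i == 9 below). */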
1683 for (i = 0; i < 18; i++) {
cfc32139 1684 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1da177e4
LT
1685 continue;
1686
fa15b1f6 1687 if (be32_to_cpu(error->exposed_mode_adn) == i)
1da177e4 1688 ipr_err("Exposed Array Member %d:\n", i);
fa15b1f6 1689 else
1da177e4 1690 ipr_err("Array Member %d:\n", i);
1da177e4 1691
cfc32139 1692 ipr_log_vpd(&array_entry->vpd);
1da177e4 1693
fa15b1f6 1694 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1695 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1696 "Expected Location");
1da177e4
LT
1697
1698 ipr_err_separator;
1699
1700 if (i == 9)
1701 array_entry = error->array_member2;
1702 else
1703 array_entry++;
1704 }
1705}
1706
1707/**
b0df54bb 1708 * ipr_log_hex_data - Log additional hex IOA error data.
ac719aba 1709 * @ioa_cfg: ioa config struct
b0df54bb 1710 * @data: IOA error data
1711 * @len: data length
1da177e4
LT
1712 *
1713 * Return value:
1714 * none
1715 **/
ac719aba 1716static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1da177e4
LT
1717{
1718 int i;
1da177e4 1719
b0df54bb 1720 if (len == 0)
1da177e4
LT
1721 return;
1722
ac719aba
BK
1723 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1724 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1725
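	/* Each line of output covers four big-endian words (16 bytes); the
	   leading value is the byte offset into the error data. */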
b0df54bb 1726 for (i = 0; i < len / 4; i += 4) {
1da177e4 1727 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
b0df54bb 1728 be32_to_cpu(data[i]),
1729 be32_to_cpu(data[i+1]),
1730 be32_to_cpu(data[i+2]),
1731 be32_to_cpu(data[i+3]));
1da177e4
LT
1732 }
1733}
1734
ee0f05b8 1735/**
1736 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1737 * @ioa_cfg: ioa config struct
1738 * @hostrcb: hostrcb struct
1739 *
1740 * Return value:
1741 * none
1742 **/
1743static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1744 struct ipr_hostrcb *hostrcb)
1745{
1746 struct ipr_hostrcb_type_17_error *error;
1747
4565e370
WB
1748 if (ioa_cfg->sis64)
1749 error = &hostrcb->hcam.u.error64.u.type_17_error;
1750 else
1751 error = &hostrcb->hcam.u.error.u.type_17_error;
1752
ee0f05b8 1753 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1754 strim(error->failure_reason);
ee0f05b8 1755
8cf093e2
BK
1756 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1757 be32_to_cpu(hostrcb->hcam.u.error.prc));
1758 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1759 ipr_log_hex_data(ioa_cfg, error->data,
ee0f05b8 1760 be32_to_cpu(hostrcb->hcam.length) -
1761 (offsetof(struct ipr_hostrcb_error, u) +
1762 offsetof(struct ipr_hostrcb_type_17_error, data)));
1763}
1764
b0df54bb 1765/**
1766 * ipr_log_dual_ioa_error - Log a dual adapter error.
1767 * @ioa_cfg: ioa config struct
1768 * @hostrcb: hostrcb struct
1769 *
1770 * Return value:
1771 * none
1772 **/
1773static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1774 struct ipr_hostrcb *hostrcb)
1775{
1776 struct ipr_hostrcb_type_07_error *error;
1777
1778 error = &hostrcb->hcam.u.error.u.type_07_error;
1779 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1780 strim(error->failure_reason);
b0df54bb 1781
8cf093e2
BK
1782 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1783 be32_to_cpu(hostrcb->hcam.u.error.prc));
1784 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1785 ipr_log_hex_data(ioa_cfg, error->data,
b0df54bb 1786 be32_to_cpu(hostrcb->hcam.length) -
1787 (offsetof(struct ipr_hostrcb_error, u) +
1788 offsetof(struct ipr_hostrcb_type_07_error, data)));
1789}
1790
49dc6a18
BK
1791static const struct {
1792 u8 active;
1793 char *desc;
1794} path_active_desc[] = {
1795 { IPR_PATH_NO_INFO, "Path" },
1796 { IPR_PATH_ACTIVE, "Active path" },
1797 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1798};
1799
1800static const struct {
1801 u8 state;
1802 char *desc;
1803} path_state_desc[] = {
1804 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1805 { IPR_PATH_HEALTHY, "is healthy" },
1806 { IPR_PATH_DEGRADED, "is degraded" },
1807 { IPR_PATH_FAILED, "is failed" }
1808};
1809
1810/**
1811 * ipr_log_fabric_path - Log a fabric path error
1812 * @hostrcb: hostrcb struct
1813 * @fabric: fabric descriptor
1814 *
1815 * Return value:
1816 * none
1817 **/
1818static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1819 struct ipr_hostrcb_fabric_desc *fabric)
1820{
1821 int i, j;
1822 u8 path_state = fabric->path_state;
1823 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1824 u8 state = path_state & IPR_PATH_STATE_MASK;
1825
1826 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1827 if (path_active_desc[i].active != active)
1828 continue;
1829
1830 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1831 if (path_state_desc[j].state != state)
1832 continue;
1833
1834 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1835 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1836 path_active_desc[i].desc, path_state_desc[j].desc,
1837 fabric->ioa_port);
1838 } else if (fabric->cascaded_expander == 0xff) {
1839 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1840 path_active_desc[i].desc, path_state_desc[j].desc,
1841 fabric->ioa_port, fabric->phy);
1842 } else if (fabric->phy == 0xff) {
1843 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1844 path_active_desc[i].desc, path_state_desc[j].desc,
1845 fabric->ioa_port, fabric->cascaded_expander);
1846 } else {
1847 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1848 path_active_desc[i].desc, path_state_desc[j].desc,
1849 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1850 }
1851 return;
1852 }
1853 }
1854
1855 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1856 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1857}
1858
4565e370
WB
1859/**
1860 * ipr_log64_fabric_path - Log a fabric path error
1861 * @hostrcb: hostrcb struct
1862 * @fabric: fabric descriptor
1863 *
1864 * Return value:
1865 * none
1866 **/
1867static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1868 struct ipr_hostrcb64_fabric_desc *fabric)
1869{
1870 int i, j;
1871 u8 path_state = fabric->path_state;
1872 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1873 u8 state = path_state & IPR_PATH_STATE_MASK;
1874 char buffer[IPR_MAX_RES_PATH_LENGTH];
1875
1876 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1877 if (path_active_desc[i].active != active)
1878 continue;
1879
1880 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1881 if (path_state_desc[j].state != state)
1882 continue;
1883
1884 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1885 path_active_desc[i].desc, path_state_desc[j].desc,
1886 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1887 return;
1888 }
1889 }
1890
1891 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1892 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1893}
1894
49dc6a18
BK
1895static const struct {
1896 u8 type;
1897 char *desc;
1898} path_type_desc[] = {
1899 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1900 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1901 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1902 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1903};
1904
1905static const struct {
1906 u8 status;
1907 char *desc;
1908} path_status_desc[] = {
1909 { IPR_PATH_CFG_NO_PROB, "Functional" },
1910 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1911 { IPR_PATH_CFG_FAILED, "Failed" },
1912 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1913 { IPR_PATH_NOT_DETECTED, "Missing" },
1914 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1915};
1916
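/* Indexed by the low four bits of a config element's link_rate field
 * (IPR_PHY_LINK_RATE_MASK); the 0x8/0x9 encodings for 1.5/3.0 Gbps match
 * the SAS negotiated link rate values. */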
1917static const char *link_rate[] = {
1918 "unknown",
1919 "disabled",
1920 "phy reset problem",
1921 "spinup hold",
1922 "port selector",
1923 "unknown",
1924 "unknown",
1925 "unknown",
1926 "1.5Gbps",
1927 "3.0Gbps",
1928 "unknown",
1929 "unknown",
1930 "unknown",
1931 "unknown",
1932 "unknown",
1933 "unknown"
1934};
1935
1936/**
1937 * ipr_log_path_elem - Log a fabric path element.
1938 * @hostrcb: hostrcb struct
1939 * @cfg: fabric path element struct
1940 *
1941 * Return value:
1942 * none
1943 **/
1944static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1945 struct ipr_hostrcb_config_element *cfg)
1946{
1947 int i, j;
1948 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1949 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1950
1951 if (type == IPR_PATH_CFG_NOT_EXIST)
1952 return;
1953
1954 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1955 if (path_type_desc[i].type != type)
1956 continue;
1957
1958 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1959 if (path_status_desc[j].status != status)
1960 continue;
1961
1962 if (type == IPR_PATH_CFG_IOA_PORT) {
1963 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1964 path_status_desc[j].desc, path_type_desc[i].desc,
1965 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1966 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1967 } else {
1968 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
1969 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
1970 path_status_desc[j].desc, path_type_desc[i].desc,
1971 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1972 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1973 } else if (cfg->cascaded_expander == 0xff) {
1974 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
1975 "WWN=%08X%08X\n", path_status_desc[j].desc,
1976 path_type_desc[i].desc, cfg->phy,
1977 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1978 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1979 } else if (cfg->phy == 0xff) {
1980 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
1981 "WWN=%08X%08X\n", path_status_desc[j].desc,
1982 path_type_desc[i].desc, cfg->cascaded_expander,
1983 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1984 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1985 } else {
1986 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
1987 "WWN=%08X%08X\n", path_status_desc[j].desc,
1988 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
1989 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1990 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1991 }
1992 }
1993 return;
1994 }
1995 }
1996
1997 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
1998 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
1999 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2000 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2001}
2002
4565e370
WB
2003/**
2004 * ipr_log64_path_elem - Log a fabric path element.
2005 * @hostrcb: hostrcb struct
2006 * @cfg: fabric path element struct
2007 *
2008 * Return value:
2009 * none
2010 **/
2011static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2012 struct ipr_hostrcb64_config_element *cfg)
2013{
2014 int i, j;
2015 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2016 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2017 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2018 char buffer[IPR_MAX_RES_PATH_LENGTH];
2019
2020 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2021 return;
2022
2023 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2024 if (path_type_desc[i].type != type)
2025 continue;
2026
2027 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2028 if (path_status_desc[j].status != status)
2029 continue;
2030
2031 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2032 path_status_desc[j].desc, path_type_desc[i].desc,
2033 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2034 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2035 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2036 return;
2037 }
2038 }
2039 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2040 "WWN=%08X%08X\n", cfg->type_status,
2041 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2042 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2043 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2044}
2045
49dc6a18
BK
2046/**
2047 * ipr_log_fabric_error - Log a fabric error.
2048 * @ioa_cfg: ioa config struct
2049 * @hostrcb: hostrcb struct
2050 *
2051 * Return value:
2052 * none
2053 **/
2054static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2055 struct ipr_hostrcb *hostrcb)
2056{
2057 struct ipr_hostrcb_type_20_error *error;
2058 struct ipr_hostrcb_fabric_desc *fabric;
2059 struct ipr_hostrcb_config_element *cfg;
2060 int i, add_len;
2061
2062 error = &hostrcb->hcam.u.error.u.type_20_error;
2063 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2064 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2065
2066 add_len = be32_to_cpu(hostrcb->hcam.length) -
2067 (offsetof(struct ipr_hostrcb_error, u) +
2068 offsetof(struct ipr_hostrcb_type_20_error, desc));
2069
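	/* Fabric descriptors are variable length: advance by each
	   descriptor's own length field. Whatever trails the last
	   descriptor is dumped as raw hex below. */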
2070 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2071 ipr_log_fabric_path(hostrcb, fabric);
2072 for_each_fabric_cfg(fabric, cfg)
2073 ipr_log_path_elem(hostrcb, cfg);
2074
2075 add_len -= be16_to_cpu(fabric->length);
2076 fabric = (struct ipr_hostrcb_fabric_desc *)
2077 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2078 }
2079
ac719aba 2080 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
49dc6a18
BK
2081}
2082
4565e370
WB
2083/**
2084 * ipr_log_sis64_array_error - Log a sis64 array error.
2085 * @ioa_cfg: ioa config struct
2086 * @hostrcb: hostrcb struct
2087 *
2088 * Return value:
2089 * none
2090 **/
2091static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2092 struct ipr_hostrcb *hostrcb)
2093{
2094 int i, num_entries;
2095 struct ipr_hostrcb_type_24_error *error;
2096 struct ipr_hostrcb64_array_data_entry *array_entry;
2097 char buffer[IPR_MAX_RES_PATH_LENGTH];
2098 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2099
2100 error = &hostrcb->hcam.u.error64.u.type_24_error;
2101
2102 ipr_err_separator;
2103
2104 ipr_err("RAID %s Array Configuration: %s\n",
2105 error->protection_level,
2106 ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
2107
2108 ipr_err_separator;
2109
2110 array_entry = error->array_member;
2111 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
 2112 ARRAY_SIZE(error->array_member));
2113
2114 for (i = 0; i < num_entries; i++, array_entry++) {
2115
2116 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2117 continue;
2118
2119 if (error->exposed_mode_adn == i)
2120 ipr_err("Exposed Array Member %d:\n", i);
2121 else
2122 ipr_err("Array Member %d:\n", i);
2123
2124 ipr_err("Array Member %d:\n", i);
2125 ipr_log_ext_vpd(&array_entry->vpd);
2126 ipr_err("Current Location: %s",
2127 ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
2128 ipr_err("Expected Location: %s",
2129 ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
2130
2131 ipr_err_separator;
2132 }
2133}
2134
2135/**
2136 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2137 * @ioa_cfg: ioa config struct
2138 * @hostrcb: hostrcb struct
2139 *
2140 * Return value:
2141 * none
2142 **/
2143static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2144 struct ipr_hostrcb *hostrcb)
2145{
2146 struct ipr_hostrcb_type_30_error *error;
2147 struct ipr_hostrcb64_fabric_desc *fabric;
2148 struct ipr_hostrcb64_config_element *cfg;
2149 int i, add_len;
2150
2151 error = &hostrcb->hcam.u.error64.u.type_30_error;
2152
2153 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2154 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2155
2156 add_len = be32_to_cpu(hostrcb->hcam.length) -
2157 (offsetof(struct ipr_hostrcb64_error, u) +
2158 offsetof(struct ipr_hostrcb_type_30_error, desc));
2159
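	/* Same variable-length descriptor walk as ipr_log_fabric_error,
	   using the 64-bit descriptor and config element layouts. */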
2160 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2161 ipr_log64_fabric_path(hostrcb, fabric);
2162 for_each_fabric_cfg(fabric, cfg)
2163 ipr_log64_path_elem(hostrcb, cfg);
2164
2165 add_len -= be16_to_cpu(fabric->length);
2166 fabric = (struct ipr_hostrcb64_fabric_desc *)
2167 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2168 }
2169
2170 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2171}
2172
b0df54bb 2173/**
2174 * ipr_log_generic_error - Log an adapter error.
2175 * @ioa_cfg: ioa config struct
2176 * @hostrcb: hostrcb struct
2177 *
2178 * Return value:
2179 * none
2180 **/
2181static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2182 struct ipr_hostrcb *hostrcb)
2183{
ac719aba 2184 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
b0df54bb 2185 be32_to_cpu(hostrcb->hcam.length));
2186}
2187
1da177e4
LT
2188/**
 2189 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 2190 * @ioasc: IOASC
 2191 *
 2192 * This function will return the index into the ipr_error_table
2193 * for the specified IOASC. If the IOASC is not in the table,
2194 * 0 will be returned, which points to the entry used for unknown errors.
2195 *
2196 * Return value:
2197 * index into the ipr_error_table
2198 **/
2199static u32 ipr_get_error(u32 ioasc)
2200{
2201 int i;
2202
2203 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
35a39691 2204 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1da177e4
LT
2205 return i;
2206
2207 return 0;
2208}
2209
2210/**
2211 * ipr_handle_log_data - Log an adapter error.
2212 * @ioa_cfg: ioa config struct
2213 * @hostrcb: hostrcb struct
2214 *
2215 * This function logs an adapter error to the system.
2216 *
2217 * Return value:
2218 * none
2219 **/
2220static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2221 struct ipr_hostrcb *hostrcb)
2222{
2223 u32 ioasc;
2224 int error_index;
2225
2226 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2227 return;
2228
2229 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2230 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2231
4565e370
WB
2232 if (ioa_cfg->sis64)
2233 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2234 else
2235 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4 2236
4565e370
WB
2237 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2238 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
1da177e4
LT
2239 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2240 scsi_report_bus_reset(ioa_cfg->host,
4565e370 2241 hostrcb->hcam.u.error.fd_res_addr.bus);
1da177e4
LT
2242 }
2243
2244 error_index = ipr_get_error(ioasc);
2245
2246 if (!ipr_error_table[error_index].log_hcam)
2247 return;
2248
49dc6a18 2249 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
2250
2251 /* Set indication we have logged an error */
2252 ioa_cfg->errors_logged++;
2253
933916f3 2254 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1da177e4 2255 return;
cf852037 2256 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2257 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1da177e4
LT
2258
2259 switch (hostrcb->hcam.overlay_id) {
1da177e4
LT
2260 case IPR_HOST_RCB_OVERLAY_ID_2:
2261 ipr_log_cache_error(ioa_cfg, hostrcb);
2262 break;
2263 case IPR_HOST_RCB_OVERLAY_ID_3:
2264 ipr_log_config_error(ioa_cfg, hostrcb);
2265 break;
2266 case IPR_HOST_RCB_OVERLAY_ID_4:
2267 case IPR_HOST_RCB_OVERLAY_ID_6:
2268 ipr_log_array_error(ioa_cfg, hostrcb);
2269 break;
b0df54bb 2270 case IPR_HOST_RCB_OVERLAY_ID_7:
2271 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2272 break;
ee0f05b8 2273 case IPR_HOST_RCB_OVERLAY_ID_12:
2274 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2275 break;
2276 case IPR_HOST_RCB_OVERLAY_ID_13:
2277 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2278 break;
2279 case IPR_HOST_RCB_OVERLAY_ID_14:
2280 case IPR_HOST_RCB_OVERLAY_ID_16:
2281 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2282 break;
2283 case IPR_HOST_RCB_OVERLAY_ID_17:
2284 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2285 break;
49dc6a18
BK
2286 case IPR_HOST_RCB_OVERLAY_ID_20:
2287 ipr_log_fabric_error(ioa_cfg, hostrcb);
2288 break;
4565e370
WB
2289 case IPR_HOST_RCB_OVERLAY_ID_23:
2290 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2291 break;
2292 case IPR_HOST_RCB_OVERLAY_ID_24:
2293 case IPR_HOST_RCB_OVERLAY_ID_26:
2294 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2295 break;
2296 case IPR_HOST_RCB_OVERLAY_ID_30:
2297 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2298 break;
cf852037 2299 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 2300 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 2301 default:
a9cfca96 2302 ipr_log_generic_error(ioa_cfg, hostrcb);
1da177e4
LT
2303 break;
2304 }
2305}
2306
2307/**
2308 * ipr_process_error - Op done function for an adapter error log.
2309 * @ipr_cmd: ipr command struct
2310 *
 2311 * This function is the op done function for an error log host
 2312 * controlled async (HCAM) from the adapter. It will log the error and
2313 * send the HCAM back to the adapter.
2314 *
2315 * Return value:
2316 * none
2317 **/
2318static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2319{
2320 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2321 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2322 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4565e370
WB
2323 u32 fd_ioasc;
2324
2325 if (ioa_cfg->sis64)
2326 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2327 else
2328 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4
LT
2329
2330 list_del(&hostrcb->queue);
2331 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2332
2333 if (!ioasc) {
2334 ipr_handle_log_data(ioa_cfg, hostrcb);
65f56475
BK
2335 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2336 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
1da177e4
LT
2337 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2338 dev_err(&ioa_cfg->pdev->dev,
2339 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2340 }
2341
2342 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2343}
2344
2345/**
2346 * ipr_timeout - An internally generated op has timed out.
2347 * @ipr_cmd: ipr command struct
2348 *
2349 * This function blocks host requests and initiates an
2350 * adapter reset.
2351 *
2352 * Return value:
2353 * none
2354 **/
2355static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2356{
2357 unsigned long lock_flags = 0;
2358 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2359
2360 ENTER;
2361 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2362
2363 ioa_cfg->errors_logged++;
2364 dev_err(&ioa_cfg->pdev->dev,
2365 "Adapter being reset due to command timeout.\n");
2366
2367 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2368 ioa_cfg->sdt_state = GET_DUMP;
2369
2370 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2371 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2372
2373 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2374 LEAVE;
2375}
2376
2377/**
2378 * ipr_oper_timeout - Adapter timed out transitioning to operational
2379 * @ipr_cmd: ipr command struct
2380 *
2381 * This function blocks host requests and initiates an
2382 * adapter reset.
2383 *
2384 * Return value:
2385 * none
2386 **/
2387static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2388{
2389 unsigned long lock_flags = 0;
2390 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2391
2392 ENTER;
2393 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2394
2395 ioa_cfg->errors_logged++;
2396 dev_err(&ioa_cfg->pdev->dev,
2397 "Adapter timed out transitioning to operational.\n");
2398
2399 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2400 ioa_cfg->sdt_state = GET_DUMP;
2401
2402 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2403 if (ipr_fastfail)
2404 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2405 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2406 }
2407
2408 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2409 LEAVE;
2410}
2411
2412/**
2413 * ipr_reset_reload - Reset/Reload the IOA
2414 * @ioa_cfg: ioa config struct
2415 * @shutdown_type: shutdown type
2416 *
2417 * This function resets the adapter and re-initializes it.
2418 * This function assumes that all new host commands have been stopped.
2419 * Return value:
2420 * SUCCESS / FAILED
2421 **/
2422static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2423 enum ipr_shutdown_type shutdown_type)
2424{
2425 if (!ioa_cfg->in_reset_reload)
2426 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2427
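	/* Sleep with the host lock dropped; the reset job wakes
	   reset_wait_q once the reload completes. */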
2428 spin_unlock_irq(ioa_cfg->host->host_lock);
2429 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2430 spin_lock_irq(ioa_cfg->host->host_lock);
2431
 2432 /* If we got hit with a host reset while we were already resetting the
 2433 adapter for some reason, and that reset failed, fail the host reset too. */
2434 if (ioa_cfg->ioa_is_dead) {
2435 ipr_trace;
2436 return FAILED;
2437 }
2438
2439 return SUCCESS;
2440}
2441
2442/**
2443 * ipr_find_ses_entry - Find matching SES in SES table
2444 * @res: resource entry struct of SES
2445 *
2446 * Return value:
2447 * pointer to SES table entry / NULL on failure
2448 **/
2449static const struct ipr_ses_table_entry *
2450ipr_find_ses_entry(struct ipr_resource_entry *res)
2451{
2452 int i, j, matches;
3e7ebdfa 2453 struct ipr_std_inq_vpids *vpids;
1da177e4
LT
2454 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2455
2456 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2457 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
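			/* An 'X' in compare_product_id_byte marks a byte that
			   must match the table entry; any other value makes
			   that position a don't-care. */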
2458 if (ste->compare_product_id_byte[j] == 'X') {
3e7ebdfa
WB
2459 vpids = &res->std_inq_data.vpids;
2460 if (vpids->product_id[j] == ste->product_id[j])
1da177e4
LT
2461 matches++;
2462 else
2463 break;
2464 } else
2465 matches++;
2466 }
2467
2468 if (matches == IPR_PROD_ID_LEN)
2469 return ste;
2470 }
2471
2472 return NULL;
2473}
2474
2475/**
2476 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2477 * @ioa_cfg: ioa config struct
2478 * @bus: SCSI bus
2479 * @bus_width: bus width
2480 *
2481 * Return value:
2482 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2483 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2484 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2485 * max 160MHz = max 320MB/sec).
2486 **/
2487static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2488{
2489 struct ipr_resource_entry *res;
2490 const struct ipr_ses_table_entry *ste;
2491 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2492
2493 /* Loop through each config table entry in the config table buffer */
2494 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 2495 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
1da177e4
LT
2496 continue;
2497
3e7ebdfa 2498 if (bus != res->bus)
1da177e4
LT
2499 continue;
2500
2501 if (!(ste = ipr_find_ses_entry(res)))
2502 continue;
2503
2504 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2505 }
2506
2507 return max_xfer_rate;
2508}
2509
2510/**
2511 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2512 * @ioa_cfg: ioa config struct
2513 * @max_delay: max delay in micro-seconds to wait
2514 *
2515 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2516 *
2517 * Return value:
2518 * 0 on success / other on failure
2519 **/
2520static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2521{
2522 volatile u32 pcii_reg;
2523 int delay = 1;
2524
2525 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2526 while (delay < max_delay) {
2527 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2528
2529 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2530 return 0;
2531
2532 /* udelay cannot be used if delay is more than a few milliseconds */
2533 if ((delay / 1000) > MAX_UDELAY_MS)
2534 mdelay(delay / 1000);
2535 else
2536 udelay(delay);
2537
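		/* Exponential backoff: double the poll interval each pass,
		   starting from 1 microsecond. */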
2538 delay += delay;
2539 }
2540 return -EIO;
2541}
2542
dcbad00e
WB
2543/**
2544 * ipr_get_sis64_dump_data_section - Dump IOA memory
2545 * @ioa_cfg: ioa config struct
2546 * @start_addr: adapter address to dump
2547 * @dest: destination kernel buffer
2548 * @length_in_words: length to dump in 4 byte words
2549 *
2550 * Return value:
2551 * 0 on success
2552 **/
2553static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2554 u32 start_addr,
2555 __be32 *dest, u32 length_in_words)
2556{
2557 int i;
2558
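	/* SIS-64 exposes IOA memory through an indirect register pair:
	   write the dump address, then read the word back. */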
2559 for (i = 0; i < length_in_words; i++) {
2560 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2561 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2562 dest++;
2563 }
2564
2565 return 0;
2566}
2567
1da177e4
LT
2568/**
2569 * ipr_get_ldump_data_section - Dump IOA memory
2570 * @ioa_cfg: ioa config struct
2571 * @start_addr: adapter address to dump
2572 * @dest: destination kernel buffer
2573 * @length_in_words: length to dump in 4 byte words
2574 *
2575 * Return value:
2576 * 0 on success / -EIO on failure
2577 **/
2578static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2579 u32 start_addr,
2580 __be32 *dest, u32 length_in_words)
2581{
2582 volatile u32 temp_pcii_reg;
2583 int i, delay = 0;
2584
dcbad00e
WB
2585 if (ioa_cfg->sis64)
2586 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2587 dest, length_in_words);
2588
1da177e4
LT
2589 /* Write IOA interrupt reg starting LDUMP state */
2590 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
214777ba 2591 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2592
2593 /* Wait for IO debug acknowledge */
2594 if (ipr_wait_iodbg_ack(ioa_cfg,
2595 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2596 dev_err(&ioa_cfg->pdev->dev,
2597 "IOA dump long data transfer timeout\n");
2598 return -EIO;
2599 }
2600
2601 /* Signal LDUMP interlocked - clear IO debug ack */
2602 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2603 ioa_cfg->regs.clr_interrupt_reg);
2604
2605 /* Write Mailbox with starting address */
2606 writel(start_addr, ioa_cfg->ioa_mailbox);
2607
2608 /* Signal address valid - clear IOA Reset alert */
2609 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2610 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2611
2612 for (i = 0; i < length_in_words; i++) {
2613 /* Wait for IO debug acknowledge */
2614 if (ipr_wait_iodbg_ack(ioa_cfg,
2615 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2616 dev_err(&ioa_cfg->pdev->dev,
2617 "IOA dump short data transfer timeout\n");
2618 return -EIO;
2619 }
2620
2621 /* Read data from mailbox and increment destination pointer */
2622 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2623 dest++;
2624
2625 /* For all but the last word of data, signal data received */
2626 if (i < (length_in_words - 1)) {
2627 /* Signal dump data received - Clear IO debug Ack */
2628 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2629 ioa_cfg->regs.clr_interrupt_reg);
2630 }
2631 }
2632
2633 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2634 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2635 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2636
2637 writel(IPR_UPROCI_IO_DEBUG_ALERT,
214777ba 2638 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2639
2640 /* Signal dump data received - Clear IO debug Ack */
2641 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2642 ioa_cfg->regs.clr_interrupt_reg);
2643
2644 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2645 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2646 temp_pcii_reg =
214777ba 2647 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
1da177e4
LT
2648
2649 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2650 return 0;
2651
2652 udelay(10);
2653 delay += 10;
2654 }
2655
2656 return 0;
2657}
2658
2659#ifdef CONFIG_SCSI_IPR_DUMP
2660/**
2661 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2662 * @ioa_cfg: ioa config struct
2663 * @pci_address: adapter address
2664 * @length: length of data to copy
2665 *
2666 * Copy data from PCI adapter to kernel buffer.
2667 * Note: length MUST be a 4 byte multiple
2668 * Return value:
2669 * 0 on success / other on failure
2670 **/
2671static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2672 unsigned long pci_address, u32 length)
2673{
2674 int bytes_copied = 0;
2675 int cur_len, rc, rem_len, rem_page_len;
2676 __be32 *page;
2677 unsigned long lock_flags = 0;
2678 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2679
2680 while (bytes_copied < length &&
2681 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2682 if (ioa_dump->page_offset >= PAGE_SIZE ||
2683 ioa_dump->page_offset == 0) {
2684 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2685
2686 if (!page) {
2687 ipr_trace;
2688 return bytes_copied;
2689 }
2690
2691 ioa_dump->page_offset = 0;
2692 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2693 ioa_dump->next_page_index++;
2694 } else
2695 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2696
2697 rem_len = length - bytes_copied;
2698 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2699 cur_len = min(rem_len, rem_page_len);
2700
2701 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2702 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2703 rc = -EIO;
2704 } else {
2705 rc = ipr_get_ldump_data_section(ioa_cfg,
2706 pci_address + bytes_copied,
2707 &page[ioa_dump->page_offset / 4],
2708 (cur_len / sizeof(u32)));
2709 }
2710 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2711
2712 if (!rc) {
2713 ioa_dump->page_offset += cur_len;
2714 bytes_copied += cur_len;
2715 } else {
2716 ipr_trace;
2717 break;
2718 }
2719 schedule();
2720 }
2721
2722 return bytes_copied;
2723}
2724
2725/**
2726 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2727 * @hdr: dump entry header struct
2728 *
2729 * Return value:
2730 * nothing
2731 **/
2732static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2733{
2734 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2735 hdr->num_elems = 1;
2736 hdr->offset = sizeof(*hdr);
2737 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2738}
2739
2740/**
2741 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2742 * @ioa_cfg: ioa config struct
2743 * @driver_dump: driver dump struct
2744 *
2745 * Return value:
2746 * nothing
2747 **/
2748static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2749 struct ipr_driver_dump *driver_dump)
2750{
2751 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2752
2753 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2754 driver_dump->ioa_type_entry.hdr.len =
2755 sizeof(struct ipr_dump_ioa_type_entry) -
2756 sizeof(struct ipr_dump_entry_header);
2757 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2758 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2759 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2760 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2761 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2762 ucode_vpd->minor_release[1];
2763 driver_dump->hdr.num_entries++;
2764}
2765
2766/**
2767 * ipr_dump_version_data - Fill in the driver version in the dump.
2768 * @ioa_cfg: ioa config struct
2769 * @driver_dump: driver dump struct
2770 *
2771 * Return value:
2772 * nothing
2773 **/
2774static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2775 struct ipr_driver_dump *driver_dump)
2776{
2777 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2778 driver_dump->version_entry.hdr.len =
2779 sizeof(struct ipr_dump_version_entry) -
2780 sizeof(struct ipr_dump_entry_header);
2781 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2782 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2783 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2784 driver_dump->hdr.num_entries++;
2785}
2786
2787/**
2788 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2789 * @ioa_cfg: ioa config struct
2790 * @driver_dump: driver dump struct
2791 *
2792 * Return value:
2793 * nothing
2794 **/
2795static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2796 struct ipr_driver_dump *driver_dump)
2797{
2798 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2799 driver_dump->trace_entry.hdr.len =
2800 sizeof(struct ipr_dump_trace_entry) -
2801 sizeof(struct ipr_dump_entry_header);
2802 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2803 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2804 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2805 driver_dump->hdr.num_entries++;
2806}
2807
2808/**
2809 * ipr_dump_location_data - Fill in the IOA location in the dump.
2810 * @ioa_cfg: ioa config struct
2811 * @driver_dump: driver dump struct
2812 *
2813 * Return value:
2814 * nothing
2815 **/
2816static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2817 struct ipr_driver_dump *driver_dump)
2818{
2819 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2820 driver_dump->location_entry.hdr.len =
2821 sizeof(struct ipr_dump_location_entry) -
2822 sizeof(struct ipr_dump_entry_header);
2823 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2824 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
71610f55 2825 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
1da177e4
LT
2826 driver_dump->hdr.num_entries++;
2827}
2828
2829/**
2830 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2831 * @ioa_cfg: ioa config struct
2832 * @dump: dump struct
2833 *
2834 * Return value:
2835 * nothing
2836 **/
2837static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2838{
2839 unsigned long start_addr, sdt_word;
2840 unsigned long lock_flags = 0;
2841 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2842 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2843 u32 num_entries, start_off, end_off;
2844 u32 bytes_to_copy, bytes_copied, rc;
2845 struct ipr_sdt *sdt;
dcbad00e 2846 int valid = 1;
1da177e4
LT
2847 int i;
2848
2849 ENTER;
2850
2851 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2852
2853 if (ioa_cfg->sdt_state != GET_DUMP) {
2854 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2855 return;
2856 }
2857
2858 start_addr = readl(ioa_cfg->ioa_mailbox);
2859
dcbad00e 2860 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
1da177e4
LT
2861 dev_err(&ioa_cfg->pdev->dev,
2862 "Invalid dump table format: %lx\n", start_addr);
2863 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2864 return;
2865 }
2866
2867 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2868
2869 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2870
2871 /* Initialize the overall dump header */
2872 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2873 driver_dump->hdr.num_entries = 1;
2874 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2875 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2876 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2877 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2878
2879 ipr_dump_version_data(ioa_cfg, driver_dump);
2880 ipr_dump_location_data(ioa_cfg, driver_dump);
2881 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2882 ipr_dump_trace_data(ioa_cfg, driver_dump);
2883
2884 /* Update dump_header */
2885 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2886
2887 /* IOA Dump entry */
2888 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1da177e4
LT
2889 ioa_dump->hdr.len = 0;
2890 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2891 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2892
2893 /* First entries in sdt are actually a list of dump addresses and
2894 lengths to gather the real dump data. sdt represents the pointer
2895 to the ioa generated dump table. Dump data will be extracted based
2896 on entries in this table */
2897 sdt = &ioa_dump->sdt;
2898
2899 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2900 sizeof(struct ipr_sdt) / sizeof(__be32));
2901
2902 /* Smart Dump table is ready to use and the first entry is valid */
dcbad00e
WB
2903 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2904 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
2905 dev_err(&ioa_cfg->pdev->dev,
2906 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2907 rc, be32_to_cpu(sdt->hdr.state));
2908 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2909 ioa_cfg->sdt_state = DUMP_OBTAINED;
2910 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2911 return;
2912 }
2913
2914 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2915
2916 if (num_entries > IPR_NUM_SDT_ENTRIES)
2917 num_entries = IPR_NUM_SDT_ENTRIES;
2918
2919 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2920
2921 for (i = 0; i < num_entries; i++) {
2922 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2923 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2924 break;
2925 }
2926
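		/* SIS-64 SDT entries carry an explicit length in end_token;
		   FMT2 entries carry start/end offsets, so the length is the
		   difference of the two. */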
2927 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
dcbad00e
WB
2928 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
2929 if (ioa_cfg->sis64)
2930 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2931 else {
2932 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2933 end_off = be32_to_cpu(sdt->entry[i].end_token);
2934
2935 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2936 bytes_to_copy = end_off - start_off;
2937 else
2938 valid = 0;
2939 }
2940 if (valid) {
1da177e4
LT
2941 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2942 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2943 continue;
2944 }
2945
2946 /* Copy data from adapter to driver buffers */
2947 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2948 bytes_to_copy);
2949
2950 ioa_dump->hdr.len += bytes_copied;
2951
2952 if (bytes_copied != bytes_to_copy) {
2953 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2954 break;
2955 }
2956 }
2957 }
2958 }
2959
2960 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2961
2962 /* Update dump_header */
2963 driver_dump->hdr.len += ioa_dump->hdr.len;
2964 wmb();
2965 ioa_cfg->sdt_state = DUMP_OBTAINED;
2966 LEAVE;
2967}
2968
2969#else
2970#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2971#endif
2972
2973/**
2974 * ipr_release_dump - Free adapter dump memory
2975 * @kref: kref struct
2976 *
2977 * Return value:
2978 * nothing
2979 **/
2980static void ipr_release_dump(struct kref *kref)
2981{
2982 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2983 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2984 unsigned long lock_flags = 0;
2985 int i;
2986
2987 ENTER;
2988 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2989 ioa_cfg->dump = NULL;
2990 ioa_cfg->sdt_state = INACTIVE;
2991 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2992
2993 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2994 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2995
2996 kfree(dump);
2997 LEAVE;
2998}
2999
3000/**
3001 * ipr_worker_thread - Worker thread
c4028958 3002 * @work: work struct embedded in the ioa config struct
1da177e4
LT
3003 *
3004 * Called at task level from a work thread. This function takes care
 3005 * of adding and removing devices from the mid-layer as configuration
3006 * changes are detected by the adapter.
3007 *
3008 * Return value:
3009 * nothing
3010 **/
c4028958 3011static void ipr_worker_thread(struct work_struct *work)
1da177e4
LT
3012{
3013 unsigned long lock_flags;
3014 struct ipr_resource_entry *res;
3015 struct scsi_device *sdev;
3016 struct ipr_dump *dump;
c4028958
DH
3017 struct ipr_ioa_cfg *ioa_cfg =
3018 container_of(work, struct ipr_ioa_cfg, work_q);
1da177e4
LT
3019 u8 bus, target, lun;
3020 int did_work;
3021
3022 ENTER;
3023 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3024
3025 if (ioa_cfg->sdt_state == GET_DUMP) {
3026 dump = ioa_cfg->dump;
3027 if (!dump) {
3028 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3029 return;
3030 }
3031 kref_get(&dump->kref);
3032 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3033 ipr_get_ioa_dump(ioa_cfg, dump);
3034 kref_put(&dump->kref, ipr_release_dump);
3035
3036 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3037 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
3038 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3039 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3040 return;
3041 }
3042
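	/* Adding/removing devices requires dropping the host lock, so the
	   resource list can change underneath us; rescan from the top after
	   each mid-layer add. */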
3043restart:
3044 do {
3045 did_work = 0;
3046 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3047 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3048 return;
3049 }
3050
3051 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3052 if (res->del_from_ml && res->sdev) {
3053 did_work = 1;
3054 sdev = res->sdev;
3055 if (!scsi_device_get(sdev)) {
1da177e4
LT
3056 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3057 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3058 scsi_remove_device(sdev);
3059 scsi_device_put(sdev);
3060 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3061 }
3062 break;
3063 }
3064 }
3065 } while(did_work);
3066
3067 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3068 if (res->add_to_ml) {
3e7ebdfa
WB
3069 bus = res->bus;
3070 target = res->target;
3071 lun = res->lun;
1121b794 3072 res->add_to_ml = 0;
1da177e4
LT
3073 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3074 scsi_add_device(ioa_cfg->host, bus, target, lun);
3075 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3076 goto restart;
3077 }
3078 }
3079
3080 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ee959b00 3081 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
1da177e4
LT
3082 LEAVE;
3083}
3084
3085#ifdef CONFIG_SCSI_IPR_TRACE
3086/**
3087 * ipr_read_trace - Dump the adapter trace
3088 * @kobj: kobject struct
91a69029 3089 * @bin_attr: bin_attribute struct
1da177e4
LT
3090 * @buf: buffer
3091 * @off: offset
3092 * @count: buffer size
3093 *
3094 * Return value:
3095 * number of bytes printed to buffer
3096 **/
91a69029
ZR
3097static ssize_t ipr_read_trace(struct kobject *kobj,
3098 struct bin_attribute *bin_attr,
3099 char *buf, loff_t off, size_t count)
1da177e4 3100{
ee959b00
TJ
3101 struct device *dev = container_of(kobj, struct device, kobj);
3102 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3103 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3104 unsigned long lock_flags = 0;
d777aaf3 3105 ssize_t ret;
1da177e4
LT
3106
3107 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3108 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3109 IPR_TRACE_SIZE);
1da177e4 3110 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3111
3112 return ret;
1da177e4
LT
3113}
3114
3115static struct bin_attribute ipr_trace_attr = {
3116 .attr = {
3117 .name = "trace",
3118 .mode = S_IRUGO,
3119 },
3120 .size = 0,
3121 .read = ipr_read_trace,
3122};
3123#endif
3124
3125/**
3126 * ipr_show_fw_version - Show the firmware version
ee959b00
TJ
3127 * @dev: class device struct
3128 * @buf: buffer
1da177e4
LT
3129 *
3130 * Return value:
3131 * number of bytes printed to buffer
3132 **/
ee959b00
TJ
3133static ssize_t ipr_show_fw_version(struct device *dev,
3134 struct device_attribute *attr, char *buf)
1da177e4 3135{
ee959b00 3136 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3137 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3138 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3139 unsigned long lock_flags = 0;
3140 int len;
3141
3142 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3143 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3144 ucode_vpd->major_release, ucode_vpd->card_type,
3145 ucode_vpd->minor_release[0],
3146 ucode_vpd->minor_release[1]);
3147 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3148 return len;
3149}
3150
ee959b00 3151static struct device_attribute ipr_fw_version_attr = {
1da177e4
LT
3152 .attr = {
3153 .name = "fw_version",
3154 .mode = S_IRUGO,
3155 },
3156 .show = ipr_show_fw_version,
3157};
3158
3159/**
3160 * ipr_show_log_level - Show the adapter's error logging level
ee959b00
TJ
3161 * @dev: class device struct
3162 * @buf: buffer
1da177e4
LT
3163 *
3164 * Return value:
3165 * number of bytes printed to buffer
3166 **/
ee959b00
TJ
3167static ssize_t ipr_show_log_level(struct device *dev,
3168 struct device_attribute *attr, char *buf)
1da177e4 3169{
ee959b00 3170 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3171 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3172 unsigned long lock_flags = 0;
3173 int len;
3174
3175 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3176 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3177 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3178 return len;
3179}
3180
3181/**
3182 * ipr_store_log_level - Change the adapter's error logging level
ee959b00
TJ
3183 * @dev: class device struct
3184 * @buf: buffer
1da177e4
LT
3185 *
3186 * Return value:
 3187 * number of bytes consumed from the buffer
3188 **/
ee959b00
TJ
3189static ssize_t ipr_store_log_level(struct device *dev,
3190 struct device_attribute *attr,
1da177e4
LT
3191 const char *buf, size_t count)
3192{
ee959b00 3193 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3194 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3195 unsigned long lock_flags = 0;
3196
3197 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3198 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3199 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3200 return strlen(buf);
3201}
3202
ee959b00 3203static struct device_attribute ipr_log_level_attr = {
1da177e4
LT
3204 .attr = {
3205 .name = "log_level",
3206 .mode = S_IRUGO | S_IWUSR,
3207 },
3208 .show = ipr_show_log_level,
3209 .store = ipr_store_log_level
3210};
3211
3212/**
3213 * ipr_store_diagnostics - IOA Diagnostics interface
ee959b00
TJ
3214 * @dev: device struct
3215 * @buf: buffer
3216 * @count: buffer size
1da177e4
LT
3217 *
3218 * This function will reset the adapter and wait a reasonable
3219 * amount of time for any errors that the adapter might log.
3220 *
3221 * Return value:
3222 * count on success / other on failure
3223 **/
ee959b00
TJ
3224static ssize_t ipr_store_diagnostics(struct device *dev,
3225 struct device_attribute *attr,
1da177e4
LT
3226 const char *buf, size_t count)
3227{
ee959b00 3228 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3229 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3230 unsigned long lock_flags = 0;
3231 int rc = count;
3232
3233 if (!capable(CAP_SYS_ADMIN))
3234 return -EACCES;
3235
1da177e4 3236 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
970ea294
BK
3237 while(ioa_cfg->in_reset_reload) {
3238 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3239 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3240 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3241 }
3242
1da177e4
LT
3243 ioa_cfg->errors_logged = 0;
3244 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3245
3246 if (ioa_cfg->in_reset_reload) {
3247 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3248 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3249
3250 /* Wait for a second for any errors to be logged */
3251 msleep(1000);
3252 } else {
3253 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3254 return -EIO;
3255 }
3256
3257 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3258 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3259 rc = -EIO;
3260 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3261
3262 return rc;
3263}
3264
ee959b00 3265static struct device_attribute ipr_diagnostics_attr = {
1da177e4
LT
3266 .attr = {
3267 .name = "run_diagnostics",
3268 .mode = S_IWUSR,
3269 },
3270 .store = ipr_store_diagnostics
3271};
3272
f37eb54b 3273/**
3274 * ipr_show_adapter_state - Show the adapter's state
ee959b00
TJ
 3275 * @dev: device struct
3276 * @buf: buffer
f37eb54b 3277 *
3278 * Return value:
3279 * number of bytes printed to buffer
3280 **/
ee959b00
TJ
3281static ssize_t ipr_show_adapter_state(struct device *dev,
3282 struct device_attribute *attr, char *buf)
f37eb54b 3283{
ee959b00 3284 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b 3285 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3286 unsigned long lock_flags = 0;
3287 int len;
3288
3289 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3290 if (ioa_cfg->ioa_is_dead)
3291 len = snprintf(buf, PAGE_SIZE, "offline\n");
3292 else
3293 len = snprintf(buf, PAGE_SIZE, "online\n");
3294 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3295 return len;
3296}
3297
/**
 * ipr_store_adapter_state - Change adapter state
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
		ioa_cfg->ioa_is_dead = 0;
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name =		"online_state",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};

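/*
 * Illustrative sysfs usage (not part of the driver; "hostN" is the
 * adapter's SCSI host number). Reading reports "online" or "offline";
 * writing "online" only has an effect when the adapter is currently
 * marked dead, in which case it clears the bringdown state and kicks
 * off a reset:
 *
 *	cat /sys/class/scsi_host/hostN/online_state
 *	echo online > /sys/class/scsi_host/hostN/online_state
 */
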
/**
 * ipr_store_reset_adapter - Reset the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name =		"reset_host",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};

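/*
 * Illustrative sysfs usage (not part of the driver; "hostN" is the
 * adapter's SCSI host number). The write does not return until the
 * reset/reload has completed:
 *
 *	echo 1 > /sys/class/scsi_host/hostN/reset_host
 */
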
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:	buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 * 	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	scatterlist = sglist->scatterlist;
	sg_init_table(scatterlist, num_elem);

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			ipr_trace;

			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(sg_page(&scatterlist[j]), order);
			kfree(sglist);
			return NULL;
		}

		sg_set_page(&scatterlist[i], page, 0, 0);
	}

	return sglist;
}

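/*
 * Worked example of the sizing math above, assuming IPR_MAX_SGLIST is
 * 64 and 4k pages (illustrative values only):
 *
 *	buf_len    = 471040 (a 460k image)
 *	sg_size    = 471040 / 63     = 7476 bytes minimum per element
 *	order      = get_order(7476) = 1 (two pages per allocation)
 *	bsize_elem = 4096 << 1       = 8192 bytes per element
 *	num_elem   = 471040 / 8192   = 57, plus 1 for the 4096 byte
 *	             remainder       = 58 scatter/gather elements
 */
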
/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:	scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	int i;

	for (i = 0; i < sglist->num_sg; i++)
		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);

	kfree(sglist);
}

/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:	scatter/gather list pointer
 * @buffer:	buffer pointer
 * @len:	buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;

		if (result != 0) {
			ipr_trace;
			return result;
		}
	}

	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}

/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	char *src;
	int len, result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* Copy the file name and strip a trailing newline, if any */
	snprintf(fname, sizeof(fname), "%s", buf);
	len = strlen(fname);
	if (len && fname[len - 1] == '\n')
		fname[len - 1] = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
		release_firmware(fw_entry);
		return -EINVAL;
	}

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};

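/*
 * Illustrative sysfs usage (not part of the driver). The image must be
 * visible to the firmware loader, typically under /lib/firmware; the
 * file name below is hypothetical. The write blocks until the download
 * and the subsequent adapter reset have completed:
 *
 *	echo 534954.img > /sys/class/scsi_host/hostN/update_fw
 */
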
static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	NULL,
};

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 * 	number of bytes read
 **/
static ssize_t ipr_read_dump(struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= offsetof(struct ipr_ioa_dump, ioa_data);

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}

/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	LEAVE;
	return 0;
}

/**
 * ipr_write_dump - Setup dump state of adapter
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_write_dump(struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
#endif

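/*
 * Illustrative usage of the binary attribute above when
 * CONFIG_SCSI_IPR_DUMP is enabled (not part of the driver; "hostN" is
 * the adapter's SCSI host number). Reads return data only once a dump
 * has actually been obtained:
 *
 *	echo 1 > /sys/class/scsi_host/hostN/dump          # prepare for a dump
 *	dd if=/sys/class/scsi_host/hostN/dump of=ipr.dump # read it out
 *	echo 0 > /sys/class/scsi_host/hostN/dump          # free the dump memory
 */
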
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 * @reason:	calling context
 *
 * Return value:
 * 	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
				  int reason)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}

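/*
 * The SCSI mid-layer exposes this hook through sysfs; an illustrative
 * use (not part of the driver; "sdX" is the block device name). For
 * SATA devices the requested depth is capped at IPR_MAX_CMD_PER_ATA_LUN:
 *
 *	echo 16 > /sys/block/sdX/device/queue_depth
 */
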
/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:	scsi device struct
 * @tag_type:	type of tags to use
 *
 * Return value:
 * 	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res) {
		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
			scsi_set_tag_type(sdev, tag_type);

			if (tag_type)
				scsi_activate_tcq(sdev, sdev->queue_depth);
			else
				scsi_deactivate_tcq(sdev, sdev->queue_depth);
		} else
			tag_type = 0;
	} else
		tag_type = 0;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}

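/*
 * The mid-layer likewise exposes this hook through the device's
 * queue_type attribute; an illustrative use (not part of the driver).
 * Tagging is only enabled for generic SCSI devices that report tagged
 * queuing support; everything else is forced back to untagged:
 *
 *	echo simple > /sys/block/sdX/device/queue_type
 */
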
/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name =		"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};

/**
 * ipr_show_resource_path - Show the resource path for this device.
 * @dev:	device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       ipr_format_resource_path(&res->res_path[0], &buffer[0]));
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name =		"resource_path",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_resource_path
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	NULL,
};

/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}

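/*
 * Worked example of the mapping above (illustrative): a cylinder is
 * 128 * 32 = 4096 sectors, i.e. 2MB with 512 byte sectors, so a
 * 4194304 sector (2GB) disk reports 4194304 / 4096 = 1024 cylinders.
 * Any partition that starts on a cylinder boundary is therefore
 * naturally aligned on a 4k boundary as well.
 */
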
/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 * 	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id) &&
		    (res->lun == 0)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;

/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 * 	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
			clear_bit(starget->id, ioa_cfg->array_ids);
		else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
			clear_bit(starget->id, ioa_cfg->vset_ids);
		else if (starget->channel == 0)
			clear_bit(starget->id, ioa_cfg->target_ids);
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}

/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 * 	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}

/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			ata_port_disable(res->sata_port->ap);
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
			sdev->allow_restart = 1;
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		} else
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_resource_path(&res->res_path[0], &buffer[0]));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	ENTER;
	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port)
		rc = ata_sas_port_init(sata_port->ap);
	if (rc)
		ipr_slave_destroy(sdev);

	LEAVE;
	return rc;
}

/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 * 	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

/**
 * ipr_eh_host_reset - Reset the host adapter
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset as a result of error recovery.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);

	LEAVE;
	return rc;
}

static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_host_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
		       sizeof(struct ipr_ioasa_gata));

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}

/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 * @deadline:	unused
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}

/**
 * ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->ioa_is_dead)
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;
			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
			}
		}
	}

	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				rc = -EIO;
				break;
			}
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	LEAVE;
	return (rc ? FAILED : SUCCESS);
}

static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 * 	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	LEAVE;
}

/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 * 	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc;
	int op_found = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
		return FAILED;
	if (!res || !ipr_is_gscsi(res))
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->scsi_cmd == scsi_cmd) {
			ipr_cmd->done = ipr_scsi_eh_done;
			op_found = 1;
			break;
		}
	}

	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
		ipr_trace;
	}

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
}

/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;

	ENTER;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	LEAVE;
	return rc;
}

/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 * 	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      volatile u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}

/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 *
 * Return value:
 * 	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}

/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 * 	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	volatile u32 int_reg, int_mask_reg;
	u32 ioasc;
	u16 cmd_index;
	int num_hrrq = 0;
	struct ipr_cmnd *ipr_cmd;
	irqreturn_t rc = IRQ_NONE;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!ioa_cfg->allow_interrupts) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}
		}

		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	while (1) {
		ipr_cmd = NULL;

		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       ioa_cfg->toggle_bit) {

			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];

			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

			list_del(&ipr_cmd->queue);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);

			rc = IRQ_HANDLED;

			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
				ioa_cfg->hrrq_curr++;
			} else {
				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
				ioa_cfg->toggle_bit ^= 1u;
			}
		}

		if (ipr_cmd != NULL) {
			/* Clear the PCI interrupt */
			do {
				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

			if (int_reg & IPR_PCII_HRRQ_UPDATED) {
				ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}

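/*
 * A note on the HRRQ consumption loop above: the host request/response
 * queue is a circular buffer of response handles written by the
 * adapter. Each entry carries a toggle bit whose sense the adapter
 * flips on every pass through the queue, so an entry is valid only
 * while its toggle bit matches ioa_cfg->toggle_bit. When the driver's
 * consumer pointer wraps from hrrq_end back to hrrq_start, it flips
 * its own copy of the bit to stay in phase with the adapter.
 */
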
/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	0 on success / -1 on failure
 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}

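/*
 * The SIS64 descriptor built above keeps the flags, length and 64-bit
 * DMA address in separate fields, while the legacy descriptor built by
 * ipr_build_ioadl() below packs the flags and length into one 32-bit
 * word followed by a 32-bit address. Both lists are terminated the
 * same way, by setting IPR_IOADL_FLAGS_LAST on the final element.
 */
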
/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
				    offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
	}

	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}

/**
 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 * 	task attributes
 **/
static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
{
	u8 tag[2];
	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;

	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
		switch (tag[0]) {
		case MSG_SIMPLE_TAG:
			rc = IPR_FLAGS_LO_SIMPLE_TASK;
			break;
		case MSG_HEAD_TAG:
			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
			break;
		case MSG_ORDERED_TAG:
			rc = IPR_FLAGS_LO_ORDERED_TASK;
			break;
		}
	}

	return rc;
}

/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}

5121/**
5122 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5123 * @ipr_cmd: ipr command struct
5124 *
5125 * Return value:
5126 * none
5127 **/
5128static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5129{
51b1c7e1
BK
5130 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5131 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
a32c055f 5132 dma_addr_t dma_addr = ipr_cmd->dma_addr;
1da177e4
LT
5133
5134 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 5135 ioarcb->data_transfer_length = 0;
1da177e4 5136 ioarcb->read_data_transfer_length = 0;
a32c055f 5137 ioarcb->ioadl_len = 0;
1da177e4
LT
5138 ioarcb->read_ioadl_len = 0;
5139 ioasa->ioasc = 0;
5140 ioasa->residual_data_len = 0;
a32c055f
WB
5141
5142 if (ipr_cmd->ioa_cfg->sis64)
5143 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5144 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5145 else {
5146 ioarcb->write_ioadl_addr =
5147 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5148 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5149 }
1da177e4
LT
5150}

/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}
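
/*
 * Illustrative note (not part of the original source): the REQUEST SENSE
 * CDB assembled above is the standard 6-byte form; with the kernel's
 * SCSI_SENSE_BUFFERSIZE of 96 bytes it would read:
 *
 *	cdb[0] = 0x03	(REQUEST SENSE opcode)
 *	cdb[4] = 96	(allocation length)
 *
 * with the remaining CDB bytes left zero by ipr_reinit_ipr_cmnd_for_erp().
 */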

/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_get_tag_type(scsi_cmd->device)) {
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
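
/*
 * Illustrative note (not part of the original source): the ERP helpers
 * above chain through their done callbacks, so a check condition on a
 * tagged device walks roughly this sequence:
 *
 *	ipr_erp_cancel_all()     -> (IPR_CANCEL_ALL_REQUESTS completes)
 *	ipr_erp_request_sense()  -> (REQUEST SENSE completes)
 *	ipr_erp_done()           -> sense copied, scsi_done() called
 *
 * Untagged devices skip the cancel-all step and start at
 * ipr_erp_request_sense().
 */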

/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 *	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
		data_len = sizeof(struct ipr_ioasa);
	else
		data_len = be16_to_cpu(ioasa->ret_stat_len);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}
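
/*
 * Illustrative note (not part of the original source): each dump line
 * prints a byte offset followed by four big-endian words, so the output
 * takes the shape (values hypothetical):
 *
 *	IOASA Dump:
 *	00000000: 04448500 00000020 00000000 00000000
 *
 * The offset printed is i*4 while the loop advances i by four words, so
 * successive lines are labelled 0x00, 0x10, 0x20, ...
 */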

/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	u32 ioasc = be32_to_cpu(ioasa->ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
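
/*
 * Illustrative note (not part of the original source): the two branches
 * above emit the two standard SPC sense formats.  Descriptor format
 * (response code 0x72) carries the 64-bit failing LBA in an information
 * descriptor at bytes 8..19 (additional length 0x0A); fixed format
 * (response code 0x70) can only hold a 32-bit value in its information
 * field (bytes 3..6), flagged by the Valid bit in byte 0.  That is why a
 * vset failing LBA above 32 bits forces the descriptor form.
 */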

/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
		     SCSI_SENSE_BUFFERSIZE));
	return 1;
}

/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}

/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(ipr_cmd->scsi_cmd);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		scsi_cmd->scsi_done(scsi_cmd);
	} else
		ipr_erp_start(ioa_cfg, ipr_cmd);
}

/**
 * ipr_queuecommand - Queue a mid-layer request
 * @scsi_cmd:	scsi command struct
 * @done:	done function
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
			    void (*done) (struct scsi_cmnd *))
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	int rc = 0;

	scsi_cmd->scsi_done = done;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;
	scsi_cmd->result = (DID_OK << 16);

	/*
	 * We are currently blocking all devices due to a host reset.
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 * and the ioa_is_dead check can be removed
	 */
	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		scsi_cmd->result = (DID_NO_CONNECT << 16);
		scsi_cmd->scsi_done(scsi_cmd);
		return 0;
	}

	if (ipr_is_gata(res) && res->sata_port)
		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ioarcb->res_handle = res->res_handle;
	ipr_cmd->done = ipr_scsi_done;
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		if (res->needs_sync_complete) {
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
			res->needs_sync_complete = 0;
		}

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
	}

	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

	if (likely(rc == 0)) {
		if (ioa_cfg->sis64)
			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
		else
			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
	}

	if (likely(rc == 0)) {
		mb();
		ipr_send_command(ipr_cmd);
	} else {
		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	return 0;
}
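
/*
 * Illustrative note (not part of the original source): the happy path
 * through ipr_queuecommand() is short: grab a free command block, copy
 * the CDB and resource handle into the IOARCB, build the IOADL for the
 * data buffer, then issue a memory barrier and hand the command to the
 * adapter via ipr_send_command().  Completion arrives in ipr_scsi_done(),
 * which either finishes the command or passes it to ipr_erp_start() when
 * the IOASC carries a sense key.
 */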

/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res)) {
		if (cmd == HDIO_GET_IDENTITY)
			return -ENOTTY;
		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
	}

	return -EINVAL;
}

/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 *	pointer to buffer with description string
 **/
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}

static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME
};

/**
 * ipr_ata_phy_reset - libata phy_reset handler
 * @ap:	ata port to reset
 *
 **/
static void ipr_ata_phy_reset(struct ata_port *ap)
{
	unsigned long flags;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	int rc;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	if (!ioa_cfg->allow_cmds)
		goto out_unlock;

	rc = ipr_device_reset(ioa_cfg, res);

	if (rc) {
		ata_port_disable(ap);
		goto out_unlock;
	}

	ap->link.device[0].class = res->ata_class;
	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
		ata_port_disable(ap);

out_unlock:
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	LEAVE;
}

/**
 * ipr_ata_post_internal - Cleanup after an internal command
 * @qc:	ATA queued command
 *
 * Return value:
 *	none
 **/
static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->qc == qc) {
			ipr_device_reset(ioa_cfg, sata_port->res);
			break;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
 * @regs:	destination
 * @tf:	source ATA taskfile
 *
 * Return value:
 *	none
 **/
static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
			     struct ata_taskfile *tf)
{
	regs->feature = tf->feature;
	regs->nsect = tf->nsect;
	regs->lbal = tf->lbal;
	regs->lbam = tf->lbam;
	regs->lbah = tf->lbah;
	regs->device = tf->device;
	regs->command = tf->command;
	regs->hob_feature = tf->hob_feature;
	regs->hob_nsect = tf->hob_nsect;
	regs->hob_lbal = tf->hob_lbal;
	regs->hob_lbam = tf->hob_lbam;
	regs->hob_lbah = tf->hob_lbah;
	regs->ctl = tf->ctl;
}

/**
 * ipr_sata_done - done function for SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer to SATA devices
 *
 * Return value:
 *	none
 **/
static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
	       sizeof(struct ipr_ioasa_gata));
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);

	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
	else
		qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:	ATA queued command
 *
 **/
static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
				  struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (qc->dma_dir == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	ioarcb->data_transfer_length = cpu_to_be32(len);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	ioarcb->u.sis64_addr_data.data_ioadl_addr =
		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl64->flags = cpu_to_be32(ioadl_flags);
		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64->address = cpu_to_be64(sg_dma_address(sg));

		last_ioadl64 = ioadl64;
		ioadl64++;
	}

	if (likely(last_ioadl64))
		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:	ATA queued command
 *
 **/
static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
				struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl_desc *last_ioadl = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(len);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(len);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl->address = cpu_to_be32(sg_dma_address(sg));

		last_ioadl = ioadl;
		ioadl++;
	}

	if (likely(last_ioadl))
		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
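
/*
 * Illustrative design note (not part of the original source): unlike the
 * SCSI IOADL builder, which terminates the chain by indexing ioadl[i-1]
 * after scsi_for_each_sg(), the two ATA builders above remember the most
 * recent descriptor in a last_ioadl/last_ioadl64 pointer while walking
 * for_each_sg().  That keeps the LAST-flag patch safe even if the sg
 * iteration produces no entries, in which case last_* stays NULL and no
 * descriptor is touched.
 */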

/**
 * ipr_qc_issue - Issue a SATA qc to a device
 * @qc:	queued command
 *
 * Return value:
 *	0 if success
 **/
static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_ioarcb_ata_regs *regs;

	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
		return AC_ERR_SYSTEM;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;

	if (ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	memset(regs, 0, sizeof(*regs));
	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
	ipr_cmd->qc = qc;
	ipr_cmd->done = ipr_sata_done;
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	ipr_cmd->dma_use_sg = qc->n_elem;

	if (ioa_cfg->sis64)
		ipr_build_ata_ioadl64(ipr_cmd, qc);
	else
		ipr_build_ata_ioadl(ipr_cmd, qc);

	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	ipr_copy_sata_tf(regs, &qc->tf);
	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
	case ATA_PROT_PIO:
		break;

	case ATA_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		break;

	case ATAPI_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	default:
		WARN_ON(1);
		return AC_ERR_INVALID;
	}

	mb();

	ipr_send_command(ipr_cmd);

	return 0;
}

/**
 * ipr_qc_fill_rtf - Read result TF
 * @qc:	ATA queued command
 *
 * Return value:
 *	true
 **/
static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioasa_gata *g = &sata_port->ioasa;
	struct ata_taskfile *tf = &qc->result_tf;

	tf->feature = g->error;
	tf->nsect = g->nsect;
	tf->lbal = g->lbal;
	tf->lbam = g->lbam;
	tf->lbah = g->lbah;
	tf->device = g->device;
	tf->command = g->status;
	tf->hob_nsect = g->hob_nsect;
	tf->hob_lbal = g->hob_lbal;
	tf->hob_lbam = g->hob_lbam;
	tf->hob_lbah = g->hob_lbah;
	tf->ctl = g->alt_status;

	return true;
}

static struct ata_port_operations ipr_sata_ops = {
	.phy_reset = ipr_ata_phy_reset,
	.hardreset = ipr_sata_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ipr_qc_issue,
	.qc_fill_rtf = ipr_qc_fill_rtf,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};

static struct ata_port_info sata_port_info = {
	.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
		 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
	.pio_mask = 0x10,	/* pio4 */
	.mwdma_mask = 0x07,
	.udma_mask = 0x7f,	/* udma0-6 */
	.port_ops = &ipr_sata_ops
};

#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
	PV_NORTHSTAR,
	PV_PULSAR,
	PV_POWER4,
	PV_ICESTAR,
	PV_SSTAR,
	PV_POWER4p,
	PV_630,
	PV_630p
};

/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 *	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
			if (__is_processor(ipr_blocked_processors[i]))
				return 1;
		}
	}
	return 0;
}
#else
#define ipr_invalid_adapter(ioa_cfg) 0
#endif

/**
 * ipr_ioa_bringdown_done - IOA bring down completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter bring down.
 * It wakes any reset sleepers.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);
	LEAVE;

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioa_reset_done - IOA reset completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter reset.
 * It schedules any necessary mid-layer add/removes and
 * wakes any reset sleepers.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_hostrcb *hostrcb, *temp;
	int i = 0;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->allow_cmds = 1;
	ioa_cfg->reset_cmd = NULL;
	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
			ipr_trace;
			break;
		}
	}
	schedule_work(&ioa_cfg->work_q);

	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
		list_del(&hostrcb->queue);
		if (i++ < IPR_NUM_LOG_HCAMS)
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
		else
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	}

	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");

	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	spin_unlock(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock(ioa_cfg->host->host_lock);

	if (!ioa_cfg->allow_cmds)
		scsi_block_requests(ioa_cfg->host);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
 * @supported_dev:	supported device struct
 * @vpids:	vendor product id struct
 *
 * Return value:
 *	none
 **/
static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
				 struct ipr_std_inq_vpids *vpids)
{
	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
	supported_dev->num_records = 1;
	supported_dev->data_length =
		cpu_to_be16(sizeof(struct ipr_supported_device));
	supported_dev->reserved = 0;
}

/**
 * ipr_set_supported_devs - Send Set Supported Devices for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Set Supported Devices command to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_resource_entry *res = ipr_cmd->u.res;

	ipr_cmd->job_step = ipr_ioa_reset_done;

	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
		if (!ipr_is_scsi_disk(res))
			continue;

		ipr_cmd->u.res = res;
		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;

		ipr_init_ioadl(ipr_cmd,
			       ioa_cfg->vpd_cbs_dma +
			       offsetof(struct ipr_misc_cbs, supp_dev),
			       sizeof(struct ipr_supported_device),
			       IPR_IOADL_FLAGS_WRITE_LAST);

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_SET_SUP_DEVICE_TIMEOUT);

		if (!ioa_cfg->sis64)
			ipr_cmd->job_step = ipr_set_supported_devs;
		return IPR_RC_JOB_RETURN;
	}

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_get_mode_page - Locate specified mode page
 * @mode_pages:	mode page buffer
 * @page_code:	page code to find
 * @len:	minimum required length for mode page
 *
 * Return value:
 *	pointer to mode page / NULL on failure
 **/
static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
			       u32 page_code, u32 len)
{
	struct ipr_mode_page_hdr *mode_hdr;
	u32 page_length;
	u32 length;

	if (!mode_pages || (mode_pages->hdr.length == 0))
		return NULL;

	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
	mode_hdr = (struct ipr_mode_page_hdr *)
		(mode_pages->data + mode_pages->hdr.block_desc_len);

	while (length) {
		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
				return mode_hdr;
			break;
		} else {
			page_length = (sizeof(struct ipr_mode_page_hdr) +
				       mode_hdr->page_length);
			length -= page_length;
			mode_hdr = (struct ipr_mode_page_hdr *)
				((unsigned long)mode_hdr + page_length);
		}
	}
	return NULL;
}
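
/*
 * Illustrative walkthrough (not part of the original source), with
 * hypothetical numbers: for a MODE SENSE reply whose header reports
 * length = 23 and block_desc_len = 8, the payload left for mode pages is
 * (23 + 1) - 4 - 8 = 12 bytes.  The loop then hops from page header to
 * page header, each hop consuming sizeof(struct ipr_mode_page_hdr) plus
 * that page's own page_length, until the requested page code matches or
 * the 12 bytes are exhausted.
 */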

/**
 * ipr_check_term_power - Check for term power errors
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	IOAFP mode pages buffer
 *
 * Check the IOAFP's mode page 28 for term power errors
 *
 * Return value:
 *	nothing
 **/
static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_mode_pages *mode_pages)
{
	int i;
	int entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	bus = mode_page->bus;

	for (i = 0; i < mode_page->num_entries; i++) {
		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
			dev_err(&ioa_cfg->pdev->dev,
				"Term power is absent on scsi bus %d\n",
				bus->res_addr.bus);
		}

		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
	}
}

/**
 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
 * @ioa_cfg:	ioa config struct
 *
 * Looks through the config table checking for SES devices. If
 * the SES device is in the SES table indicating a maximum SCSI
 * bus speed, the speed is limited for the bus.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
{
	u32 max_xfer_rate;
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
						       ioa_cfg->bus_attr[i].bus_width);

		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
	}
}

/**
 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	mode page 28 buffer
 *
 * Updates mode page 28 based on driver configuration
 *
 * Return value:
 *	none
 **/
static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_mode_pages *mode_pages)
{
	int i, entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_bus_attributes *bus_attr;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	/* Loop for each device bus entry */
	for (i = 0, bus = mode_page->bus;
	     i < mode_page->num_entries;
	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
			dev_err(&ioa_cfg->pdev->dev,
				"Invalid resource address reported: 0x%08X\n",
				IPR_GET_PHYS_LOC(bus->res_addr));
			continue;
		}

		bus_attr = &ioa_cfg->bus_attr[i];
		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
		bus->bus_width = bus_attr->bus_width;
		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
		if (bus_attr->qas_enabled)
			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
		else
			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
	}
}

/**
 * ipr_build_mode_select - Build a mode select command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 1 of the Mode Select CDB
 * @dma_addr:	DMA buffer address
 * @xfer_len:	data transfer length
 *
 * Return value:
 *	none
 **/
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
				  __be32 res_handle, u8 parm,
				  dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
	ioarcb->cmd_pkt.cdb[1] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
}
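
/*
 * Illustrative note (not part of the original source): callers below pass
 * parm = 0x11, which in the 6-byte MODE SELECT CDB sets the PF (page
 * format, 0x10) and SP (save pages, 0x01) bits, so the six bytes on the
 * wire look like:
 *
 *	0x15 0x11 0x00 0x00 <xfer_len> 0x00
 */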

/**
 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the SCSI bus attributes and sends
 * a Mode Select for Page 28 to activate them.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	int length;

	ENTER;
	ipr_scsi_bus_speed_limit(ioa_cfg);
	ipr_check_term_power(ioa_cfg, mode_pages);
	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_set_supported_devs;
	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
				    struct ipr_resource_entry, queue);
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_build_mode_sense - Builds a mode sense command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 2 of mode sense command
 * @dma_addr:	DMA address of mode sense buffer
 * @xfer_len:	Size of DMA buffer
 *
 * Return value:
 *	none
 **/
static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
				 __be32 res_handle,
				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
	ioarcb->cmd_pkt.cdb[2] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
}

/**
 * ipr_reset_cmd_failed - Handle failure of IOA reset command
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of an IOA bringup command.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	dev_err(&ioa_cfg->pdev->dev,
		"0x%02X failed with IOASC: 0x%08X\n",
		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_set_supported_devs;
		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
					    struct ipr_resource_entry, queue);
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}

/**
 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 28 mode sense to the IOA to
 * retrieve SCSI bus attributes.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x28, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function enables dual IOA RAID support if possible.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	struct ipr_mode_page24 *mode_page;
	int length;

	ENTER;
	mode_page = ipr_get_mode_page(mode_pages, 0x24,
				      sizeof(struct ipr_mode_page24));

	if (mode_page)
		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;

	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
{
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}

/**
 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a mode sense to the IOA to retrieve
 * the IOA Advanced Function Control mode page.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x24, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_init_res_table - Initialize the resource table
 * @ipr_cmd:	ipr command struct
 *
 * This function looks through the existing resource table, comparing
 * it with the config table. This function will take care of old/new
 * devices and schedule adding/removing them from the mid-layer
 * as appropriate.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res, *temp;
	struct ipr_config_table_entry_wrapper cfgtew;
	int entries, found, flag, i;
	LIST_HEAD(old_res);

	ENTER;
	if (ioa_cfg->sis64)
		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
	else
		flag = ioa_cfg->u.cfg_table->hdr.flags;

	if (flag & IPR_UCODE_DOWNLOAD_REQ)
		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");

	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
		list_move_tail(&res->queue, &old_res);

	if (ioa_cfg->sis64)
		entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
	else
		entries = ioa_cfg->u.cfg_table->hdr.num_entries;

	for (i = 0; i < entries; i++) {
		if (ioa_cfg->sis64)
			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
		else
			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
		found = 0;

		list_for_each_entry_safe(res, temp, &old_res, queue) {
			if (ipr_is_same_device(res, &cfgtew)) {
				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
				found = 1;
				break;
			}
		}

		if (!found) {
			if (list_empty(&ioa_cfg->free_res_q)) {
				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
				break;
			}

			found = 1;
			res = list_entry(ioa_cfg->free_res_q.next,
					 struct ipr_resource_entry, queue);
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
			ipr_init_res_entry(res, &cfgtew);
			res->add_to_ml = 1;
		}

		if (found)
			ipr_update_res_entry(res, &cfgtew);
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		}
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		ipr_clear_res_target(res);
		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	}

	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
	else
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
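
/*
 * Illustrative note (not part of the original source): the job_step
 * assignments above and in the surrounding functions chain the stages of
 * adapter initialization roughly as follows:
 *
 *	ipr_ioafp_identify_hrrq
 *	  -> ipr_ioafp_std_inquiry -> page 0 -> page 3 -> cap (page 0xD0)
 *	  -> ipr_ioafp_query_ioa_cfg -> ipr_init_res_table
 *	  -> mode sense/select page 0x24 (dual IOA RAID, if supported)
 *	  -> mode sense/select page 0x28 -> ipr_set_supported_devs
 *	  -> ipr_ioa_reset_done
 *
 * Each step either continues the job inline (IPR_RC_JOB_CONTINUE) or
 * returns and lets the command's completion drive the next step
 * (IPR_RC_JOB_RETURN).
 */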

/**
 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Query IOA Configuration command
 * to the adapter to retrieve the IOA configuration table.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
		ioa_cfg->dual_raid = 1;
	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
		 ucode_vpd->major_release, ucode_vpd->card_type,
		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;

	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
		       IPR_IOADL_FLAGS_READ_LAST);

	ipr_cmd->job_step = ipr_init_res_table;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 * @flags:	CDB byte 1 flags (EVPD bit when requesting a VPD page)
 * @page:	page code to request
 * @dma_addr:	DMA address of the inquiry buffer
 * @xfer_len:	size of the inquiry buffer
 *
 * This utility function sends an inquiry to the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
	LEAVE;
}

/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:	inquiry page 0 buffer
 * @page:	page code.
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
			return 1;

	return 0;
}

/**
 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0xD0 inquiry to the adapter
 * to retrieve adapter capabilities.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
	memset(cap, 0, sizeof(*cap));

	if (ipr_inquiry_page_supported(page0, 0xD0)) {
		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
				  sizeof(struct ipr_inquiry_cap));
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;

	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5];

	ENTER;

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
6909
6910/**
6911 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6912 * @ipr_cmd: ipr command struct
6913 *
6914 * This function sends a standard inquiry to the adapter.
6915 *
6916 * Return value:
6917 * IPR_RC_JOB_RETURN
6918 **/
6919static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6920{
6921 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6922
6923 ENTER;
6924 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6925
6926 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6927 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6928 sizeof(struct ipr_ioa_vpd));
6929
6930 LEAVE;
6931 return IPR_RC_JOB_RETURN;
6932}
6933
6934/**
6935 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
6936 * @ipr_cmd: ipr command struct
6937 *
6938 * This function sends an Identify Host Request Response Queue
6939 * command to establish the HRRQ with the adapter.
6940 *
6941 * Return value:
6942 * IPR_RC_JOB_RETURN
6943 **/
6944static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
6945{
6946 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6947 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6948
6949 ENTER;
6950 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6951
6952 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6953 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6954
6955 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
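	/* CDB bytes 2-5 carry the low 32 bits of the host RRQ DMA address
	 * (most-significant byte first) and bytes 7-8 the RRQ size; on
	 * SIS-64 adapters, bytes 10-13 below supply the upper 32 bits. */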
6956 if (ioa_cfg->sis64)
6957 ioarcb->cmd_pkt.cdb[1] = 0x1;
6958 ioarcb->cmd_pkt.cdb[2] =
6959 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6960 ioarcb->cmd_pkt.cdb[3] =
6961 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6962 ioarcb->cmd_pkt.cdb[4] =
6963 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6964 ioarcb->cmd_pkt.cdb[5] =
6965 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
6966 ioarcb->cmd_pkt.cdb[7] =
6967 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6968 ioarcb->cmd_pkt.cdb[8] =
6969 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6970
6971 if (ioa_cfg->sis64) {
6972 ioarcb->cmd_pkt.cdb[10] =
6973 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
6974 ioarcb->cmd_pkt.cdb[11] =
6975 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
6976 ioarcb->cmd_pkt.cdb[12] =
6977 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
6978 ioarcb->cmd_pkt.cdb[13] =
6979 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
6980 }
6981
6982 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6983
6984 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6985
6986 LEAVE;
6987 return IPR_RC_JOB_RETURN;
6988}
6989
6990/**
6991 * ipr_reset_timer_done - Adapter reset timer function
6992 * @ipr_cmd: ipr command struct
6993 *
6994 * Description: This function is used in adapter reset processing
6995 * for timing events. If the reset_cmd pointer in the IOA
6996 * config struct is not this adapter's we are doing nested
6997 * resets and fail_all_ops will take care of freeing the
6998 * command block.
6999 *
7000 * Return value:
7001 * none
7002 **/
7003static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7004{
7005 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7006 unsigned long lock_flags = 0;
7007
7008 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7009
7010 if (ioa_cfg->reset_cmd == ipr_cmd) {
7011 list_del(&ipr_cmd->queue);
7012 ipr_cmd->done(ipr_cmd);
7013 }
7014
7015 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7016}
7017
7018/**
7019 * ipr_reset_start_timer - Start a timer for adapter reset job
7020 * @ipr_cmd: ipr command struct
7021 * @timeout: timeout value
7022 *
7023 * Description: This function is used in adapter reset processing
7024 * for timing events. If the reset_cmd pointer in the IOA
7025 * config struct is not this adapter's we are doing nested
7026 * resets and fail_all_ops will take care of freeing the
7027 * command block.
7028 *
7029 * Return value:
7030 * none
7031 **/
7032static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7033 unsigned long timeout)
7034{
7035 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7036 ipr_cmd->done = ipr_reset_ioa_job;
7037
7038 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7039 ipr_cmd->timer.expires = jiffies + timeout;
7040 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7041 add_timer(&ipr_cmd->timer);
7042}
7043
7044/**
7045 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7046 * @ioa_cfg: ioa cfg struct
7047 *
7048 * Return value:
7049 * nothing
7050 **/
7051static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7052{
7053 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7054
7055 /* Initialize Host RRQ pointers */
7056 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7057 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7058 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7059 ioa_cfg->toggle_bit = 1;
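	/* The toggle bit flips each time the RRQ wraps; the interrupt
	 * handler compares it against each response entry to distinguish
	 * new responses from stale ones. */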
7060
7061 /* Zero out config table */
7062 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7063}
7064
7065/**
7066 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7067 * @ipr_cmd: ipr command struct
7068 *
7069 * Return value:
7070 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7071 **/
7072static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7073{
7074 unsigned long stage, stage_time;
7075 u32 feedback;
7076 volatile u32 int_reg;
7077 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7078 u64 maskval = 0;
7079
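	/* The init feedback register packs both the current IPL stage and
	 * the time budget for that stage into a single 32-bit value. */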
7080 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7081 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7082 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7083
7084 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7085
7086 /* sanity check the stage_time value */
7087 if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7088 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7089 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7090 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7091
7092 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7093 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7094 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7095 stage_time = ioa_cfg->transop_timeout;
7096 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7097 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7098 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
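		/* Mask the stage-change and trans-to-operational interrupts
		 * with a single 64-bit write before identifying the HRRQ. */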
7099 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7100 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7101 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7102 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7103 return IPR_RC_JOB_CONTINUE;
7104 }
7105
7106 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7107 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7108 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7109 ipr_cmd->done = ipr_reset_ioa_job;
7110 add_timer(&ipr_cmd->timer);
7111 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7112
7113 return IPR_RC_JOB_RETURN;
7114}
7115
7116/**
7117 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7118 * @ipr_cmd: ipr command struct
7119 *
7120 * This function reinitializes some control blocks and
7121 * enables destructive diagnostics on the adapter.
7122 *
7123 * Return value:
7124 * IPR_RC_JOB_RETURN
7125 **/
7126static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7127{
7128 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7129 volatile u32 int_reg;
7130
7131 ENTER;
7132 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7133 ipr_init_ioa_mem(ioa_cfg);
7134
7135 ioa_cfg->allow_interrupts = 1;
7136 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7137
7138 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7139 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7140 ioa_cfg->regs.clr_interrupt_mask_reg32);
7141 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7142 return IPR_RC_JOB_CONTINUE;
7143 }
7144
7145 /* Enable destructive diagnostics on IOA */
7146 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7147
7148 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7149 if (ioa_cfg->sis64)
7150 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg);
7151
7152 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7153
7154 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7155
7156 if (ioa_cfg->sis64) {
7157 ipr_cmd->job_step = ipr_reset_next_stage;
7158 return IPR_RC_JOB_CONTINUE;
7159 }
7160
7161 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7162 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7163 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7164 ipr_cmd->done = ipr_reset_ioa_job;
7165 add_timer(&ipr_cmd->timer);
7166 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7167
7168 LEAVE;
7169 return IPR_RC_JOB_RETURN;
7170}
7171
7172/**
7173 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7174 * @ipr_cmd: ipr command struct
7175 *
7176 * This function is invoked when an adapter dump has run out
7177 * of processing time.
7178 *
7179 * Return value:
7180 * IPR_RC_JOB_CONTINUE
7181 **/
7182static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7183{
7184 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7185
7186 if (ioa_cfg->sdt_state == GET_DUMP)
7187 ioa_cfg->sdt_state = ABORT_DUMP;
7188
7189 ipr_cmd->job_step = ipr_reset_alert;
7190
7191 return IPR_RC_JOB_CONTINUE;
7192}
7193
7194/**
7195 * ipr_unit_check_no_data - Log a unit check/no data error log
7196 * @ioa_cfg: ioa config struct
7197 *
7198 * Logs an error indicating the adapter unit checked, but for some
7199 * reason, we were unable to fetch the unit check buffer.
7200 *
7201 * Return value:
7202 * nothing
7203 **/
7204static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7205{
7206 ioa_cfg->errors_logged++;
7207 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7208}
7209
7210/**
7211 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7212 * @ioa_cfg: ioa config struct
7213 *
7214 * Fetches the unit check buffer from the adapter by clocking the data
7215 * through the mailbox register.
7216 *
7217 * Return value:
7218 * nothing
7219 **/
7220static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7221{
7222 unsigned long mailbox;
7223 struct ipr_hostrcb *hostrcb;
7224 struct ipr_uc_sdt sdt;
7225 int rc, length;
7226 u32 ioasc;
7227
7228 mailbox = readl(ioa_cfg->ioa_mailbox);
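	/* The mailbox register holds the adapter address of the smart dump
	 * table (SDT) whose first entry describes the unit check buffer. */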
7229
7230 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7231 ipr_unit_check_no_data(ioa_cfg);
7232 return;
7233 }
7234
7235 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7236 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7237 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7238
7239 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7240 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7241 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7242 ipr_unit_check_no_data(ioa_cfg);
7243 return;
7244 }
7245
7246 /* Find length of the first sdt entry (UC buffer) */
7247 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7248 length = be32_to_cpu(sdt.entry[0].end_token);
7249 else
7250 length = (be32_to_cpu(sdt.entry[0].end_token) -
7251 be32_to_cpu(sdt.entry[0].start_token)) &
7252 IPR_FMT2_MBX_ADDR_MASK;
7253
7254 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7255 struct ipr_hostrcb, queue);
7256 list_del(&hostrcb->queue);
7257 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7258
7259 rc = ipr_get_ldump_data_section(ioa_cfg,
7260 be32_to_cpu(sdt.entry[0].start_token),
7261 (__be32 *)&hostrcb->hcam,
7262 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7263
7264 if (!rc) {
7265 ipr_handle_log_data(ioa_cfg, hostrcb);
7266 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7267 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7268 ioa_cfg->sdt_state == GET_DUMP)
7269 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7270 } else
7271 ipr_unit_check_no_data(ioa_cfg);
7272
7273 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7274}
7275
7276/**
7277 * ipr_reset_restore_cfg_space - Restore PCI config space.
7278 * @ipr_cmd: ipr command struct
7279 *
7280 * Description: This function restores the saved PCI config space of
7281 * the adapter, fails all outstanding ops back to the callers, and
7282 * fetches the dump/unit check if applicable to this reset.
7283 *
7284 * Return value:
7285 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7286 **/
7287static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7288{
7289 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7290 int rc;
7291
7292 ENTER;
7293 ioa_cfg->pdev->state_saved = true;
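	/* pci_restore_state() only restores config space if state_saved is
	 * set; flag it explicitly since the state saved at probe time may
	 * have been consumed by an earlier restore. */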
7294 rc = pci_restore_state(ioa_cfg->pdev);
7295
7296 if (rc != PCIBIOS_SUCCESSFUL) {
7297 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7298 return IPR_RC_JOB_CONTINUE;
7299 }
7300
7301 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7302 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7303 return IPR_RC_JOB_CONTINUE;
7304 }
7305
7306 ipr_fail_all_ops(ioa_cfg);
7307
7308 if (ioa_cfg->ioa_unit_checked) {
7309 ioa_cfg->ioa_unit_checked = 0;
7310 ipr_get_unit_check_buffer(ioa_cfg);
7311 ipr_cmd->job_step = ipr_reset_alert;
7312 ipr_reset_start_timer(ipr_cmd, 0);
7313 return IPR_RC_JOB_RETURN;
7314 }
7315
7316 if (ioa_cfg->in_ioa_bringdown) {
7317 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7318 } else {
7319 ipr_cmd->job_step = ipr_reset_enable_ioa;
7320
7321 if (GET_DUMP == ioa_cfg->sdt_state) {
7322 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7323 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7324 schedule_work(&ioa_cfg->work_q);
7325 return IPR_RC_JOB_RETURN;
7326 }
7327 }
7328
7329 LEAVE;
7330 return IPR_RC_JOB_CONTINUE;
7331}
7332
7333/**
7334 * ipr_reset_bist_done - BIST has completed on the adapter.
7335 * @ipr_cmd: ipr command struct
7336 *
7337 * Description: Unblock config space and resume the reset process.
7338 *
7339 * Return value:
7340 * IPR_RC_JOB_CONTINUE
7341 **/
7342static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7343{
7344 ENTER;
7345 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7346 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7347 LEAVE;
7348 return IPR_RC_JOB_CONTINUE;
7349}
7350
7351/**
7352 * ipr_reset_start_bist - Run BIST on the adapter.
7353 * @ipr_cmd: ipr command struct
7354 *
7355 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7356 *
7357 * Return value:
7358 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7359 **/
7360static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7361{
7362 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7363 int rc;
7364
7365 ENTER;
7366 pci_block_user_cfg_access(ioa_cfg->pdev);
7367 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7368
7369 if (rc != PCIBIOS_SUCCESSFUL) {
7370 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7371 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7372 rc = IPR_RC_JOB_CONTINUE;
7373 } else {
7374 ipr_cmd->job_step = ipr_reset_bist_done;
7375 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7376 rc = IPR_RC_JOB_RETURN;
7377 }
7378
7379 LEAVE;
7380 return rc;
7381}
7382
7383/**
7384 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7385 * @ipr_cmd: ipr command struct
7386 *
7387 * Description: This clears PCI reset to the adapter and delays two seconds.
7388 *
7389 * Return value:
7390 * IPR_RC_JOB_RETURN
7391 **/
7392static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7393{
7394 ENTER;
7395 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7396 ipr_cmd->job_step = ipr_reset_bist_done;
7397 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7398 LEAVE;
7399 return IPR_RC_JOB_RETURN;
7400}
7401
7402/**
7403 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7404 * @ipr_cmd: ipr command struct
7405 *
7406 * Description: This asserts PCI reset to the adapter.
7407 *
7408 * Return value:
7409 * IPR_RC_JOB_RETURN
7410 **/
7411static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7412{
7413 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7414 struct pci_dev *pdev = ioa_cfg->pdev;
7415
7416 ENTER;
7417 pci_block_user_cfg_access(pdev);
7418 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7419 ipr_cmd->job_step = ipr_reset_slot_reset_done;
7420 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7421 LEAVE;
7422 return IPR_RC_JOB_RETURN;
7423}
7424
7425/**
7426 * ipr_reset_allowed - Query whether or not IOA can be reset
7427 * @ioa_cfg: ioa config struct
7428 *
7429 * Return value:
7430 * 0 if reset not allowed / non-zero if reset is allowed
7431 **/
7432static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7433{
7434 volatile u32 temp_reg;
7435
7436 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7437 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7438}
7439
7440/**
7441 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7442 * @ipr_cmd: ipr command struct
7443 *
7444 * Description: This function waits for adapter permission to run BIST,
7445 * then runs BIST. If the adapter does not give permission after a
7446 * reasonable time, we will reset the adapter anyway. Resetting the
7447 * adapter without warning it carries the risk of losing the
7448 * adapter's persistent error log: if the adapter is reset while it
7449 * is writing to its flash, the affected flash segment will have bad
7450 * ECC and be zeroed.
7451 *
7452 * Return value:
7453 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7454 **/
7455static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7456{
7457 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7458 int rc = IPR_RC_JOB_RETURN;
7459
7460 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7461 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7462 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7463 } else {
7464 ipr_cmd->job_step = ioa_cfg->reset;
7465 rc = IPR_RC_JOB_CONTINUE;
7466 }
7467
7468 return rc;
7469}
7470
7471/**
7472 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
7473 * @ipr_cmd: ipr command struct
7474 *
7475 * Description: This function alerts the adapter that it will be reset.
7476 * If memory space is not currently enabled, proceed directly
7477 * to running BIST on the adapter. The timer must always be started
7478 * so we guarantee we do not run BIST from ipr_isr.
7479 *
7480 * Return value:
7481 * IPR_RC_JOB_RETURN
7482 **/
7483static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7484{
7485 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7486 u16 cmd_reg;
7487 int rc;
7488
7489 ENTER;
7490 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7491
7492 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7493 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7494 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7495 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7496 } else {
7497 ipr_cmd->job_step = ioa_cfg->reset;
7498 }
7499
7500 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7501 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7502
7503 LEAVE;
7504 return IPR_RC_JOB_RETURN;
7505}
7506
7507/**
7508 * ipr_reset_ucode_download_done - Microcode download completion
7509 * @ipr_cmd: ipr command struct
7510 *
7511 * Description: This function unmaps the microcode download buffer.
7512 *
7513 * Return value:
7514 * IPR_RC_JOB_CONTINUE
7515 **/
7516static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7517{
7518 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7519 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7520
7521 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7522 sglist->num_sg, DMA_TO_DEVICE);
7523
7524 ipr_cmd->job_step = ipr_reset_alert;
7525 return IPR_RC_JOB_CONTINUE;
7526}
7527
7528/**
7529 * ipr_reset_ucode_download - Download microcode to the adapter
7530 * @ipr_cmd: ipr command struct
7531 *
7532 * Description: This function checks to see if there is microcode
7533 * to download to the adapter. If there is, a download is performed.
7534 *
7535 * Return value:
7536 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7537 **/
7538static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7539{
7540 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7541 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7542
7543 ENTER;
7544 ipr_cmd->job_step = ipr_reset_alert;
7545
7546 if (!sglist)
7547 return IPR_RC_JOB_CONTINUE;
7548
7549 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7550 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7551 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7552 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7553 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7554 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7555 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7556
7557 if (ioa_cfg->sis64)
7558 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7559 else
7560 ipr_build_ucode_ioadl(ipr_cmd, sglist);
7561 ipr_cmd->job_step = ipr_reset_ucode_download_done;
7562
7563 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7564 IPR_WRITE_BUFFER_TIMEOUT);
7565
7566 LEAVE;
7567 return IPR_RC_JOB_RETURN;
7568}
7569
7570/**
7571 * ipr_reset_shutdown_ioa - Shutdown the adapter
7572 * @ipr_cmd: ipr command struct
7573 *
7574 * Description: This function issues an adapter shutdown of the
7575 * specified type to the specified adapter as part of the
7576 * adapter reset job.
7577 *
7578 * Return value:
7579 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7580 **/
7581static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7582{
7583 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7584 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7585 unsigned long timeout;
7586 int rc = IPR_RC_JOB_CONTINUE;
7587
7588 ENTER;
7589 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7590 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7591 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7592 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7593 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7594
7595 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7596 timeout = IPR_SHUTDOWN_TIMEOUT;
7597 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7598 timeout = IPR_INTERNAL_TIMEOUT;
7599 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7600 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7601 else
7602 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7603
7604 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7605
7606 rc = IPR_RC_JOB_RETURN;
7607 ipr_cmd->job_step = ipr_reset_ucode_download;
7608 } else
7609 ipr_cmd->job_step = ipr_reset_alert;
7610
7611 LEAVE;
7612 return rc;
7613}
7614
7615/**
7616 * ipr_reset_ioa_job - Adapter reset job
7617 * @ipr_cmd: ipr command struct
7618 *
7619 * Description: This function is the job router for the adapter reset job.
7620 *
7621 * Return value:
7622 * none
7623 **/
7624static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7625{
7626 u32 rc, ioasc;
7627 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7628
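	/* Run job steps inline for as long as they return
	 * IPR_RC_JOB_CONTINUE; a step that must wait for an asynchronous
	 * completion returns IPR_RC_JOB_RETURN and re-enters this routine
	 * from its done handler. */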
7629 do {
7630 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
7631
7632 if (ioa_cfg->reset_cmd != ipr_cmd) {
7633 /*
7634 * We are doing nested adapter resets and this is
7635 * not the current reset job.
7636 */
7637 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7638 return;
7639 }
7640
7641 if (IPR_IOASC_SENSE_KEY(ioasc)) {
7642 rc = ipr_cmd->job_step_failed(ipr_cmd);
7643 if (rc == IPR_RC_JOB_RETURN)
7644 return;
7645 }
7646
7647 ipr_reinit_ipr_cmnd(ipr_cmd);
7648 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
7649 rc = ipr_cmd->job_step(ipr_cmd);
7650 } while (rc == IPR_RC_JOB_CONTINUE);
7651}
7652
7653/**
7654 * _ipr_initiate_ioa_reset - Initiate an adapter reset
7655 * @ioa_cfg: ioa config struct
7656 * @job_step: first job step of reset job
7657 * @shutdown_type: shutdown type
7658 *
7659 * Description: This function will initiate the reset of the given adapter
7660 * starting at the selected job step.
7661 * If the caller needs to wait on the completion of the reset,
7662 * the caller must sleep on the reset_wait_q.
7663 *
7664 * Return value:
7665 * none
7666 **/
7667static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7668 int (*job_step) (struct ipr_cmnd *),
7669 enum ipr_shutdown_type shutdown_type)
7670{
7671 struct ipr_cmnd *ipr_cmd;
7672
7673 ioa_cfg->in_reset_reload = 1;
7674 ioa_cfg->allow_cmds = 0;
7675 scsi_block_requests(ioa_cfg->host);
7676
7677 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7678 ioa_cfg->reset_cmd = ipr_cmd;
7679 ipr_cmd->job_step = job_step;
7680 ipr_cmd->u.shutdown_type = shutdown_type;
7681
7682 ipr_reset_ioa_job(ipr_cmd);
7683}
7684
7685/**
7686 * ipr_initiate_ioa_reset - Initiate an adapter reset
7687 * @ioa_cfg: ioa config struct
7688 * @shutdown_type: shutdown type
7689 *
7690 * Description: This function will initiate the reset of the given adapter.
7691 * If the caller needs to wait on the completion of the reset,
7692 * the caller must sleep on the reset_wait_q.
7693 *
7694 * Return value:
7695 * none
7696 **/
7697static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7698 enum ipr_shutdown_type shutdown_type)
7699{
7700 if (ioa_cfg->ioa_is_dead)
7701 return;
7702
7703 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7704 ioa_cfg->sdt_state = ABORT_DUMP;
7705
7706 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7707 dev_err(&ioa_cfg->pdev->dev,
7708 "IOA taken offline - error recovery failed\n");
7709
7710 ioa_cfg->reset_retries = 0;
7711 ioa_cfg->ioa_is_dead = 1;
7712
7713 if (ioa_cfg->in_ioa_bringdown) {
7714 ioa_cfg->reset_cmd = NULL;
7715 ioa_cfg->in_reset_reload = 0;
7716 ipr_fail_all_ops(ioa_cfg);
7717 wake_up_all(&ioa_cfg->reset_wait_q);
7718
7719 spin_unlock_irq(ioa_cfg->host->host_lock);
7720 scsi_unblock_requests(ioa_cfg->host);
7721 spin_lock_irq(ioa_cfg->host->host_lock);
7722 return;
7723 } else {
7724 ioa_cfg->in_ioa_bringdown = 1;
7725 shutdown_type = IPR_SHUTDOWN_NONE;
7726 }
7727 }
7728
7729 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7730 shutdown_type);
7731}
7732
7733/**
7734 * ipr_reset_freeze - Hold off all I/O activity
7735 * @ipr_cmd: ipr command struct
7736 *
7737 * Description: If the PCI slot is frozen, hold off all I/O
7738 * activity; then, as soon as the slot is available again,
7739 * initiate an adapter reset.
7740 */
7741static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7742{
7743 /* Disallow new interrupts, avoid loop */
7744 ipr_cmd->ioa_cfg->allow_interrupts = 0;
7745 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7746 ipr_cmd->done = ipr_reset_ioa_job;
7747 return IPR_RC_JOB_RETURN;
7748}
7749
7750/**
7751 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7752 * @pdev: PCI device struct
7753 *
7754 * Description: This routine is called to tell us that the PCI bus
7755 * is down. Can't do anything here, except put the device driver
7756 * into a holding pattern, waiting for the PCI bus to come back.
7757 */
7758static void ipr_pci_frozen(struct pci_dev *pdev)
7759{
7760 unsigned long flags = 0;
7761 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7762
7763 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7764 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7765 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7766}
7767
7768/**
7769 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7770 * @pdev: PCI device struct
7771 *
7772 * Description: This routine is called by the pci error recovery
7773 * code after the PCI slot has been reset, just before we
7774 * should resume normal operations.
7775 */
7776static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7777{
7778 unsigned long flags = 0;
7779 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7780
7781 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7782 if (ioa_cfg->needs_warm_reset)
7783 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7784 else
7785 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7786 IPR_SHUTDOWN_NONE);
7787 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7788 return PCI_ERS_RESULT_RECOVERED;
7789}
7790
7791/**
7792 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7793 * @pdev: PCI device struct
7794 *
7795 * Description: This routine is called when the PCI bus has
7796 * permanently failed.
7797 */
7798static void ipr_pci_perm_failure(struct pci_dev *pdev)
7799{
7800 unsigned long flags = 0;
7801 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7802
7803 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7804 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7805 ioa_cfg->sdt_state = ABORT_DUMP;
7806 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7807 ioa_cfg->in_ioa_bringdown = 1;
7808 ioa_cfg->allow_cmds = 0;
7809 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7810 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7811}
7812
7813/**
7814 * ipr_pci_error_detected - Called when a PCI error is detected.
7815 * @pdev: PCI device struct
7816 * @state: PCI channel state
7817 *
7818 * Description: Called when a PCI error is detected.
7819 *
7820 * Return value:
7821 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7822 */
7823static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7824 pci_channel_state_t state)
7825{
7826 switch (state) {
7827 case pci_channel_io_frozen:
7828 ipr_pci_frozen(pdev);
7829 return PCI_ERS_RESULT_NEED_RESET;
7830 case pci_channel_io_perm_failure:
7831 ipr_pci_perm_failure(pdev);
7832 return PCI_ERS_RESULT_DISCONNECT;
7834 default:
7835 break;
7836 }
7837 return PCI_ERS_RESULT_NEED_RESET;
7838}
7839
7840/**
7841 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7842 * @ioa_cfg: ioa cfg struct
7843 *
7844 * Description: This is the second phase of adapter initialization.
7845 * This function takes care of initializing the adapter to the point
7846 * where it can accept new commands.
7847 *
7848 * Return value:
7849 * 0 on success / -EIO on failure
7850 **/
7851static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7852{
7853 int rc = 0;
7854 unsigned long host_lock_flags = 0;
7855
7856 ENTER;
7857 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7858 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
7859 if (ioa_cfg->needs_hard_reset) {
7860 ioa_cfg->needs_hard_reset = 0;
7861 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7862 } else
7863 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7864 IPR_SHUTDOWN_NONE);
7865
7866 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7867 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7868 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7869
7870 if (ioa_cfg->ioa_is_dead) {
7871 rc = -EIO;
7872 } else if (ipr_invalid_adapter(ioa_cfg)) {
7873 if (!ipr_testmode)
7874 rc = -EIO;
7875
7876 dev_err(&ioa_cfg->pdev->dev,
7877 "Adapter not supported in this hardware configuration.\n");
7878 }
7879
7880 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7881
7882 LEAVE;
7883 return rc;
7884}
7885
7886/**
7887 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7888 * @ioa_cfg: ioa config struct
7889 *
7890 * Return value:
7891 * none
7892 **/
7893static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7894{
7895 int i;
7896
7897 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7898 if (ioa_cfg->ipr_cmnd_list[i])
7899 pci_pool_free(ioa_cfg->ipr_cmd_pool,
7900 ioa_cfg->ipr_cmnd_list[i],
7901 ioa_cfg->ipr_cmnd_list_dma[i]);
7902
7903 ioa_cfg->ipr_cmnd_list[i] = NULL;
7904 }
7905
7906 if (ioa_cfg->ipr_cmd_pool)
7907 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
7908
7909 ioa_cfg->ipr_cmd_pool = NULL;
7910}
7911
7912/**
7913 * ipr_free_mem - Frees memory allocated for an adapter
7914 * @ioa_cfg: ioa cfg struct
7915 *
7916 * Return value:
7917 * nothing
7918 **/
7919static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7920{
7921 int i;
7922
7923 kfree(ioa_cfg->res_entries);
7924 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7925 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7926 ipr_free_cmd_blks(ioa_cfg);
7927 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7928 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7929 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
7930 ioa_cfg->u.cfg_table,
7931 ioa_cfg->cfg_table_dma);
7932
7933 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7934 pci_free_consistent(ioa_cfg->pdev,
7935 sizeof(struct ipr_hostrcb),
7936 ioa_cfg->hostrcb[i],
7937 ioa_cfg->hostrcb_dma[i]);
7938 }
7939
7940 ipr_free_dump(ioa_cfg);
7941 kfree(ioa_cfg->trace);
7942}
7943
7944/**
7945 * ipr_free_all_resources - Free all allocated resources for an adapter.
7946 * @ipr_cmd: ipr command struct
7947 *
7948 * This function frees all allocated resources for the
7949 * specified adapter.
7950 *
7951 * Return value:
7952 * none
7953 **/
7954static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
7955{
7956 struct pci_dev *pdev = ioa_cfg->pdev;
7957
7958 ENTER;
7959 free_irq(pdev->irq, ioa_cfg);
7960 pci_disable_msi(pdev);
7961 iounmap(ioa_cfg->hdw_dma_regs);
7962 pci_release_regions(pdev);
7963 ipr_free_mem(ioa_cfg);
7964 scsi_host_put(ioa_cfg->host);
7965 pci_disable_device(pdev);
7966 LEAVE;
7967}
7968
7969/**
7970 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
7971 * @ioa_cfg: ioa config struct
7972 *
7973 * Return value:
7974 * 0 on success / -ENOMEM on allocation failure
7975 **/
7976static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7977{
7978 struct ipr_cmnd *ipr_cmd;
7979 struct ipr_ioarcb *ioarcb;
7980 dma_addr_t dma_addr;
7981 int i;
7982
7983 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
7984 sizeof(struct ipr_cmnd), 16, 0);
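	/* Command blocks come from a PCI DMA pool with 16-byte alignment so
	 * each block's bus address can be handed directly to the adapter
	 * as the IOARCB address. */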
7985
7986 if (!ioa_cfg->ipr_cmd_pool)
7987 return -ENOMEM;
7988
7989 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7990 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
7991
7992 if (!ipr_cmd) {
7993 ipr_free_cmd_blks(ioa_cfg);
7994 return -ENOMEM;
7995 }
7996
7997 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
7998 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
7999 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8000
8001 ioarcb = &ipr_cmd->ioarcb;
8002 ipr_cmd->dma_addr = dma_addr;
8003 if (ioa_cfg->sis64)
8004 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8005 else
8006 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8007
8008 ioarcb->host_response_handle = cpu_to_be32(i << 2);
8009 if (ioa_cfg->sis64) {
8010 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8011 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8012 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8013 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8014 } else {
8015 ioarcb->write_ioadl_addr =
8016 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8017 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8018 ioarcb->ioasa_host_pci_addr =
8019 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8020 }
8021 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8022 ipr_cmd->cmd_index = i;
8023 ipr_cmd->ioa_cfg = ioa_cfg;
8024 ipr_cmd->sense_buffer_dma = dma_addr +
8025 offsetof(struct ipr_cmnd, sense_buffer);
8026
8027 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8028 }
8029
8030 return 0;
8031}
8032
8033/**
8034 * ipr_alloc_mem - Allocate memory for an adapter
8035 * @ioa_cfg: ioa config struct
8036 *
8037 * Return value:
8038 * 0 on success / non-zero for error
8039 **/
8040static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8041{
8042 struct pci_dev *pdev = ioa_cfg->pdev;
8043 int i, rc = -ENOMEM;
8044
8045 ENTER;
8046 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8047 ioa_cfg->max_devs_supported, GFP_KERNEL);
8048
8049 if (!ioa_cfg->res_entries)
8050 goto out;
8051
8052 if (ioa_cfg->sis64) {
8053 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8054 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8055 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8056 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8057 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8058 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8059 }
8060
8061 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8062 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8063 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8064 }
8065
8066 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8067 sizeof(struct ipr_misc_cbs),
8068 &ioa_cfg->vpd_cbs_dma);
8069
8070 if (!ioa_cfg->vpd_cbs)
8071 goto out_free_res_entries;
8072
8073 if (ipr_alloc_cmd_blks(ioa_cfg))
8074 goto out_free_vpd_cbs;
8075
8076 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8077 sizeof(u32) * IPR_NUM_CMD_BLKS,
8078 &ioa_cfg->host_rrq_dma);
8079
8080 if (!ioa_cfg->host_rrq)
8081 goto out_ipr_free_cmd_blocks;
8082
8083 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8084 ioa_cfg->cfg_table_size,
8085 &ioa_cfg->cfg_table_dma);
8086
8087 if (!ioa_cfg->u.cfg_table)
8088 goto out_free_host_rrq;
8089
8090 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8091 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8092 sizeof(struct ipr_hostrcb),
8093 &ioa_cfg->hostrcb_dma[i]);
8094
8095 if (!ioa_cfg->hostrcb[i])
8096 goto out_free_hostrcb_dma;
8097
8098 ioa_cfg->hostrcb[i]->hostrcb_dma =
8099 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8100 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8101 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8102 }
8103
8104 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8105 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8106
8107 if (!ioa_cfg->trace)
8108 goto out_free_hostrcb_dma;
8109
8110 rc = 0;
8111out:
8112 LEAVE;
8113 return rc;
8114
8115out_free_hostrcb_dma:
8116 while (i-- > 0) {
8117 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8118 ioa_cfg->hostrcb[i],
8119 ioa_cfg->hostrcb_dma[i]);
8120 }
8121 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8122 ioa_cfg->u.cfg_table,
8123 ioa_cfg->cfg_table_dma);
8124out_free_host_rrq:
8125 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8126 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8127out_ipr_free_cmd_blocks:
8128 ipr_free_cmd_blks(ioa_cfg);
8129out_free_vpd_cbs:
8130 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8131 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8132out_free_res_entries:
8133 kfree(ioa_cfg->res_entries);
8134 goto out;
8135}
8136
8137/**
8138 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8139 * @ioa_cfg: ioa config struct
8140 *
8141 * Return value:
8142 * none
8143 **/
8144static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8145{
8146 int i;
8147
8148 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8149 ioa_cfg->bus_attr[i].bus = i;
8150 ioa_cfg->bus_attr[i].qas_enabled = 0;
8151 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8152 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8153 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8154 else
8155 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8156 }
8157}
8158
8159/**
8160 * ipr_init_ioa_cfg - Initialize IOA config struct
8161 * @ioa_cfg: ioa config struct
8162 * @host: scsi host struct
8163 * @pdev: PCI dev struct
8164 *
8165 * Return value:
8166 * none
8167 **/
8168static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8169 struct Scsi_Host *host, struct pci_dev *pdev)
8170{
8171 const struct ipr_interrupt_offsets *p;
8172 struct ipr_interrupts *t;
8173 void __iomem *base;
8174
8175 ioa_cfg->host = host;
8176 ioa_cfg->pdev = pdev;
8177 ioa_cfg->log_level = ipr_log_level;
8178 ioa_cfg->doorbell = IPR_DOORBELL;
8179 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8180 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8181 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8182 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8183 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8184 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8185 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8186 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8187
8188 INIT_LIST_HEAD(&ioa_cfg->free_q);
8189 INIT_LIST_HEAD(&ioa_cfg->pending_q);
8190 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8191 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8192 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8193 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8194 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8195 init_waitqueue_head(&ioa_cfg->reset_wait_q);
8196 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8197 ioa_cfg->sdt_state = INACTIVE;
8198
8199 ipr_initialize_bus_attr(ioa_cfg);
8200 ioa_cfg->max_devs_supported = ipr_max_devs;
8201
8202 if (ioa_cfg->sis64) {
8203 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8204 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8205 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8206 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8207 } else {
8208 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8209 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8210 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8211 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8212 }
8213 host->max_channel = IPR_MAX_BUS_TO_SCAN;
8214 host->unique_id = host->host_no;
8215 host->max_cmd_len = IPR_MAX_CDB_LEN;
8216 pci_set_drvdata(pdev, ioa_cfg);
8217
8218 p = &ioa_cfg->chip_cfg->regs;
8219 t = &ioa_cfg->regs;
8220 base = ioa_cfg->hdw_dma_regs;
8221
8222 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8223 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8224 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
8225 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8226 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
8227 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8228 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
8229 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8230 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
8231 t->ioarrin_reg = base + p->ioarrin_reg;
8232 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8233 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
8234 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8235 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
8236 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8237 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8238
8239 if (ioa_cfg->sis64) {
8240 t->init_feedback_reg = base + p->init_feedback_reg;
8241 t->dump_addr_reg = base + p->dump_addr_reg;
8242 t->dump_data_reg = base + p->dump_data_reg;
8243 }
8244}
8245
8246/**
8247 * ipr_get_chip_info - Find adapter chip information
8248 * @dev_id: PCI device id struct
8249 *
8250 * Return value:
8251 * ptr to chip information on success / NULL on failure
8252 **/
8253static const struct ipr_chip_t * __devinit
8254ipr_get_chip_info(const struct pci_device_id *dev_id)
8255{
8256 int i;
8257
8258 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8259 if (ipr_chip[i].vendor == dev_id->vendor &&
8260 ipr_chip[i].device == dev_id->device)
8261 return &ipr_chip[i];
8262 return NULL;
8263}
8264
8265/**
8266 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8267 * @irq: interrupt number, @devp: pointer to the ioa config struct
8268 *
8269 * Description: Simply set the msi_received flag to 1 indicating that
8270 * Message Signaled Interrupts are supported.
8271 *
8272 * Return value:
8273 * IRQ_HANDLED
8274 **/
8275static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8276{
8277 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8278 unsigned long lock_flags = 0;
8279 irqreturn_t rc = IRQ_HANDLED;
8280
8281 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8282
8283 ioa_cfg->msi_received = 1;
8284 wake_up(&ioa_cfg->msi_wait_q);
8285
8286 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8287 return rc;
8288}
8289
8290/**
8291 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8292 * @ioa_cfg: ioa config struct, @pdev: PCI device struct
8293 *
8294 * Description: The return value from pci_enable_msi() cannot always be
8295 * trusted. This routine sets up and initiates a test interrupt to determine
8296 * if the interrupt is received via the ipr_test_intr() service routine.
8297 * If the test fails, the driver will fall back to LSI.
8298 *
8299 * Return value:
8300 * 0 on success / non-zero on failure
8301 **/
8302static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8303 struct pci_dev *pdev)
8304{
8305 int rc;
8306 volatile u32 int_reg;
8307 unsigned long lock_flags = 0;
8308
8309 ENTER;
8310
8311 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8312 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8313 ioa_cfg->msi_received = 0;
8314 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8315 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
8316 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8317 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8318
8319 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8320 if (rc) {
8321 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8322 return rc;
8323 } else if (ipr_debug)
8324 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
8325
8326 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
8327 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8328 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8329 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8330
8331 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8332 if (!ioa_cfg->msi_received) {
8333 /* MSI test failed */
8334 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
8335 rc = -EOPNOTSUPP;
8336 } else if (ipr_debug)
8337 dev_info(&pdev->dev, "MSI test succeeded.\n");
8338
8339 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8340
8341 free_irq(pdev->irq, ioa_cfg);
8342
8343 LEAVE;
8344
8345 return rc;
8346}
8347
8348/**
8349 * ipr_probe_ioa - Allocates memory and does first stage of initialization
8350 * @pdev: PCI device struct
8351 * @dev_id: PCI device id struct
8352 *
8353 * Return value:
8354 * 0 on success / non-zero on failure
8355 **/
8356static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8357 const struct pci_device_id *dev_id)
8358{
8359 struct ipr_ioa_cfg *ioa_cfg;
8360 struct Scsi_Host *host;
8361 unsigned long ipr_regs_pci;
8362 void __iomem *ipr_regs;
8363 int rc = PCIBIOS_SUCCESSFUL;
8364 volatile u32 mask, uproc, interrupts;
8365
8366 ENTER;
8367
8368 if ((rc = pci_enable_device(pdev))) {
8369 dev_err(&pdev->dev, "Cannot enable adapter\n");
8370 goto out;
8371 }
8372
8373 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8374
8375 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8376
8377 if (!host) {
8378 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8379 rc = -ENOMEM;
8380 goto out_disable;
8381 }
8382
8383 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8384 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8385 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8386 sata_port_info.flags, &ipr_sata_ops);
8387
8388 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8389
8390 if (!ioa_cfg->ipr_chip) {
8391 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8392 dev_id->vendor, dev_id->device);
8393 goto out_scsi_host_put;
8394 }
8395
8396 /* set SIS 32 or SIS 64 */
8397 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8398 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8399
8400 if (ipr_transop_timeout)
8401 ioa_cfg->transop_timeout = ipr_transop_timeout;
8402 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8403 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8404 else
8405 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8406
8407 ioa_cfg->revid = pdev->revision;
8408
8409 ipr_regs_pci = pci_resource_start(pdev, 0);
8410
8411 rc = pci_request_regions(pdev, IPR_NAME);
8412 if (rc < 0) {
8413 dev_err(&pdev->dev,
8414 "Couldn't register memory range of registers\n");
8415 goto out_scsi_host_put;
8416 }
8417
8418 ipr_regs = pci_ioremap_bar(pdev, 0);
8419
8420 if (!ipr_regs) {
8421 dev_err(&pdev->dev,
8422 "Couldn't map memory range of registers\n");
8423 rc = -ENOMEM;
8424 goto out_release_regions;
8425 }
8426
8427 ioa_cfg->hdw_dma_regs = ipr_regs;
8428 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8429 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8430
8431 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8432
8433 pci_set_master(pdev);
8434
8435 if (ioa_cfg->sis64) {
8436 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8437 if (rc < 0) {
8438 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8439 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8440 }
8441
8442 } else
8443 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8444
8445 if (rc < 0) {
8446 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8447 goto cleanup_nomem;
8448 }
8449
8450 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8451 ioa_cfg->chip_cfg->cache_line_size);
8452
8453 if (rc != PCIBIOS_SUCCESSFUL) {
8454 dev_err(&pdev->dev, "Write of cache line size failed\n");
8455 rc = -EIO;
8456 goto cleanup_nomem;
8457 }
8458
8459 /* Enable MSI style interrupts if they are supported. */
8460 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
8461 rc = ipr_test_msi(ioa_cfg, pdev);
8462 if (rc == -EOPNOTSUPP)
8463 pci_disable_msi(pdev);
8464 else if (rc)
8465 goto out_msi_disable;
8466 else
8467 dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8468 } else if (ipr_debug)
8469 dev_info(&pdev->dev, "Cannot enable MSI.\n");
8470
8471 /* Save away PCI config space for use following IOA reset */
8472 rc = pci_save_state(pdev);
8473
8474 if (rc != PCIBIOS_SUCCESSFUL) {
8475 dev_err(&pdev->dev, "Failed to save PCI config space\n");
8476 rc = -EIO;
8477 goto cleanup_nomem;
8478 }
8479
8480 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8481 goto cleanup_nomem;
8482
8483 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8484 goto cleanup_nomem;
8485
8486 if (ioa_cfg->sis64)
8487 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8488 + ((sizeof(struct ipr_config_table_entry64)
8489 * ioa_cfg->max_devs_supported)));
8490 else
8491 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8492 + ((sizeof(struct ipr_config_table_entry)
8493 * ioa_cfg->max_devs_supported)));
8494
8495 rc = ipr_alloc_mem(ioa_cfg);
8496 if (rc < 0) {
8497 dev_err(&pdev->dev,
8498 "Couldn't allocate enough memory for device driver!\n");
8499 goto cleanup_nomem;
8500 }
8501
8502 /*
8503 * If HRRQ updated interrupt is not masked, or reset alert is set,
8504 * the card is in an unknown state and needs a hard reset
8505 */
8506 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8507 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8508 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
8509 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8510 ioa_cfg->needs_hard_reset = 1;
8511 if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
8512 ioa_cfg->needs_hard_reset = 1;
8513 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8514 ioa_cfg->ioa_unit_checked = 1;
8515
8516 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8517 rc = request_irq(pdev->irq, ipr_isr,
8518 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8519 IPR_NAME, ioa_cfg);
8520
8521 if (rc) {
8522 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8523 pdev->irq, rc);
8524 goto cleanup_nolog;
8525 }
8526
8527 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8528 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8529 ioa_cfg->needs_warm_reset = 1;
8530 ioa_cfg->reset = ipr_reset_slot_reset;
8531 } else
8532 ioa_cfg->reset = ipr_reset_start_bist;
8533
8534 spin_lock(&ipr_driver_lock);
8535 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8536 spin_unlock(&ipr_driver_lock);
8537
8538 LEAVE;
8539out:
8540 return rc;
8541
8542cleanup_nolog:
8543 ipr_free_mem(ioa_cfg);
8544cleanup_nomem:
8545 iounmap(ipr_regs);
8546out_msi_disable:
8547 pci_disable_msi(pdev);
8548out_release_regions:
8549 pci_release_regions(pdev);
8550out_scsi_host_put:
8551 scsi_host_put(host);
8552out_disable:
8553 pci_disable_device(pdev);
8554 goto out;
8555}
8556
8557/**
8558 * ipr_scan_vsets - Scans for VSET devices
8559 * @ioa_cfg: ioa config struct
8560 *
8561 * Description: Since the VSET resources do not follow SAM in that we can have
8562 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
8563 *
8564 * Return value:
8565 * none
8566 **/
8567static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8568{
8569 int target, lun;
8570
8571 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8572 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
8573 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8574}
8575
8576/**
8577 * ipr_initiate_ioa_bringdown - Bring down an adapter
8578 * @ioa_cfg: ioa config struct
8579 * @shutdown_type: shutdown type
8580 *
8581 * Description: This function will initiate bringing down the adapter.
8582 * This consists of issuing an IOA shutdown to the adapter
8583 * to flush the cache, and running BIST.
8584 * If the caller needs to wait on the completion of the reset,
8585 * the caller must sleep on the reset_wait_q.
8586 *
8587 * Return value:
8588 * none
8589 **/
8590static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8591 enum ipr_shutdown_type shutdown_type)
8592{
8593 ENTER;
8594 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8595 ioa_cfg->sdt_state = ABORT_DUMP;
8596 ioa_cfg->reset_retries = 0;
8597 ioa_cfg->in_ioa_bringdown = 1;
8598 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8599 LEAVE;
8600}
8601
8602/**
8603 * __ipr_remove - Remove a single adapter
8604 * @pdev: pci device struct
8605 *
8606 * Adapter hot plug remove entry point.
8607 *
8608 * Return value:
8609 * none
8610 **/
8611static void __ipr_remove(struct pci_dev *pdev)
8612{
8613 unsigned long host_lock_flags = 0;
8614 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8615 ENTER;
8616
8617 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8618 while (ioa_cfg->in_reset_reload) {
8619 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8620 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8621 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8622 }
8623
8624 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8625
8626 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8627 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5cbf5eae 8628 flush_scheduled_work();
1da177e4
LT
8629 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8630
8631 spin_lock(&ipr_driver_lock);
8632 list_del(&ioa_cfg->queue);
8633 spin_unlock(&ipr_driver_lock);
8634
8635 if (ioa_cfg->sdt_state == ABORT_DUMP)
8636 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8637 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8638
8639 ipr_free_all_resources(ioa_cfg);
8640
8641 LEAVE;
8642}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __devexit ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}
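
/*
 * Probe runs in two stages: ipr_probe_ioa() claims the PCI resources
 * and readies the adapter, and ipr_probe_ioa_part2() finishes bringing
 * it to an operational state before the SCSI host is registered and
 * scanned. Any failure after the first stage unwinds through
 * __ipr_remove(), which tears down everything set up so far.
 */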

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
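
/*
 * The last field of each entry (driver_data) carries per-board flags.
 * IPR_USE_PCI_WARM_RESET makes ipr_probe_ioa() select
 * ipr_reset_slot_reset instead of ipr_reset_start_bist (see the
 * dev_id->driver_data test earlier in this file), and
 * IPR_USE_LONG_TRANSOP_TIMEOUT appears to widen the adapter's
 * transition-to-operational timeout for the boards that need it.
 */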

static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = __devexit_p(ipr_remove),
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock(&ipr_driver_lock);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};
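
/*
 * The positional initializer above relies on the field order of
 * struct notifier_block; the equivalent designated form would be:
 *
 *	static struct notifier_block ipr_notifier = {
 *		.notifier_call = ipr_halt,
 *	};
 *
 * ipr_init() below registers this block on the reboot notifier chain so
 * that every adapter receives a shutdown prepare before the system goes
 * down.
 */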

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);