/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

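/*
 * Usage sketch (an assumed typical invocation, not taken from this file):
 * the module parameters declared above are set at load time, e.g.
 *	modprobe ipr max_speed=2 number_of_msix=4
 * or, when the driver is built in, on the kernel command line as
 *	ipr.max_speed=2 ipr.number_of_msix=4
 */
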
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	};
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (buffer - p));
	return buffer;
}

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * 			  for the resource.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

1457
1458/**
1459 * ipr_process_ccn - Op done function for a CCN.
1460 * @ipr_cmd: ipr command struct
1461 *
1462 * This function is the op done function for a configuration
1463 * change notification host controlled async from the adapter.
1464 *
1465 * Return value:
1466 * none
1467 **/
1468static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1469{
1470 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1471 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 1472 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
1473
1474 list_del(&hostrcb->queue);
05a6538a 1475 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1476
1477 if (ioasc) {
1478 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1479 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1480 dev_err(&ioa_cfg->pdev->dev,
1481 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1482
1483 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1484 } else {
1485 ipr_handle_config_change(ioa_cfg, hostrcb);
1486 }
1487}
1488
1489/**
1490 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1491 * @i: index into buffer
1492 * @buf: string to modify
1493 *
1494 * This function will strip all trailing whitespace, pad the end
1495 * of the string with a single space, and NULL terminate the string.
1496 *
1497 * Return value:
1498 * new length of string
1499 **/
1500static int strip_and_pad_whitespace(int i, char *buf)
1501{
1502 while (i && buf[i] == ' ')
1503 i--;
1504 buf[i+1] = ' ';
1505 buf[i+2] = '\0';
1506 return i + 2;
1507}
1508
1509/**
1510 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1511 * @prefix: string to print at start of printk
1512 * @hostrcb: hostrcb pointer
1513 * @vpd: vendor/product id/sn struct
1514 *
1515 * Return value:
1516 * none
1517 **/
1518static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1519 struct ipr_vpd *vpd)
1520{
1521 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1522 int i = 0;
1523
1524 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1525 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1526
1527 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1528 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1529
1530 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1531 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1532
1533 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1534}
1535
1536/**
1537 * ipr_log_vpd - Log the passed VPD to the error log.
cfc32139 1538 * @vpd: vendor/product id/sn struct
1539 *
1540 * Return value:
1541 * none
1542 **/
cfc32139 1543static void ipr_log_vpd(struct ipr_vpd *vpd)
1544{
1545 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1546 + IPR_SERIAL_NUM_LEN];
1547
cfc32139 1548 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1549 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1550 IPR_PROD_ID_LEN);
1551 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1552 ipr_err("Vendor/Product ID: %s\n", buffer);
1553
cfc32139 1554 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1555 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1556 ipr_err(" Serial Number: %s\n", buffer);
1557}
1558
1559/**
1560 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1561 * @prefix: string to print at start of printk
1562 * @hostrcb: hostrcb pointer
1563 * @vpd: vendor/product id/sn/wwn struct
1564 *
1565 * Return value:
1566 * none
1567 **/
1568static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1569 struct ipr_ext_vpd *vpd)
1570{
1571 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1572 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1573 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1574}
1575
ee0f05b8 1576/**
1577 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1578 * @vpd: vendor/product id/sn/wwn struct
1579 *
1580 * Return value:
1581 * none
1582 **/
1583static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1584{
1585 ipr_log_vpd(&vpd->vpd);
1586 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1587 be32_to_cpu(vpd->wwid[1]));
1588}
1589
1590/**
1591 * ipr_log_enhanced_cache_error - Log a cache error.
1592 * @ioa_cfg: ioa config struct
1593 * @hostrcb: hostrcb struct
1594 *
1595 * Return value:
1596 * none
1597 **/
1598static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1599 struct ipr_hostrcb *hostrcb)
1600{
1601 struct ipr_hostrcb_type_12_error *error;
1602
1603 if (ioa_cfg->sis64)
1604 error = &hostrcb->hcam.u.error64.u.type_12_error;
1605 else
1606 error = &hostrcb->hcam.u.error.u.type_12_error;
ee0f05b8 1607
1608 ipr_err("-----Current Configuration-----\n");
1609 ipr_err("Cache Directory Card Information:\n");
1610 ipr_log_ext_vpd(&error->ioa_vpd);
1611 ipr_err("Adapter Card Information:\n");
1612 ipr_log_ext_vpd(&error->cfc_vpd);
1613
1614 ipr_err("-----Expected Configuration-----\n");
1615 ipr_err("Cache Directory Card Information:\n");
1616 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1617 ipr_err("Adapter Card Information:\n");
1618 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1619
1620 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1621 be32_to_cpu(error->ioa_data[0]),
1622 be32_to_cpu(error->ioa_data[1]),
1623 be32_to_cpu(error->ioa_data[2]));
1624}
1625
1626/**
1627 * ipr_log_cache_error - Log a cache error.
1628 * @ioa_cfg: ioa config struct
1629 * @hostrcb: hostrcb struct
1630 *
1631 * Return value:
1632 * none
1633 **/
1634static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1635 struct ipr_hostrcb *hostrcb)
1636{
1637 struct ipr_hostrcb_type_02_error *error =
1638 &hostrcb->hcam.u.error.u.type_02_error;
1639
1640 ipr_err("-----Current Configuration-----\n");
1641 ipr_err("Cache Directory Card Information:\n");
cfc32139 1642 ipr_log_vpd(&error->ioa_vpd);
1da177e4 1643 ipr_err("Adapter Card Information:\n");
cfc32139 1644 ipr_log_vpd(&error->cfc_vpd);
1645
1646 ipr_err("-----Expected Configuration-----\n");
1647 ipr_err("Cache Directory Card Information:\n");
cfc32139 1648 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1da177e4 1649 ipr_err("Adapter Card Information:\n");
cfc32139 1650 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1651
1652 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1653 be32_to_cpu(error->ioa_data[0]),
1654 be32_to_cpu(error->ioa_data[1]),
1655 be32_to_cpu(error->ioa_data[2]));
1656}
1657
ee0f05b8 1658/**
1659 * ipr_log_enhanced_config_error - Log a configuration error.
1660 * @ioa_cfg: ioa config struct
1661 * @hostrcb: hostrcb struct
1662 *
1663 * Return value:
1664 * none
1665 **/
1666static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1667 struct ipr_hostrcb *hostrcb)
1668{
1669 int errors_logged, i;
1670 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1671 struct ipr_hostrcb_type_13_error *error;
1672
1673 error = &hostrcb->hcam.u.error.u.type_13_error;
1674 errors_logged = be32_to_cpu(error->errors_logged);
1675
1676 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1677 be32_to_cpu(error->errors_detected), errors_logged);
1678
1679 dev_entry = error->dev;
1680
1681 for (i = 0; i < errors_logged; i++, dev_entry++) {
1682 ipr_err_separator;
1683
1684 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1685 ipr_log_ext_vpd(&dev_entry->vpd);
1686
1687 ipr_err("-----New Device Information-----\n");
1688 ipr_log_ext_vpd(&dev_entry->new_vpd);
1689
1690 ipr_err("Cache Directory Card Information:\n");
1691 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1692
1693 ipr_err("Adapter Card Information:\n");
1694 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1695 }
1696}
1697
1698/**
 1699 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1700 * @ioa_cfg: ioa config struct
1701 * @hostrcb: hostrcb struct
1702 *
1703 * Return value:
1704 * none
1705 **/
1706static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1707 struct ipr_hostrcb *hostrcb)
1708{
1709 int errors_logged, i;
1710 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1711 struct ipr_hostrcb_type_23_error *error;
1712 char buffer[IPR_MAX_RES_PATH_LENGTH];
1713
1714 error = &hostrcb->hcam.u.error64.u.type_23_error;
1715 errors_logged = be32_to_cpu(error->errors_logged);
1716
1717 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1718 be32_to_cpu(error->errors_detected), errors_logged);
1719
1720 dev_entry = error->dev;
1721
1722 for (i = 0; i < errors_logged; i++, dev_entry++) {
1723 ipr_err_separator;
1724
1725 ipr_err("Device %d : %s", i + 1,
1726 __ipr_format_res_path(dev_entry->res_path,
1727 buffer, sizeof(buffer)));
1728 ipr_log_ext_vpd(&dev_entry->vpd);
1729
1730 ipr_err("-----New Device Information-----\n");
1731 ipr_log_ext_vpd(&dev_entry->new_vpd);
1732
1733 ipr_err("Cache Directory Card Information:\n");
1734 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1735
1736 ipr_err("Adapter Card Information:\n");
1737 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1738 }
1739}
1740
1741/**
1742 * ipr_log_config_error - Log a configuration error.
1743 * @ioa_cfg: ioa config struct
1744 * @hostrcb: hostrcb struct
1745 *
1746 * Return value:
1747 * none
1748 **/
1749static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1750 struct ipr_hostrcb *hostrcb)
1751{
1752 int errors_logged, i;
1753 struct ipr_hostrcb_device_data_entry *dev_entry;
1754 struct ipr_hostrcb_type_03_error *error;
1755
1756 error = &hostrcb->hcam.u.error.u.type_03_error;
1757 errors_logged = be32_to_cpu(error->errors_logged);
1758
1759 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1760 be32_to_cpu(error->errors_detected), errors_logged);
1761
cfc32139 1762 dev_entry = error->dev;
1763
1764 for (i = 0; i < errors_logged; i++, dev_entry++) {
1765 ipr_err_separator;
1766
fa15b1f6 1767 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1768 ipr_log_vpd(&dev_entry->vpd);
1769
1770 ipr_err("-----New Device Information-----\n");
cfc32139 1771 ipr_log_vpd(&dev_entry->new_vpd);
1772
1773 ipr_err("Cache Directory Card Information:\n");
cfc32139 1774 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1775
1776 ipr_err("Adapter Card Information:\n");
cfc32139 1777 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1778
1779 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1780 be32_to_cpu(dev_entry->ioa_data[0]),
1781 be32_to_cpu(dev_entry->ioa_data[1]),
1782 be32_to_cpu(dev_entry->ioa_data[2]),
1783 be32_to_cpu(dev_entry->ioa_data[3]),
1784 be32_to_cpu(dev_entry->ioa_data[4]));
1785 }
1786}
1787
ee0f05b8 1788/**
1789 * ipr_log_enhanced_array_error - Log an array configuration error.
1790 * @ioa_cfg: ioa config struct
1791 * @hostrcb: hostrcb struct
1792 *
1793 * Return value:
1794 * none
1795 **/
1796static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1797 struct ipr_hostrcb *hostrcb)
1798{
1799 int i, num_entries;
1800 struct ipr_hostrcb_type_14_error *error;
1801 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1802 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1803
1804 error = &hostrcb->hcam.u.error.u.type_14_error;
1805
1806 ipr_err_separator;
1807
1808 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1809 error->protection_level,
1810 ioa_cfg->host->host_no,
1811 error->last_func_vset_res_addr.bus,
1812 error->last_func_vset_res_addr.target,
1813 error->last_func_vset_res_addr.lun);
1814
1815 ipr_err_separator;
1816
1817 array_entry = error->array_member;
1818 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
7262026f 1819 ARRAY_SIZE(error->array_member));
ee0f05b8 1820
1821 for (i = 0; i < num_entries; i++, array_entry++) {
1822 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1823 continue;
1824
1825 if (be32_to_cpu(error->exposed_mode_adn) == i)
1826 ipr_err("Exposed Array Member %d:\n", i);
1827 else
1828 ipr_err("Array Member %d:\n", i);
1829
1830 ipr_log_ext_vpd(&array_entry->vpd);
1831 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1832 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1833 "Expected Location");
1834
1835 ipr_err_separator;
1836 }
1837}
1838
1839/**
1840 * ipr_log_array_error - Log an array configuration error.
1841 * @ioa_cfg: ioa config struct
1842 * @hostrcb: hostrcb struct
1843 *
1844 * Return value:
1845 * none
1846 **/
1847static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1848 struct ipr_hostrcb *hostrcb)
1849{
1850 int i;
1851 struct ipr_hostrcb_type_04_error *error;
1852 struct ipr_hostrcb_array_data_entry *array_entry;
1853 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1854
1855 error = &hostrcb->hcam.u.error.u.type_04_error;
1856
1857 ipr_err_separator;
1858
1859 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1860 error->protection_level,
1861 ioa_cfg->host->host_no,
1862 error->last_func_vset_res_addr.bus,
1863 error->last_func_vset_res_addr.target,
1864 error->last_func_vset_res_addr.lun);
1865
1866 ipr_err_separator;
1867
1868 array_entry = error->array_member;
1869
1870 for (i = 0; i < 18; i++) {
cfc32139 1871 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1872 continue;
1873
fa15b1f6 1874 if (be32_to_cpu(error->exposed_mode_adn) == i)
1da177e4 1875 ipr_err("Exposed Array Member %d:\n", i);
fa15b1f6 1876 else
1da177e4 1877 ipr_err("Array Member %d:\n", i);
1da177e4 1878
cfc32139 1879 ipr_log_vpd(&array_entry->vpd);
1da177e4 1880
fa15b1f6 1881 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1882 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1883 "Expected Location");
1884
1885 ipr_err_separator;
1886
1887 if (i == 9)
1888 array_entry = error->array_member2;
1889 else
1890 array_entry++;
1891 }
1892}
1893
1894/**
b0df54bb 1895 * ipr_log_hex_data - Log additional hex IOA error data.
ac719aba 1896 * @ioa_cfg: ioa config struct
b0df54bb 1897 * @data: IOA error data
1898 * @len: data length
1899 *
1900 * Return value:
1901 * none
1902 **/
ac719aba 1903static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1904{
1905 int i;
1da177e4 1906
b0df54bb 1907 if (len == 0)
1908 return;
1909
1910 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1911 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1912
b0df54bb 1913 for (i = 0; i < len / 4; i += 4) {
1da177e4 1914 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
b0df54bb 1915 be32_to_cpu(data[i]),
1916 be32_to_cpu(data[i+1]),
1917 be32_to_cpu(data[i+2]),
1918 be32_to_cpu(data[i+3]));
1919 }
1920}
1921
ee0f05b8 1922/**
1923 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1924 * @ioa_cfg: ioa config struct
1925 * @hostrcb: hostrcb struct
1926 *
1927 * Return value:
1928 * none
1929 **/
1930static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1931 struct ipr_hostrcb *hostrcb)
1932{
1933 struct ipr_hostrcb_type_17_error *error;
1934
1935 if (ioa_cfg->sis64)
1936 error = &hostrcb->hcam.u.error64.u.type_17_error;
1937 else
1938 error = &hostrcb->hcam.u.error.u.type_17_error;
1939
ee0f05b8 1940 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1941 strim(error->failure_reason);
ee0f05b8 1942
1943 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1944 be32_to_cpu(hostrcb->hcam.u.error.prc));
1945 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1946 ipr_log_hex_data(ioa_cfg, error->data,
ee0f05b8 1947 be32_to_cpu(hostrcb->hcam.length) -
1948 (offsetof(struct ipr_hostrcb_error, u) +
1949 offsetof(struct ipr_hostrcb_type_17_error, data)));
1950}
1951
b0df54bb 1952/**
1953 * ipr_log_dual_ioa_error - Log a dual adapter error.
1954 * @ioa_cfg: ioa config struct
1955 * @hostrcb: hostrcb struct
1956 *
1957 * Return value:
1958 * none
1959 **/
1960static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1961 struct ipr_hostrcb *hostrcb)
1962{
1963 struct ipr_hostrcb_type_07_error *error;
1964
1965 error = &hostrcb->hcam.u.error.u.type_07_error;
1966 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1967 strim(error->failure_reason);
b0df54bb 1968
1969 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1970 be32_to_cpu(hostrcb->hcam.u.error.prc));
1971 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1972 ipr_log_hex_data(ioa_cfg, error->data,
b0df54bb 1973 be32_to_cpu(hostrcb->hcam.length) -
1974 (offsetof(struct ipr_hostrcb_error, u) +
1975 offsetof(struct ipr_hostrcb_type_07_error, data)));
1976}
1977
1978static const struct {
1979 u8 active;
1980 char *desc;
1981} path_active_desc[] = {
1982 { IPR_PATH_NO_INFO, "Path" },
1983 { IPR_PATH_ACTIVE, "Active path" },
1984 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1985};
1986
1987static const struct {
1988 u8 state;
1989 char *desc;
1990} path_state_desc[] = {
1991 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1992 { IPR_PATH_HEALTHY, "is healthy" },
1993 { IPR_PATH_DEGRADED, "is degraded" },
1994 { IPR_PATH_FAILED, "is failed" }
1995};
1996
1997/**
1998 * ipr_log_fabric_path - Log a fabric path error
1999 * @hostrcb: hostrcb struct
2000 * @fabric: fabric descriptor
2001 *
2002 * Return value:
2003 * none
2004 **/
2005static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2006 struct ipr_hostrcb_fabric_desc *fabric)
2007{
2008 int i, j;
2009 u8 path_state = fabric->path_state;
2010 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2011 u8 state = path_state & IPR_PATH_STATE_MASK;
2012
2013 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2014 if (path_active_desc[i].active != active)
2015 continue;
2016
2017 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2018 if (path_state_desc[j].state != state)
2019 continue;
2020
2021 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2022 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2023 path_active_desc[i].desc, path_state_desc[j].desc,
2024 fabric->ioa_port);
2025 } else if (fabric->cascaded_expander == 0xff) {
2026 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2027 path_active_desc[i].desc, path_state_desc[j].desc,
2028 fabric->ioa_port, fabric->phy);
2029 } else if (fabric->phy == 0xff) {
2030 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2031 path_active_desc[i].desc, path_state_desc[j].desc,
2032 fabric->ioa_port, fabric->cascaded_expander);
2033 } else {
2034 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2035 path_active_desc[i].desc, path_state_desc[j].desc,
2036 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2037 }
2038 return;
2039 }
2040 }
2041
2042 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2043 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2044}
2045
2046/**
2047 * ipr_log64_fabric_path - Log a fabric path error
2048 * @hostrcb: hostrcb struct
2049 * @fabric: fabric descriptor
2050 *
2051 * Return value:
2052 * none
2053 **/
2054static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2055 struct ipr_hostrcb64_fabric_desc *fabric)
2056{
2057 int i, j;
2058 u8 path_state = fabric->path_state;
2059 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2060 u8 state = path_state & IPR_PATH_STATE_MASK;
2061 char buffer[IPR_MAX_RES_PATH_LENGTH];
2062
2063 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2064 if (path_active_desc[i].active != active)
2065 continue;
2066
2067 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2068 if (path_state_desc[j].state != state)
2069 continue;
2070
2071 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2072 path_active_desc[i].desc, path_state_desc[j].desc,
2073 ipr_format_res_path(hostrcb->ioa_cfg,
2074 fabric->res_path,
2075 buffer, sizeof(buffer)));
2076 return;
2077 }
2078 }
2079
2080 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2081 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2082 buffer, sizeof(buffer)));
2083}
2084
2085static const struct {
2086 u8 type;
2087 char *desc;
2088} path_type_desc[] = {
2089 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2090 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2091 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2092 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2093};
2094
2095static const struct {
2096 u8 status;
2097 char *desc;
2098} path_status_desc[] = {
2099 { IPR_PATH_CFG_NO_PROB, "Functional" },
2100 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2101 { IPR_PATH_CFG_FAILED, "Failed" },
2102 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2103 { IPR_PATH_NOT_DETECTED, "Missing" },
2104 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2105};
2106
2107static const char *link_rate[] = {
2108 "unknown",
2109 "disabled",
2110 "phy reset problem",
2111 "spinup hold",
2112 "port selector",
2113 "unknown",
2114 "unknown",
2115 "unknown",
2116 "1.5Gbps",
2117 "3.0Gbps",
2118 "unknown",
2119 "unknown",
2120 "unknown",
2121 "unknown",
2122 "unknown",
2123 "unknown"
2124};
2125
2126/**
2127 * ipr_log_path_elem - Log a fabric path element.
2128 * @hostrcb: hostrcb struct
2129 * @cfg: fabric path element struct
2130 *
2131 * Return value:
2132 * none
2133 **/
2134static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2135 struct ipr_hostrcb_config_element *cfg)
2136{
2137 int i, j;
2138 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2139 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2140
2141 if (type == IPR_PATH_CFG_NOT_EXIST)
2142 return;
2143
2144 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2145 if (path_type_desc[i].type != type)
2146 continue;
2147
2148 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2149 if (path_status_desc[j].status != status)
2150 continue;
2151
2152 if (type == IPR_PATH_CFG_IOA_PORT) {
2153 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2154 path_status_desc[j].desc, path_type_desc[i].desc,
2155 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2156 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2157 } else {
2158 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2159 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2160 path_status_desc[j].desc, path_type_desc[i].desc,
2161 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2162 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2163 } else if (cfg->cascaded_expander == 0xff) {
2164 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2165 "WWN=%08X%08X\n", path_status_desc[j].desc,
2166 path_type_desc[i].desc, cfg->phy,
2167 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2168 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2169 } else if (cfg->phy == 0xff) {
2170 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2171 "WWN=%08X%08X\n", path_status_desc[j].desc,
2172 path_type_desc[i].desc, cfg->cascaded_expander,
2173 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2174 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2175 } else {
2176 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2177 "WWN=%08X%08X\n", path_status_desc[j].desc,
2178 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2179 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2180 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2181 }
2182 }
2183 return;
2184 }
2185 }
2186
2187 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2188 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2189 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2190 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2191}
2192
2193/**
2194 * ipr_log64_path_elem - Log a fabric path element.
2195 * @hostrcb: hostrcb struct
2196 * @cfg: fabric path element struct
2197 *
2198 * Return value:
2199 * none
2200 **/
2201static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2202 struct ipr_hostrcb64_config_element *cfg)
2203{
2204 int i, j;
2205 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2206 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2207 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2208 char buffer[IPR_MAX_RES_PATH_LENGTH];
2209
2210 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2211 return;
2212
2213 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2214 if (path_type_desc[i].type != type)
2215 continue;
2216
2217 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2218 if (path_status_desc[j].status != status)
2219 continue;
2220
2221 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2222 path_status_desc[j].desc, path_type_desc[i].desc,
2223 ipr_format_res_path(hostrcb->ioa_cfg,
2224 cfg->res_path, buffer, sizeof(buffer)),
2225 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2226 be32_to_cpu(cfg->wwid[0]),
2227 be32_to_cpu(cfg->wwid[1]));
2228 return;
2229 }
2230 }
2231 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2232 "WWN=%08X%08X\n", cfg->type_status,
2233 ipr_format_res_path(hostrcb->ioa_cfg,
2234 cfg->res_path, buffer, sizeof(buffer)),
2235 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2236 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2237}
2238
2239/**
2240 * ipr_log_fabric_error - Log a fabric error.
2241 * @ioa_cfg: ioa config struct
2242 * @hostrcb: hostrcb struct
2243 *
2244 * Return value:
2245 * none
2246 **/
2247static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2248 struct ipr_hostrcb *hostrcb)
2249{
2250 struct ipr_hostrcb_type_20_error *error;
2251 struct ipr_hostrcb_fabric_desc *fabric;
2252 struct ipr_hostrcb_config_element *cfg;
2253 int i, add_len;
2254
2255 error = &hostrcb->hcam.u.error.u.type_20_error;
2256 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2257 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2258
2259 add_len = be32_to_cpu(hostrcb->hcam.length) -
2260 (offsetof(struct ipr_hostrcb_error, u) +
2261 offsetof(struct ipr_hostrcb_type_20_error, desc));
2262
2263 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2264 ipr_log_fabric_path(hostrcb, fabric);
2265 for_each_fabric_cfg(fabric, cfg)
2266 ipr_log_path_elem(hostrcb, cfg);
2267
2268 add_len -= be16_to_cpu(fabric->length);
2269 fabric = (struct ipr_hostrcb_fabric_desc *)
2270 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2271 }
2272
ac719aba 2273 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2274}
2275
2276/**
2277 * ipr_log_sis64_array_error - Log a sis64 array error.
2278 * @ioa_cfg: ioa config struct
2279 * @hostrcb: hostrcb struct
2280 *
2281 * Return value:
2282 * none
2283 **/
2284static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2285 struct ipr_hostrcb *hostrcb)
2286{
2287 int i, num_entries;
2288 struct ipr_hostrcb_type_24_error *error;
2289 struct ipr_hostrcb64_array_data_entry *array_entry;
2290 char buffer[IPR_MAX_RES_PATH_LENGTH];
2291 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2292
2293 error = &hostrcb->hcam.u.error64.u.type_24_error;
2294
2295 ipr_err_separator;
2296
2297 ipr_err("RAID %s Array Configuration: %s\n",
2298 error->protection_level,
2299 ipr_format_res_path(ioa_cfg, error->last_res_path,
2300 buffer, sizeof(buffer)));
2301
2302 ipr_err_separator;
2303
2304 array_entry = error->array_member;
2305 num_entries = min_t(u32, error->num_entries,
2306 ARRAY_SIZE(error->array_member));
2307
2308 for (i = 0; i < num_entries; i++, array_entry++) {
2309
2310 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2311 continue;
2312
2313 if (error->exposed_mode_adn == i)
2314 ipr_err("Exposed Array Member %d:\n", i);
2315 else
2316 ipr_err("Array Member %d:\n", i);
2317
2319 ipr_log_ext_vpd(&array_entry->vpd);
7262026f 2320 ipr_err("Current Location: %s\n",
2321 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2322 buffer, sizeof(buffer)));
7262026f 2323 ipr_err("Expected Location: %s\n",
2324 ipr_format_res_path(ioa_cfg,
2325 array_entry->expected_res_path,
2326 buffer, sizeof(buffer)));
2327
2328 ipr_err_separator;
2329 }
2330}
2331
2332/**
2333 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2334 * @ioa_cfg: ioa config struct
2335 * @hostrcb: hostrcb struct
2336 *
2337 * Return value:
2338 * none
2339 **/
2340static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2341 struct ipr_hostrcb *hostrcb)
2342{
2343 struct ipr_hostrcb_type_30_error *error;
2344 struct ipr_hostrcb64_fabric_desc *fabric;
2345 struct ipr_hostrcb64_config_element *cfg;
2346 int i, add_len;
2347
2348 error = &hostrcb->hcam.u.error64.u.type_30_error;
2349
2350 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2351 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2352
2353 add_len = be32_to_cpu(hostrcb->hcam.length) -
2354 (offsetof(struct ipr_hostrcb64_error, u) +
2355 offsetof(struct ipr_hostrcb_type_30_error, desc));
2356
2357 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2358 ipr_log64_fabric_path(hostrcb, fabric);
2359 for_each_fabric_cfg(fabric, cfg)
2360 ipr_log64_path_elem(hostrcb, cfg);
2361
2362 add_len -= be16_to_cpu(fabric->length);
2363 fabric = (struct ipr_hostrcb64_fabric_desc *)
2364 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2365 }
2366
2367 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2368}
2369
b0df54bb 2370/**
2371 * ipr_log_generic_error - Log an adapter error.
2372 * @ioa_cfg: ioa config struct
2373 * @hostrcb: hostrcb struct
2374 *
2375 * Return value:
2376 * none
2377 **/
2378static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2379 struct ipr_hostrcb *hostrcb)
2380{
ac719aba 2381 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
b0df54bb 2382 be32_to_cpu(hostrcb->hcam.length));
2383}
2384
2385/**
 2386 * ipr_log_sis64_device_error - Log a sis64 device error.
2387 * @ioa_cfg: ioa config struct
2388 * @hostrcb: hostrcb struct
2389 *
2390 * Return value:
2391 * none
2392 **/
2393static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2394 struct ipr_hostrcb *hostrcb)
2395{
2396 struct ipr_hostrcb_type_21_error *error;
2397 char buffer[IPR_MAX_RES_PATH_LENGTH];
2398
2399 error = &hostrcb->hcam.u.error64.u.type_21_error;
2400
2401 ipr_err("-----Failing Device Information-----\n");
2402 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2403 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2404 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2405 ipr_err("Device Resource Path: %s\n",
2406 __ipr_format_res_path(error->res_path,
2407 buffer, sizeof(buffer)));
2408 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2409 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2410 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2411 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2412 ipr_err("SCSI Sense Data:\n");
2413 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2414 ipr_err("SCSI Command Descriptor Block: \n");
2415 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2416
2417 ipr_err("Additional IOA Data:\n");
2418 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2419}
2420
2421/**
 2422 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2423 * @ioasc: IOASC
2424 *
 2425 * This function will return the index into the ipr_error_table
2426 * for the specified IOASC. If the IOASC is not in the table,
2427 * 0 will be returned, which points to the entry used for unknown errors.
2428 *
2429 * Return value:
2430 * index into the ipr_error_table
2431 **/
2432static u32 ipr_get_error(u32 ioasc)
2433{
2434 int i;
2435
2436 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
35a39691 2437 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2438 return i;
2439
2440 return 0;
2441}
2442
2443/**
2444 * ipr_handle_log_data - Log an adapter error.
2445 * @ioa_cfg: ioa config struct
2446 * @hostrcb: hostrcb struct
2447 *
2448 * This function logs an adapter error to the system.
2449 *
2450 * Return value:
2451 * none
2452 **/
2453static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2454 struct ipr_hostrcb *hostrcb)
2455{
2456 u32 ioasc;
2457 int error_index;
3185ea63 2458 struct ipr_hostrcb_type_21_error *error;
2459
2460 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2461 return;
2462
2463 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2464 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2465
2466 if (ioa_cfg->sis64)
2467 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2468 else
2469 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4 2470
2471 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2472 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2473 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2474 scsi_report_bus_reset(ioa_cfg->host,
4565e370 2475 hostrcb->hcam.u.error.fd_res_addr.bus);
2476 }
2477
2478 error_index = ipr_get_error(ioasc);
2479
2480 if (!ipr_error_table[error_index].log_hcam)
2481 return;
2482
3185ea63 2483 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2484 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2485 error = &hostrcb->hcam.u.error64.u.type_21_error;
2486
2487 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2488 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2489 return;
2490 }
2491
49dc6a18 2492 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2493
2494 /* Set indication we have logged an error */
2495 ioa_cfg->errors_logged++;
2496
933916f3 2497 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1da177e4 2498 return;
cf852037 2499 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2500 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2501
2502 switch (hostrcb->hcam.overlay_id) {
2503 case IPR_HOST_RCB_OVERLAY_ID_2:
2504 ipr_log_cache_error(ioa_cfg, hostrcb);
2505 break;
2506 case IPR_HOST_RCB_OVERLAY_ID_3:
2507 ipr_log_config_error(ioa_cfg, hostrcb);
2508 break;
2509 case IPR_HOST_RCB_OVERLAY_ID_4:
2510 case IPR_HOST_RCB_OVERLAY_ID_6:
2511 ipr_log_array_error(ioa_cfg, hostrcb);
2512 break;
b0df54bb 2513 case IPR_HOST_RCB_OVERLAY_ID_7:
2514 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2515 break;
ee0f05b8 2516 case IPR_HOST_RCB_OVERLAY_ID_12:
2517 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2518 break;
2519 case IPR_HOST_RCB_OVERLAY_ID_13:
2520 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2521 break;
2522 case IPR_HOST_RCB_OVERLAY_ID_14:
2523 case IPR_HOST_RCB_OVERLAY_ID_16:
2524 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2525 break;
2526 case IPR_HOST_RCB_OVERLAY_ID_17:
2527 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2528 break;
2529 case IPR_HOST_RCB_OVERLAY_ID_20:
2530 ipr_log_fabric_error(ioa_cfg, hostrcb);
2531 break;
2532 case IPR_HOST_RCB_OVERLAY_ID_21:
2533 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2534 break;
2535 case IPR_HOST_RCB_OVERLAY_ID_23:
2536 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2537 break;
2538 case IPR_HOST_RCB_OVERLAY_ID_24:
2539 case IPR_HOST_RCB_OVERLAY_ID_26:
2540 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2541 break;
2542 case IPR_HOST_RCB_OVERLAY_ID_30:
2543 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2544 break;
cf852037 2545 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 2546 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 2547 default:
a9cfca96 2548 ipr_log_generic_error(ioa_cfg, hostrcb);
2549 break;
2550 }
2551}
2552
2553/**
2554 * ipr_process_error - Op done function for an adapter error log.
2555 * @ipr_cmd: ipr command struct
2556 *
2557 * This function is the op done function for an error log host
 2558 * controlled async message (HCAM) from the adapter. It will log the error and
2559 * send the HCAM back to the adapter.
2560 *
2561 * Return value:
2562 * none
2563 **/
2564static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2565{
2566 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2567 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 2568 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2569 u32 fd_ioasc;
2570
2571 if (ioa_cfg->sis64)
2572 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2573 else
2574 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2575
2576 list_del(&hostrcb->queue);
05a6538a 2577 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2578
2579 if (!ioasc) {
2580 ipr_handle_log_data(ioa_cfg, hostrcb);
2581 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2582 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2583 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2584 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2585 dev_err(&ioa_cfg->pdev->dev,
2586 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2587 }
2588
2589 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2590}
2591
2592/**
2593 * ipr_timeout - An internally generated op has timed out.
2594 * @ipr_cmd: ipr command struct
2595 *
2596 * This function blocks host requests and initiates an
2597 * adapter reset.
2598 *
2599 * Return value:
2600 * none
2601 **/
2602static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2603{
2604 unsigned long lock_flags = 0;
2605 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2606
2607 ENTER;
2608 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2609
2610 ioa_cfg->errors_logged++;
2611 dev_err(&ioa_cfg->pdev->dev,
2612 "Adapter being reset due to command timeout.\n");
2613
2614 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2615 ioa_cfg->sdt_state = GET_DUMP;
2616
2617 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2618 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2619
2620 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2621 LEAVE;
2622}
2623
2624/**
2625 * ipr_oper_timeout - Adapter timed out transitioning to operational
2626 * @ipr_cmd: ipr command struct
2627 *
2628 * This function blocks host requests and initiates an
2629 * adapter reset.
2630 *
2631 * Return value:
2632 * none
2633 **/
2634static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2635{
2636 unsigned long lock_flags = 0;
2637 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2638
2639 ENTER;
2640 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2641
2642 ioa_cfg->errors_logged++;
2643 dev_err(&ioa_cfg->pdev->dev,
2644 "Adapter timed out transitioning to operational.\n");
2645
2646 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2647 ioa_cfg->sdt_state = GET_DUMP;
2648
2649 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2650 if (ipr_fastfail)
2651 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2652 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2653 }
2654
2655 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2656 LEAVE;
2657}
2658
2659/**
2660 * ipr_find_ses_entry - Find matching SES in SES table
2661 * @res: resource entry struct of SES
2662 *
2663 * Return value:
2664 * pointer to SES table entry / NULL on failure
2665 **/
2666static const struct ipr_ses_table_entry *
2667ipr_find_ses_entry(struct ipr_resource_entry *res)
2668{
2669 int i, j, matches;
3e7ebdfa 2670 struct ipr_std_inq_vpids *vpids;
2671 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2672
2673 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2674 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2675 if (ste->compare_product_id_byte[j] == 'X') {
2676 vpids = &res->std_inq_data.vpids;
2677 if (vpids->product_id[j] == ste->product_id[j])
2678 matches++;
2679 else
2680 break;
2681 } else
2682 matches++;
2683 }
2684
2685 if (matches == IPR_PROD_ID_LEN)
2686 return ste;
2687 }
2688
2689 return NULL;
2690}
2691
2692/**
2693 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2694 * @ioa_cfg: ioa config struct
2695 * @bus: SCSI bus
2696 * @bus_width: bus width
2697 *
2698 * Return value:
2699 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2700 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2701 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2702 * max 160MHz = max 320MB/sec).
2703 **/
2704static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2705{
2706 struct ipr_resource_entry *res;
2707 const struct ipr_ses_table_entry *ste;
2708 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2709
2710 /* Loop through each config table entry in the config table buffer */
2711 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 2712 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2713 continue;
2714
3e7ebdfa 2715 if (bus != res->bus)
2716 continue;
2717
2718 if (!(ste = ipr_find_ses_entry(res)))
2719 continue;
2720
2721 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2722 }
2723
2724 return max_xfer_rate;
2725}
2726
2727/**
2728 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2729 * @ioa_cfg: ioa config struct
2730 * @max_delay: max delay in micro-seconds to wait
2731 *
2732 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2733 *
2734 * Return value:
2735 * 0 on success / other on failure
2736 **/
2737static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2738{
2739 volatile u32 pcii_reg;
2740 int delay = 1;
2741
2742 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2743 while (delay < max_delay) {
2744 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2745
2746 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2747 return 0;
2748
2749 /* udelay cannot be used if delay is more than a few milliseconds */
2750 if ((delay / 1000) > MAX_UDELAY_MS)
2751 mdelay(delay / 1000);
2752 else
2753 udelay(delay);
2754
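		/* double the poll interval each pass until max_delay is reached */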
2755 delay += delay;
2756 }
2757 return -EIO;
2758}
2759
2760/**
2761 * ipr_get_sis64_dump_data_section - Dump IOA memory
2762 * @ioa_cfg: ioa config struct
2763 * @start_addr: adapter address to dump
2764 * @dest: destination kernel buffer
2765 * @length_in_words: length to dump in 4 byte words
2766 *
2767 * Return value:
2768 * 0 on success
2769 **/
2770static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2771 u32 start_addr,
2772 __be32 *dest, u32 length_in_words)
2773{
2774 int i;
2775
2776 for (i = 0; i < length_in_words; i++) {
2777 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2778 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2779 dest++;
2780 }
2781
2782 return 0;
2783}
2784
2785/**
2786 * ipr_get_ldump_data_section - Dump IOA memory
2787 * @ioa_cfg: ioa config struct
2788 * @start_addr: adapter address to dump
2789 * @dest: destination kernel buffer
2790 * @length_in_words: length to dump in 4 byte words
2791 *
2792 * Return value:
2793 * 0 on success / -EIO on failure
2794 **/
2795static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2796 u32 start_addr,
2797 __be32 *dest, u32 length_in_words)
2798{
2799 volatile u32 temp_pcii_reg;
2800 int i, delay = 0;
2801
2802 if (ioa_cfg->sis64)
2803 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2804 dest, length_in_words);
2805
2806 /* Write IOA interrupt reg starting LDUMP state */
2807 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
214777ba 2808 ioa_cfg->regs.set_uproc_interrupt_reg32);
2809
2810 /* Wait for IO debug acknowledge */
2811 if (ipr_wait_iodbg_ack(ioa_cfg,
2812 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2813 dev_err(&ioa_cfg->pdev->dev,
2814 "IOA dump long data transfer timeout\n");
2815 return -EIO;
2816 }
2817
2818 /* Signal LDUMP interlocked - clear IO debug ack */
2819 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2820 ioa_cfg->regs.clr_interrupt_reg);
2821
2822 /* Write Mailbox with starting address */
2823 writel(start_addr, ioa_cfg->ioa_mailbox);
2824
2825 /* Signal address valid - clear IOA Reset alert */
2826 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2827 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2828
2829 for (i = 0; i < length_in_words; i++) {
2830 /* Wait for IO debug acknowledge */
2831 if (ipr_wait_iodbg_ack(ioa_cfg,
2832 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2833 dev_err(&ioa_cfg->pdev->dev,
2834 "IOA dump short data transfer timeout\n");
2835 return -EIO;
2836 }
2837
2838 /* Read data from mailbox and increment destination pointer */
2839 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2840 dest++;
2841
2842 /* For all but the last word of data, signal data received */
2843 if (i < (length_in_words - 1)) {
2844 /* Signal dump data received - Clear IO debug Ack */
2845 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2846 ioa_cfg->regs.clr_interrupt_reg);
2847 }
2848 }
2849
2850 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2851 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2852 ioa_cfg->regs.set_uproc_interrupt_reg32);
2853
2854 writel(IPR_UPROCI_IO_DEBUG_ALERT,
214777ba 2855 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2856
2857 /* Signal dump data received - Clear IO debug Ack */
2858 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2859 ioa_cfg->regs.clr_interrupt_reg);
2860
2861 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2862 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2863 temp_pcii_reg =
214777ba 2864 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2865
2866 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2867 return 0;
2868
2869 udelay(10);
2870 delay += 10;
2871 }
2872
2873 return 0;
2874}
2875
2876#ifdef CONFIG_SCSI_IPR_DUMP
2877/**
2878 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2879 * @ioa_cfg: ioa config struct
2880 * @pci_address: adapter address
2881 * @length: length of data to copy
2882 *
2883 * Copy data from PCI adapter to kernel buffer.
2884 * Note: length MUST be a 4 byte multiple
2885 * Return value:
2886 * 0 on success / other on failure
2887 **/
2888static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2889 unsigned long pci_address, u32 length)
2890{
2891 int bytes_copied = 0;
4d4dd706 2892 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2893 __be32 *page;
2894 unsigned long lock_flags = 0;
2895 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2896
2897 if (ioa_cfg->sis64)
2898 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2899 else
2900 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2901
1da177e4 2902 while (bytes_copied < length &&
4d4dd706 2903 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2904 if (ioa_dump->page_offset >= PAGE_SIZE ||
2905 ioa_dump->page_offset == 0) {
2906 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2907
2908 if (!page) {
2909 ipr_trace;
2910 return bytes_copied;
2911 }
2912
2913 ioa_dump->page_offset = 0;
2914 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2915 ioa_dump->next_page_index++;
2916 } else
2917 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2918
2919 rem_len = length - bytes_copied;
2920 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2921 cur_len = min(rem_len, rem_page_len);
2922
2923 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2924 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2925 rc = -EIO;
2926 } else {
2927 rc = ipr_get_ldump_data_section(ioa_cfg,
2928 pci_address + bytes_copied,
2929 &page[ioa_dump->page_offset / 4],
2930 (cur_len / sizeof(u32)));
2931 }
2932 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2933
2934 if (!rc) {
2935 ioa_dump->page_offset += cur_len;
2936 bytes_copied += cur_len;
2937 } else {
2938 ipr_trace;
2939 break;
2940 }
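		/* yield the CPU between chunks; the dump copy runs at task level and can be large */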
2941 schedule();
2942 }
2943
2944 return bytes_copied;
2945}
2946
2947/**
2948 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2949 * @hdr: dump entry header struct
2950 *
2951 * Return value:
2952 * nothing
2953 **/
2954static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2955{
2956 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2957 hdr->num_elems = 1;
2958 hdr->offset = sizeof(*hdr);
2959 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2960}
2961
2962/**
2963 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2964 * @ioa_cfg: ioa config struct
2965 * @driver_dump: driver dump struct
2966 *
2967 * Return value:
2968 * nothing
2969 **/
2970static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2971 struct ipr_driver_dump *driver_dump)
2972{
2973 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2974
2975 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2976 driver_dump->ioa_type_entry.hdr.len =
2977 sizeof(struct ipr_dump_ioa_type_entry) -
2978 sizeof(struct ipr_dump_entry_header);
2979 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2980 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2981 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2982 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2983 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2984 ucode_vpd->minor_release[1];
2985 driver_dump->hdr.num_entries++;
2986}
2987
2988/**
2989 * ipr_dump_version_data - Fill in the driver version in the dump.
2990 * @ioa_cfg: ioa config struct
2991 * @driver_dump: driver dump struct
2992 *
2993 * Return value:
2994 * nothing
2995 **/
2996static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2997 struct ipr_driver_dump *driver_dump)
2998{
2999 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3000 driver_dump->version_entry.hdr.len =
3001 sizeof(struct ipr_dump_version_entry) -
3002 sizeof(struct ipr_dump_entry_header);
3003 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3004 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3005 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3006 driver_dump->hdr.num_entries++;
3007}
3008
3009/**
3010 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3011 * @ioa_cfg: ioa config struct
3012 * @driver_dump: driver dump struct
3013 *
3014 * Return value:
3015 * nothing
3016 **/
3017static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3018 struct ipr_driver_dump *driver_dump)
3019{
3020 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3021 driver_dump->trace_entry.hdr.len =
3022 sizeof(struct ipr_dump_trace_entry) -
3023 sizeof(struct ipr_dump_entry_header);
3024 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3025 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3026 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3027 driver_dump->hdr.num_entries++;
3028}
3029
3030/**
3031 * ipr_dump_location_data - Fill in the IOA location in the dump.
3032 * @ioa_cfg: ioa config struct
3033 * @driver_dump: driver dump struct
3034 *
3035 * Return value:
3036 * nothing
3037 **/
3038static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3039 struct ipr_driver_dump *driver_dump)
3040{
3041 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3042 driver_dump->location_entry.hdr.len =
3043 sizeof(struct ipr_dump_location_entry) -
3044 sizeof(struct ipr_dump_entry_header);
3045 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3046 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
71610f55 3047 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3048 driver_dump->hdr.num_entries++;
3049}
3050
3051/**
3052 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3053 * @ioa_cfg: ioa config struct
3054 * @dump: dump struct
3055 *
3056 * Return value:
3057 * nothing
3058 **/
3059static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3060{
3061 unsigned long start_addr, sdt_word;
3062 unsigned long lock_flags = 0;
3063 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3064 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3065 u32 num_entries, max_num_entries, start_off, end_off;
3066 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
1da177e4 3067 struct ipr_sdt *sdt;
dcbad00e 3068 int valid = 1;
3069 int i;
3070
3071 ENTER;
3072
3073 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3074
41e9a696 3075 if (ioa_cfg->sdt_state != READ_DUMP) {
3076 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3077 return;
3078 }
3079
3080 if (ioa_cfg->sis64) {
3081 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3082 ssleep(IPR_DUMP_DELAY_SECONDS);
3083 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3084 }
3085
3086 start_addr = readl(ioa_cfg->ioa_mailbox);
3087
dcbad00e 3088 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3089 dev_err(&ioa_cfg->pdev->dev,
3090 "Invalid dump table format: %lx\n", start_addr);
3091 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3092 return;
3093 }
3094
3095 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3096
3097 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3098
3099 /* Initialize the overall dump header */
3100 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3101 driver_dump->hdr.num_entries = 1;
3102 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3103 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3104 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3105 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3106
3107 ipr_dump_version_data(ioa_cfg, driver_dump);
3108 ipr_dump_location_data(ioa_cfg, driver_dump);
3109 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3110 ipr_dump_trace_data(ioa_cfg, driver_dump);
3111
3112 /* Update dump_header */
3113 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3114
3115 /* IOA Dump entry */
3116 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3117 ioa_dump->hdr.len = 0;
3118 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3119 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3120
3121 /* First entries in sdt are actually a list of dump addresses and
3122 lengths to gather the real dump data. sdt represents the pointer
3123 to the ioa generated dump table. Dump data will be extracted based
3124 on entries in this table */
3125 sdt = &ioa_dump->sdt;
3126
3127 if (ioa_cfg->sis64) {
3128 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3129 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3130 } else {
3131 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3132 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3133 }
3134
3135 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3136 (max_num_entries * sizeof(struct ipr_sdt_entry));
1da177e4 3137 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
4d4dd706 3138 bytes_to_copy / sizeof(__be32));
3139
3140 /* Smart Dump table is ready to use and the first entry is valid */
3141 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3142 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3143 dev_err(&ioa_cfg->pdev->dev,
3144 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3145 rc, be32_to_cpu(sdt->hdr.state));
3146 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3147 ioa_cfg->sdt_state = DUMP_OBTAINED;
3148 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3149 return;
3150 }
3151
3152 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3153
3154 if (num_entries > max_num_entries)
3155 num_entries = max_num_entries;
3156
3157 /* Update dump length to the actual data to be copied */
3158 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3159 if (ioa_cfg->sis64)
3160 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3161 else
3162 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3163
3164 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3165
3166 for (i = 0; i < num_entries; i++) {
4d4dd706 3167 if (ioa_dump->hdr.len > max_dump_size) {
3168 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3169 break;
3170 }
3171
3172 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3173 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3174 if (ioa_cfg->sis64)
3175 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3176 else {
3177 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3178 end_off = be32_to_cpu(sdt->entry[i].end_token);
3179
3180 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3181 bytes_to_copy = end_off - start_off;
3182 else
3183 valid = 0;
3184 }
3185 if (valid) {
4d4dd706 3186 if (bytes_to_copy > max_dump_size) {
3187 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3188 continue;
3189 }
3190
3191 /* Copy data from adapter to driver buffers */
3192 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3193 bytes_to_copy);
3194
3195 ioa_dump->hdr.len += bytes_copied;
3196
3197 if (bytes_copied != bytes_to_copy) {
3198 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3199 break;
3200 }
3201 }
3202 }
3203 }
3204
3205 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3206
3207 /* Update dump_header */
3208 driver_dump->hdr.len += ioa_dump->hdr.len;
3209 wmb();
3210 ioa_cfg->sdt_state = DUMP_OBTAINED;
3211 LEAVE;
3212}
3213
3214#else
203fa3fe 3215#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
1da177e4
LT
3216#endif
3217
3218/**
3219 * ipr_release_dump - Free adapter dump memory
3220 * @kref: kref struct
3221 *
3222 * Return value:
3223 * nothing
3224 **/
3225static void ipr_release_dump(struct kref *kref)
3226{
203fa3fe 3227 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
1da177e4
LT
3228 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3229 unsigned long lock_flags = 0;
3230 int i;
3231
3232 ENTER;
3233 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3234 ioa_cfg->dump = NULL;
3235 ioa_cfg->sdt_state = INACTIVE;
3236 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3237
3238 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3239 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3240
4d4dd706 3241 vfree(dump->ioa_dump.ioa_data);
1da177e4
LT
3242 kfree(dump);
3243 LEAVE;
3244}
3245
3246/**
3247 * ipr_worker_thread - Worker thread
c4028958 3248 * @work: work struct (embedded in the ioa config struct)
1da177e4
LT
3249 *
3250 * Called at task level from a work thread. This function takes care
3251 * of adding and removing devices from the mid-layer as configuration
3252 * changes are detected by the adapter.
3253 *
3254 * Return value:
3255 * nothing
3256 **/
c4028958 3257static void ipr_worker_thread(struct work_struct *work)
1da177e4
LT
3258{
3259 unsigned long lock_flags;
3260 struct ipr_resource_entry *res;
3261 struct scsi_device *sdev;
3262 struct ipr_dump *dump;
c4028958
DH
3263 struct ipr_ioa_cfg *ioa_cfg =
3264 container_of(work, struct ipr_ioa_cfg, work_q);
1da177e4
LT
3265 u8 bus, target, lun;
3266 int did_work;
3267
3268 ENTER;
3269 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3270
41e9a696 3271 if (ioa_cfg->sdt_state == READ_DUMP) {
1da177e4
LT
3272 dump = ioa_cfg->dump;
3273 if (!dump) {
3274 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3275 return;
3276 }
3277 kref_get(&dump->kref);
3278 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3279 ipr_get_ioa_dump(ioa_cfg, dump);
3280 kref_put(&dump->kref, ipr_release_dump);
3281
3282 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4c647e90 3283 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
1da177e4
LT
3284 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3285 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3286 return;
3287 }
3288
3289restart:
3290 do {
3291 did_work = 0;
f688f96d 3292 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1da177e4
LT
3293 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3294 return;
3295 }
3296
3297 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3298 if (res->del_from_ml && res->sdev) {
3299 did_work = 1;
3300 sdev = res->sdev;
3301 if (!scsi_device_get(sdev)) {
5767a1c4
KSS
3302 if (!res->add_to_ml)
3303 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3304 else
3305 res->del_from_ml = 0;
1da177e4
LT
3306 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3307 scsi_remove_device(sdev);
3308 scsi_device_put(sdev);
3309 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3310 }
3311 break;
3312 }
3313 }
203fa3fe 3314 } while (did_work);
1da177e4
LT
3315
3316 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3317 if (res->add_to_ml) {
3e7ebdfa
WB
3318 bus = res->bus;
3319 target = res->target;
3320 lun = res->lun;
1121b794 3321 res->add_to_ml = 0;
1da177e4
LT
3322 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3323 scsi_add_device(ioa_cfg->host, bus, target, lun);
3324 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3325 goto restart;
3326 }
3327 }
3328
f688f96d 3329 ioa_cfg->scan_done = 1;
1da177e4 3330 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ee959b00 3331 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
1da177e4
LT
3332 LEAVE;
3333}
3334
3335#ifdef CONFIG_SCSI_IPR_TRACE
3336/**
3337 * ipr_read_trace - Dump the adapter trace
2c3c8bea 3338 * @filp: open sysfs file
1da177e4 3339 * @kobj: kobject struct
91a69029 3340 * @bin_attr: bin_attribute struct
1da177e4
LT
3341 * @buf: buffer
3342 * @off: offset
3343 * @count: buffer size
3344 *
3345 * Return value:
3346 * number of bytes printed to buffer
3347 **/
2c3c8bea 3348static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
91a69029
ZR
3349 struct bin_attribute *bin_attr,
3350 char *buf, loff_t off, size_t count)
1da177e4 3351{
ee959b00
TJ
3352 struct device *dev = container_of(kobj, struct device, kobj);
3353 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3354 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3355 unsigned long lock_flags = 0;
d777aaf3 3356 ssize_t ret;
1da177e4
LT
3357
3358 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3359 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3360 IPR_TRACE_SIZE);
1da177e4 3361 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3362
3363 return ret;
1da177e4
LT
3364}
3365
3366static struct bin_attribute ipr_trace_attr = {
3367 .attr = {
3368 .name = "trace",
3369 .mode = S_IRUGO,
3370 },
3371 .size = 0,
3372 .read = ipr_read_trace,
3373};
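/*
 * Illustrative usage note (not part of the original source): with
 * CONFIG_SCSI_IPR_TRACE enabled, the binary attribute above exposes the
 * adapter trace buffer through sysfs, typically as
 * /sys/class/scsi_host/hostN/trace. Assuming host0, it could be captured
 * from user space with something like:
 *
 *	dd if=/sys/class/scsi_host/host0/trace of=/tmp/ipr_trace.bin
 *
 * Treat the exact path as an example; it depends on how the Scsi_Host
 * device is registered.
 */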
3374#endif
3375
3376/**
3377 * ipr_show_fw_version - Show the firmware version
ee959b00
TJ
3378 * @dev: class device struct
3379 * @buf: buffer
1da177e4
LT
3380 *
3381 * Return value:
3382 * number of bytes printed to buffer
3383 **/
ee959b00
TJ
3384static ssize_t ipr_show_fw_version(struct device *dev,
3385 struct device_attribute *attr, char *buf)
1da177e4 3386{
ee959b00 3387 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3388 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3389 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3390 unsigned long lock_flags = 0;
3391 int len;
3392
3393 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3394 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3395 ucode_vpd->major_release, ucode_vpd->card_type,
3396 ucode_vpd->minor_release[0],
3397 ucode_vpd->minor_release[1]);
3398 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3399 return len;
3400}
3401
ee959b00 3402static struct device_attribute ipr_fw_version_attr = {
1da177e4
LT
3403 .attr = {
3404 .name = "fw_version",
3405 .mode = S_IRUGO,
3406 },
3407 .show = ipr_show_fw_version,
3408};
3409
3410/**
3411 * ipr_show_log_level - Show the adapter's error logging level
ee959b00
TJ
3412 * @dev: class device struct
3413 * @buf: buffer
1da177e4
LT
3414 *
3415 * Return value:
3416 * number of bytes printed to buffer
3417 **/
ee959b00
TJ
3418static ssize_t ipr_show_log_level(struct device *dev,
3419 struct device_attribute *attr, char *buf)
1da177e4 3420{
ee959b00 3421 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3422 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3423 unsigned long lock_flags = 0;
3424 int len;
3425
3426 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3427 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3428 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3429 return len;
3430}
3431
3432/**
3433 * ipr_store_log_level - Change the adapter's error logging level
ee959b00
TJ
3434 * @dev: class device struct
3435 * @buf: buffer
1da177e4
LT
3436 *
3437 * Return value:
3438 * number of bytes consumed from the buffer
3439 **/
ee959b00 3440static ssize_t ipr_store_log_level(struct device *dev,
203fa3fe 3441 struct device_attribute *attr,
1da177e4
LT
3442 const char *buf, size_t count)
3443{
ee959b00 3444 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3445 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3446 unsigned long lock_flags = 0;
3447
3448 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3449 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3450 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3451 return strlen(buf);
3452}
3453
ee959b00 3454static struct device_attribute ipr_log_level_attr = {
1da177e4
LT
3455 .attr = {
3456 .name = "log_level",
3457 .mode = S_IRUGO | S_IWUSR,
3458 },
3459 .show = ipr_show_log_level,
3460 .store = ipr_store_log_level
3461};
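/*
 * Illustrative example (hypothetical host number): the attribute above lets
 * an administrator inspect or change the adapter's error logging level at
 * runtime, e.g.:
 *
 *	cat /sys/class/scsi_host/host0/log_level
 *	echo 4 > /sys/class/scsi_host/host0/log_level
 */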
3462
3463/**
3464 * ipr_store_diagnostics - IOA Diagnostics interface
ee959b00
TJ
3465 * @dev: device struct
3466 * @buf: buffer
3467 * @count: buffer size
1da177e4
LT
3468 *
3469 * This function will reset the adapter and wait a reasonable
3470 * amount of time for any errors that the adapter might log.
3471 *
3472 * Return value:
3473 * count on success / other on failure
3474 **/
ee959b00
TJ
3475static ssize_t ipr_store_diagnostics(struct device *dev,
3476 struct device_attribute *attr,
1da177e4
LT
3477 const char *buf, size_t count)
3478{
ee959b00 3479 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3480 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3481 unsigned long lock_flags = 0;
3482 int rc = count;
3483
3484 if (!capable(CAP_SYS_ADMIN))
3485 return -EACCES;
3486
1da177e4 3487 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 3488 while (ioa_cfg->in_reset_reload) {
970ea294
BK
3489 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3490 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3491 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3492 }
3493
1da177e4
LT
3494 ioa_cfg->errors_logged = 0;
3495 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3496
3497 if (ioa_cfg->in_reset_reload) {
3498 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3499 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3500
3501 /* Wait for a second for any errors to be logged */
3502 msleep(1000);
3503 } else {
3504 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3505 return -EIO;
3506 }
3507
3508 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3509 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3510 rc = -EIO;
3511 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3512
3513 return rc;
3514}
3515
ee959b00 3516static struct device_attribute ipr_diagnostics_attr = {
1da177e4
LT
3517 .attr = {
3518 .name = "run_diagnostics",
3519 .mode = S_IWUSR,
3520 },
3521 .store = ipr_store_diagnostics
3522};
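/*
 * Illustrative example (hypothetical host number): any write to the
 * run_diagnostics attribute resets the adapter and then waits briefly for
 * errors to be logged; a non-zero error count is reported as -EIO:
 *
 *	echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */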
3523
f37eb54b 3524/**
3525 * ipr_show_adapter_state - Show the adapter's state
ee959b00
TJ
3526 * @dev: device struct
3527 * @buf: buffer
f37eb54b 3528 *
3529 * Return value:
3530 * number of bytes printed to buffer
3531 **/
ee959b00
TJ
3532static ssize_t ipr_show_adapter_state(struct device *dev,
3533 struct device_attribute *attr, char *buf)
f37eb54b 3534{
ee959b00 3535 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b 3536 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3537 unsigned long lock_flags = 0;
3538 int len;
3539
3540 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
56d6aa33 3541 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
f37eb54b 3542 len = snprintf(buf, PAGE_SIZE, "offline\n");
3543 else
3544 len = snprintf(buf, PAGE_SIZE, "online\n");
3545 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3546 return len;
3547}
3548
3549/**
3550 * ipr_store_adapter_state - Change adapter state
ee959b00
TJ
3551 * @dev: device struct
3552 * @buf: buffer
3553 * @count: buffer size
f37eb54b 3554 *
3555 * This function will change the adapter's state.
3556 *
3557 * Return value:
3558 * count on success / other on failure
3559 **/
ee959b00
TJ
3560static ssize_t ipr_store_adapter_state(struct device *dev,
3561 struct device_attribute *attr,
f37eb54b 3562 const char *buf, size_t count)
3563{
ee959b00 3564 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b 3565 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3566 unsigned long lock_flags;
56d6aa33 3567 int result = count, i;
f37eb54b 3568
3569 if (!capable(CAP_SYS_ADMIN))
3570 return -EACCES;
3571
3572 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
56d6aa33 3573 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3574 !strncmp(buf, "online", 6)) {
3575 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3576 spin_lock(&ioa_cfg->hrrq[i]._lock);
3577 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3578 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3579 }
3580 wmb();
f37eb54b 3581 ioa_cfg->reset_retries = 0;
3582 ioa_cfg->in_ioa_bringdown = 0;
3583 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3584 }
3585 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3586 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3587
3588 return result;
3589}
3590
ee959b00 3591static struct device_attribute ipr_ioa_state_attr = {
f37eb54b 3592 .attr = {
49dd0961 3593 .name = "online_state",
f37eb54b 3594 .mode = S_IRUGO | S_IWUSR,
3595 },
3596 .show = ipr_show_adapter_state,
3597 .store = ipr_store_adapter_state
3598};
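/*
 * Illustrative example (hypothetical host number): an adapter that has been
 * marked dead after exhausting its reset retries can be brought back by
 * writing "online" to the attribute above:
 *
 *	echo online > /sys/class/scsi_host/host0/online_state
 */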
3599
1da177e4
LT
3600/**
3601 * ipr_store_reset_adapter - Reset the adapter
ee959b00
TJ
3602 * @dev: device struct
3603 * @buf: buffer
3604 * @count: buffer size
1da177e4
LT
3605 *
3606 * This function will reset the adapter.
3607 *
3608 * Return value:
3609 * count on success / other on failure
3610 **/
ee959b00
TJ
3611static ssize_t ipr_store_reset_adapter(struct device *dev,
3612 struct device_attribute *attr,
1da177e4
LT
3613 const char *buf, size_t count)
3614{
ee959b00 3615 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3616 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3617 unsigned long lock_flags;
3618 int result = count;
3619
3620 if (!capable(CAP_SYS_ADMIN))
3621 return -EACCES;
3622
3623 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3624 if (!ioa_cfg->in_reset_reload)
3625 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3626 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3627 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3628
3629 return result;
3630}
3631
ee959b00 3632static struct device_attribute ipr_ioa_reset_attr = {
1da177e4
LT
3633 .attr = {
3634 .name = "reset_host",
3635 .mode = S_IWUSR,
3636 },
3637 .store = ipr_store_reset_adapter
3638};
3639
b53d124a 3640static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3641/**
3642 * ipr_show_iopoll_weight - Show ipr polling mode
3643 * @dev: class device struct
3644 * @buf: buffer
3645 *
3646 * Return value:
3647 * number of bytes printed to buffer
3648 **/
3649static ssize_t ipr_show_iopoll_weight(struct device *dev,
3650 struct device_attribute *attr, char *buf)
3651{
3652 struct Scsi_Host *shost = class_to_shost(dev);
3653 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3654 unsigned long lock_flags = 0;
3655 int len;
3656
3657 spin_lock_irqsave(shost->host_lock, lock_flags);
3658 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3659 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3660
3661 return len;
3662}
3663
3664/**
3665 * ipr_store_iopoll_weight - Change the adapter's polling mode
3666 * @dev: class device struct
3667 * @buf: buffer
3668 *
3669 * Return value:
3670 * number of bytes consumed from the buffer
3671 **/
3672static ssize_t ipr_store_iopoll_weight(struct device *dev,
3673 struct device_attribute *attr,
3674 const char *buf, size_t count)
3675{
3676 struct Scsi_Host *shost = class_to_shost(dev);
3677 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3678 unsigned long user_iopoll_weight;
3679 unsigned long lock_flags = 0;
3680 int i;
3681
3682 if (!ioa_cfg->sis64) {
3683 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3684 return -EINVAL;
3685 }
3686 if (kstrtoul(buf, 10, &user_iopoll_weight))
3687 return -EINVAL;
3688
3689 if (user_iopoll_weight > 256) {
3690 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
3691 return -EINVAL;
3692 }
3693
3694 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3695 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll weight is already set to this value\n");
3696 return strlen(buf);
3697 }
3698
89f8b33c 3699 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 3700 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3701 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3702 }
3703
3704 spin_lock_irqsave(shost->host_lock, lock_flags);
3705 ioa_cfg->iopoll_weight = user_iopoll_weight;
89f8b33c 3706 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 3707 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3708 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3709 ioa_cfg->iopoll_weight, ipr_iopoll);
3710 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3711 }
3712 }
3713 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3714
3715 return strlen(buf);
3716}
3717
3718static struct device_attribute ipr_iopoll_weight_attr = {
3719 .attr = {
3720 .name = "iopoll_weight",
3721 .mode = S_IRUGO | S_IWUSR,
3722 },
3723 .show = ipr_show_iopoll_weight,
3724 .store = ipr_store_iopoll_weight
3725};
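/*
 * Illustrative example (hypothetical host number): on SIS-64 adapters with
 * more than one interrupt vector, a non-zero weight switches completion
 * processing to blk-iopoll on the secondary HRR queues, while writing 0
 * returns to purely interrupt-driven completions:
 *
 *	echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 *	echo 0 > /sys/class/scsi_host/host0/iopoll_weight
 */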
3726
1da177e4
LT
3727/**
3728 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3729 * @buf_len: buffer length
3730 *
3731 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3732 * list to use for microcode download
3733 *
3734 * Return value:
3735 * pointer to sglist / NULL on failure
3736 **/
3737static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3738{
3739 int sg_size, order, bsize_elem, num_elem, i, j;
3740 struct ipr_sglist *sglist;
3741 struct scatterlist *scatterlist;
3742 struct page *page;
3743
3744 /* Get the minimum size per scatter/gather element */
3745 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3746
3747 /* Get the actual size per element */
3748 order = get_order(sg_size);
3749
3750 /* Determine the actual number of bytes per element */
3751 bsize_elem = PAGE_SIZE * (1 << order);
3752
3753 /* Determine the actual number of sg entries needed */
3754 if (buf_len % bsize_elem)
3755 num_elem = (buf_len / bsize_elem) + 1;
3756 else
3757 num_elem = buf_len / bsize_elem;
3758
3759 /* Allocate a scatter/gather list for the DMA */
0bc42e35 3760 sglist = kzalloc(sizeof(struct ipr_sglist) +
1da177e4
LT
3761 (sizeof(struct scatterlist) * (num_elem - 1)),
3762 GFP_KERNEL);
3763
3764 if (sglist == NULL) {
3765 ipr_trace;
3766 return NULL;
3767 }
3768
1da177e4 3769 scatterlist = sglist->scatterlist;
45711f1a 3770 sg_init_table(scatterlist, num_elem);
1da177e4
LT
3771
3772 sglist->order = order;
3773 sglist->num_sg = num_elem;
3774
3775 /* Allocate a bunch of sg elements */
3776 for (i = 0; i < num_elem; i++) {
3777 page = alloc_pages(GFP_KERNEL, order);
3778 if (!page) {
3779 ipr_trace;
3780
3781 /* Free up what we already allocated */
3782 for (j = i - 1; j >= 0; j--)
45711f1a 3783 __free_pages(sg_page(&scatterlist[j]), order);
1da177e4
LT
3784 kfree(sglist);
3785 return NULL;
3786 }
3787
642f1490 3788 sg_set_page(&scatterlist[i], page, 0, 0);
1da177e4
LT
3789 }
3790
3791 return sglist;
3792}
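/*
 * Worked sizing example for the allocation above (illustrative numbers,
 * assuming PAGE_SIZE is 4 KiB and IPR_MAX_SGLIST is 64): a 4 MiB microcode
 * image gives sg_size = 4 MiB / 63 ~= 65 KiB, so get_order() returns 5,
 * bsize_elem = 4 KiB << 5 = 128 KiB, and num_elem = 4 MiB / 128 KiB = 32
 * scatter/gather elements.
 */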
3793
3794/**
3795 * ipr_free_ucode_buffer - Frees a microcode download buffer
3796 * @sglist: scatter/gather list pointer
3797 *
3798 * Free a DMA'able ucode download buffer previously allocated with
3799 * ipr_alloc_ucode_buffer
3800 *
3801 * Return value:
3802 * nothing
3803 **/
3804static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3805{
3806 int i;
3807
3808 for (i = 0; i < sglist->num_sg; i++)
45711f1a 3809 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
1da177e4
LT
3810
3811 kfree(sglist);
3812}
3813
3814/**
3815 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3816 * @sglist: scatter/gather list pointer
3817 * @buffer: buffer pointer
3818 * @len: buffer length
3819 *
3820 * Copy a microcode image from a user buffer into a buffer allocated by
3821 * ipr_alloc_ucode_buffer
3822 *
3823 * Return value:
3824 * 0 on success / other on failure
3825 **/
3826static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3827 u8 *buffer, u32 len)
3828{
3829 int bsize_elem, i, result = 0;
3830 struct scatterlist *scatterlist;
3831 void *kaddr;
3832
3833 /* Determine the actual number of bytes per element */
3834 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3835
3836 scatterlist = sglist->scatterlist;
3837
3838 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
45711f1a
JA
3839 struct page *page = sg_page(&scatterlist[i]);
3840
3841 kaddr = kmap(page);
1da177e4 3842 memcpy(kaddr, buffer, bsize_elem);
45711f1a 3843 kunmap(page);
1da177e4
LT
3844
3845 scatterlist[i].length = bsize_elem;
3846
3847 if (result != 0) {
3848 ipr_trace;
3849 return result;
3850 }
3851 }
3852
3853 if (len % bsize_elem) {
45711f1a
JA
3854 struct page *page = sg_page(&scatterlist[i]);
3855
3856 kaddr = kmap(page);
1da177e4 3857 memcpy(kaddr, buffer, len % bsize_elem);
45711f1a 3858 kunmap(page);
1da177e4
LT
3859
3860 scatterlist[i].length = len % bsize_elem;
3861 }
3862
3863 sglist->buffer_len = len;
3864 return result;
3865}
3866
a32c055f
WB
3867/**
3868 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3869 * @ipr_cmd: ipr command struct
3870 * @sglist: scatter/gather list
3871 *
3872 * Builds a microcode download IOA data list (IOADL).
3873 *
3874 **/
3875static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3876 struct ipr_sglist *sglist)
3877{
3878 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3879 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3880 struct scatterlist *scatterlist = sglist->scatterlist;
3881 int i;
3882
3883 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3884 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3885 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3886
3887 ioarcb->ioadl_len =
3888 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3889 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3890 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3891 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3892 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3893 }
3894
3895 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3896}
3897
1da177e4 3898/**
12baa420 3899 * ipr_build_ucode_ioadl - Build a microcode download IOADL
1da177e4
LT
3900 * @ipr_cmd: ipr command struct
3901 * @sglist: scatter/gather list
1da177e4 3902 *
12baa420 3903 * Builds a microcode download IOA data list (IOADL).
1da177e4 3904 *
1da177e4 3905 **/
12baa420 3906static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3907 struct ipr_sglist *sglist)
1da177e4 3908{
1da177e4 3909 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 3910 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4
LT
3911 struct scatterlist *scatterlist = sglist->scatterlist;
3912 int i;
3913
12baa420 3914 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 3915 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
3916 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3917
3918 ioarcb->ioadl_len =
1da177e4
LT
3919 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3920
3921 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3922 ioadl[i].flags_and_data_len =
3923 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3924 ioadl[i].address =
3925 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3926 }
3927
12baa420 3928 ioadl[i-1].flags_and_data_len |=
3929 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3930}
3931
3932/**
3933 * ipr_update_ioa_ucode - Update IOA's microcode
3934 * @ioa_cfg: ioa config struct
3935 * @sglist: scatter/gather list
3936 *
3937 * Initiate an adapter reset to update the IOA's microcode
3938 *
3939 * Return value:
3940 * 0 on success / -EIO on failure
3941 **/
3942static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3943 struct ipr_sglist *sglist)
3944{
3945 unsigned long lock_flags;
3946
3947 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 3948 while (ioa_cfg->in_reset_reload) {
970ea294
BK
3949 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3950 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3951 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3952 }
12baa420 3953
3954 if (ioa_cfg->ucode_sglist) {
3955 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3956 dev_err(&ioa_cfg->pdev->dev,
3957 "Microcode download already in progress\n");
3958 return -EIO;
1da177e4 3959 }
12baa420 3960
d73341bf
AB
3961 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3962 sglist->scatterlist, sglist->num_sg,
3963 DMA_TO_DEVICE);
12baa420 3964
3965 if (!sglist->num_dma_sg) {
3966 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3967 dev_err(&ioa_cfg->pdev->dev,
3968 "Failed to map microcode download buffer!\n");
1da177e4
LT
3969 return -EIO;
3970 }
3971
12baa420 3972 ioa_cfg->ucode_sglist = sglist;
3973 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3974 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3975 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3976
3977 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3978 ioa_cfg->ucode_sglist = NULL;
3979 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4
LT
3980 return 0;
3981}
3982
3983/**
3984 * ipr_store_update_fw - Update the firmware on the adapter
ee959b00
TJ
3985 * @dev: device struct
3986 * @buf: buffer
3987 * @count: buffer size
1da177e4
LT
3988 *
3989 * This function will update the firmware on the adapter.
3990 *
3991 * Return value:
3992 * count on success / other on failure
3993 **/
ee959b00
TJ
3994static ssize_t ipr_store_update_fw(struct device *dev,
3995 struct device_attribute *attr,
3996 const char *buf, size_t count)
1da177e4 3997{
ee959b00 3998 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3999 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4000 struct ipr_ucode_image_header *image_hdr;
4001 const struct firmware *fw_entry;
4002 struct ipr_sglist *sglist;
1da177e4
LT
4003 char fname[100];
4004 char *src;
4005 int len, result, dnld_size;
4006
4007 if (!capable(CAP_SYS_ADMIN))
4008 return -EACCES;
4009
4010 len = snprintf(fname, 99, "%s", buf);
4011 fname[len-1] = '\0';
4012
203fa3fe 4013 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
1da177e4
LT
4014 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4015 return -EIO;
4016 }
4017
4018 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4019
1da177e4
LT
4020 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4021 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4022 sglist = ipr_alloc_ucode_buffer(dnld_size);
4023
4024 if (!sglist) {
4025 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4026 release_firmware(fw_entry);
4027 return -ENOMEM;
4028 }
4029
4030 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4031
4032 if (result) {
4033 dev_err(&ioa_cfg->pdev->dev,
4034 "Microcode buffer copy to DMA buffer failed\n");
12baa420 4035 goto out;
1da177e4
LT
4036 }
4037
14ed9cc7
WB
4038 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4039
12baa420 4040 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 4041
12baa420 4042 if (!result)
4043 result = count;
4044out:
1da177e4
LT
4045 ipr_free_ucode_buffer(sglist);
4046 release_firmware(fw_entry);
12baa420 4047 return result;
1da177e4
LT
4048}
4049
ee959b00 4050static struct device_attribute ipr_update_fw_attr = {
1da177e4
LT
4051 .attr = {
4052 .name = "update_fw",
4053 .mode = S_IWUSR,
4054 },
4055 .store = ipr_store_update_fw
4056};
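/*
 * Illustrative example (hypothetical host number and image name): the image
 * is fetched with request_firmware(), so the file is expected in the
 * firmware search path (commonly /lib/firmware):
 *
 *	echo 534954A0.img > /sys/class/scsi_host/host0/update_fw
 */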
4057
75576bb9
WB
4058/**
4059 * ipr_show_fw_type - Show the adapter's firmware type.
4060 * @dev: class device struct
4061 * @buf: buffer
4062 *
4063 * Return value:
4064 * number of bytes printed to buffer
4065 **/
4066static ssize_t ipr_show_fw_type(struct device *dev,
4067 struct device_attribute *attr, char *buf)
4068{
4069 struct Scsi_Host *shost = class_to_shost(dev);
4070 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4071 unsigned long lock_flags = 0;
4072 int len;
4073
4074 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4075 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4076 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4077 return len;
4078}
4079
4080static struct device_attribute ipr_ioa_fw_type_attr = {
4081 .attr = {
4082 .name = "fw_type",
4083 .mode = S_IRUGO,
4084 },
4085 .show = ipr_show_fw_type
4086};
4087
ee959b00 4088static struct device_attribute *ipr_ioa_attrs[] = {
1da177e4
LT
4089 &ipr_fw_version_attr,
4090 &ipr_log_level_attr,
4091 &ipr_diagnostics_attr,
f37eb54b 4092 &ipr_ioa_state_attr,
1da177e4
LT
4093 &ipr_ioa_reset_attr,
4094 &ipr_update_fw_attr,
75576bb9 4095 &ipr_ioa_fw_type_attr,
b53d124a 4096 &ipr_iopoll_weight_attr,
1da177e4
LT
4097 NULL,
4098};
4099
4100#ifdef CONFIG_SCSI_IPR_DUMP
4101/**
4102 * ipr_read_dump - Dump the adapter
2c3c8bea 4103 * @filp: open sysfs file
1da177e4 4104 * @kobj: kobject struct
91a69029 4105 * @bin_attr: bin_attribute struct
1da177e4
LT
4106 * @buf: buffer
4107 * @off: offset
4108 * @count: buffer size
4109 *
4110 * Return value:
4111 * number of bytes printed to buffer
4112 **/
2c3c8bea 4113static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
91a69029
ZR
4114 struct bin_attribute *bin_attr,
4115 char *buf, loff_t off, size_t count)
1da177e4 4116{
ee959b00 4117 struct device *cdev = container_of(kobj, struct device, kobj);
1da177e4
LT
4118 struct Scsi_Host *shost = class_to_shost(cdev);
4119 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4120 struct ipr_dump *dump;
4121 unsigned long lock_flags = 0;
4122 char *src;
4d4dd706 4123 int len, sdt_end;
1da177e4
LT
4124 size_t rc = count;
4125
4126 if (!capable(CAP_SYS_ADMIN))
4127 return -EACCES;
4128
4129 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4130 dump = ioa_cfg->dump;
4131
4132 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4133 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4134 return 0;
4135 }
4136 kref_get(&dump->kref);
4137 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4138
4139 if (off > dump->driver_dump.hdr.len) {
4140 kref_put(&dump->kref, ipr_release_dump);
4141 return 0;
4142 }
4143
4144 if (off + count > dump->driver_dump.hdr.len) {
4145 count = dump->driver_dump.hdr.len - off;
4146 rc = count;
4147 }
4148
4149 if (count && off < sizeof(dump->driver_dump)) {
4150 if (off + count > sizeof(dump->driver_dump))
4151 len = sizeof(dump->driver_dump) - off;
4152 else
4153 len = count;
4154 src = (u8 *)&dump->driver_dump + off;
4155 memcpy(buf, src, len);
4156 buf += len;
4157 off += len;
4158 count -= len;
4159 }
4160
4161 off -= sizeof(dump->driver_dump);
4162
4d4dd706
KSS
4163 if (ioa_cfg->sis64)
4164 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4165 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4166 sizeof(struct ipr_sdt_entry));
4167 else
4168 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4169 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4170
4171 if (count && off < sdt_end) {
4172 if (off + count > sdt_end)
4173 len = sdt_end - off;
1da177e4
LT
4174 else
4175 len = count;
4176 src = (u8 *)&dump->ioa_dump + off;
4177 memcpy(buf, src, len);
4178 buf += len;
4179 off += len;
4180 count -= len;
4181 }
4182
4d4dd706 4183 off -= sdt_end;
1da177e4
LT
4184
4185 while (count) {
4186 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4187 len = PAGE_ALIGN(off) - off;
4188 else
4189 len = count;
4190 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4191 src += off & ~PAGE_MASK;
4192 memcpy(buf, src, len);
4193 buf += len;
4194 off += len;
4195 count -= len;
4196 }
4197
4198 kref_put(&dump->kref, ipr_release_dump);
4199 return rc;
4200}
4201
4202/**
4203 * ipr_alloc_dump - Prepare for adapter dump
4204 * @ioa_cfg: ioa config struct
4205 *
4206 * Return value:
4207 * 0 on success / other on failure
4208 **/
4209static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4210{
4211 struct ipr_dump *dump;
4d4dd706 4212 __be32 **ioa_data;
1da177e4
LT
4213 unsigned long lock_flags = 0;
4214
0bc42e35 4215 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
1da177e4
LT
4216
4217 if (!dump) {
4218 ipr_err("Dump memory allocation failed\n");
4219 return -ENOMEM;
4220 }
4221
4d4dd706
KSS
4222 if (ioa_cfg->sis64)
4223 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4224 else
4225 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4226
4227 if (!ioa_data) {
4228 ipr_err("Dump memory allocation failed\n");
4229 kfree(dump);
4230 return -ENOMEM;
4231 }
4232
4233 dump->ioa_dump.ioa_data = ioa_data;
4234
1da177e4
LT
4235 kref_init(&dump->kref);
4236 dump->ioa_cfg = ioa_cfg;
4237
4238 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4239
4240 if (INACTIVE != ioa_cfg->sdt_state) {
4241 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4d4dd706 4242 vfree(dump->ioa_dump.ioa_data);
1da177e4
LT
4243 kfree(dump);
4244 return 0;
4245 }
4246
4247 ioa_cfg->dump = dump;
4248 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
56d6aa33 4249 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
1da177e4
LT
4250 ioa_cfg->dump_taken = 1;
4251 schedule_work(&ioa_cfg->work_q);
4252 }
4253 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4254
1da177e4
LT
4255 return 0;
4256}
4257
4258/**
4259 * ipr_free_dump - Free adapter dump memory
4260 * @ioa_cfg: ioa config struct
4261 *
4262 * Return value:
4263 * 0 on success / other on failure
4264 **/
4265static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4266{
4267 struct ipr_dump *dump;
4268 unsigned long lock_flags = 0;
4269
4270 ENTER;
4271
4272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4273 dump = ioa_cfg->dump;
4274 if (!dump) {
4275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4276 return 0;
4277 }
4278
4279 ioa_cfg->dump = NULL;
4280 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4281
4282 kref_put(&dump->kref, ipr_release_dump);
4283
4284 LEAVE;
4285 return 0;
4286}
4287
4288/**
4289 * ipr_write_dump - Setup dump state of adapter
2c3c8bea 4290 * @filp: open sysfs file
1da177e4 4291 * @kobj: kobject struct
91a69029 4292 * @bin_attr: bin_attribute struct
1da177e4
LT
4293 * @buf: buffer
4294 * @off: offset
4295 * @count: buffer size
4296 *
4297 * Return value:
4298 * count on success / other on failure
4299 **/
2c3c8bea 4300static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
91a69029
ZR
4301 struct bin_attribute *bin_attr,
4302 char *buf, loff_t off, size_t count)
1da177e4 4303{
ee959b00 4304 struct device *cdev = container_of(kobj, struct device, kobj);
1da177e4
LT
4305 struct Scsi_Host *shost = class_to_shost(cdev);
4306 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4307 int rc;
4308
4309 if (!capable(CAP_SYS_ADMIN))
4310 return -EACCES;
4311
4312 if (buf[0] == '1')
4313 rc = ipr_alloc_dump(ioa_cfg);
4314 else if (buf[0] == '0')
4315 rc = ipr_free_dump(ioa_cfg);
4316 else
4317 return -EINVAL;
4318
4319 if (rc)
4320 return rc;
4321 else
4322 return count;
4323}
4324
4325static struct bin_attribute ipr_dump_attr = {
4326 .attr = {
4327 .name = "dump",
4328 .mode = S_IRUSR | S_IWUSR,
4329 },
4330 .size = 0,
4331 .read = ipr_read_dump,
4332 .write = ipr_write_dump
4333};
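/*
 * Illustrative example (hypothetical host number): writing '1' to the dump
 * attribute allocates the dump buffers, the worker thread collects the dump
 * once the adapter reaches a dumpable state, and the data can then be read
 * back and released:
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump
 *	dd if=/sys/class/scsi_host/host0/dump of=/tmp/ipr_dump.bin
 *	echo 0 > /sys/class/scsi_host/host0/dump
 */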
4334#else
4335static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4336#endif
4337
4338/**
4339 * ipr_change_queue_depth - Change the device's queue depth
4340 * @sdev: scsi device struct
4341 * @qdepth: depth to set
1da177e4
LT
4343 *
4344 * Return value:
4345 * actual depth set
4346 **/
db5ed4df 4347static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
1da177e4 4348{
35a39691
BK
4349 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4350 struct ipr_resource_entry *res;
4351 unsigned long lock_flags = 0;
4352
4353 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4354 res = (struct ipr_resource_entry *)sdev->hostdata;
4355
4356 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4357 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4358 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4359
db5ed4df 4360 scsi_change_queue_depth(sdev, qdepth);
1da177e4
LT
4361 return sdev->queue_depth;
4362}
4363
1da177e4
LT
4364/**
4365 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4366 * @dev: device struct
46d74563 4367 * @attr: device attribute structure
1da177e4
LT
4368 * @buf: buffer
4369 *
4370 * Return value:
4371 * number of bytes printed to buffer
4372 **/
10523b3b 4373static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
1da177e4
LT
4374{
4375 struct scsi_device *sdev = to_scsi_device(dev);
4376 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4377 struct ipr_resource_entry *res;
4378 unsigned long lock_flags = 0;
4379 ssize_t len = -ENXIO;
4380
4381 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4382 res = (struct ipr_resource_entry *)sdev->hostdata;
4383 if (res)
3e7ebdfa 4384 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
1da177e4
LT
4385 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4386 return len;
4387}
4388
4389static struct device_attribute ipr_adapter_handle_attr = {
4390 .attr = {
4391 .name = "adapter_handle",
4392 .mode = S_IRUSR,
4393 },
4394 .show = ipr_show_adapter_handle
4395};
4396
3e7ebdfa 4397/**
5adcbeb3
WB
4398 * ipr_show_resource_path - Show the resource path or the resource address for
4399 * this device.
3e7ebdfa 4400 * @dev: device struct
46d74563 4401 * @attr: device attribute structure
3e7ebdfa
WB
4402 * @buf: buffer
4403 *
4404 * Return value:
4405 * number of bytes printed to buffer
4406 **/
4407static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4408{
4409 struct scsi_device *sdev = to_scsi_device(dev);
4410 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4411 struct ipr_resource_entry *res;
4412 unsigned long lock_flags = 0;
4413 ssize_t len = -ENXIO;
4414 char buffer[IPR_MAX_RES_PATH_LENGTH];
4415
4416 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4417 res = (struct ipr_resource_entry *)sdev->hostdata;
5adcbeb3 4418 if (res && ioa_cfg->sis64)
3e7ebdfa 4419 len = snprintf(buf, PAGE_SIZE, "%s\n",
b3b3b407
BK
4420 __ipr_format_res_path(res->res_path, buffer,
4421 sizeof(buffer)));
5adcbeb3
WB
4422 else if (res)
4423 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4424 res->bus, res->target, res->lun);
4425
3e7ebdfa
WB
4426 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4427 return len;
4428}
4429
4430static struct device_attribute ipr_resource_path_attr = {
4431 .attr = {
4432 .name = "resource_path",
75576bb9 4433 .mode = S_IRUGO,
3e7ebdfa
WB
4434 },
4435 .show = ipr_show_resource_path
4436};
4437
46d74563
WB
4438/**
4439 * ipr_show_device_id - Show the device_id for this device.
4440 * @dev: device struct
4441 * @attr: device attribute structure
4442 * @buf: buffer
4443 *
4444 * Return value:
4445 * number of bytes printed to buffer
4446 **/
4447static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4448{
4449 struct scsi_device *sdev = to_scsi_device(dev);
4450 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4451 struct ipr_resource_entry *res;
4452 unsigned long lock_flags = 0;
4453 ssize_t len = -ENXIO;
4454
4455 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4456 res = (struct ipr_resource_entry *)sdev->hostdata;
4457 if (res && ioa_cfg->sis64)
4458 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4459 else if (res)
4460 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4461
4462 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4463 return len;
4464}
4465
4466static struct device_attribute ipr_device_id_attr = {
4467 .attr = {
4468 .name = "device_id",
4469 .mode = S_IRUGO,
4470 },
4471 .show = ipr_show_device_id
4472};
4473
75576bb9
WB
4474/**
4475 * ipr_show_resource_type - Show the resource type for this device.
4476 * @dev: device struct
46d74563 4477 * @attr: device attribute structure
75576bb9
WB
4478 * @buf: buffer
4479 *
4480 * Return value:
4481 * number of bytes printed to buffer
4482 **/
4483static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4484{
4485 struct scsi_device *sdev = to_scsi_device(dev);
4486 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4487 struct ipr_resource_entry *res;
4488 unsigned long lock_flags = 0;
4489 ssize_t len = -ENXIO;
4490
4491 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4492 res = (struct ipr_resource_entry *)sdev->hostdata;
4493
4494 if (res)
4495 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4496
4497 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4498 return len;
4499}
4500
4501static struct device_attribute ipr_resource_type_attr = {
4502 .attr = {
4503 .name = "resource_type",
4504 .mode = S_IRUGO,
4505 },
4506 .show = ipr_show_resource_type
4507};
4508
f8ee25d7
WX
4509/**
4510 * ipr_show_raw_mode - Show the adapter's raw mode
4511 * @dev: class device struct
4512 * @buf: buffer
4513 *
4514 * Return value:
4515 * number of bytes printed to buffer
4516 **/
4517static ssize_t ipr_show_raw_mode(struct device *dev,
4518 struct device_attribute *attr, char *buf)
4519{
4520 struct scsi_device *sdev = to_scsi_device(dev);
4521 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4522 struct ipr_resource_entry *res;
4523 unsigned long lock_flags = 0;
4524 ssize_t len;
4525
4526 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4527 res = (struct ipr_resource_entry *)sdev->hostdata;
4528 if (res)
4529 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4530 else
4531 len = -ENXIO;
4532 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4533 return len;
4534}
4535
4536/**
4537 * ipr_store_raw_mode - Change the adapter's raw mode
4538 * @dev: class device struct
4539 * @buf: buffer
4540 *
4541 * Return value:
4542 * number of bytes consumed from the buffer
4543 **/
4544static ssize_t ipr_store_raw_mode(struct device *dev,
4545 struct device_attribute *attr,
4546 const char *buf, size_t count)
4547{
4548 struct scsi_device *sdev = to_scsi_device(dev);
4549 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4550 struct ipr_resource_entry *res;
4551 unsigned long lock_flags = 0;
4552 ssize_t len;
4553
4554 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4555 res = (struct ipr_resource_entry *)sdev->hostdata;
4556 if (res) {
4557 if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) {
4558 res->raw_mode = simple_strtoul(buf, NULL, 10);
4559 len = strlen(buf);
4560 if (res->sdev)
4561 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4562 res->raw_mode ? "enabled" : "disabled");
4563 } else
4564 len = -EINVAL;
4565 } else
4566 len = -ENXIO;
4567 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4568 return len;
4569}
4570
4571static struct device_attribute ipr_raw_mode_attr = {
4572 .attr = {
4573 .name = "raw_mode",
4574 .mode = S_IRUGO | S_IWUSR,
4575 },
4576 .show = ipr_show_raw_mode,
4577 .store = ipr_store_raw_mode
4578};
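/*
 * Illustrative example (hypothetical device path): raw mode can only be
 * toggled for AF DASD devices on SIS-64 adapters, e.g.:
 *
 *	echo 1 > /sys/bus/scsi/devices/2:0:3:0/raw_mode
 */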
4579
1da177e4
LT
4580static struct device_attribute *ipr_dev_attrs[] = {
4581 &ipr_adapter_handle_attr,
3e7ebdfa 4582 &ipr_resource_path_attr,
46d74563 4583 &ipr_device_id_attr,
75576bb9 4584 &ipr_resource_type_attr,
f8ee25d7 4585 &ipr_raw_mode_attr,
1da177e4
LT
4586 NULL,
4587};
4588
4589/**
4590 * ipr_biosparam - Return the HSC mapping
4591 * @sdev: scsi device struct
4592 * @block_device: block device pointer
4593 * @capacity: capacity of the device
4594 * @parm: Array containing returned HSC values.
4595 *
4596 * This function generates the HSC parms that fdisk uses.
4597 * We want to make sure we return something that places partitions
4598 * on 4k boundaries for best performance with the IOA.
4599 *
4600 * Return value:
4601 * 0 on success
4602 **/
4603static int ipr_biosparam(struct scsi_device *sdev,
4604 struct block_device *block_device,
4605 sector_t capacity, int *parm)
4606{
4607 int heads, sectors;
4608 sector_t cylinders;
4609
4610 heads = 128;
4611 sectors = 32;
4612
4613 cylinders = capacity;
4614 sector_div(cylinders, (128 * 32));
4615
4616 /* return result */
4617 parm[0] = heads;
4618 parm[1] = sectors;
4619 parm[2] = cylinders;
4620
4621 return 0;
4622}
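/*
 * Worked example (illustrative capacity): a logical drive of 8,388,608
 * sectors reports 128 heads and 32 sectors per track, giving
 * 8388608 / (128 * 32) = 2048 cylinders; each cylinder spans 4096 sectors
 * (2 MiB), so cylinder boundaries, and hence fdisk-created partitions,
 * stay 4k aligned for the IOA.
 */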
4623
35a39691
BK
4624/**
4625 * ipr_find_starget - Find target based on bus/target.
4626 * @starget: scsi target struct
4627 *
4628 * Return value:
4629 * resource entry pointer if found / NULL if not found
4630 **/
4631static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4632{
4633 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4634 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4635 struct ipr_resource_entry *res;
4636
4637 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 4638 if ((res->bus == starget->channel) &&
0ee1d714 4639 (res->target == starget->id)) {
35a39691
BK
4640 return res;
4641 }
4642 }
4643
4644 return NULL;
4645}
4646
4647static struct ata_port_info sata_port_info;
4648
4649/**
4650 * ipr_target_alloc - Prepare for commands to a SCSI target
4651 * @starget: scsi target struct
4652 *
4653 * If the device is a SATA device, this function allocates an
4654 * ATA port with libata, else it does nothing.
4655 *
4656 * Return value:
4657 * 0 on success / non-0 on failure
4658 **/
4659static int ipr_target_alloc(struct scsi_target *starget)
4660{
4661 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4662 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4663 struct ipr_sata_port *sata_port;
4664 struct ata_port *ap;
4665 struct ipr_resource_entry *res;
4666 unsigned long lock_flags;
4667
4668 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4669 res = ipr_find_starget(starget);
4670 starget->hostdata = NULL;
4671
4672 if (res && ipr_is_gata(res)) {
4673 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4674 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4675 if (!sata_port)
4676 return -ENOMEM;
4677
4678 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4679 if (ap) {
4680 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4681 sata_port->ioa_cfg = ioa_cfg;
4682 sata_port->ap = ap;
4683 sata_port->res = res;
4684
4685 res->sata_port = sata_port;
4686 ap->private_data = sata_port;
4687 starget->hostdata = sata_port;
4688 } else {
4689 kfree(sata_port);
4690 return -ENOMEM;
4691 }
4692 }
4693 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4694
4695 return 0;
4696}
4697
4698/**
4699 * ipr_target_destroy - Destroy a SCSI target
4700 * @starget: scsi target struct
4701 *
4702 * If the device was a SATA device, this function frees the libata
4703 * ATA port, else it does nothing.
4704 *
4705 **/
4706static void ipr_target_destroy(struct scsi_target *starget)
4707{
4708 struct ipr_sata_port *sata_port = starget->hostdata;
3e7ebdfa
WB
4709 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4710 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4711
4712 if (ioa_cfg->sis64) {
0ee1d714
BK
4713 if (!ipr_find_starget(starget)) {
4714 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4715 clear_bit(starget->id, ioa_cfg->array_ids);
4716 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4717 clear_bit(starget->id, ioa_cfg->vset_ids);
4718 else if (starget->channel == 0)
4719 clear_bit(starget->id, ioa_cfg->target_ids);
4720 }
3e7ebdfa 4721 }
35a39691
BK
4722
4723 if (sata_port) {
4724 starget->hostdata = NULL;
4725 ata_sas_port_destroy(sata_port->ap);
4726 kfree(sata_port);
4727 }
4728}
4729
4730/**
4731 * ipr_find_sdev - Find device based on bus/target/lun.
4732 * @sdev: scsi device struct
4733 *
4734 * Return value:
4735 * resource entry pointer if found / NULL if not found
4736 **/
4737static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4738{
4739 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4740 struct ipr_resource_entry *res;
4741
4742 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa
WB
4743 if ((res->bus == sdev->channel) &&
4744 (res->target == sdev->id) &&
4745 (res->lun == sdev->lun))
35a39691
BK
4746 return res;
4747 }
4748
4749 return NULL;
4750}
4751
1da177e4
LT
4752/**
4753 * ipr_slave_destroy - Unconfigure a SCSI device
4754 * @sdev: scsi device struct
4755 *
4756 * Return value:
4757 * nothing
4758 **/
4759static void ipr_slave_destroy(struct scsi_device *sdev)
4760{
4761 struct ipr_resource_entry *res;
4762 struct ipr_ioa_cfg *ioa_cfg;
4763 unsigned long lock_flags = 0;
4764
4765 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4766
4767 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4768 res = (struct ipr_resource_entry *) sdev->hostdata;
4769 if (res) {
35a39691 4770 if (res->sata_port)
3e4ec344 4771 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
1da177e4
LT
4772 sdev->hostdata = NULL;
4773 res->sdev = NULL;
35a39691 4774 res->sata_port = NULL;
1da177e4
LT
4775 }
4776 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4777}
4778
4779/**
4780 * ipr_slave_configure - Configure a SCSI device
4781 * @sdev: scsi device struct
4782 *
4783 * This function configures the specified scsi device.
4784 *
4785 * Return value:
4786 * 0 on success
4787 **/
4788static int ipr_slave_configure(struct scsi_device *sdev)
4789{
4790 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4791 struct ipr_resource_entry *res;
dd406ef8 4792 struct ata_port *ap = NULL;
1da177e4 4793 unsigned long lock_flags = 0;
3e7ebdfa 4794 char buffer[IPR_MAX_RES_PATH_LENGTH];
1da177e4
LT
4795
4796 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4797 res = sdev->hostdata;
4798 if (res) {
4799 if (ipr_is_af_dasd_device(res))
4800 sdev->type = TYPE_RAID;
0726ce26 4801 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 4802 sdev->scsi_level = 4;
0726ce26 4803 sdev->no_uld_attach = 1;
4804 }
1da177e4 4805 if (ipr_is_vset_device(res)) {
60654e25 4806 sdev->scsi_level = SCSI_SPC_3;
242f9dcb
JA
4807 blk_queue_rq_timeout(sdev->request_queue,
4808 IPR_VSET_RW_TIMEOUT);
086fa5ff 4809 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
1da177e4 4810 }
dd406ef8
BK
4811 if (ipr_is_gata(res) && res->sata_port)
4812 ap = res->sata_port->ap;
4813 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4814
4815 if (ap) {
db5ed4df 4816 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
dd406ef8 4817 ata_sas_slave_configure(sdev, ap);
c8b09f6f
CH
4818 }
4819
3e7ebdfa
WB
4820 if (ioa_cfg->sis64)
4821 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
b3b3b407
BK
4822 ipr_format_res_path(ioa_cfg,
4823 res->res_path, buffer, sizeof(buffer)));
dd406ef8 4824 return 0;
1da177e4
LT
4825 }
4826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4827 return 0;
4828}
4829
35a39691
BK
4830/**
4831 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4832 * @sdev: scsi device struct
4833 *
4834 * This function initializes an ATA port so that future commands
4835 * sent through queuecommand will work.
4836 *
4837 * Return value:
4838 * 0 on success
4839 **/
4840static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4841{
4842 struct ipr_sata_port *sata_port = NULL;
4843 int rc = -ENXIO;
4844
4845 ENTER;
4846 if (sdev->sdev_target)
4847 sata_port = sdev->sdev_target->hostdata;
b2024459 4848 if (sata_port) {
35a39691 4849 rc = ata_sas_port_init(sata_port->ap);
b2024459
DW
4850 if (rc == 0)
4851 rc = ata_sas_sync_probe(sata_port->ap);
4852 }
4853
35a39691
BK
4854 if (rc)
4855 ipr_slave_destroy(sdev);
4856
4857 LEAVE;
4858 return rc;
4859}
4860
1da177e4
LT
4861/**
4862 * ipr_slave_alloc - Prepare for commands to a device.
4863 * @sdev: scsi device struct
4864 *
4865 * This function saves a pointer to the resource entry
4866 * in the scsi device struct if the device exists. We
4867 * can then use this pointer in ipr_queuecommand when
4868 * handling new commands.
4869 *
4870 * Return value:
692aebfc 4871 * 0 on success / -ENXIO if device does not exist
1da177e4
LT
4872 **/
4873static int ipr_slave_alloc(struct scsi_device *sdev)
4874{
4875 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4876 struct ipr_resource_entry *res;
4877 unsigned long lock_flags;
692aebfc 4878 int rc = -ENXIO;
1da177e4
LT
4879
4880 sdev->hostdata = NULL;
4881
4882 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4883
35a39691
BK
4884 res = ipr_find_sdev(sdev);
4885 if (res) {
4886 res->sdev = sdev;
4887 res->add_to_ml = 0;
4888 res->in_erp = 0;
4889 sdev->hostdata = res;
4890 if (!ipr_is_naca_model(res))
4891 res->needs_sync_complete = 1;
4892 rc = 0;
4893 if (ipr_is_gata(res)) {
4894 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4895 return ipr_ata_slave_alloc(sdev);
1da177e4
LT
4896 }
4897 }
4898
4899 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4900
692aebfc 4901 return rc;
1da177e4
LT
4902}
4903
6cdb0817
BK
4904/**
4905 * ipr_match_lun - Match function for specified LUN
4906 * @ipr_cmd: ipr command struct
4907 * @device: device to match (sdev)
4908 *
4909 * Returns:
4910 * 1 if command matches sdev / 0 if command does not match sdev
4911 **/
4912static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4913{
4914 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4915 return 1;
4916 return 0;
4917}
4918
4919/**
4920 * ipr_wait_for_ops - Wait for matching commands to complete
4921 * @ioa_cfg: ioa config struct
4922 * @device: device to match (sdev)
4923 * @match: match function to use
4924 *
4925 * Returns:
4926 * SUCCESS / FAILED
4927 **/
4928static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4929 int (*match)(struct ipr_cmnd *, void *))
4930{
4931 struct ipr_cmnd *ipr_cmd;
4932 int wait;
4933 unsigned long flags;
4934 struct ipr_hrr_queue *hrrq;
4935 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4936 DECLARE_COMPLETION_ONSTACK(comp);
4937
4938 ENTER;
4939 do {
4940 wait = 0;
4941
4942 for_each_hrrq(hrrq, ioa_cfg) {
4943 spin_lock_irqsave(hrrq->lock, flags);
4944 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4945 if (match(ipr_cmd, device)) {
4946 ipr_cmd->eh_comp = &comp;
4947 wait++;
4948 }
4949 }
4950 spin_unlock_irqrestore(hrrq->lock, flags);
4951 }
4952
4953 if (wait) {
4954 timeout = wait_for_completion_timeout(&comp, timeout);
4955
4956 if (!timeout) {
4957 wait = 0;
4958
4959 for_each_hrrq(hrrq, ioa_cfg) {
4960 spin_lock_irqsave(hrrq->lock, flags);
4961 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4962 if (match(ipr_cmd, device)) {
4963 ipr_cmd->eh_comp = NULL;
4964 wait++;
4965 }
4966 }
4967 spin_unlock_irqrestore(hrrq->lock, flags);
4968 }
4969
4970 if (wait)
4971 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4972 LEAVE;
4973 return wait ? FAILED : SUCCESS;
4974 }
4975 }
4976 } while (wait);
4977
4978 LEAVE;
4979 return SUCCESS;
4980}
4981
70233ac5 4982static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
1da177e4
LT
4983{
4984 struct ipr_ioa_cfg *ioa_cfg;
70233ac5 4985 unsigned long lock_flags = 0;
4986 int rc = SUCCESS;
1da177e4
LT
4987
4988 ENTER;
70233ac5 4989 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4990 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1da177e4 4991
96b04db9 4992 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
70233ac5 4993 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
a92fa25c
KSS
4994 dev_err(&ioa_cfg->pdev->dev,
4995 "Adapter being reset as a result of error recovery.\n");
1da177e4 4996
a92fa25c
KSS
4997 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4998 ioa_cfg->sdt_state = GET_DUMP;
4999 }
1da177e4 5000
70233ac5 5001 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5002 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5003 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
df0ae249 5004
5005 /* If we got hit with a host reset while we were already resetting
5006 the adapter for some reason, and that reset failed, fail this host reset as well. */
5007 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5008 ipr_trace;
5009 rc = FAILED;
5010 }
df0ae249 5011
70233ac5 5012 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5013 LEAVE;
5014 return rc;
5015}
5016
5017/**
5018 * ipr_device_reset - Reset the device
5019 * @ioa_cfg: ioa config struct
5020 * @res: resource entry struct
5021 *
5022 * This function issues a device reset to the affected device.
5023 * If the device is a SCSI device, a LUN reset will be sent
5024 * to the device first. If that does not work, a target reset
5025 * will be sent. If the device is a SATA device, a PHY reset will
5026 * be sent.
5027 *
5028 * Return value:
5029 * 0 on success / non-zero on failure
5030 **/
5031static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5032 struct ipr_resource_entry *res)
5033{
5034 struct ipr_cmnd *ipr_cmd;
5035 struct ipr_ioarcb *ioarcb;
5036 struct ipr_cmd_pkt *cmd_pkt;
35a39691 5037 struct ipr_ioarcb_ata_regs *regs;
5038 u32 ioasc;
5039
5040 ENTER;
5041 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5042 ioarcb = &ipr_cmd->ioarcb;
5043 cmd_pkt = &ioarcb->cmd_pkt;
5044
5045 if (ipr_cmd->ioa_cfg->sis64) {
5046 regs = &ipr_cmd->i.ata_ioadl.regs;
5047 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5048 } else
5049 regs = &ioarcb->u.add_data.u.regs;
c6513096 5050
3e7ebdfa 5051 ioarcb->res_handle = res->res_handle;
5052 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5053 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5054 if (ipr_is_gata(res)) {
5055 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
a32c055f 5056 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5057 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5058 }
5059
5060 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
96d21f00 5061 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
05a6538a 5062 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5063 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5064 if (ipr_cmd->ioa_cfg->sis64)
5065 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5066 sizeof(struct ipr_ioasa_gata));
5067 else
5068 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5069 sizeof(struct ipr_ioasa_gata));
5070 }
5071
5072 LEAVE;
203fa3fe 5073 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5074}
5075
5076/**
5077 * ipr_sata_reset - Reset the SATA port
cc0680a5 5078 * @link: SATA link to reset
5079 * @classes: class of the attached device
5080 *
cc0680a5 5081 * This function issues a SATA phy reset to the affected ATA link.
5082 *
5083 * Return value:
5084 * 0 on success / non-zero on failure
5085 **/
cc0680a5 5086static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
120bda35 5087 unsigned long deadline)
35a39691 5088{
cc0680a5 5089 struct ipr_sata_port *sata_port = link->ap->private_data;
5090 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5091 struct ipr_resource_entry *res;
5092 unsigned long lock_flags = 0;
5093 int rc = -ENXIO;
5094
5095 ENTER;
5096 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 5097 while (ioa_cfg->in_reset_reload) {
5098 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5099 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5100 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5101 }
5102
5103 res = sata_port->res;
5104 if (res) {
5105 rc = ipr_device_reset(ioa_cfg, res);
3e7ebdfa 5106 *classes = res->ata_class;
5107 }
5108
5109 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5110 LEAVE;
5111 return rc;
5112}
5113
5114/**
5115 * ipr_eh_dev_reset - Reset the device
5116 * @scsi_cmd: scsi command struct
5117 *
5118 * This function issues a device reset to the affected device.
5119 * A LUN reset will be sent to the device first. If that does
5120 * not work, a target reset will be sent.
5121 *
5122 * Return value:
5123 * SUCCESS / FAILED
5124 **/
203fa3fe 5125static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5126{
5127 struct ipr_cmnd *ipr_cmd;
5128 struct ipr_ioa_cfg *ioa_cfg;
5129 struct ipr_resource_entry *res;
5130 struct ata_port *ap;
5131 int rc = 0;
05a6538a 5132 struct ipr_hrr_queue *hrrq;
5133
5134 ENTER;
5135 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5136 res = scsi_cmd->device->hostdata;
5137
eeb88307 5138 if (!res)
5139 return FAILED;
5140
5141 /*
5142 * If we are currently going through reset/reload, return failed. This will force the
5143 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5144 * reset to complete
5145 */
5146 if (ioa_cfg->in_reset_reload)
5147 return FAILED;
56d6aa33 5148 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5149 return FAILED;
5150
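	/*
	 * Redirect the done handler of every command still pending against
	 * this resource so it completes through the error-handling path
	 * once the reset finishes.
	 */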
05a6538a 5151 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5152 spin_lock(&hrrq->_lock);
05a6538a 5153 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5154 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5155 if (ipr_cmd->scsi_cmd)
5156 ipr_cmd->done = ipr_scsi_eh_done;
5157 if (ipr_cmd->qc)
5158 ipr_cmd->done = ipr_sata_eh_done;
5159 if (ipr_cmd->qc &&
5160 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5161 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5162 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5163 }
7402ecef 5164 }
1da177e4 5165 }
56d6aa33 5166 spin_unlock(&hrrq->_lock);
1da177e4 5167 }
1da177e4 5168 res->resetting_device = 1;
fb3ed3cb 5169 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5170
5171 if (ipr_is_gata(res) && res->sata_port) {
5172 ap = res->sata_port->ap;
5173 spin_unlock_irq(scsi_cmd->device->host->host_lock);
a1efdaba 5174 ata_std_error_handler(ap);
35a39691 5175 spin_lock_irq(scsi_cmd->device->host->host_lock);
5af23d26 5176
05a6538a 5177 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5178 spin_lock(&hrrq->_lock);
05a6538a 5179 list_for_each_entry(ipr_cmd,
5180 &hrrq->hrrq_pending_q, queue) {
5181 if (ipr_cmd->ioarcb.res_handle ==
5182 res->res_handle) {
5183 rc = -EIO;
5184 break;
5185 }
5af23d26 5186 }
56d6aa33 5187 spin_unlock(&hrrq->_lock);
5af23d26 5188 }
5189 } else
5190 rc = ipr_device_reset(ioa_cfg, res);
1da177e4 5191 res->resetting_device = 0;
0b1f8d44 5192 res->reset_occurred = 1;
1da177e4 5193
1da177e4 5194 LEAVE;
203fa3fe 5195 return rc ? FAILED : SUCCESS;
5196}
5197
203fa3fe 5198static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5199{
5200 int rc;
5201 struct ipr_ioa_cfg *ioa_cfg;
5202
5203 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5204
5205 spin_lock_irq(cmd->device->host->host_lock);
5206 rc = __ipr_eh_dev_reset(cmd);
5207 spin_unlock_irq(cmd->device->host->host_lock);
5208
5209 if (rc == SUCCESS)
5210 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5211
5212 return rc;
5213}
5214
5215/**
5216 * ipr_bus_reset_done - Op done function for bus reset.
5217 * @ipr_cmd: ipr command struct
5218 *
5219 * This function is the op done function for a bus reset
5220 *
5221 * Return value:
5222 * none
5223 **/
5224static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5225{
5226 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5227 struct ipr_resource_entry *res;
5228
5229 ENTER;
5230 if (!ioa_cfg->sis64)
5231 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5232 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5233 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5234 break;
5235 }
1da177e4 5236 }
5237
5238 /*
5239 * If abort has not completed, indicate the reset has, else call the
5240 * abort's done function to wake the sleeping eh thread
5241 */
5242 if (ipr_cmd->sibling->sibling)
5243 ipr_cmd->sibling->sibling = NULL;
5244 else
5245 ipr_cmd->sibling->done(ipr_cmd->sibling);
5246
05a6538a 5247 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5248 LEAVE;
5249}
5250
5251/**
5252 * ipr_abort_timeout - An abort task has timed out
5253 * @ipr_cmd: ipr command struct
5254 *
5255 * This function handles when an abort task times out. If this
5256 * happens we issue a bus reset since we have resources tied
5257 * up that must be freed before returning to the midlayer.
5258 *
5259 * Return value:
5260 * none
5261 **/
5262static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5263{
5264 struct ipr_cmnd *reset_cmd;
5265 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5266 struct ipr_cmd_pkt *cmd_pkt;
5267 unsigned long lock_flags = 0;
5268
5269 ENTER;
5270 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5271 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5272 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5273 return;
5274 }
5275
fb3ed3cb 5276 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
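	/*
	 * Link the timed-out abort and the bus reset as siblings so that
	 * ipr_bus_reset_done() can tell whether the abort is still
	 * outstanding and wake the sleeping eh thread when it is not.
	 */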
5277 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5278 ipr_cmd->sibling = reset_cmd;
5279 reset_cmd->sibling = ipr_cmd;
5280 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5281 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5282 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5283 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5284 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5285
5286 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5287 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5288 LEAVE;
5289}
5290
5291/**
5292 * ipr_cancel_op - Cancel specified op
5293 * @scsi_cmd: scsi command struct
5294 *
5295 * This function cancels specified op.
5296 *
5297 * Return value:
5298 * SUCCESS / FAILED
5299 **/
203fa3fe 5300static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5301{
5302 struct ipr_cmnd *ipr_cmd;
5303 struct ipr_ioa_cfg *ioa_cfg;
5304 struct ipr_resource_entry *res;
5305 struct ipr_cmd_pkt *cmd_pkt;
a92fa25c 5306 u32 ioasc, int_reg;
1da177e4 5307 int op_found = 0;
05a6538a 5308 struct ipr_hrr_queue *hrrq;
5309
5310 ENTER;
5311 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5312 res = scsi_cmd->device->hostdata;
5313
5314 /* If we are currently going through reset/reload, return failed.
5315 * This will force the mid-layer to call ipr_eh_host_reset,
5316 * which will then go to sleep and wait for the reset to complete
5317 */
56d6aa33 5318 if (ioa_cfg->in_reset_reload ||
5319 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8fa728a2 5320 return FAILED;
5321 if (!res)
5322 return FAILED;
5323
5324 /*
5325 * If we are aborting a timed out op, chances are that the timeout was caused
5326 * by a still not detected EEH error. In such cases, reading a register will
5327 * trigger the EEH recovery infrastructure.
5328 */
5329 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5330
5331 if (!ipr_is_gscsi(res))
5332 return FAILED;
5333
05a6538a 5334 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5335 spin_lock(&hrrq->_lock);
05a6538a 5336 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5337 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5338 ipr_cmd->done = ipr_scsi_eh_done;
5339 op_found = 1;
5340 break;
5341 }
1da177e4 5342 }
56d6aa33 5343 spin_unlock(&hrrq->_lock);
5344 }
5345
5346 if (!op_found)
5347 return SUCCESS;
5348
5349 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3e7ebdfa 5350 ipr_cmd->ioarcb.res_handle = res->res_handle;
5351 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5352 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5353 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5354 ipr_cmd->u.sdev = scsi_cmd->device;
5355
5356 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5357 scsi_cmd->cmnd[0]);
1da177e4 5358 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
96d21f00 5359 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5360
5361 /*
5362 * If the abort task timed out and we sent a bus reset, we will get
5363 * one of the following responses to the abort
5364 */
5365 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5366 ioasc = 0;
5367 ipr_trace;
5368 }
5369
c4ee22a3 5370 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ee0a90fa 5371 if (!ipr_is_naca_model(res))
5372 res->needs_sync_complete = 1;
5373
5374 LEAVE;
203fa3fe 5375 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5376}
5377
5378/**
5379 * ipr_scan_finished - Report whether the device scan has finished
5380 * @shost: scsi host struct
5381 * @elapsed_time: elapsed scan time in jiffies
5382 * Return value:
5383 * 0 if scan in progress / 1 if scan is complete
5384 **/
5385static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5386{
5387 unsigned long lock_flags;
5388 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5389 int rc = 0;
5390
5391 spin_lock_irqsave(shost->host_lock, lock_flags);
5392 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5393 rc = 1;
5394 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5395 rc = 1;
5396 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5397 return rc;
5398}
5399
5400/**
5401 * ipr_eh_abort - Abort a single op
5402 * @scsi_cmd: scsi command struct
5403 *
5404 * Return value:
5405 * SUCCESS / FAILED
5406 **/
203fa3fe 5407static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
1da177e4 5408{
5409 unsigned long flags;
5410 int rc;
6cdb0817 5411 struct ipr_ioa_cfg *ioa_cfg;
5412
5413 ENTER;
1da177e4 5414
5415 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5416
5417 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5418 rc = ipr_cancel_op(scsi_cmd);
5419 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
1da177e4 5420
5421 if (rc == SUCCESS)
5422 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
1da177e4 5423 LEAVE;
8fa728a2 5424 return rc;
5425}
5426
5427/**
5428 * ipr_handle_other_interrupt - Handle "other" interrupts
5429 * @ioa_cfg: ioa config struct
634651fa 5430 * @int_reg: interrupt register
5431 *
5432 * Return value:
5433 * IRQ_NONE / IRQ_HANDLED
5434 **/
634651fa 5435static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
630ad831 5436 u32 int_reg)
5437{
5438 irqreturn_t rc = IRQ_HANDLED;
7dacb64f 5439 u32 int_mask_reg;
56d6aa33 5440
5441 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5442 int_reg &= ~int_mask_reg;
5443
5444 /* If an interrupt on the adapter did not occur, ignore it.
5445 * Or in the case of SIS 64, check for a stage change interrupt.
5446 */
5447 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5448 if (ioa_cfg->sis64) {
5449 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5450 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5451 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5452
5453 /* clear stage change */
5454 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5455 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5456 list_del(&ioa_cfg->reset_cmd->queue);
5457 del_timer(&ioa_cfg->reset_cmd->timer);
5458 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5459 return IRQ_HANDLED;
5460 }
5461 }
5462
5463 return IRQ_NONE;
5464 }
5465
5466 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5467 /* Mask the interrupt */
5468 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5469 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5470
5471 list_del(&ioa_cfg->reset_cmd->queue);
5472 del_timer(&ioa_cfg->reset_cmd->timer);
5473 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
7dacb64f 5474 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5475 if (ioa_cfg->clear_isr) {
5476 if (ipr_debug && printk_ratelimit())
5477 dev_err(&ioa_cfg->pdev->dev,
5478 "Spurious interrupt detected. 0x%08X\n", int_reg);
5479 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5480 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5481 return IRQ_NONE;
5482 }
5483 } else {
5484 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5485 ioa_cfg->ioa_unit_checked = 1;
05a6538a 5486 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5487 dev_err(&ioa_cfg->pdev->dev,
5488 "No Host RRQ. 0x%08X\n", int_reg);
5489 else
5490 dev_err(&ioa_cfg->pdev->dev,
5491 "Permanent IOA failure. 0x%08X\n", int_reg);
5492
5493 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5494 ioa_cfg->sdt_state = GET_DUMP;
5495
5496 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5497 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5498 }
56d6aa33 5499
5500 return rc;
5501}
5502
5503/**
5504 * ipr_isr_eh - Interrupt service routine error handler
5505 * @ioa_cfg: ioa config struct
5506 * @msg: message to log
5507 * @number: error number to include in the logged message
5508 * Return value:
5509 * none
5510 **/
05a6538a 5511static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5512{
5513 ioa_cfg->errors_logged++;
05a6538a 5514 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5515
5516 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5517 ioa_cfg->sdt_state = GET_DUMP;
5518
5519 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5520}
5521
b53d124a 5522static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
05a6538a 5523 struct list_head *doneq)
5524{
5525 u32 ioasc;
5526 u16 cmd_index;
5527 struct ipr_cmnd *ipr_cmd;
5528 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5529 int num_hrrq = 0;
5530
5531 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5532 if (!hrr_queue->allow_interrupts)
05a6538a 5533 return 0;
5534
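	/*
	 * Walk the host RRQ: each response entry carries a toggle bit that
	 * flips every time the queue wraps, so an entry whose toggle bit
	 * matches the expected value is new.  Completed commands are moved
	 * to the caller's done queue until the budget (if any) is used up
	 * or no new entries remain.
	 */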
5535 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5536 hrr_queue->toggle_bit) {
5537
5538 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5539 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5540 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5541
5542 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5543 cmd_index < hrr_queue->min_cmd_id)) {
5544 ipr_isr_eh(ioa_cfg,
5545 "Invalid response handle from IOA: ",
5546 cmd_index);
5547 break;
5548 }
5549
5550 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5551 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5552
5553 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5554
5555 list_move_tail(&ipr_cmd->queue, doneq);
5556
5557 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5558 hrr_queue->hrrq_curr++;
5559 } else {
5560 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5561 hrr_queue->toggle_bit ^= 1u;
5562 }
5563 num_hrrq++;
b53d124a 5564 if (budget > 0 && num_hrrq >= budget)
5565 break;
05a6538a 5566 }
b53d124a 5567
05a6538a 5568 return num_hrrq;
5569}
b53d124a 5570
5571static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5572{
5573 struct ipr_ioa_cfg *ioa_cfg;
5574 struct ipr_hrr_queue *hrrq;
5575 struct ipr_cmnd *ipr_cmd, *temp;
5576 unsigned long hrrq_flags;
5577 int completed_ops;
5578 LIST_HEAD(doneq);
5579
5580 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5581 ioa_cfg = hrrq->ioa_cfg;
5582
5583 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5584 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5585
5586 if (completed_ops < budget)
5587 blk_iopoll_complete(iop);
5588 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5589
5590 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5591 list_del(&ipr_cmd->queue);
5592 del_timer(&ipr_cmd->timer);
5593 ipr_cmd->fast_done(ipr_cmd);
5594 }
5595
5596 return completed_ops;
5597}
5598
5599/**
5600 * ipr_isr - Interrupt service routine
5601 * @irq: irq number
5602 * @devp: pointer to ioa config struct
5603 *
5604 * Return value:
5605 * IRQ_NONE / IRQ_HANDLED
5606 **/
7d12e780 5607static irqreturn_t ipr_isr(int irq, void *devp)
1da177e4 5608{
05a6538a 5609 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5610 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
56d6aa33 5611 unsigned long hrrq_flags = 0;
7dacb64f 5612 u32 int_reg = 0;
3feeb89d 5613 int num_hrrq = 0;
7dacb64f 5614 int irq_none = 0;
172cd6e1 5615 struct ipr_cmnd *ipr_cmd, *temp;
1da177e4 5616 irqreturn_t rc = IRQ_NONE;
172cd6e1 5617 LIST_HEAD(doneq);
1da177e4 5618
56d6aa33 5619 spin_lock_irqsave(hrrq->lock, hrrq_flags);
1da177e4 5620 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5621 if (!hrrq->allow_interrupts) {
5622 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5623 return IRQ_NONE;
5624 }
5625
1da177e4 5626 while (1) {
b53d124a 5627 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5628 rc = IRQ_HANDLED;
1da177e4 5629
b53d124a 5630 if (!ioa_cfg->clear_isr)
5631 break;
7dd21308 5632
1da177e4 5633 /* Clear the PCI interrupt */
a5442ba4 5634 num_hrrq = 0;
3feeb89d 5635 do {
b53d124a 5636 writel(IPR_PCII_HRRQ_UPDATED,
5637 ioa_cfg->regs.clr_interrupt_reg32);
7dacb64f 5638 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
3feeb89d 5639 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
b53d124a 5640 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
3feeb89d 5641
5642 } else if (rc == IRQ_NONE && irq_none == 0) {
5643 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5644 irq_none++;
5645 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5646 int_reg & IPR_PCII_HRRQ_UPDATED) {
b53d124a 5647 ipr_isr_eh(ioa_cfg,
5648 "Error clearing HRRQ: ", num_hrrq);
172cd6e1 5649 rc = IRQ_HANDLED;
b53d124a 5650 break;
5651 } else
5652 break;
5653 }
5654
5655 if (unlikely(rc == IRQ_NONE))
634651fa 5656 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
1da177e4 5657
56d6aa33 5658 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5659 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5660 list_del(&ipr_cmd->queue);
5661 del_timer(&ipr_cmd->timer);
5662 ipr_cmd->fast_done(ipr_cmd);
5663 }
05a6538a 5664 return rc;
5665}
5666
5667/**
5668 * ipr_isr_mhrrq - Interrupt service routine
5669 * @irq: irq number
5670 * @devp: pointer to ioa config struct
5671 *
5672 * Return value:
5673 * IRQ_NONE / IRQ_HANDLED
5674 **/
5675static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5676{
5677 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
b53d124a 5678 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
56d6aa33 5679 unsigned long hrrq_flags = 0;
05a6538a 5680 struct ipr_cmnd *ipr_cmd, *temp;
5681 irqreturn_t rc = IRQ_NONE;
5682 LIST_HEAD(doneq);
172cd6e1 5683
56d6aa33 5684 spin_lock_irqsave(hrrq->lock, hrrq_flags);
05a6538a 5685
5686 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5687 if (!hrrq->allow_interrupts) {
5688 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5689 return IRQ_NONE;
5690 }
5691
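	/*
	 * With iopoll enabled on a SIS-64 adapter using multiple vectors,
	 * only schedule the blk_iopoll handler here and let it drain the
	 * queue outside of hard interrupt context.
	 */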
89f8b33c 5692 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 5693 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5694 hrrq->toggle_bit) {
5695 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5696 blk_iopoll_sched(&hrrq->iopoll);
5697 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5698 return IRQ_HANDLED;
5699 }
5700 } else {
5701 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5702 hrrq->toggle_bit)
05a6538a 5703
b53d124a 5704 if (ipr_process_hrrq(hrrq, -1, &doneq))
5705 rc = IRQ_HANDLED;
5706 }
05a6538a 5707
56d6aa33 5708 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5709
5710 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5711 list_del(&ipr_cmd->queue);
5712 del_timer(&ipr_cmd->timer);
5713 ipr_cmd->fast_done(ipr_cmd);
5714 }
5715 return rc;
5716}
5717
5718/**
5719 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5720 * @ioa_cfg: ioa config struct
5721 * @ipr_cmd: ipr command struct
5722 *
5723 * Return value:
5724 * 0 on success / -1 on failure
5725 **/
5726static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5727 struct ipr_cmnd *ipr_cmd)
5728{
5729 int i, nseg;
5730 struct scatterlist *sg;
5731 u32 length;
5732 u32 ioadl_flags = 0;
5733 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5734 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5735 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5736
5737 length = scsi_bufflen(scsi_cmd);
5738 if (!length)
5739 return 0;
5740
5741 nseg = scsi_dma_map(scsi_cmd);
5742 if (nseg < 0) {
51f52a47 5743 if (printk_ratelimit())
d73341bf 5744 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5745 return -1;
5746 }
5747
5748 ipr_cmd->dma_use_sg = nseg;
5749
438b0331 5750 ioarcb->data_transfer_length = cpu_to_be32(length);
5751 ioarcb->ioadl_len =
5752 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
438b0331 5753
5754 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5755 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5756 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5757 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5758 ioadl_flags = IPR_IOADL_FLAGS_READ;
5759
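	/* Build one IOADL64 descriptor per scatter/gather element; the
	 * final descriptor is flagged LAST so the adapter knows where the
	 * list ends.
	 */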
5760 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5761 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5762 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5763 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5764 }
5765
5766 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5767 return 0;
5768}
5769
5770/**
5771 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5772 * @ioa_cfg: ioa config struct
5773 * @ipr_cmd: ipr command struct
5774 *
5775 * Return value:
5776 * 0 on success / -1 on failure
5777 **/
5778static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5779 struct ipr_cmnd *ipr_cmd)
5780{
5781 int i, nseg;
5782 struct scatterlist *sg;
5783 u32 length;
5784 u32 ioadl_flags = 0;
5785 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5786 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5787 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4 5788
5789 length = scsi_bufflen(scsi_cmd);
5790 if (!length)
5791 return 0;
5792
5793 nseg = scsi_dma_map(scsi_cmd);
5794 if (nseg < 0) {
d73341bf 5795 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5796 return -1;
5797 }
51b1c7e1 5798
5799 ipr_cmd->dma_use_sg = nseg;
5800
5801 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5802 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5803 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5804 ioarcb->data_transfer_length = cpu_to_be32(length);
5805 ioarcb->ioadl_len =
5806 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5807 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5808 ioadl_flags = IPR_IOADL_FLAGS_READ;
5809 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5810 ioarcb->read_ioadl_len =
5811 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5812 }
1da177e4 5813
5814 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5815 ioadl = ioarcb->u.add_data.u.ioadl;
5816 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5817 offsetof(struct ipr_ioarcb, u.add_data));
5818 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5819 }
1da177e4 5820
5821 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5822 ioadl[i].flags_and_data_len =
5823 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5824 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5825 }
5826
5827 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5828 return 0;
5829}
5830
5831/**
5832 * ipr_erp_done - Process completion of ERP for a device
5833 * @ipr_cmd: ipr command struct
5834 *
5835 * This function copies the sense buffer into the scsi_cmd
5836 * struct and pushes the scsi_done function.
5837 *
5838 * Return value:
5839 * nothing
5840 **/
5841static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5842{
5843 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5844 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 5845 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5846
5847 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5848 scsi_cmd->result |= (DID_ERROR << 16);
5849 scmd_printk(KERN_ERR, scsi_cmd,
5850 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5851 } else {
5852 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5853 SCSI_SENSE_BUFFERSIZE);
5854 }
5855
5856 if (res) {
ee0a90fa 5857 if (!ipr_is_naca_model(res))
5858 res->needs_sync_complete = 1;
5859 res->in_erp = 0;
5860 }
63015bc9 5861 scsi_dma_unmap(ipr_cmd->scsi_cmd);
05a6538a 5862 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5863 scsi_cmd->scsi_done(scsi_cmd);
5864}
5865
5866/**
5867 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5868 * @ipr_cmd: ipr command struct
5869 *
5870 * Return value:
5871 * none
5872 **/
5873static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5874{
51b1c7e1 5875 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
96d21f00 5876 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
a32c055f 5877 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5878
5879 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 5880 ioarcb->data_transfer_length = 0;
1da177e4 5881 ioarcb->read_data_transfer_length = 0;
a32c055f 5882 ioarcb->ioadl_len = 0;
1da177e4 5883 ioarcb->read_ioadl_len = 0;
5884 ioasa->hdr.ioasc = 0;
5885 ioasa->hdr.residual_data_len = 0;
5886
5887 if (ipr_cmd->ioa_cfg->sis64)
5888 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5889 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5890 else {
5891 ioarcb->write_ioadl_addr =
5892 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5893 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5894 }
5895}
5896
5897/**
5898 * ipr_erp_request_sense - Send request sense to a device
5899 * @ipr_cmd: ipr command struct
5900 *
5901 * This function sends a request sense to a device as a result
5902 * of a check condition.
5903 *
5904 * Return value:
5905 * nothing
5906 **/
5907static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5908{
5909 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
96d21f00 5910 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5911
5912 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5913 ipr_erp_done(ipr_cmd);
5914 return;
5915 }
5916
5917 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5918
5919 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5920 cmd_pkt->cdb[0] = REQUEST_SENSE;
5921 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5922 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5923 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5924 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5925
5926 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5927 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5928
5929 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5930 IPR_REQUEST_SENSE_TIMEOUT * 2);
5931}
5932
5933/**
5934 * ipr_erp_cancel_all - Send cancel all to a device
5935 * @ipr_cmd: ipr command struct
5936 *
5937 * This function sends a cancel all to a device to clear the
5938 * queue. If we are running TCQ on the device, QERR is set to 1,
5939 * which means all outstanding ops have been dropped on the floor.
5940 * Cancel all will return them to us.
5941 *
5942 * Return value:
5943 * nothing
5944 **/
5945static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5946{
5947 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5948 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5949 struct ipr_cmd_pkt *cmd_pkt;
5950
5951 res->in_erp = 1;
5952
5953 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5954
17ea0126 5955 if (!scsi_cmd->device->simple_tags) {
5956 ipr_erp_request_sense(ipr_cmd);
5957 return;
5958 }
5959
5960 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5961 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5962 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5963
5964 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5965 IPR_CANCEL_ALL_TIMEOUT);
5966}
5967
5968/**
5969 * ipr_dump_ioasa - Dump contents of IOASA
5970 * @ioa_cfg: ioa config struct
5971 * @ipr_cmd: ipr command struct
fe964d0a 5972 * @res: resource entry struct
5973 *
5974 * This function is invoked by the interrupt handler when ops
5975 * fail. It will log the IOASA if appropriate. Only called
5976 * for GPDD ops.
5977 *
5978 * Return value:
5979 * none
5980 **/
5981static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 5982 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5983{
5984 int i;
5985 u16 data_len;
b0692dd4 5986 u32 ioasc, fd_ioasc;
96d21f00 5987 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5988 __be32 *ioasa_data = (__be32 *)ioasa;
5989 int error_index;
5990
5991 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5992 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5993
5994 if (0 == ioasc)
5995 return;
5996
5997 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5998 return;
5999
6000 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6001 error_index = ipr_get_error(fd_ioasc);
6002 else
6003 error_index = ipr_get_error(ioasc);
6004
6005 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6006 /* Don't log an error if the IOA already logged one */
96d21f00 6007 if (ioasa->hdr.ilid != 0)
6008 return;
6009
6010 if (!ipr_is_gscsi(res))
6011 return;
6012
6013 if (ipr_error_table[error_index].log_ioasa == 0)
6014 return;
6015 }
6016
fe964d0a 6017 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4 6018
6019 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6020 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6021 data_len = sizeof(struct ipr_ioasa64);
6022 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
1da177e4 6023 data_len = sizeof(struct ipr_ioasa);
6024
6025 ipr_err("IOASA Dump:\n");
6026
6027 for (i = 0; i < data_len / 4; i += 4) {
6028 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6029 be32_to_cpu(ioasa_data[i]),
6030 be32_to_cpu(ioasa_data[i+1]),
6031 be32_to_cpu(ioasa_data[i+2]),
6032 be32_to_cpu(ioasa_data[i+3]));
6033 }
6034}
6035
6036/**
6037 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6038 * @ipr_cmd: ipr command struct
6039 *
6041 * Return value:
6042 * none
6043 **/
6044static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6045{
6046 u32 failing_lba;
6047 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6048 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6049 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6050 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6051
6052 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6053
6054 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6055 return;
6056
6057 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6058
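	/*
	 * A volume set can report a failing LBA wider than 32 bits, which
	 * does not fit in fixed-format sense data, so build descriptor
	 * format (0x72) sense with an information descriptor for that
	 * case and fixed format (0x70) sense otherwise.
	 */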
6059 if (ipr_is_vset_device(res) &&
6060 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6061 ioasa->u.vset.failing_lba_hi != 0) {
6062 sense_buf[0] = 0x72;
6063 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6064 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6065 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6066
6067 sense_buf[7] = 12;
6068 sense_buf[8] = 0;
6069 sense_buf[9] = 0x0A;
6070 sense_buf[10] = 0x80;
6071
6072 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6073
6074 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6075 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6076 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6077 sense_buf[15] = failing_lba & 0x000000ff;
6078
6079 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6080
6081 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6082 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6083 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6084 sense_buf[19] = failing_lba & 0x000000ff;
6085 } else {
6086 sense_buf[0] = 0x70;
6087 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6088 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6089 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6090
6091 /* Illegal request */
6092 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
96d21f00 6093 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6094 sense_buf[7] = 10; /* additional length */
6095
6096 /* IOARCB was in error */
6097 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6098 sense_buf[15] = 0xC0;
6099 else /* Parameter data was invalid */
6100 sense_buf[15] = 0x80;
6101
6102 sense_buf[16] =
6103 ((IPR_FIELD_POINTER_MASK &
96d21f00 6104 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6105 sense_buf[17] =
6106 (IPR_FIELD_POINTER_MASK &
96d21f00 6107 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6108 } else {
6109 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6110 if (ipr_is_vset_device(res))
6111 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6112 else
6113 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6114
6115 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6116 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6117 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6118 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6119 sense_buf[6] = failing_lba & 0x000000ff;
6120 }
6121
6122 sense_buf[7] = 6; /* additional length */
6123 }
6124 }
6125}
6126
ee0a90fa 6127/**
6128 * ipr_get_autosense - Copy autosense data to sense buffer
6129 * @ipr_cmd: ipr command struct
6130 *
6131 * This function copies the autosense buffer to the buffer
6132 * in the scsi_cmd, if there is autosense available.
6133 *
6134 * Return value:
6135 * 1 if autosense was available / 0 if not
6136 **/
6137static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6138{
6139 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6140 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
ee0a90fa 6141
96d21f00 6142 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
ee0a90fa 6143 return 0;
6144
6145 if (ipr_cmd->ioa_cfg->sis64)
6146 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6147 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6148 SCSI_SENSE_BUFFERSIZE));
6149 else
6150 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6151 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6152 SCSI_SENSE_BUFFERSIZE));
ee0a90fa 6153 return 1;
6154}
6155
6156/**
6157 * ipr_erp_start - Process an error response for a SCSI op
6158 * @ioa_cfg: ioa config struct
6159 * @ipr_cmd: ipr command struct
6160 *
6161 * This function determines whether or not to initiate ERP
6162 * on the affected device.
6163 *
6164 * Return value:
6165 * nothing
6166 **/
6167static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6168 struct ipr_cmnd *ipr_cmd)
6169{
6170 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6171 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 6172 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8a048994 6173 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6174
6175 if (!res) {
6176 ipr_scsi_eh_done(ipr_cmd);
6177 return;
6178 }
6179
8a048994 6180 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6181 ipr_gen_sense(ipr_cmd);
6182
6183 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6184
8a048994 6185 switch (masked_ioasc) {
1da177e4 6186 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa 6187 if (ipr_is_naca_model(res))
6188 scsi_cmd->result |= (DID_ABORT << 16);
6189 else
6190 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6191 break;
6192 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 6193 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6194 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6195 break;
6196 case IPR_IOASC_HW_SEL_TIMEOUT:
6197 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa 6198 if (!ipr_is_naca_model(res))
6199 res->needs_sync_complete = 1;
6200 break;
6201 case IPR_IOASC_SYNC_REQUIRED:
6202 if (!res->in_erp)
6203 res->needs_sync_complete = 1;
6204 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6205 break;
6206 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 6207 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6208 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6209 break;
6210 case IPR_IOASC_BUS_WAS_RESET:
6211 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6212 /*
6213 * Report the bus reset and ask for a retry. The device
6214 * will return check condition/unit attention on the next command.
6215 */
6216 if (!res->resetting_device)
6217 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6218 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 6219 if (!ipr_is_naca_model(res))
6220 res->needs_sync_complete = 1;
6221 break;
6222 case IPR_IOASC_HW_DEV_BUS_STATUS:
6223 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6224 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa 6225 if (!ipr_get_autosense(ipr_cmd)) {
6226 if (!ipr_is_naca_model(res)) {
6227 ipr_erp_cancel_all(ipr_cmd);
6228 return;
6229 }
6230 }
1da177e4 6231 }
ee0a90fa 6232 if (!ipr_is_naca_model(res))
6233 res->needs_sync_complete = 1;
6234 break;
6235 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6236 break;
6237 case IPR_IOASC_IR_NON_OPTIMIZED:
6238 if (res->raw_mode) {
6239 res->raw_mode = 0;
6240 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6241 } else
6242 scsi_cmd->result |= (DID_ERROR << 16);
6243 break;
1da177e4 6244 default:
6245 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6246 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 6247 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6248 res->needs_sync_complete = 1;
6249 break;
6250 }
6251
63015bc9 6252 scsi_dma_unmap(ipr_cmd->scsi_cmd);
05a6538a 6253 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6254 scsi_cmd->scsi_done(scsi_cmd);
6255}
6256
6257/**
6258 * ipr_scsi_done - mid-layer done function
6259 * @ipr_cmd: ipr command struct
6260 *
6261 * This function is invoked by the interrupt handler for
6262 * ops generated by the SCSI mid-layer
6263 *
6264 * Return value:
6265 * none
6266 **/
6267static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6268{
6269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6270 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
96d21f00 6271 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
36b8e180 6272 unsigned long lock_flags;
1da177e4 6273
96d21f00 6274 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6275
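	/*
	 * Fast path: a successful command only needs the hrrq lock to be
	 * returned to the free queue.  Failed commands drop into ERP under
	 * the host lock instead.
	 */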
6276 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6277 scsi_dma_unmap(scsi_cmd);
6278
36b8e180 6279 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
05a6538a 6280 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4 6281 scsi_cmd->scsi_done(scsi_cmd);
36b8e180 6282 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
172cd6e1 6283 } else {
6284 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6285 spin_lock(&ipr_cmd->hrrq->_lock);
1da177e4 6286 ipr_erp_start(ioa_cfg, ipr_cmd);
6287 spin_unlock(&ipr_cmd->hrrq->_lock);
6288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
172cd6e1 6289 }
6290}
6291
6292/**
6293 * ipr_queuecommand - Queue a mid-layer request
00bfef2c 6294 * @shost: scsi host struct
1da177e4 6295 * @scsi_cmd: scsi command struct
6296 *
6297 * This function queues a request generated by the mid-layer.
6298 *
6299 * Return value:
6300 * 0 on success
6301 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6302 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6303 **/
6304static int ipr_queuecommand(struct Scsi_Host *shost,
6305 struct scsi_cmnd *scsi_cmd)
6306{
6307 struct ipr_ioa_cfg *ioa_cfg;
6308 struct ipr_resource_entry *res;
6309 struct ipr_ioarcb *ioarcb;
6310 struct ipr_cmnd *ipr_cmd;
56d6aa33 6311 unsigned long hrrq_flags, lock_flags;
d12f1576 6312 int rc;
05a6538a 6313 struct ipr_hrr_queue *hrrq;
6314 int hrrq_id;
1da177e4 6315
6316 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6317
1da177e4 6318 scsi_cmd->result = (DID_OK << 16);
00bfef2c 6319 res = scsi_cmd->device->hostdata;
56d6aa33 6320
6321 if (ipr_is_gata(res) && res->sata_port) {
6322 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6323 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6324 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6325 return rc;
6326 }
6327
05a6538a 6328 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6329 hrrq = &ioa_cfg->hrrq[hrrq_id];
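	/*
	 * Pick a host RRQ for this command; the selected queue's lock
	 * covers the free/pending list handling in the fast path below.
	 */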
1da177e4 6330
56d6aa33 6331 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6332 /*
6333 * We are currently blocking all devices due to a host reset.
6334 * We have told the host to stop giving us new requests, but
6335 * ERP ops don't count. FIXME
6336 */
bfae7820 6337 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
56d6aa33 6338 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6339 return SCSI_MLQUEUE_HOST_BUSY;
00bfef2c 6340 }
6341
6342 /*
6343 * FIXME - Create scsi_set_host_offline interface
6344 * and the ioa_is_dead check can be removed
6345 */
bfae7820 6346 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
56d6aa33 6347 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c 6348 goto err_nodev;
6349 }
6350
05a6538a 6351 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6352 if (ipr_cmd == NULL) {
56d6aa33 6353 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 6354 return SCSI_MLQUEUE_HOST_BUSY;
6355 }
56d6aa33 6356 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
35a39691 6357
172cd6e1 6358 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
1da177e4 6359 ioarcb = &ipr_cmd->ioarcb;
6360
6361 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6362 ipr_cmd->scsi_cmd = scsi_cmd;
172cd6e1 6363 ipr_cmd->done = ipr_scsi_eh_done;
6364
6365 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6366 if (scsi_cmd->underflow == 0)
6367 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6368
1da177e4 6369 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6370 if (ipr_is_gscsi(res) && res->reset_occurred) {
6371 res->reset_occurred = 0;
ab6c10b1 6372 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
0b1f8d44 6373 }
1da177e4 6374 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6375 if (scsi_cmd->flags & SCMD_TAGGED)
6376 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6377 else
6378 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6379 }
6380
6381 if (scsi_cmd->cmnd[0] >= 0xC0 &&
05a6538a 6382 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
1da177e4 6383 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
05a6538a 6384 }
6385 if (res->raw_mode && ipr_is_af_dasd_device(res))
6386 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
1da177e4 6387
6388 if (ioa_cfg->sis64)
6389 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6390 else
6391 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
1da177e4 6392
56d6aa33 6393 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6394 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
05a6538a 6395 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6396 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6397 if (!rc)
6398 scsi_dma_unmap(scsi_cmd);
a5fb407e 6399 return SCSI_MLQUEUE_HOST_BUSY;
6400 }
6401
56d6aa33 6402 if (unlikely(hrrq->ioa_is_dead)) {
05a6538a 6403 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6404 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6405 scsi_dma_unmap(scsi_cmd);
6406 goto err_nodev;
6407 }
6408
6409 ioarcb->res_handle = res->res_handle;
6410 if (res->needs_sync_complete) {
6411 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6412 res->needs_sync_complete = 0;
6413 }
05a6538a 6414 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
00bfef2c 6415 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
a5fb407e 6416 ipr_send_command(ipr_cmd);
56d6aa33 6417 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6418 return 0;
1da177e4 6419
00bfef2c 6420err_nodev:
56d6aa33 6421 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6422 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6423 scsi_cmd->result = (DID_NO_CONNECT << 16);
6424 scsi_cmd->scsi_done(scsi_cmd);
56d6aa33 6425 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6426 return 0;
6427}
f281233d 6428
6429/**
6430 * ipr_ioctl - IOCTL handler
6431 * @sdev: scsi device struct
6432 * @cmd: IOCTL cmd
6433 * @arg: IOCTL arg
6434 *
6435 * Return value:
6436 * 0 on success / other on failure
6437 **/
bd705f2d 6438static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6439{
6440 struct ipr_resource_entry *res;
6441
6442 res = (struct ipr_resource_entry *)sdev->hostdata;
6443 if (res && ipr_is_gata(res)) {
6444 if (cmd == HDIO_GET_IDENTITY)
6445 return -ENOTTY;
94be9a58 6446 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
0ce3a7e5 6447 }
6448
6449 return -EINVAL;
6450}
6451
6452/**
6453 * ipr_ioa_info - Get information about the card/driver
6454 * @host: scsi host struct
6455 *
6456 * Return value:
6457 * pointer to buffer with description string
6458 **/
203fa3fe 6459static const char *ipr_ioa_info(struct Scsi_Host *host)
6460{
6461 static char buffer[512];
6462 struct ipr_ioa_cfg *ioa_cfg;
6463 unsigned long lock_flags = 0;
6464
6465 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6466
6467 spin_lock_irqsave(host->host_lock, lock_flags);
6468 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6469 spin_unlock_irqrestore(host->host_lock, lock_flags);
6470
6471 return buffer;
6472}
6473
6474static struct scsi_host_template driver_template = {
6475 .module = THIS_MODULE,
6476 .name = "IPR",
6477 .info = ipr_ioa_info,
35a39691 6478 .ioctl = ipr_ioctl,
6479 .queuecommand = ipr_queuecommand,
6480 .eh_abort_handler = ipr_eh_abort,
6481 .eh_device_reset_handler = ipr_eh_dev_reset,
6482 .eh_host_reset_handler = ipr_eh_host_reset,
6483 .slave_alloc = ipr_slave_alloc,
6484 .slave_configure = ipr_slave_configure,
6485 .slave_destroy = ipr_slave_destroy,
f688f96d 6486 .scan_finished = ipr_scan_finished,
6487 .target_alloc = ipr_target_alloc,
6488 .target_destroy = ipr_target_destroy,
1da177e4 6489 .change_queue_depth = ipr_change_queue_depth,
6490 .bios_param = ipr_biosparam,
6491 .can_queue = IPR_MAX_COMMANDS,
6492 .this_id = -1,
6493 .sg_tablesize = IPR_MAX_SGLIST,
6494 .max_sectors = IPR_IOA_MAX_SECTORS,
6495 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6496 .use_clustering = ENABLE_CLUSTERING,
6497 .shost_attrs = ipr_ioa_attrs,
6498 .sdev_attrs = ipr_dev_attrs,
54b2b50c 6499 .proc_name = IPR_NAME,
2ecb204d 6500 .use_blk_tags = 1,
6501};
6502
6503/**
6504 * ipr_ata_phy_reset - libata phy_reset handler
6505 * @ap: ata port to reset
6506 *
6507 **/
6508static void ipr_ata_phy_reset(struct ata_port *ap)
6509{
6510 unsigned long flags;
6511 struct ipr_sata_port *sata_port = ap->private_data;
6512 struct ipr_resource_entry *res = sata_port->res;
6513 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6514 int rc;
6515
6516 ENTER;
6517 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6518 while (ioa_cfg->in_reset_reload) {
6519 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6520 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6521 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6522 }
6523
56d6aa33 6524 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6525 goto out_unlock;
6526
6527 rc = ipr_device_reset(ioa_cfg, res);
6528
6529 if (rc) {
3e4ec344 6530 ap->link.device[0].class = ATA_DEV_NONE;
6531 goto out_unlock;
6532 }
6533
6534 ap->link.device[0].class = res->ata_class;
6535 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
3e4ec344 6536 ap->link.device[0].class = ATA_DEV_NONE;
6537
6538out_unlock:
6539 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6540 LEAVE;
6541}
6542
6543/**
6544 * ipr_ata_post_internal - Cleanup after an internal command
6545 * @qc: ATA queued command
6546 *
6547 * Return value:
6548 * none
6549 **/
6550static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6551{
6552 struct ipr_sata_port *sata_port = qc->ap->private_data;
6553 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6554 struct ipr_cmnd *ipr_cmd;
05a6538a 6555 struct ipr_hrr_queue *hrrq;
6556 unsigned long flags;
6557
6558 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6559 while (ioa_cfg->in_reset_reload) {
6560 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6561 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6562 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6563 }
6564
05a6538a 6565 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 6566 spin_lock(&hrrq->_lock);
05a6538a 6567 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6568 if (ipr_cmd->qc == qc) {
6569 ipr_device_reset(ioa_cfg, sata_port->res);
6570 break;
6571 }
35a39691 6572 }
56d6aa33 6573 spin_unlock(&hrrq->_lock);
6574 }
6575 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6576}
6577
6578/**
6579 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6580 * @regs: destination
6581 * @tf: source ATA taskfile
6582 *
6583 * Return value:
6584 * none
6585 **/
6586static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6587 struct ata_taskfile *tf)
6588{
6589 regs->feature = tf->feature;
6590 regs->nsect = tf->nsect;
6591 regs->lbal = tf->lbal;
6592 regs->lbam = tf->lbam;
6593 regs->lbah = tf->lbah;
6594 regs->device = tf->device;
6595 regs->command = tf->command;
6596 regs->hob_feature = tf->hob_feature;
6597 regs->hob_nsect = tf->hob_nsect;
6598 regs->hob_lbal = tf->hob_lbal;
6599 regs->hob_lbam = tf->hob_lbam;
6600 regs->hob_lbah = tf->hob_lbah;
6601 regs->ctl = tf->ctl;
6602}
6603
6604/**
6605 * ipr_sata_done - done function for SATA commands
6606 * @ipr_cmd: ipr command struct
6607 *
6608 * This function is invoked by the interrupt handler for
6609 * ops generated by the SCSI mid-layer to SATA devices
6610 *
6611 * Return value:
6612 * none
6613 **/
6614static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6615{
6616 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6617 struct ata_queued_cmd *qc = ipr_cmd->qc;
6618 struct ipr_sata_port *sata_port = qc->ap->private_data;
6619 struct ipr_resource_entry *res = sata_port->res;
96d21f00 6620 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
35a39691 6621
56d6aa33 6622 spin_lock(&ipr_cmd->hrrq->_lock);
6623 if (ipr_cmd->ioa_cfg->sis64)
6624 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6625 sizeof(struct ipr_ioasa_gata));
6626 else
6627 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6628 sizeof(struct ipr_ioasa_gata));
6629 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6630
96d21f00 6631 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
3e7ebdfa 6632 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6633
6634 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
96d21f00 6635 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
35a39691 6636 else
96d21f00 6637 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
05a6538a 6638 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
56d6aa33 6639 spin_unlock(&ipr_cmd->hrrq->_lock);
6640 ata_qc_complete(qc);
6641}
6642
6643/**
6644 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6645 * @ipr_cmd: ipr command struct
6646 * @qc: ATA queued command
6647 *
6648 **/
6649static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6650 struct ata_queued_cmd *qc)
6651{
6652 u32 ioadl_flags = 0;
6653 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1ac7c26d 6654 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6655 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6656 int len = qc->nbytes;
6657 struct scatterlist *sg;
6658 unsigned int si;
6659 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6660
6661 if (len == 0)
6662 return;
6663
6664 if (qc->dma_dir == DMA_TO_DEVICE) {
6665 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6666 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6667 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6668 ioadl_flags = IPR_IOADL_FLAGS_READ;
6669
6670 ioarcb->data_transfer_length = cpu_to_be32(len);
6671 ioarcb->ioadl_len =
6672 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6673 ioarcb->u.sis64_addr_data.data_ioadl_addr =
1ac7c26d 6674 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6675
6676 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6677 ioadl64->flags = cpu_to_be32(ioadl_flags);
6678 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6679 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6680
6681 last_ioadl64 = ioadl64;
6682 ioadl64++;
6683 }
6684
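	/* Mark the final descriptor so the adapter knows where the scatter/gather list ends */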
6685 if (likely(last_ioadl64))
6686 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6687}
6688
6689/**
6690 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6691 * @ipr_cmd: ipr command struct
6692 * @qc: ATA queued command
6693 *
6694 **/
6695static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6696 struct ata_queued_cmd *qc)
6697{
6698 u32 ioadl_flags = 0;
6699 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 6700 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3be6cbd7 6701 struct ipr_ioadl_desc *last_ioadl = NULL;
dde20207 6702 int len = qc->nbytes;
35a39691 6703 struct scatterlist *sg;
ff2aeb1e 6704 unsigned int si;
6705
6706 if (len == 0)
6707 return;
6708
6709 if (qc->dma_dir == DMA_TO_DEVICE) {
6710 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6711 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6712 ioarcb->data_transfer_length = cpu_to_be32(len);
6713 ioarcb->ioadl_len =
6714 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6715 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6716 ioadl_flags = IPR_IOADL_FLAGS_READ;
6717 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6718 ioarcb->read_ioadl_len =
6719 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6720 }
6721
ff2aeb1e 6722 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6723 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6724 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6725
6726 last_ioadl = ioadl;
6727 ioadl++;
35a39691 6728 }
6729
6730 if (likely(last_ioadl))
6731 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6732}
6733
56d6aa33 6734/**
6735 * ipr_qc_defer - Get a free ipr_cmd
6736 * @qc: queued command
6737 *
6738 * Return value:
6739 * 0 if success / ATA_DEFER_LINK if no command block is available
6740 **/
6741static int ipr_qc_defer(struct ata_queued_cmd *qc)
6742{
6743 struct ata_port *ap = qc->ap;
6744 struct ipr_sata_port *sata_port = ap->private_data;
6745 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6746 struct ipr_cmnd *ipr_cmd;
6747 struct ipr_hrr_queue *hrrq;
6748 int hrrq_id;
6749
6750 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6751 hrrq = &ioa_cfg->hrrq[hrrq_id];
6752
6753 qc->lldd_task = NULL;
6754 spin_lock(&hrrq->_lock);
6755 if (unlikely(hrrq->ioa_is_dead)) {
6756 spin_unlock(&hrrq->_lock);
6757 return 0;
6758 }
6759
6760 if (unlikely(!hrrq->allow_cmds)) {
6761 spin_unlock(&hrrq->_lock);
6762 return ATA_DEFER_LINK;
6763 }
6764
6765 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6766 if (ipr_cmd == NULL) {
6767 spin_unlock(&hrrq->_lock);
6768 return ATA_DEFER_LINK;
6769 }
6770
6771 qc->lldd_task = ipr_cmd;
6772 spin_unlock(&hrrq->_lock);
6773 return 0;
6774}
6775
6776/**
6777 * ipr_qc_issue - Issue a SATA qc to a device
6778 * @qc: queued command
6779 *
6780 * Return value:
6781 * 0 if success / AC_ERR_* if the command could not be issued
6782 **/
6783static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6784{
6785 struct ata_port *ap = qc->ap;
6786 struct ipr_sata_port *sata_port = ap->private_data;
6787 struct ipr_resource_entry *res = sata_port->res;
6788 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6789 struct ipr_cmnd *ipr_cmd;
6790 struct ipr_ioarcb *ioarcb;
6791 struct ipr_ioarcb_ata_regs *regs;
6792
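	/* qc->lldd_task is normally reserved in ipr_qc_defer(); if that has not happened yet, try to grab a command block here */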
56d6aa33 6793 if (qc->lldd_task == NULL)
6794 ipr_qc_defer(qc);
6795
6796 ipr_cmd = qc->lldd_task;
6797 if (ipr_cmd == NULL)
0feeed82 6798 return AC_ERR_SYSTEM;
35a39691 6799
56d6aa33 6800 qc->lldd_task = NULL;
6801 spin_lock(&ipr_cmd->hrrq->_lock);
6802 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6803 ipr_cmd->hrrq->ioa_is_dead)) {
6804 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6805 spin_unlock(&ipr_cmd->hrrq->_lock);
6806 return AC_ERR_SYSTEM;
6807 }
6808
05a6538a 6809 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
35a39691 6810 ioarcb = &ipr_cmd->ioarcb;
35a39691 6811
6812 if (ioa_cfg->sis64) {
6813 regs = &ipr_cmd->i.ata_ioadl.regs;
6814 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6815 } else
6816 regs = &ioarcb->u.add_data.u.regs;
6817
6818 memset(regs, 0, sizeof(*regs));
6819 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
35a39691 6820
56d6aa33 6821 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6822 ipr_cmd->qc = qc;
6823 ipr_cmd->done = ipr_sata_done;
3e7ebdfa 6824 ipr_cmd->ioarcb.res_handle = res->res_handle;
6825 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6826 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6827 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
dde20207 6828 ipr_cmd->dma_use_sg = qc->n_elem;
35a39691 6829
6830 if (ioa_cfg->sis64)
6831 ipr_build_ata_ioadl64(ipr_cmd, qc);
6832 else
6833 ipr_build_ata_ioadl(ipr_cmd, qc);
6834
6835 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6836 ipr_copy_sata_tf(regs, &qc->tf);
6837 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
3e7ebdfa 6838 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6839
6840 switch (qc->tf.protocol) {
6841 case ATA_PROT_NODATA:
6842 case ATA_PROT_PIO:
6843 break;
6844
6845 case ATA_PROT_DMA:
6846 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6847 break;
6848
6849 case ATAPI_PROT_PIO:
6850 case ATAPI_PROT_NODATA:
6851 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6852 break;
6853
0dc36888 6854 case ATAPI_PROT_DMA:
6855 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6856 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6857 break;
6858
6859 default:
6860 WARN_ON(1);
56d6aa33 6861 spin_unlock(&ipr_cmd->hrrq->_lock);
0feeed82 6862 return AC_ERR_INVALID;
6863 }
6864
a32c055f 6865 ipr_send_command(ipr_cmd);
56d6aa33 6866 spin_unlock(&ipr_cmd->hrrq->_lock);
a32c055f 6867
6868 return 0;
6869}
6870
6871/**
6872 * ipr_qc_fill_rtf - Read result TF
6873 * @qc: ATA queued command
6874 *
6875 * Return value:
6876 * true
6877 **/
6878static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6879{
6880 struct ipr_sata_port *sata_port = qc->ap->private_data;
6881 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6882 struct ata_taskfile *tf = &qc->result_tf;
6883
6884 tf->feature = g->error;
6885 tf->nsect = g->nsect;
6886 tf->lbal = g->lbal;
6887 tf->lbam = g->lbam;
6888 tf->lbah = g->lbah;
6889 tf->device = g->device;
6890 tf->command = g->status;
6891 tf->hob_nsect = g->hob_nsect;
6892 tf->hob_lbal = g->hob_lbal;
6893 tf->hob_lbam = g->hob_lbam;
6894 tf->hob_lbah = g->hob_lbah;
6895
6896 return true;
6897}
6898
35a39691 6899static struct ata_port_operations ipr_sata_ops = {
35a39691 6900 .phy_reset = ipr_ata_phy_reset,
a1efdaba 6901 .hardreset = ipr_sata_reset,
35a39691 6902 .post_internal_cmd = ipr_ata_post_internal,
35a39691 6903 .qc_prep = ata_noop_qc_prep,
56d6aa33 6904 .qc_defer = ipr_qc_defer,
35a39691 6905 .qc_issue = ipr_qc_issue,
4c9bf4e7 6906 .qc_fill_rtf = ipr_qc_fill_rtf,
6907 .port_start = ata_sas_port_start,
6908 .port_stop = ata_sas_port_stop
6909};
6910
6911static struct ata_port_info sata_port_info = {
6912 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
6913 ATA_FLAG_SAS_HOST,
6914 .pio_mask = ATA_PIO4_ONLY,
6915 .mwdma_mask = ATA_MWDMA2,
6916 .udma_mask = ATA_UDMA6,
6917 .port_ops = &ipr_sata_ops
6918};
6919
6920#ifdef CONFIG_PPC_PSERIES
6921static const u16 ipr_blocked_processors[] = {
6922 PVR_NORTHSTAR,
6923 PVR_PULSAR,
6924 PVR_POWER4,
6925 PVR_ICESTAR,
6926 PVR_SSTAR,
6927 PVR_POWER4p,
6928 PVR_630,
6929 PVR_630p
6930};
6931
6932/**
6933 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6934 * @ioa_cfg: ioa cfg struct
6935 *
6936 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6937 * certain pSeries hardware. This function determines if the given
6938 * adapter is in one of these configurations or not.
6939 *
6940 * Return value:
6941 * 1 if adapter is not supported / 0 if adapter is supported
6942 **/
6943static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6944{
6945 int i;
6946
44c10138 6947 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
203fa3fe 6948 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
d3dbeef6 6949 if (pvr_version_is(ipr_blocked_processors[i]))
44c10138 6950 return 1;
6951 }
6952 }
6953 return 0;
6954}
6955#else
6956#define ipr_invalid_adapter(ioa_cfg) 0
6957#endif
6958
6959/**
6960 * ipr_ioa_bringdown_done - IOA bring down completion.
6961 * @ipr_cmd: ipr command struct
6962 *
6963 * This function processes the completion of an adapter bring down.
6964 * It wakes any reset sleepers.
6965 *
6966 * Return value:
6967 * IPR_RC_JOB_RETURN
6968 **/
6969static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6970{
6971 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96b04db9 6972 int i;
6973
6974 ENTER;
6975 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6976 ipr_trace;
6977 spin_unlock_irq(ioa_cfg->host->host_lock);
6978 scsi_unblock_requests(ioa_cfg->host);
6979 spin_lock_irq(ioa_cfg->host->host_lock);
6980 }
6981
6982 ioa_cfg->in_reset_reload = 0;
6983 ioa_cfg->reset_retries = 0;
96b04db9 6984 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6985 spin_lock(&ioa_cfg->hrrq[i]._lock);
6986 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6987 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6988 }
6989 wmb();
6990
05a6538a 6991 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4 6992 wake_up_all(&ioa_cfg->reset_wait_q);
6993 LEAVE;
6994
6995 return IPR_RC_JOB_RETURN;
6996}
6997
6998/**
6999 * ipr_ioa_reset_done - IOA reset completion.
7000 * @ipr_cmd: ipr command struct
7001 *
7002 * This function processes the completion of an adapter reset.
7003 * It schedules any necessary mid-layer add/removes and
7004 * wakes any reset sleepers.
7005 *
7006 * Return value:
7007 * IPR_RC_JOB_RETURN
7008 **/
7009static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7010{
7011 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7012 struct ipr_resource_entry *res;
7013 struct ipr_hostrcb *hostrcb, *temp;
56d6aa33 7014 int i = 0, j;
7015
7016 ENTER;
7017 ioa_cfg->in_reset_reload = 0;
56d6aa33 7018 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7019 spin_lock(&ioa_cfg->hrrq[j]._lock);
7020 ioa_cfg->hrrq[j].allow_cmds = 1;
7021 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7022 }
7023 wmb();
1da177e4 7024 ioa_cfg->reset_cmd = NULL;
3d1d0da6 7025 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7026
7027 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
f688f96d 7028 if (res->add_to_ml || res->del_from_ml) {
7029 ipr_trace;
7030 break;
7031 }
7032 }
7033 schedule_work(&ioa_cfg->work_q);
7034
7035 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
7036 list_del(&hostrcb->queue);
7037 if (i++ < IPR_NUM_LOG_HCAMS)
7038 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
7039 else
7040 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
7041 }
7042
6bb04170 7043 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7044 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7045
7046 ioa_cfg->reset_retries = 0;
05a6538a 7047 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7048 wake_up_all(&ioa_cfg->reset_wait_q);
7049
30237853 7050 spin_unlock(ioa_cfg->host->host_lock);
1da177e4 7051 scsi_unblock_requests(ioa_cfg->host);
30237853 7052 spin_lock(ioa_cfg->host->host_lock);
1da177e4 7053
56d6aa33 7054 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
7055 scsi_block_requests(ioa_cfg->host);
7056
f688f96d 7057 schedule_work(&ioa_cfg->work_q);
7058 LEAVE;
7059 return IPR_RC_JOB_RETURN;
7060}
7061
7062/**
7063 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7064 * @supported_dev: supported device struct
7065 * @vpids: vendor product id struct
7066 *
7067 * Return value:
7068 * none
7069 **/
7070static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7071 struct ipr_std_inq_vpids *vpids)
7072{
7073 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7074 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7075 supported_dev->num_records = 1;
7076 supported_dev->data_length =
7077 cpu_to_be16(sizeof(struct ipr_supported_device));
7078 supported_dev->reserved = 0;
7079}
7080
7081/**
7082 * ipr_set_supported_devs - Send Set Supported Devices for a device
7083 * @ipr_cmd: ipr command struct
7084 *
a32c055f 7085 * This function sends a Set Supported Devices command to the adapter.
7086 *
7087 * Return value:
7088 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7089 **/
7090static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7091{
7092 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7093 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7094 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7095 struct ipr_resource_entry *res = ipr_cmd->u.res;
7096
7097 ipr_cmd->job_step = ipr_ioa_reset_done;
7098
7099 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 7100 if (!ipr_is_scsi_disk(res))
7101 continue;
7102
7103 ipr_cmd->u.res = res;
3e7ebdfa 7104 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7105
7106 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7107 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7108 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7109
7110 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 7111 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7112 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7113 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7114
7115 ipr_init_ioadl(ipr_cmd,
7116 ioa_cfg->vpd_cbs_dma +
7117 offsetof(struct ipr_misc_cbs, supp_dev),
7118 sizeof(struct ipr_supported_device),
7119 IPR_IOADL_FLAGS_WRITE_LAST);
7120
7121 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7122 IPR_SET_SUP_DEVICE_TIMEOUT);
7123
7124 if (!ioa_cfg->sis64)
7125 ipr_cmd->job_step = ipr_set_supported_devs;
05a6538a 7126 LEAVE;
7127 return IPR_RC_JOB_RETURN;
7128 }
7129
05a6538a 7130 LEAVE;
7131 return IPR_RC_JOB_CONTINUE;
7132}
7133
7134/**
7135 * ipr_get_mode_page - Locate specified mode page
7136 * @mode_pages: mode page buffer
7137 * @page_code: page code to find
7138 * @len: minimum required length for mode page
7139 *
7140 * Return value:
7141 * pointer to mode page / NULL on failure
7142 **/
7143static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7144 u32 page_code, u32 len)
7145{
7146 struct ipr_mode_page_hdr *mode_hdr;
7147 u32 page_length;
7148 u32 length;
7149
7150 if (!mode_pages || (mode_pages->hdr.length == 0))
7151 return NULL;
7152
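	/* hdr.length does not count itself; subtract the remaining header bytes and any block descriptors to get the length of the mode page data */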
7153 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7154 mode_hdr = (struct ipr_mode_page_hdr *)
7155 (mode_pages->data + mode_pages->hdr.block_desc_len);
7156
7157 while (length) {
7158 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7159 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7160 return mode_hdr;
7161 break;
7162 } else {
7163 page_length = (sizeof(struct ipr_mode_page_hdr) +
7164 mode_hdr->page_length);
7165 length -= page_length;
7166 mode_hdr = (struct ipr_mode_page_hdr *)
7167 ((unsigned long)mode_hdr + page_length);
7168 }
7169 }
7170 return NULL;
7171}
7172
7173/**
7174 * ipr_check_term_power - Check for term power errors
7175 * @ioa_cfg: ioa config struct
7176 * @mode_pages: IOAFP mode pages buffer
7177 *
7178 * Check the IOAFP's mode page 28 for term power errors
7179 *
7180 * Return value:
7181 * nothing
7182 **/
7183static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7184 struct ipr_mode_pages *mode_pages)
7185{
7186 int i;
7187 int entry_length;
7188 struct ipr_dev_bus_entry *bus;
7189 struct ipr_mode_page28 *mode_page;
7190
7191 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7192 sizeof(struct ipr_mode_page28));
7193
7194 entry_length = mode_page->entry_length;
7195
7196 bus = mode_page->bus;
7197
7198 for (i = 0; i < mode_page->num_entries; i++) {
7199 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7200 dev_err(&ioa_cfg->pdev->dev,
7201 "Term power is absent on scsi bus %d\n",
7202 bus->res_addr.bus);
7203 }
7204
7205 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7206 }
7207}
7208
7209/**
7210 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7211 * @ioa_cfg: ioa config struct
7212 *
7213 * Looks through the config table for SES devices. If an SES device
7214 * appears in the SES table with a maximum SCSI bus speed, the
7215 * corresponding bus is limited to that speed.
7216 *
7217 * Return value:
7218 * none
7219 **/
7220static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7221{
7222 u32 max_xfer_rate;
7223 int i;
7224
7225 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7226 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7227 ioa_cfg->bus_attr[i].bus_width);
7228
7229 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7230 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7231 }
7232}
7233
7234/**
7235 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7236 * @ioa_cfg: ioa config struct
7237 * @mode_pages: mode page 28 buffer
7238 *
7239 * Updates mode page 28 based on driver configuration
7240 *
7241 * Return value:
7242 * none
7243 **/
7244static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
203fa3fe 7245 struct ipr_mode_pages *mode_pages)
7246{
7247 int i, entry_length;
7248 struct ipr_dev_bus_entry *bus;
7249 struct ipr_bus_attributes *bus_attr;
7250 struct ipr_mode_page28 *mode_page;
7251
7252 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7253 sizeof(struct ipr_mode_page28));
7254
7255 entry_length = mode_page->entry_length;
7256
7257 /* Loop for each device bus entry */
7258 for (i = 0, bus = mode_page->bus;
7259 i < mode_page->num_entries;
7260 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7261 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7262 dev_err(&ioa_cfg->pdev->dev,
7263 "Invalid resource address reported: 0x%08X\n",
7264 IPR_GET_PHYS_LOC(bus->res_addr));
7265 continue;
7266 }
7267
7268 bus_attr = &ioa_cfg->bus_attr[i];
7269 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7270 bus->bus_width = bus_attr->bus_width;
7271 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7272 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7273 if (bus_attr->qas_enabled)
7274 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7275 else
7276 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7277 }
7278}
7279
7280/**
7281 * ipr_build_mode_select - Build a mode select command
7282 * @ipr_cmd: ipr command struct
7283 * @res_handle: resource handle to send command to
7284 * @parm: Byte 2 of the Mode Select CDB
7285 * @dma_addr: DMA buffer address
7286 * @xfer_len: data transfer length
7287 *
7288 * Return value:
7289 * none
7290 **/
7291static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7292 __be32 res_handle, u8 parm,
7293 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7294{
7295 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7296
7297 ioarcb->res_handle = res_handle;
7298 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7299 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7300 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7301 ioarcb->cmd_pkt.cdb[1] = parm;
7302 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7303
a32c055f 7304 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7305}
7306
7307/**
7308 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7309 * @ipr_cmd: ipr command struct
7310 *
7311 * This function sets up the SCSI bus attributes and sends
7312 * a Mode Select for Page 28 to activate them.
7313 *
7314 * Return value:
7315 * IPR_RC_JOB_RETURN
7316 **/
7317static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7318{
7319 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7320 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7321 int length;
7322
7323 ENTER;
7324 ipr_scsi_bus_speed_limit(ioa_cfg);
7325 ipr_check_term_power(ioa_cfg, mode_pages);
7326 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
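	/* The mode data length field is reserved for MODE SELECT, so record the length and clear it before sending the pages back */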
7327 length = mode_pages->hdr.length + 1;
7328 mode_pages->hdr.length = 0;
7329
7330 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7331 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7332 length);
7333
7334 ipr_cmd->job_step = ipr_set_supported_devs;
7335 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7336 struct ipr_resource_entry, queue);
7337 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7338
7339 LEAVE;
7340 return IPR_RC_JOB_RETURN;
7341}
7342
7343/**
7344 * ipr_build_mode_sense - Builds a mode sense command
7345 * @ipr_cmd: ipr command struct
7346 * @res_handle: resource handle to send command to
7347 * @parm: Byte 2 of mode sense command
7348 * @dma_addr: DMA address of mode sense buffer
7349 * @xfer_len: Size of DMA buffer
7350 *
7351 * Return value:
7352 * none
7353 **/
7354static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7355 __be32 res_handle,
a32c055f 7356 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7357{
7358 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7359
7360 ioarcb->res_handle = res_handle;
7361 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7362 ioarcb->cmd_pkt.cdb[2] = parm;
7363 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7364 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7365
a32c055f 7366 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7367}
7368
dfed823e 7369/**
7370 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7371 * @ipr_cmd: ipr command struct
7372 *
7373 * This function handles the failure of an IOA bringup command.
7374 *
7375 * Return value:
7376 * IPR_RC_JOB_RETURN
7377 **/
7378static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7379{
7380 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7381 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e 7382
7383 dev_err(&ioa_cfg->pdev->dev,
7384 "0x%02X failed with IOASC: 0x%08X\n",
7385 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7386
7387 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
05a6538a 7388 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
dfed823e 7389 return IPR_RC_JOB_RETURN;
7390}
7391
7392/**
7393 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7394 * @ipr_cmd: ipr command struct
7395 *
7396 * This function handles the failure of a Mode Sense to the IOAFP.
7397 * Some adapters do not handle all mode pages.
7398 *
7399 * Return value:
7400 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7401 **/
7402static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7403{
f72919ec 7404 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7405 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e 7406
7407 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7408 ipr_cmd->job_step = ipr_set_supported_devs;
7409 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7410 struct ipr_resource_entry, queue);
dfed823e 7411 return IPR_RC_JOB_CONTINUE;
7412 }
7413
7414 return ipr_reset_cmd_failed(ipr_cmd);
7415}
7416
7417/**
7418 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7419 * @ipr_cmd: ipr command struct
7420 *
7421 * This function sends a Page 28 mode sense to the IOA to
7422 * retrieve SCSI bus attributes.
7423 *
7424 * Return value:
7425 * IPR_RC_JOB_RETURN
7426 **/
7427static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7428{
7429 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7430
7431 ENTER;
7432 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7433 0x28, ioa_cfg->vpd_cbs_dma +
7434 offsetof(struct ipr_misc_cbs, mode_pages),
7435 sizeof(struct ipr_mode_pages));
7436
7437 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 7438 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7439
7440 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7441
7442 LEAVE;
7443 return IPR_RC_JOB_RETURN;
7444}
7445
7446/**
7447 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7448 * @ipr_cmd: ipr command struct
7449 *
7450 * This function enables dual IOA RAID support if possible.
7451 *
7452 * Return value:
7453 * IPR_RC_JOB_RETURN
7454 **/
7455static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7456{
7457 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7458 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7459 struct ipr_mode_page24 *mode_page;
7460 int length;
7461
7462 ENTER;
7463 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7464 sizeof(struct ipr_mode_page24));
7465
7466 if (mode_page)
7467 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7468
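	/* As on the page 28 path, clear the reserved mode data length field before issuing the MODE SELECT */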
7469 length = mode_pages->hdr.length + 1;
7470 mode_pages->hdr.length = 0;
7471
7472 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7473 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7474 length);
7475
7476 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7477 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7478
7479 LEAVE;
7480 return IPR_RC_JOB_RETURN;
7481}
7482
7483/**
7484 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7485 * @ipr_cmd: ipr command struct
7486 *
7487 * This function handles the failure of a Mode Sense to the IOAFP.
7488 * Some adapters do not handle all mode pages.
7489 *
7490 * Return value:
7491 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7492 **/
7493static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7494{
96d21f00 7495 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7496
7497 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7498 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7499 return IPR_RC_JOB_CONTINUE;
7500 }
7501
7502 return ipr_reset_cmd_failed(ipr_cmd);
7503}
7504
7505/**
7506 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7507 * @ipr_cmd: ipr command struct
7508 *
7509 * This function sends a mode sense to the IOA to retrieve
7510 * the IOA Advanced Function Control mode page.
7511 *
7512 * Return value:
7513 * IPR_RC_JOB_RETURN
7514 **/
7515static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7516{
7517 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7518
7519 ENTER;
7520 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7521 0x24, ioa_cfg->vpd_cbs_dma +
7522 offsetof(struct ipr_misc_cbs, mode_pages),
7523 sizeof(struct ipr_mode_pages));
7524
7525 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7526 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7527
7528 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7529
7530 LEAVE;
7531 return IPR_RC_JOB_RETURN;
7532}
7533
7534/**
7535 * ipr_init_res_table - Initialize the resource table
7536 * @ipr_cmd: ipr command struct
7537 *
7538 * This function looks through the existing resource table, comparing
7539 * it with the config table. This function will take care of old/new
7540 * devices and schedule adding/removing them from the mid-layer
7541 * as appropriate.
7542 *
7543 * Return value:
7544 * IPR_RC_JOB_CONTINUE
7545 **/
7546static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7547{
7548 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7549 struct ipr_resource_entry *res, *temp;
7550 struct ipr_config_table_entry_wrapper cfgtew;
7551 int entries, found, flag, i;
7552 LIST_HEAD(old_res);
7553
7554 ENTER;
7555 if (ioa_cfg->sis64)
7556 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7557 else
7558 flag = ioa_cfg->u.cfg_table->hdr.flags;
7559
7560 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7561 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7562
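	/* Park every known resource on old_res; anything still there after the config table scan is no longer reported by the adapter */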
7563 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7564 list_move_tail(&res->queue, &old_res);
7565
3e7ebdfa 7566 if (ioa_cfg->sis64)
438b0331 7567 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7568 else
7569 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7570
7571 for (i = 0; i < entries; i++) {
7572 if (ioa_cfg->sis64)
7573 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7574 else
7575 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7576 found = 0;
7577
7578 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 7579 if (ipr_is_same_device(res, &cfgtew)) {
7580 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7581 found = 1;
7582 break;
7583 }
7584 }
7585
7586 if (!found) {
7587 if (list_empty(&ioa_cfg->free_res_q)) {
7588 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7589 break;
7590 }
7591
7592 found = 1;
7593 res = list_entry(ioa_cfg->free_res_q.next,
7594 struct ipr_resource_entry, queue);
7595 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 7596 ipr_init_res_entry(res, &cfgtew);
1da177e4 7597 res->add_to_ml = 1;
7598 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7599 res->sdev->allow_restart = 1;
7600
7601 if (found)
3e7ebdfa 7602 ipr_update_res_entry(res, &cfgtew);
7603 }
7604
7605 list_for_each_entry_safe(res, temp, &old_res, queue) {
7606 if (res->sdev) {
7607 res->del_from_ml = 1;
3e7ebdfa 7608 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 7609 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7610 }
7611 }
7612
7613 list_for_each_entry_safe(res, temp, &old_res, queue) {
7614 ipr_clear_res_target(res);
7615 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7616 }
7617
7618 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7619 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7620 else
7621 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7622
7623 LEAVE;
7624 return IPR_RC_JOB_CONTINUE;
7625}
7626
7627/**
7628 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7629 * @ipr_cmd: ipr command struct
7630 *
7631 * This function sends a Query IOA Configuration command
7632 * to the adapter to retrieve the IOA configuration table.
7633 *
7634 * Return value:
7635 * IPR_RC_JOB_RETURN
7636 **/
7637static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7638{
7639 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7640 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4 7641 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 7642 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7643
7644 ENTER;
7645 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7646 ioa_cfg->dual_raid = 1;
7647 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7648 ucode_vpd->major_release, ucode_vpd->card_type,
7649 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7650 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7651 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7652
7653 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
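	/* cdb[6..8]: allocated length of the config table buffer, most significant byte first */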
438b0331 7654 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7655 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7656 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
1da177e4 7657
3e7ebdfa 7658 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
a32c055f 7659 IPR_IOADL_FLAGS_READ_LAST);
7660
7661 ipr_cmd->job_step = ipr_init_res_table;
7662
7663 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7664
7665 LEAVE;
7666 return IPR_RC_JOB_RETURN;
7667}
7668
7669/**
7670 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7671 * @ipr_cmd: ipr command struct
7672 *
7673 * This utility function sends an inquiry to the adapter.
7674 *
7675 * Return value:
7676 * none
7677 **/
7678static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
a32c055f 7679 dma_addr_t dma_addr, u8 xfer_len)
7680{
7681 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7682
7683 ENTER;
7684 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7685 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7686
7687 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7688 ioarcb->cmd_pkt.cdb[1] = flags;
7689 ioarcb->cmd_pkt.cdb[2] = page;
7690 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7691
a32c055f 7692 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7693
7694 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7695 LEAVE;
7696}
7697
62275040 7698/**
7699 * ipr_inquiry_page_supported - Is the given inquiry page supported
7700 * @page0: inquiry page 0 buffer
7701 * @page: page code.
7702 *
7703 * This function determines if the specified inquiry page is supported.
7704 *
7705 * Return value:
7706 * 1 if page is supported / 0 if not
7707 **/
7708static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7709{
7710 int i;
7711
7712 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7713 if (page0->page[i] == page)
7714 return 1;
7715
7716 return 0;
7717}
7718
7719/**
7720 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7721 * @ipr_cmd: ipr command struct
7722 *
7723 * This function sends a Page 0xD0 inquiry to the adapter
7724 * to retrieve adapter capabilities.
7725 *
7726 * Return value:
7727 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7728 **/
7729static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7730{
7731 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7732 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7733 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7734
7735 ENTER;
7736 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7737 memset(cap, 0, sizeof(*cap));
7738
7739 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7740 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7741 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7742 sizeof(struct ipr_inquiry_cap));
7743 return IPR_RC_JOB_RETURN;
7744 }
7745
7746 LEAVE;
7747 return IPR_RC_JOB_CONTINUE;
7748}
7749
7750/**
7751 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7752 * @ipr_cmd: ipr command struct
7753 *
7754 * This function sends a Page 3 inquiry to the adapter
7755 * to retrieve software VPD information.
7756 *
7757 * Return value:
7758 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7759 **/
7760static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040 7761{
7762 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
62275040 7763
7764 ENTER;
7765
ac09c349 7766 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
62275040 7767
7768 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7769 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7770 sizeof(struct ipr_inquiry_page3));
7771
7772 LEAVE;
7773 return IPR_RC_JOB_RETURN;
7774}
7775
7776/**
7777 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7778 * @ipr_cmd: ipr command struct
7779 *
7780 * This function sends a Page 0 inquiry to the adapter
7781 * to retrieve supported inquiry pages.
7782 *
7783 * Return value:
7784 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7785 **/
7786static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7787{
7788 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7789 char type[5];
7790
7791 ENTER;
7792
7793 /* Grab the type out of the VPD and store it away */
7794 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7795 type[4] = '\0';
7796 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7797
7798 if (ipr_invalid_adapter(ioa_cfg)) {
7799 dev_err(&ioa_cfg->pdev->dev,
7800 "Adapter not supported in this hardware configuration.\n");
7801
7802 if (!ipr_testmode) {
7803 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7804 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7805 list_add_tail(&ipr_cmd->queue,
7806 &ioa_cfg->hrrq->hrrq_free_q);
7807 return IPR_RC_JOB_RETURN;
7808 }
7809 }
7810
62275040 7811 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 7812
62275040 7813 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7814 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7815 sizeof(struct ipr_inquiry_page0));
7816
7817 LEAVE;
7818 return IPR_RC_JOB_RETURN;
7819}
7820
7821/**
7822 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7823 * @ipr_cmd: ipr command struct
7824 *
7825 * This function sends a standard inquiry to the adapter.
7826 *
7827 * Return value:
7828 * IPR_RC_JOB_RETURN
7829 **/
7830static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7831{
7832 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7833
7834 ENTER;
62275040 7835 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7836
7837 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7838 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7839 sizeof(struct ipr_ioa_vpd));
7840
7841 LEAVE;
7842 return IPR_RC_JOB_RETURN;
7843}
7844
7845/**
214777ba 7846 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7847 * @ipr_cmd: ipr command struct
7848 *
7849 * This function sends an Identify Host Request Response Queue
7850 * command to establish the HRRQ with the adapter.
7851 *
7852 * Return value:
7853 * IPR_RC_JOB_RETURN
7854 **/
214777ba 7855static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7856{
7857 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7858 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
05a6538a 7859 struct ipr_hrr_queue *hrrq;
7860
7861 ENTER;
05a6538a 7862 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7863 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7864
56d6aa33 7865 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7866 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
1da177e4 7867
05a6538a 7868 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7869 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1da177e4 7870
05a6538a 7871 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7872 if (ioa_cfg->sis64)
7873 ioarcb->cmd_pkt.cdb[1] = 0x1;
214777ba 7874
05a6538a 7875 if (ioa_cfg->nvectors == 1)
7876 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7877 else
7878 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7879
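		/* cdb[2..5]: low 32 bits of the host RRQ DMA address, MSB first; cdb[7..8]: queue size in bytes. SIS-64 adapters add the high 32 bits in cdb[10..13] below. */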
7880 ioarcb->cmd_pkt.cdb[2] =
7881 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7882 ioarcb->cmd_pkt.cdb[3] =
7883 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7884 ioarcb->cmd_pkt.cdb[4] =
7885 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7886 ioarcb->cmd_pkt.cdb[5] =
7887 ((u64) hrrq->host_rrq_dma) & 0xff;
7888 ioarcb->cmd_pkt.cdb[7] =
7889 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7890 ioarcb->cmd_pkt.cdb[8] =
7891 (sizeof(u32) * hrrq->size) & 0xff;
7892
7893 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 7894 ioarcb->cmd_pkt.cdb[9] =
7895 ioa_cfg->identify_hrrq_index;
1da177e4 7896
05a6538a 7897 if (ioa_cfg->sis64) {
7898 ioarcb->cmd_pkt.cdb[10] =
7899 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7900 ioarcb->cmd_pkt.cdb[11] =
7901 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7902 ioarcb->cmd_pkt.cdb[12] =
7903 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7904 ioarcb->cmd_pkt.cdb[13] =
7905 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7906 }
7907
7908 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 7909 ioarcb->cmd_pkt.cdb[14] =
7910 ioa_cfg->identify_hrrq_index;
05a6538a 7911
7912 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7913 IPR_INTERNAL_TIMEOUT);
7914
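		/* More queues left to identify: run this job step again for the next HRRQ */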
56d6aa33 7915 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7916 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
05a6538a 7917
7918 LEAVE;
7919 return IPR_RC_JOB_RETURN;
05a6538a 7920 }
7921
1da177e4 7922 LEAVE;
05a6538a 7923 return IPR_RC_JOB_CONTINUE;
7924}
7925
7926/**
7927 * ipr_reset_timer_done - Adapter reset timer function
7928 * @ipr_cmd: ipr command struct
7929 *
7930 * Description: This function is used in adapter reset processing
7931 * for timing events. If the reset_cmd pointer in the IOA
7932 * config struct is not this adapter's, we are doing nested
7933 * resets and fail_all_ops will take care of freeing the
7934 * command block.
7935 *
7936 * Return value:
7937 * none
7938 **/
7939static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7940{
7941 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7942 unsigned long lock_flags = 0;
7943
7944 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7945
7946 if (ioa_cfg->reset_cmd == ipr_cmd) {
7947 list_del(&ipr_cmd->queue);
7948 ipr_cmd->done(ipr_cmd);
7949 }
7950
7951 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7952}
7953
7954/**
7955 * ipr_reset_start_timer - Start a timer for adapter reset job
7956 * @ipr_cmd: ipr command struct
7957 * @timeout: timeout value
7958 *
7959 * Description: This function is used in adapter reset processing
7960 * for timing events. If the reset_cmd pointer in the IOA
7961 * config struct is not this adapter's we are doing nested
7962 * resets and fail_all_ops will take care of freeing the
7963 * command block.
7964 *
7965 * Return value:
7966 * none
7967 **/
7968static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7969 unsigned long timeout)
7970{
05a6538a 7971
7972 ENTER;
7973 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7974 ipr_cmd->done = ipr_reset_ioa_job;
7975
7976 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7977 ipr_cmd->timer.expires = jiffies + timeout;
7978 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7979 add_timer(&ipr_cmd->timer);
7980}
7981
7982/**
7983 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7984 * @ioa_cfg: ioa cfg struct
7985 *
7986 * Return value:
7987 * nothing
7988 **/
7989static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7990{
05a6538a 7991 struct ipr_hrr_queue *hrrq;
1da177e4 7992
05a6538a 7993 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 7994 spin_lock(&hrrq->_lock);
05a6538a 7995 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7996
7997 /* Initialize Host RRQ pointers */
7998 hrrq->hrrq_start = hrrq->host_rrq;
7999 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8000 hrrq->hrrq_curr = hrrq->hrrq_start;
8001 hrrq->toggle_bit = 1;
56d6aa33 8002 spin_unlock(&hrrq->_lock);
05a6538a 8003 }
56d6aa33 8004 wmb();
05a6538a 8005
56d6aa33 8006 ioa_cfg->identify_hrrq_index = 0;
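	/* Seed the round-robin queue selector; with more than one HRRQ, queue 0 appears to be reserved for internal commands, so I/O selection starts at 1 */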
8007 if (ioa_cfg->hrrq_num == 1)
8008 atomic_set(&ioa_cfg->hrrq_index, 0);
8009 else
8010 atomic_set(&ioa_cfg->hrrq_index, 1);
8011
8012 /* Zero out config table */
3e7ebdfa 8013 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8014}
8015
8016/**
8017 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8018 * @ipr_cmd: ipr command struct
8019 *
8020 * Return value:
8021 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8022 **/
8023static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8024{
8025 unsigned long stage, stage_time;
8026 u32 feedback;
8027 volatile u32 int_reg;
8028 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8029 u64 maskval = 0;
8030
8031 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8032 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8033 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8034
8035 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8036
8037 /* sanity check the stage_time value */
8038 if (stage_time == 0)
8039 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8040 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8041 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8042 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8043 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8044
8045 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8046 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8047 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8048 stage_time = ioa_cfg->transop_timeout;
8049 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8050 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8051 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8052 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8053 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8054 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8055 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8056 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8057 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8058 return IPR_RC_JOB_CONTINUE;
8059 }
8060 }
8061
8062 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8063 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8064 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8065 ipr_cmd->done = ipr_reset_ioa_job;
8066 add_timer(&ipr_cmd->timer);
05a6538a 8067
8068 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8069
8070 return IPR_RC_JOB_RETURN;
8071}
8072
8073/**
8074 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8075 * @ipr_cmd: ipr command struct
8076 *
8077 * This function reinitializes some control blocks and
8078 * enables destructive diagnostics on the adapter.
8079 *
8080 * Return value:
8081 * IPR_RC_JOB_RETURN
8082 **/
8083static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8084{
8085 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8086 volatile u32 int_reg;
7be96900 8087 volatile u64 maskval;
56d6aa33 8088 int i;
8089
8090 ENTER;
214777ba 8091 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8092 ipr_init_ioa_mem(ioa_cfg);
8093
56d6aa33 8094 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8095 spin_lock(&ioa_cfg->hrrq[i]._lock);
8096 ioa_cfg->hrrq[i].allow_interrupts = 1;
8097 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8098 }
8099 wmb();
8100 if (ioa_cfg->sis64) {
8101 /* Set the adapter to the correct endian mode. */
8102 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8103 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8104 }
8105
7be96900 8106 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8107
8108 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8109 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
214777ba 8110 ioa_cfg->regs.clr_interrupt_mask_reg32);
8111 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8112 return IPR_RC_JOB_CONTINUE;
8113 }
8114
8115 /* Enable destructive diagnostics on IOA */
8116 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8117
8118 if (ioa_cfg->sis64) {
8119 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8120 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8121 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8122 } else
8123 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4 8124
8125 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8126
8127 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8128
8129 if (ioa_cfg->sis64) {
8130 ipr_cmd->job_step = ipr_reset_next_stage;
8131 return IPR_RC_JOB_CONTINUE;
8132 }
8133
1da177e4 8134 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5469cb5b 8135 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8136 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8137 ipr_cmd->done = ipr_reset_ioa_job;
8138 add_timer(&ipr_cmd->timer);
05a6538a 8139 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8140
8141 LEAVE;
8142 return IPR_RC_JOB_RETURN;
8143}
8144
8145/**
8146 * ipr_reset_wait_for_dump - Wait for a dump to time out.
8147 * @ipr_cmd: ipr command struct
8148 *
8149 * This function is invoked when an adapter dump has run out
8150 * of processing time.
8151 *
8152 * Return value:
8153 * IPR_RC_JOB_CONTINUE
8154 **/
8155static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8156{
8157 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8158
8159 if (ioa_cfg->sdt_state == GET_DUMP)
8160 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8161 else if (ioa_cfg->sdt_state == READ_DUMP)
8162 ioa_cfg->sdt_state = ABORT_DUMP;
8163
4c647e90 8164 ioa_cfg->dump_timeout = 1;
8165 ipr_cmd->job_step = ipr_reset_alert;
8166
8167 return IPR_RC_JOB_CONTINUE;
8168}
8169
8170/**
8171 * ipr_unit_check_no_data - Log a unit check/no data error log
8172 * @ioa_cfg: ioa config struct
8173 *
8174 * Logs an error indicating the adapter unit checked, but for some
8175 * reason, we were unable to fetch the unit check buffer.
8176 *
8177 * Return value:
8178 * nothing
8179 **/
8180static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8181{
8182 ioa_cfg->errors_logged++;
8183 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8184}
8185
8186/**
8187 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8188 * @ioa_cfg: ioa config struct
8189 *
8190 * Fetches the unit check buffer from the adapter by clocking the data
8191 * through the mailbox register.
8192 *
8193 * Return value:
8194 * nothing
8195 **/
8196static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8197{
8198 unsigned long mailbox;
8199 struct ipr_hostrcb *hostrcb;
8200 struct ipr_uc_sdt sdt;
8201 int rc, length;
65f56475 8202 u32 ioasc;
8203
8204 mailbox = readl(ioa_cfg->ioa_mailbox);
8205
dcbad00e 8206 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8207 ipr_unit_check_no_data(ioa_cfg);
8208 return;
8209 }
8210
8211 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8212 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8213 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8214
8215 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8216 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8217 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8218 ipr_unit_check_no_data(ioa_cfg);
8219 return;
8220 }
8221
8222 /* Find length of the first sdt entry (UC buffer) */
8223 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8224 length = be32_to_cpu(sdt.entry[0].end_token);
8225 else
8226 length = (be32_to_cpu(sdt.entry[0].end_token) -
8227 be32_to_cpu(sdt.entry[0].start_token)) &
8228 IPR_FMT2_MBX_ADDR_MASK;
8229
8230 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8231 struct ipr_hostrcb, queue);
8232 list_del(&hostrcb->queue);
8233 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8234
8235 rc = ipr_get_ldump_data_section(ioa_cfg,
dcbad00e 8236 be32_to_cpu(sdt.entry[0].start_token),
8237 (__be32 *)&hostrcb->hcam,
8238 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8239
65f56475 8240 if (!rc) {
1da177e4 8241 ipr_handle_log_data(ioa_cfg, hostrcb);
4565e370 8242 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8243 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8244 ioa_cfg->sdt_state == GET_DUMP)
8245 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8246 } else
8247 ipr_unit_check_no_data(ioa_cfg);
8248
8249 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8250}
8251
8252/**
8253 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8254 * @ipr_cmd: ipr command struct
8255 *
8256 * Description: This function will call to get the unit check buffer.
8257 *
8258 * Return value:
8259 * IPR_RC_JOB_RETURN
8260 **/
8261static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8262{
8263 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8264
8265 ENTER;
8266 ioa_cfg->ioa_unit_checked = 0;
8267 ipr_get_unit_check_buffer(ioa_cfg);
8268 ipr_cmd->job_step = ipr_reset_alert;
8269 ipr_reset_start_timer(ipr_cmd, 0);
8270
8271 LEAVE;
8272 return IPR_RC_JOB_RETURN;
8273}
8274
8275/**
8276 * ipr_reset_restore_cfg_space - Restore PCI config space.
8277 * @ipr_cmd: ipr command struct
8278 *
8279 * Description: This function restores the saved PCI config space of
8280 * the adapter, fails all outstanding ops back to the callers, and
8281 * fetches the dump/unit check if applicable to this reset.
8282 *
8283 * Return value:
8284 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8285 **/
8286static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8287{
8288 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
630ad831 8289 u32 int_reg;
8290
8291 ENTER;
99c965dd 8292 ioa_cfg->pdev->state_saved = true;
1d3c16a8 8293 pci_restore_state(ioa_cfg->pdev);
8294
8295 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
96d21f00 8296 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8297 return IPR_RC_JOB_CONTINUE;
8298 }
8299
8300 ipr_fail_all_ops(ioa_cfg);
8301
8302 if (ioa_cfg->sis64) {
8303 /* Set the adapter to the correct endian mode. */
8304 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8305 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8306 }
8307
1da177e4 8308 if (ioa_cfg->ioa_unit_checked) {
8309 if (ioa_cfg->sis64) {
8310 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8311 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8312 return IPR_RC_JOB_RETURN;
8313 } else {
8314 ioa_cfg->ioa_unit_checked = 0;
8315 ipr_get_unit_check_buffer(ioa_cfg);
8316 ipr_cmd->job_step = ipr_reset_alert;
8317 ipr_reset_start_timer(ipr_cmd, 0);
8318 return IPR_RC_JOB_RETURN;
8319 }
8320 }
8321
8322 if (ioa_cfg->in_ioa_bringdown) {
8323 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8324 } else {
8325 ipr_cmd->job_step = ipr_reset_enable_ioa;
8326
8327 if (GET_DUMP == ioa_cfg->sdt_state) {
41e9a696 8328 ioa_cfg->sdt_state = READ_DUMP;
4c647e90 8329 ioa_cfg->dump_timeout = 0;
8330 if (ioa_cfg->sis64)
8331 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8332 else
8333 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8334 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8335 schedule_work(&ioa_cfg->work_q);
8336 return IPR_RC_JOB_RETURN;
8337 }
8338 }
8339
438b0331 8340 LEAVE;
8341 return IPR_RC_JOB_CONTINUE;
8342}
8343
8344/**
8345 * ipr_reset_bist_done - BIST has completed on the adapter.
8346 * @ipr_cmd: ipr command struct
8347 *
8348 * Description: Unblock config space and resume the reset process.
8349 *
8350 * Return value:
8351 * IPR_RC_JOB_CONTINUE
8352 **/
8353static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8354{
8355 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8356
e619e1a7 8357 ENTER;
8358 if (ioa_cfg->cfg_locked)
8359 pci_cfg_access_unlock(ioa_cfg->pdev);
8360 ioa_cfg->cfg_locked = 0;
8361 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8362 LEAVE;
8363 return IPR_RC_JOB_CONTINUE;
8364}
8365
8366/**
8367 * ipr_reset_start_bist - Run BIST on the adapter.
8368 * @ipr_cmd: ipr command struct
8369 *
8370 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8371 *
8372 * Return value:
8373 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8374 **/
8375static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8376{
8377 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
cb237ef7 8378 int rc = PCIBIOS_SUCCESSFUL;
8379
8380 ENTER;
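	/* SIS-64 (MMIO) chips kick off BIST through a uproc interrupt register; older chips use the standard PCI BIST config register */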
8381 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8382 writel(IPR_UPROCI_SIS64_START_BIST,
8383 ioa_cfg->regs.set_uproc_interrupt_reg32);
8384 else
8385 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8386
8387 if (rc == PCIBIOS_SUCCESSFUL) {
e619e1a7 8388 ipr_cmd->job_step = ipr_reset_bist_done;
8389 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8390 rc = IPR_RC_JOB_RETURN;
cb237ef7 8391 } else {
8392 if (ioa_cfg->cfg_locked)
8393 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8394 ioa_cfg->cfg_locked = 0;
8395 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8396 rc = IPR_RC_JOB_CONTINUE;
8397 }
8398
8399 LEAVE;
8400 return rc;
8401}
8402
8403/**
8404 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8405 * @ipr_cmd: ipr command struct
8406 *
8407 * Description: This clears PCI reset to the adapter and delays two seconds.
8408 *
8409 * Return value:
8410 * IPR_RC_JOB_RETURN
8411 **/
8412static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8413{
8414 ENTER;
463fc696
BK
8415 ipr_cmd->job_step = ipr_reset_bist_done;
8416 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8417 LEAVE;
8418 return IPR_RC_JOB_RETURN;
8419}
8420
2796ca5e
BK
8421/**
8422 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8423 * @work: work struct
8424 *
8425 * Description: This pulses warm reset to a slot.
8426 *
8427 **/
8428static void ipr_reset_reset_work(struct work_struct *work)
8429{
8430 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8431 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8432 struct pci_dev *pdev = ioa_cfg->pdev;
8433 unsigned long lock_flags = 0;
8434
8435 ENTER;
8436 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8437 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8438 pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8439
8440 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8441 if (ioa_cfg->reset_cmd == ipr_cmd)
8442 ipr_reset_ioa_job(ipr_cmd);
8443 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8444 LEAVE;
8445}
8446
463fc696
BK
8447/**
8448 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8449 * @ipr_cmd: ipr command struct
8450 *
8451 * Description: This asserts PCI reset to the adapter.
8452 *
8453 * Return value:
8454 * IPR_RC_JOB_RETURN
8455 **/
8456static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8457{
8458 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
463fc696
BK
8459
8460 ENTER;
2796ca5e
BK
8461 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8462 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
463fc696 8463 ipr_cmd->job_step = ipr_reset_slot_reset_done;
463fc696
BK
8464 LEAVE;
8465 return IPR_RC_JOB_RETURN;
8466}
8467
fb51ccbf
JK
8468/**
8469 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8470 * @ipr_cmd: ipr command struct
8471 *
8472 * Description: This attempts to block config access to the IOA.
8473 *
8474 * Return value:
8475 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8476 **/
8477static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8478{
8479 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8480 int rc = IPR_RC_JOB_CONTINUE;
8481
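	/* Try to take exclusive PCI config access. If another user holds it,
	 * retry every IPR_CHECK_FOR_RESET_TIMEOUT until u.time_left runs out,
	 * then proceed with the reset without the lock.
	 */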
8482 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8483 ioa_cfg->cfg_locked = 1;
8484 ipr_cmd->job_step = ioa_cfg->reset;
8485 } else {
8486 if (ipr_cmd->u.time_left) {
8487 rc = IPR_RC_JOB_RETURN;
8488 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8489 ipr_reset_start_timer(ipr_cmd,
8490 IPR_CHECK_FOR_RESET_TIMEOUT);
8491 } else {
8492 ipr_cmd->job_step = ioa_cfg->reset;
8493 dev_err(&ioa_cfg->pdev->dev,
8494 "Timed out waiting to lock config access. Resetting anyway.\n");
8495 }
8496 }
8497
8498 return rc;
8499}
8500
8501/**
8502 * ipr_reset_block_config_access - Block config access to the IOA
8503 * @ipr_cmd: ipr command struct
8504 *
8505 * Description: This attempts to block config access to the IOA
8506 *
8507 * Return value:
8508 * IPR_RC_JOB_CONTINUE
8509 **/
8510static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8511{
8512 ipr_cmd->ioa_cfg->cfg_locked = 0;
8513 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8514 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8515 return IPR_RC_JOB_CONTINUE;
8516}
8517
1da177e4
LT
8518/**
8519 * ipr_reset_allowed - Query whether or not IOA can be reset
8520 * @ioa_cfg: ioa config struct
8521 *
8522 * Return value:
8523 * 0 if reset not allowed / non-zero if reset is allowed
8524 **/
8525static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8526{
8527 volatile u32 temp_reg;
8528
8529 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8530 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8531}
8532
8533/**
8534 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8535 * @ipr_cmd: ipr command struct
8536 *
8537 * Description: This function waits for adapter permission to run BIST,
8538 * then runs BIST. If the adapter does not give permission after a
8539 * reasonable time, we will reset the adapter anyway. The impact of
 8540 * resetting the adapter without warning it is the risk of
8541 * losing the persistent error log on the adapter. If the adapter is
8542 * reset while it is writing to the flash on the adapter, the flash
8543 * segment will have bad ECC and be zeroed.
8544 *
8545 * Return value:
8546 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8547 **/
8548static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8549{
8550 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8551 int rc = IPR_RC_JOB_RETURN;
8552
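	/* Poll the sense interrupt register until the adapter clears
	 * IPR_PCII_CRITICAL_OPERATION or u.time_left expires, then move on
	 * to blocking config access and running BIST.
	 */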
8553 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8554 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8555 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8556 } else {
fb51ccbf 8557 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8558 rc = IPR_RC_JOB_CONTINUE;
8559 }
8560
8561 return rc;
8562}
8563
8564/**
8701f185 8565 * ipr_reset_alert - Alert the adapter of a pending reset
1da177e4
LT
8566 * @ipr_cmd: ipr command struct
8567 *
8568 * Description: This function alerts the adapter that it will be reset.
8569 * If memory space is not currently enabled, proceed directly
8570 * to running BIST on the adapter. The timer must always be started
8571 * so we guarantee we do not run BIST from ipr_isr.
8572 *
8573 * Return value:
8574 * IPR_RC_JOB_RETURN
8575 **/
8576static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8577{
8578 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8579 u16 cmd_reg;
8580 int rc;
8581
8582 ENTER;
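	/* Only raise the reset alert when memory space is enabled, since the
	 * alert is delivered through an MMIO write to the microprocessor
	 * interrupt register; otherwise skip straight to blocking config access.
	 */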
8583 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8584
8585 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8586 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
214777ba 8587 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
8588 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8589 } else {
fb51ccbf 8590 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8591 }
8592
8593 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8594 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8595
8596 LEAVE;
8597 return IPR_RC_JOB_RETURN;
8598}
8599
4fdd7c7a
BK
8600/**
8601 * ipr_reset_quiesce_done - Complete IOA disconnect
8602 * @ipr_cmd: ipr command struct
8603 *
8604 * Description: Freeze the adapter to complete quiesce processing
8605 *
8606 * Return value:
8607 * IPR_RC_JOB_CONTINUE
8608 **/
8609static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8610{
8611 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8612
8613 ENTER;
8614 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8615 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8616 LEAVE;
8617 return IPR_RC_JOB_CONTINUE;
8618}
8619
8620/**
8621 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8622 * @ipr_cmd: ipr command struct
8623 *
8624 * Description: Ensure nothing is outstanding to the IOA and
8625 * proceed with IOA disconnect. Otherwise reset the IOA.
8626 *
8627 * Return value:
8628 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8629 **/
8630static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8631{
8632 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8633 struct ipr_cmnd *loop_cmd;
8634 struct ipr_hrr_queue *hrrq;
8635 int rc = IPR_RC_JOB_CONTINUE;
8636 int count = 0;
8637
8638 ENTER;
8639 ipr_cmd->job_step = ipr_reset_quiesce_done;
8640
8641 for_each_hrrq(hrrq, ioa_cfg) {
8642 spin_lock(&hrrq->_lock);
8643 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8644 count++;
8645 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8646 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8647 rc = IPR_RC_JOB_RETURN;
8648 break;
8649 }
8650 spin_unlock(&hrrq->_lock);
8651
8652 if (count)
8653 break;
8654 }
8655
8656 LEAVE;
8657 return rc;
8658}
8659
8660/**
8661 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8662 * @ipr_cmd: ipr command struct
8663 *
 8664 * Description: Cancel any outstanding HCAMs to the IOA.
8665 *
8666 * Return value:
8667 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8668 **/
8669static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8670{
8671 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8672 int rc = IPR_RC_JOB_CONTINUE;
8673 struct ipr_cmd_pkt *cmd_pkt;
8674 struct ipr_cmnd *hcam_cmd;
8675 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8676
8677 ENTER;
8678 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8679
8680 if (!hrrq->ioa_is_dead) {
8681 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8682 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8683 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8684 continue;
8685
8686 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8687 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8688 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
8689 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8690 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8691 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
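				/* Scatter the HCAM's 64-bit IOARCB DMA address across the
				 * CDB: the upper 32 bits go in bytes 10-13 and the lower
				 * 32 bits in bytes 2-5, most-significant byte first.
				 */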
8692 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8693 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8694 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8695 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8696 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8697 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8698 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8699 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8700
8701 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8702 IPR_CANCEL_TIMEOUT);
8703
8704 rc = IPR_RC_JOB_RETURN;
8705 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8706 break;
8707 }
8708 }
8709 } else
8710 ipr_cmd->job_step = ipr_reset_alert;
8711
8712 LEAVE;
8713 return rc;
8714}
8715
1da177e4
LT
8716/**
8717 * ipr_reset_ucode_download_done - Microcode download completion
8718 * @ipr_cmd: ipr command struct
8719 *
8720 * Description: This function unmaps the microcode download buffer.
8721 *
8722 * Return value:
8723 * IPR_RC_JOB_CONTINUE
8724 **/
8725static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8726{
8727 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8728 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8729
d73341bf 8730 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
1da177e4
LT
8731 sglist->num_sg, DMA_TO_DEVICE);
8732
8733 ipr_cmd->job_step = ipr_reset_alert;
8734 return IPR_RC_JOB_CONTINUE;
8735}
8736
8737/**
8738 * ipr_reset_ucode_download - Download microcode to the adapter
8739 * @ipr_cmd: ipr command struct
8740 *
 8741 * Description: This function checks to see if there is microcode
8742 * to download to the adapter. If there is, a download is performed.
8743 *
8744 * Return value:
8745 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8746 **/
8747static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8748{
8749 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8750 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8751
8752 ENTER;
8753 ipr_cmd->job_step = ipr_reset_alert;
8754
8755 if (!sglist)
8756 return IPR_RC_JOB_CONTINUE;
8757
8758 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8759 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8760 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8761 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
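	/* CDB bytes 6-8 carry the 24-bit microcode buffer length,
	 * most-significant byte first, per the SCSI WRITE BUFFER format.
	 */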
8762 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8763 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8764 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8765
a32c055f
WB
8766 if (ioa_cfg->sis64)
8767 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8768 else
8769 ipr_build_ucode_ioadl(ipr_cmd, sglist);
1da177e4
LT
8770 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8771
8772 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8773 IPR_WRITE_BUFFER_TIMEOUT);
8774
8775 LEAVE;
8776 return IPR_RC_JOB_RETURN;
8777}
8778
8779/**
8780 * ipr_reset_shutdown_ioa - Shutdown the adapter
8781 * @ipr_cmd: ipr command struct
8782 *
8783 * Description: This function issues an adapter shutdown of the
8784 * specified type to the specified adapter as part of the
8785 * adapter reset job.
8786 *
8787 * Return value:
8788 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8789 **/
8790static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8791{
8792 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8793 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8794 unsigned long timeout;
8795 int rc = IPR_RC_JOB_CONTINUE;
8796
8797 ENTER;
4fdd7c7a
BK
8798 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
8799 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8800 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
56d6aa33 8801 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
1da177e4
LT
8802 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8803 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8804 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8805 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8806
ac09c349
BK
8807 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8808 timeout = IPR_SHUTDOWN_TIMEOUT;
1da177e4
LT
8809 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8810 timeout = IPR_INTERNAL_TIMEOUT;
ac09c349
BK
8811 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8812 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
1da177e4 8813 else
ac09c349 8814 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
1da177e4
LT
8815
8816 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8817
8818 rc = IPR_RC_JOB_RETURN;
8819 ipr_cmd->job_step = ipr_reset_ucode_download;
8820 } else
8821 ipr_cmd->job_step = ipr_reset_alert;
8822
8823 LEAVE;
8824 return rc;
8825}
8826
8827/**
8828 * ipr_reset_ioa_job - Adapter reset job
8829 * @ipr_cmd: ipr command struct
8830 *
8831 * Description: This function is the job router for the adapter reset job.
8832 *
8833 * Return value:
8834 * none
8835 **/
8836static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8837{
8838 u32 rc, ioasc;
1da177e4
LT
8839 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8840
8841 do {
96d21f00 8842 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
8843
8844 if (ioa_cfg->reset_cmd != ipr_cmd) {
8845 /*
8846 * We are doing nested adapter resets and this is
8847 * not the current reset job.
8848 */
05a6538a 8849 list_add_tail(&ipr_cmd->queue,
8850 &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
8851 return;
8852 }
8853
8854 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e 8855 rc = ipr_cmd->job_step_failed(ipr_cmd);
8856 if (rc == IPR_RC_JOB_RETURN)
8857 return;
1da177e4
LT
8858 }
8859
8860 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 8861 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
1da177e4 8862 rc = ipr_cmd->job_step(ipr_cmd);
203fa3fe 8863 } while (rc == IPR_RC_JOB_CONTINUE);
1da177e4
LT
8864}
8865
8866/**
8867 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8868 * @ioa_cfg: ioa config struct
8869 * @job_step: first job step of reset job
8870 * @shutdown_type: shutdown type
8871 *
8872 * Description: This function will initiate the reset of the given adapter
8873 * starting at the selected job step.
8874 * If the caller needs to wait on the completion of the reset,
8875 * the caller must sleep on the reset_wait_q.
8876 *
8877 * Return value:
8878 * none
8879 **/
8880static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8881 int (*job_step) (struct ipr_cmnd *),
8882 enum ipr_shutdown_type shutdown_type)
8883{
8884 struct ipr_cmnd *ipr_cmd;
56d6aa33 8885 int i;
1da177e4
LT
8886
8887 ioa_cfg->in_reset_reload = 1;
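	/* Stop every HRRQ from accepting new commands before kicking off the
	 * reset job; the write barrier below orders the flag updates ahead of
	 * any later command submission.
	 */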
56d6aa33 8888 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8889 spin_lock(&ioa_cfg->hrrq[i]._lock);
8890 ioa_cfg->hrrq[i].allow_cmds = 0;
8891 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8892 }
8893 wmb();
bfae7820
BK
8894 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8895 scsi_block_requests(ioa_cfg->host);
1da177e4
LT
8896
8897 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8898 ioa_cfg->reset_cmd = ipr_cmd;
8899 ipr_cmd->job_step = job_step;
8900 ipr_cmd->u.shutdown_type = shutdown_type;
8901
8902 ipr_reset_ioa_job(ipr_cmd);
8903}
8904
8905/**
8906 * ipr_initiate_ioa_reset - Initiate an adapter reset
8907 * @ioa_cfg: ioa config struct
8908 * @shutdown_type: shutdown type
8909 *
8910 * Description: This function will initiate the reset of the given adapter.
8911 * If the caller needs to wait on the completion of the reset,
8912 * the caller must sleep on the reset_wait_q.
8913 *
8914 * Return value:
8915 * none
8916 **/
8917static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8918 enum ipr_shutdown_type shutdown_type)
8919{
56d6aa33 8920 int i;
8921
8922 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
1da177e4
LT
8923 return;
8924
41e9a696
BK
8925 if (ioa_cfg->in_reset_reload) {
8926 if (ioa_cfg->sdt_state == GET_DUMP)
8927 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8928 else if (ioa_cfg->sdt_state == READ_DUMP)
8929 ioa_cfg->sdt_state = ABORT_DUMP;
8930 }
1da177e4
LT
8931
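	/* If the adapter has already been through IPR_NUM_RESET_RELOAD_RETRIES
	 * resets without recovering, take it offline and fail outstanding ops
	 * rather than retrying forever.
	 */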
8932 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8933 dev_err(&ioa_cfg->pdev->dev,
8934 "IOA taken offline - error recovery failed\n");
8935
8936 ioa_cfg->reset_retries = 0;
56d6aa33 8937 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8938 spin_lock(&ioa_cfg->hrrq[i]._lock);
8939 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8940 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8941 }
8942 wmb();
1da177e4
LT
8943
8944 if (ioa_cfg->in_ioa_bringdown) {
8945 ioa_cfg->reset_cmd = NULL;
8946 ioa_cfg->in_reset_reload = 0;
8947 ipr_fail_all_ops(ioa_cfg);
8948 wake_up_all(&ioa_cfg->reset_wait_q);
8949
bfae7820
BK
8950 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8951 spin_unlock_irq(ioa_cfg->host->host_lock);
8952 scsi_unblock_requests(ioa_cfg->host);
8953 spin_lock_irq(ioa_cfg->host->host_lock);
8954 }
1da177e4
LT
8955 return;
8956 } else {
8957 ioa_cfg->in_ioa_bringdown = 1;
8958 shutdown_type = IPR_SHUTDOWN_NONE;
8959 }
8960 }
8961
8962 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8963 shutdown_type);
8964}
8965
f8a88b19
LV
8966/**
8967 * ipr_reset_freeze - Hold off all I/O activity
8968 * @ipr_cmd: ipr command struct
8969 *
8970 * Description: If the PCI slot is frozen, hold off all I/O
8971 * activity; then, as soon as the slot is available again,
8972 * initiate an adapter reset.
8973 */
8974static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8975{
56d6aa33 8976 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8977 int i;
8978
f8a88b19 8979 /* Disallow new interrupts, avoid loop */
56d6aa33 8980 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8981 spin_lock(&ioa_cfg->hrrq[i]._lock);
8982 ioa_cfg->hrrq[i].allow_interrupts = 0;
8983 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8984 }
8985 wmb();
05a6538a 8986 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
f8a88b19
LV
8987 ipr_cmd->done = ipr_reset_ioa_job;
8988 return IPR_RC_JOB_RETURN;
8989}
8990
6270e593
BK
8991/**
8992 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8993 * @pdev: PCI device struct
8994 *
8995 * Description: This routine is called to tell us that the MMIO
8996 * access to the IOA has been restored
8997 */
8998static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8999{
9000 unsigned long flags = 0;
9001 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9002
9003 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9004 if (!ioa_cfg->probe_done)
9005 pci_save_state(pdev);
9006 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9007 return PCI_ERS_RESULT_NEED_RESET;
9008}
9009
f8a88b19
LV
9010/**
9011 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9012 * @pdev: PCI device struct
9013 *
9014 * Description: This routine is called to tell us that the PCI bus
9015 * is down. Can't do anything here, except put the device driver
9016 * into a holding pattern, waiting for the PCI bus to come back.
9017 */
9018static void ipr_pci_frozen(struct pci_dev *pdev)
9019{
9020 unsigned long flags = 0;
9021 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9022
9023 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
9024 if (ioa_cfg->probe_done)
9025 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
f8a88b19
LV
9026 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9027}
9028
9029/**
9030 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9031 * @pdev: PCI device struct
9032 *
9033 * Description: This routine is called by the pci error recovery
9034 * code after the PCI slot has been reset, just before we
9035 * should resume normal operations.
9036 */
9037static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9038{
9039 unsigned long flags = 0;
9040 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9041
9042 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
9043 if (ioa_cfg->probe_done) {
9044 if (ioa_cfg->needs_warm_reset)
9045 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9046 else
9047 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9048 IPR_SHUTDOWN_NONE);
9049 } else
9050 wake_up_all(&ioa_cfg->eeh_wait_q);
f8a88b19
LV
9051 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9052 return PCI_ERS_RESULT_RECOVERED;
9053}
9054
9055/**
9056 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9057 * @pdev: PCI device struct
9058 *
9059 * Description: This routine is called when the PCI bus has
9060 * permanently failed.
9061 */
9062static void ipr_pci_perm_failure(struct pci_dev *pdev)
9063{
9064 unsigned long flags = 0;
9065 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
56d6aa33 9066 int i;
f8a88b19
LV
9067
9068 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
9069 if (ioa_cfg->probe_done) {
9070 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9071 ioa_cfg->sdt_state = ABORT_DUMP;
9072 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9073 ioa_cfg->in_ioa_bringdown = 1;
9074 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9075 spin_lock(&ioa_cfg->hrrq[i]._lock);
9076 ioa_cfg->hrrq[i].allow_cmds = 0;
9077 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9078 }
9079 wmb();
9080 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9081 } else
9082 wake_up_all(&ioa_cfg->eeh_wait_q);
f8a88b19
LV
9083 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9084}
9085
9086/**
9087 * ipr_pci_error_detected - Called when a PCI error is detected.
9088 * @pdev: PCI device struct
9089 * @state: PCI channel state
9090 *
9091 * Description: Called when a PCI error is detected.
9092 *
9093 * Return value:
9094 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9095 */
9096static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9097 pci_channel_state_t state)
9098{
9099 switch (state) {
9100 case pci_channel_io_frozen:
9101 ipr_pci_frozen(pdev);
6270e593 9102 return PCI_ERS_RESULT_CAN_RECOVER;
f8a88b19
LV
9103 case pci_channel_io_perm_failure:
9104 ipr_pci_perm_failure(pdev);
9105 return PCI_ERS_RESULT_DISCONNECT;
9106 break;
9107 default:
9108 break;
9109 }
9110 return PCI_ERS_RESULT_NEED_RESET;
9111}
9112
1da177e4
LT
9113/**
9114 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9115 * @ioa_cfg: ioa cfg struct
9116 *
 9117 * Description: This is the second phase of adapter initialization.
 9118 * This function takes care of initializing the adapter to the point
 9119 * where it can accept new commands.
 9120 *
9121 * Return value:
b1c11812 9122 * 0 on success / -EIO on failure
1da177e4 9123 **/
6f039790 9124static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9125{
9126 int rc = 0;
9127 unsigned long host_lock_flags = 0;
9128
9129 ENTER;
9130 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9131 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
6270e593 9132 ioa_cfg->probe_done = 1;
ce155cce 9133 if (ioa_cfg->needs_hard_reset) {
9134 ioa_cfg->needs_hard_reset = 0;
9135 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9136 } else
9137 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9138 IPR_SHUTDOWN_NONE);
1da177e4 9139 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
1da177e4
LT
9140
9141 LEAVE;
9142 return rc;
9143}
9144
9145/**
9146 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9147 * @ioa_cfg: ioa config struct
9148 *
9149 * Return value:
9150 * none
9151 **/
9152static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9153{
9154 int i;
9155
a65e8f12
BK
9156 if (ioa_cfg->ipr_cmnd_list) {
9157 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9158 if (ioa_cfg->ipr_cmnd_list[i])
9159 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9160 ioa_cfg->ipr_cmnd_list[i],
9161 ioa_cfg->ipr_cmnd_list_dma[i]);
1da177e4 9162
a65e8f12
BK
9163 ioa_cfg->ipr_cmnd_list[i] = NULL;
9164 }
1da177e4
LT
9165 }
9166
9167 if (ioa_cfg->ipr_cmd_pool)
d73341bf 9168 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
1da177e4 9169
89aad428
BK
9170 kfree(ioa_cfg->ipr_cmnd_list);
9171 kfree(ioa_cfg->ipr_cmnd_list_dma);
9172 ioa_cfg->ipr_cmnd_list = NULL;
9173 ioa_cfg->ipr_cmnd_list_dma = NULL;
1da177e4
LT
9174 ioa_cfg->ipr_cmd_pool = NULL;
9175}
9176
9177/**
9178 * ipr_free_mem - Frees memory allocated for an adapter
9179 * @ioa_cfg: ioa cfg struct
9180 *
9181 * Return value:
9182 * nothing
9183 **/
9184static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9185{
9186 int i;
9187
9188 kfree(ioa_cfg->res_entries);
d73341bf
AB
9189 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9190 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
1da177e4 9191 ipr_free_cmd_blks(ioa_cfg);
05a6538a 9192
9193 for (i = 0; i < ioa_cfg->hrrq_num; i++)
d73341bf
AB
9194 dma_free_coherent(&ioa_cfg->pdev->dev,
9195 sizeof(u32) * ioa_cfg->hrrq[i].size,
9196 ioa_cfg->hrrq[i].host_rrq,
9197 ioa_cfg->hrrq[i].host_rrq_dma);
05a6538a 9198
d73341bf
AB
9199 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9200 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
1da177e4
LT
9201
9202 for (i = 0; i < IPR_NUM_HCAMS; i++) {
d73341bf
AB
9203 dma_free_coherent(&ioa_cfg->pdev->dev,
9204 sizeof(struct ipr_hostrcb),
9205 ioa_cfg->hostrcb[i],
9206 ioa_cfg->hostrcb_dma[i]);
1da177e4
LT
9207 }
9208
9209 ipr_free_dump(ioa_cfg);
1da177e4
LT
9210 kfree(ioa_cfg->trace);
9211}
9212
9213/**
2796ca5e
BK
9214 * ipr_free_irqs - Free all allocated IRQs for the adapter.
 9215 * @ioa_cfg: ioa config struct
1da177e4 9216 *
2796ca5e 9217 * This function frees all allocated IRQs for the
1da177e4
LT
9218 * specified adapter.
9219 *
9220 * Return value:
9221 * none
9222 **/
2796ca5e 9223static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9224{
9225 struct pci_dev *pdev = ioa_cfg->pdev;
9226
05a6538a 9227 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9228 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9229 int i;
9230 for (i = 0; i < ioa_cfg->nvectors; i++)
9231 free_irq(ioa_cfg->vectors_info[i].vec,
2796ca5e 9232 &ioa_cfg->hrrq[i]);
05a6538a 9233 } else
9234 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
9235
56d6aa33 9236 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
05a6538a 9237 pci_disable_msi(pdev);
56d6aa33 9238 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9239 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
05a6538a 9240 pci_disable_msix(pdev);
56d6aa33 9241 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9242 }
2796ca5e 9243}
05a6538a 9244
2796ca5e
BK
9245/**
9246 * ipr_free_all_resources - Free all allocated resources for an adapter.
 9247 * @ioa_cfg: ioa config struct
9248 *
9249 * This function frees all allocated resources for the
9250 * specified adapter.
9251 *
9252 * Return value:
9253 * none
9254 **/
9255static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9256{
9257 struct pci_dev *pdev = ioa_cfg->pdev;
05a6538a 9258
2796ca5e
BK
9259 ENTER;
9260 ipr_free_irqs(ioa_cfg);
9261 if (ioa_cfg->reset_work_q)
9262 destroy_workqueue(ioa_cfg->reset_work_q);
1da177e4
LT
9263 iounmap(ioa_cfg->hdw_dma_regs);
9264 pci_release_regions(pdev);
9265 ipr_free_mem(ioa_cfg);
9266 scsi_host_put(ioa_cfg->host);
9267 pci_disable_device(pdev);
9268 LEAVE;
9269}
9270
9271/**
9272 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9273 * @ioa_cfg: ioa config struct
9274 *
9275 * Return value:
9276 * 0 on success / -ENOMEM on allocation failure
9277 **/
6f039790 9278static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9279{
9280 struct ipr_cmnd *ipr_cmd;
9281 struct ipr_ioarcb *ioarcb;
9282 dma_addr_t dma_addr;
05a6538a 9283 int i, entries_each_hrrq, hrrq_id = 0;
1da177e4 9284
d73341bf 9285 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
203fa3fe 9286 sizeof(struct ipr_cmnd), 512, 0);
1da177e4
LT
9287
9288 if (!ioa_cfg->ipr_cmd_pool)
9289 return -ENOMEM;
9290
89aad428
BK
9291 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9292 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9293
9294 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9295 ipr_free_cmd_blks(ioa_cfg);
9296 return -ENOMEM;
9297 }
9298
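	/* With multiple HRRQs, queue 0 is reserved for the driver's internal
	 * commands and the remaining command blocks are split evenly across the
	 * other queues; a single queue simply owns every command block.
	 */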
05a6538a 9299 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9300 if (ioa_cfg->hrrq_num > 1) {
9301 if (i == 0) {
9302 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9303 ioa_cfg->hrrq[i].min_cmd_id = 0;
9304 ioa_cfg->hrrq[i].max_cmd_id =
9305 (entries_each_hrrq - 1);
9306 } else {
9307 entries_each_hrrq =
9308 IPR_NUM_BASE_CMD_BLKS/
9309 (ioa_cfg->hrrq_num - 1);
9310 ioa_cfg->hrrq[i].min_cmd_id =
9311 IPR_NUM_INTERNAL_CMD_BLKS +
9312 (i - 1) * entries_each_hrrq;
9313 ioa_cfg->hrrq[i].max_cmd_id =
9314 (IPR_NUM_INTERNAL_CMD_BLKS +
9315 i * entries_each_hrrq - 1);
9316 }
9317 } else {
9318 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9319 ioa_cfg->hrrq[i].min_cmd_id = 0;
9320 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9321 }
9322 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9323 }
9324
9325 BUG_ON(ioa_cfg->hrrq_num == 0);
9326
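	/* Hand any command blocks left over from the division above to the
	 * last HRRQ so that all IPR_NUM_CMD_BLKS are accounted for.
	 */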
9327 i = IPR_NUM_CMD_BLKS -
9328 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9329 if (i > 0) {
9330 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9331 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9332 }
9333
1da177e4 9334 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
d73341bf 9335 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
1da177e4
LT
9336
9337 if (!ipr_cmd) {
9338 ipr_free_cmd_blks(ioa_cfg);
9339 return -ENOMEM;
9340 }
9341
9342 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9343 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9344 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9345
9346 ioarcb = &ipr_cmd->ioarcb;
a32c055f
WB
9347 ipr_cmd->dma_addr = dma_addr;
9348 if (ioa_cfg->sis64)
9349 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9350 else
9351 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9352
1da177e4 9353 ioarcb->host_response_handle = cpu_to_be32(i << 2);
a32c055f
WB
9354 if (ioa_cfg->sis64) {
9355 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9356 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9357 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
96d21f00 9358 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
a32c055f
WB
9359 } else {
9360 ioarcb->write_ioadl_addr =
9361 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9362 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9363 ioarcb->ioasa_host_pci_addr =
96d21f00 9364 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
a32c055f 9365 }
1da177e4
LT
9366 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9367 ipr_cmd->cmd_index = i;
9368 ipr_cmd->ioa_cfg = ioa_cfg;
9369 ipr_cmd->sense_buffer_dma = dma_addr +
9370 offsetof(struct ipr_cmnd, sense_buffer);
9371
05a6538a 9372 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9373 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9374 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9375 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9376 hrrq_id++;
1da177e4
LT
9377 }
9378
9379 return 0;
9380}
9381
9382/**
9383 * ipr_alloc_mem - Allocate memory for an adapter
9384 * @ioa_cfg: ioa config struct
9385 *
9386 * Return value:
9387 * 0 on success / non-zero for error
9388 **/
6f039790 9389static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9390{
9391 struct pci_dev *pdev = ioa_cfg->pdev;
9392 int i, rc = -ENOMEM;
9393
9394 ENTER;
0bc42e35 9395 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
3e7ebdfa 9396 ioa_cfg->max_devs_supported, GFP_KERNEL);
1da177e4
LT
9397
9398 if (!ioa_cfg->res_entries)
9399 goto out;
9400
3e7ebdfa 9401 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
1da177e4 9402 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
3e7ebdfa
WB
9403 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9404 }
1da177e4 9405
d73341bf
AB
9406 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9407 sizeof(struct ipr_misc_cbs),
9408 &ioa_cfg->vpd_cbs_dma,
9409 GFP_KERNEL);
1da177e4
LT
9410
9411 if (!ioa_cfg->vpd_cbs)
9412 goto out_free_res_entries;
9413
9414 if (ipr_alloc_cmd_blks(ioa_cfg))
9415 goto out_free_vpd_cbs;
9416
05a6538a 9417 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
d73341bf 9418 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
05a6538a 9419 sizeof(u32) * ioa_cfg->hrrq[i].size,
d73341bf
AB
9420 &ioa_cfg->hrrq[i].host_rrq_dma,
9421 GFP_KERNEL);
05a6538a 9422
9423 if (!ioa_cfg->hrrq[i].host_rrq) {
9424 while (--i > 0)
d73341bf 9425 dma_free_coherent(&pdev->dev,
05a6538a 9426 sizeof(u32) * ioa_cfg->hrrq[i].size,
9427 ioa_cfg->hrrq[i].host_rrq,
9428 ioa_cfg->hrrq[i].host_rrq_dma);
9429 goto out_ipr_free_cmd_blocks;
9430 }
9431 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9432 }
1da177e4 9433
d73341bf
AB
9434 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9435 ioa_cfg->cfg_table_size,
9436 &ioa_cfg->cfg_table_dma,
9437 GFP_KERNEL);
1da177e4 9438
3e7ebdfa 9439 if (!ioa_cfg->u.cfg_table)
1da177e4
LT
9440 goto out_free_host_rrq;
9441
9442 for (i = 0; i < IPR_NUM_HCAMS; i++) {
d73341bf
AB
9443 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9444 sizeof(struct ipr_hostrcb),
9445 &ioa_cfg->hostrcb_dma[i],
9446 GFP_KERNEL);
1da177e4
LT
9447
9448 if (!ioa_cfg->hostrcb[i])
9449 goto out_free_hostrcb_dma;
9450
9451 ioa_cfg->hostrcb[i]->hostrcb_dma =
9452 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
49dc6a18 9453 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
1da177e4
LT
9454 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9455 }
9456
0bc42e35 9457 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
1da177e4
LT
9458 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9459
9460 if (!ioa_cfg->trace)
9461 goto out_free_hostrcb_dma;
9462
1da177e4
LT
9463 rc = 0;
9464out:
9465 LEAVE;
9466 return rc;
9467
9468out_free_hostrcb_dma:
9469 while (i-- > 0) {
d73341bf
AB
9470 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9471 ioa_cfg->hostrcb[i],
9472 ioa_cfg->hostrcb_dma[i]);
1da177e4 9473 }
d73341bf
AB
9474 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9475 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
1da177e4 9476out_free_host_rrq:
05a6538a 9477 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
d73341bf
AB
9478 dma_free_coherent(&pdev->dev,
9479 sizeof(u32) * ioa_cfg->hrrq[i].size,
9480 ioa_cfg->hrrq[i].host_rrq,
9481 ioa_cfg->hrrq[i].host_rrq_dma);
05a6538a 9482 }
1da177e4
LT
9483out_ipr_free_cmd_blocks:
9484 ipr_free_cmd_blks(ioa_cfg);
9485out_free_vpd_cbs:
d73341bf
AB
9486 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9487 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
1da177e4
LT
9488out_free_res_entries:
9489 kfree(ioa_cfg->res_entries);
9490 goto out;
9491}
9492
9493/**
9494 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9495 * @ioa_cfg: ioa config struct
9496 *
9497 * Return value:
9498 * none
9499 **/
6f039790 9500static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9501{
9502 int i;
9503
9504 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9505 ioa_cfg->bus_attr[i].bus = i;
9506 ioa_cfg->bus_attr[i].qas_enabled = 0;
9507 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9508 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9509 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9510 else
9511 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9512 }
9513}
9514
6270e593
BK
9515/**
9516 * ipr_init_regs - Initialize IOA registers
9517 * @ioa_cfg: ioa config struct
9518 *
9519 * Return value:
9520 * none
9521 **/
9522static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9523{
9524 const struct ipr_interrupt_offsets *p;
9525 struct ipr_interrupts *t;
9526 void __iomem *base;
9527
9528 p = &ioa_cfg->chip_cfg->regs;
9529 t = &ioa_cfg->regs;
9530 base = ioa_cfg->hdw_dma_regs;
9531
9532 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9533 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9534 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9535 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9536 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9537 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9538 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9539 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9540 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9541 t->ioarrin_reg = base + p->ioarrin_reg;
9542 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9543 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9544 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9545 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9546 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9547 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9548
9549 if (ioa_cfg->sis64) {
9550 t->init_feedback_reg = base + p->init_feedback_reg;
9551 t->dump_addr_reg = base + p->dump_addr_reg;
9552 t->dump_data_reg = base + p->dump_data_reg;
9553 t->endian_swap_reg = base + p->endian_swap_reg;
9554 }
9555}
9556
1da177e4
LT
9557/**
9558 * ipr_init_ioa_cfg - Initialize IOA config struct
9559 * @ioa_cfg: ioa config struct
9560 * @host: scsi host struct
9561 * @pdev: PCI dev struct
9562 *
9563 * Return value:
9564 * none
9565 **/
6f039790
GKH
9566static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9567 struct Scsi_Host *host, struct pci_dev *pdev)
1da177e4 9568{
6270e593 9569 int i;
1da177e4
LT
9570
9571 ioa_cfg->host = host;
9572 ioa_cfg->pdev = pdev;
9573 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 9574 ioa_cfg->doorbell = IPR_DOORBELL;
1da177e4
LT
9575 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9576 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
1da177e4
LT
9577 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9578 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9579 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9580 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9581
1da177e4
LT
9582 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9583 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9584 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9585 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
c4028958 9586 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
1da177e4 9587 init_waitqueue_head(&ioa_cfg->reset_wait_q);
95fecd90 9588 init_waitqueue_head(&ioa_cfg->msi_wait_q);
6270e593 9589 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
1da177e4
LT
9590 ioa_cfg->sdt_state = INACTIVE;
9591
9592 ipr_initialize_bus_attr(ioa_cfg);
3e7ebdfa 9593 ioa_cfg->max_devs_supported = ipr_max_devs;
1da177e4 9594
3e7ebdfa
WB
9595 if (ioa_cfg->sis64) {
9596 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9597 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9598 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9599 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
6270e593
BK
9600 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9601 + ((sizeof(struct ipr_config_table_entry64)
9602 * ioa_cfg->max_devs_supported)));
3e7ebdfa
WB
9603 } else {
9604 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9605 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9606 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9607 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
6270e593
BK
9608 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9609 + ((sizeof(struct ipr_config_table_entry)
9610 * ioa_cfg->max_devs_supported)));
3e7ebdfa 9611 }
6270e593 9612
f688f96d 9613 host->max_channel = IPR_VSET_BUS;
1da177e4
LT
9614 host->unique_id = host->host_no;
9615 host->max_cmd_len = IPR_MAX_CDB_LEN;
89aad428 9616 host->can_queue = ioa_cfg->max_cmds;
1da177e4
LT
9617 pci_set_drvdata(pdev, ioa_cfg);
9618
6270e593
BK
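	/* HRRQ 0 shares the SCSI host lock; any additional queues use their
	 * own per-queue locks.
	 */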
9619 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9620 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9621 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9622 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9623 if (i == 0)
9624 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9625 else
9626 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
dcbad00e 9627 }
1da177e4
LT
9628}
9629
9630/**
1be7bd82 9631 * ipr_get_chip_info - Find adapter chip information
1da177e4
LT
9632 * @dev_id: PCI device id struct
9633 *
9634 * Return value:
1be7bd82 9635 * ptr to chip information on success / NULL on failure
1da177e4 9636 **/
6f039790 9637static const struct ipr_chip_t *
1be7bd82 9638ipr_get_chip_info(const struct pci_device_id *dev_id)
1da177e4
LT
9639{
9640 int i;
9641
1da177e4
LT
9642 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9643 if (ipr_chip[i].vendor == dev_id->vendor &&
9644 ipr_chip[i].device == dev_id->device)
1be7bd82 9645 return &ipr_chip[i];
1da177e4
LT
9646 return NULL;
9647}
9648
6270e593
BK
9649/**
9650 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9651 * during probe time
9652 * @ioa_cfg: ioa config struct
9653 *
9654 * Return value:
9655 * None
9656 **/
9657static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9658{
9659 struct pci_dev *pdev = ioa_cfg->pdev;
9660
9661 if (pci_channel_offline(pdev)) {
9662 wait_event_timeout(ioa_cfg->eeh_wait_q,
9663 !pci_channel_offline(pdev),
9664 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9665 pci_restore_state(pdev);
9666 }
9667}
9668
05a6538a 9669static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9670{
9671 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
60e76b77 9672 int i, vectors;
05a6538a 9673
9674 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9675 entries[i].entry = i;
9676
60e76b77
AG
9677 vectors = pci_enable_msix_range(ioa_cfg->pdev,
9678 entries, 1, ipr_number_of_msix);
9679 if (vectors < 0) {
6270e593 9680 ipr_wait_for_pci_err_recovery(ioa_cfg);
60e76b77 9681 return vectors;
05a6538a 9682 }
9683
60e76b77
AG
9684 for (i = 0; i < vectors; i++)
9685 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9686 ioa_cfg->nvectors = vectors;
05a6538a 9687
60e76b77 9688 return 0;
05a6538a 9689}
9690
9691static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9692{
60e76b77 9693 int i, vectors;
05a6538a 9694
60e76b77
AG
9695 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9696 if (vectors < 0) {
6270e593 9697 ipr_wait_for_pci_err_recovery(ioa_cfg);
60e76b77 9698 return vectors;
05a6538a 9699 }
9700
60e76b77
AG
9701 for (i = 0; i < vectors; i++)
9702 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9703 ioa_cfg->nvectors = vectors;
05a6538a 9704
60e76b77 9705 return 0;
05a6538a 9706}
9707
9708static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9709{
9710 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9711
9712 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9713 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9714 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9715 ioa_cfg->vectors_info[vec_idx].
9716 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9717 }
9718}
9719
9720static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9721{
9722 int i, rc;
9723
9724 for (i = 1; i < ioa_cfg->nvectors; i++) {
9725 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9726 ipr_isr_mhrrq,
9727 0,
9728 ioa_cfg->vectors_info[i].desc,
9729 &ioa_cfg->hrrq[i]);
9730 if (rc) {
9731 while (--i >= 0)
9732 free_irq(ioa_cfg->vectors_info[i].vec,
9733 &ioa_cfg->hrrq[i]);
9734 return rc;
9735 }
9736 }
9737 return 0;
9738}
9739
95fecd90
WB
9740/**
9741 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 9742 * @irq: interrupt number
 * @devp: ioa config struct pointer
9743 *
9744 * Description: Simply set the msi_received flag to 1 indicating that
9745 * Message Signaled Interrupts are supported.
9746 *
9747 * Return value:
 9748 * IRQ_HANDLED
9749 **/
6f039790 9750static irqreturn_t ipr_test_intr(int irq, void *devp)
95fecd90
WB
9751{
9752 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9753 unsigned long lock_flags = 0;
9754 irqreturn_t rc = IRQ_HANDLED;
9755
05a6538a 9756 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
95fecd90
WB
9757 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9758
9759 ioa_cfg->msi_received = 1;
9760 wake_up(&ioa_cfg->msi_wait_q);
9761
9762 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9763 return rc;
9764}
9765
9766/**
9767 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9768 * @pdev: PCI device struct
9769 *
60e76b77 9770 * Description: The return value from pci_enable_msi_range() cannot always be
95fecd90
WB
9771 * trusted. This routine sets up and initiates a test interrupt to determine
9772 * if the interrupt is received via the ipr_test_intr() service routine.
 9773 * If the test fails, the driver will fall back to LSI.
9774 *
9775 * Return value:
9776 * 0 on success / non-zero on failure
9777 **/
6f039790 9778static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
95fecd90
WB
9779{
9780 int rc;
9781 volatile u32 int_reg;
9782 unsigned long lock_flags = 0;
9783
9784 ENTER;
9785
9786 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9787 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9788 ioa_cfg->msi_received = 0;
9789 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
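	/* Unmask only the IO debug acknowledge interrupt; writing that same bit
	 * to the sense interrupt register below raises it as the test interrupt,
	 * which ipr_test_intr() is expected to catch.
	 */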
214777ba 9790 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
95fecd90
WB
9791 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9792 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9793
f19799f4 9794 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9795 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9796 else
9797 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
95fecd90
WB
9798 if (rc) {
9799 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9800 return rc;
9801 } else if (ipr_debug)
9802 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9803
214777ba 9804 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
95fecd90
WB
9805 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9806 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
56d6aa33 9807 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
95fecd90
WB
9808 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9809
95fecd90
WB
9810 if (!ioa_cfg->msi_received) {
9811 /* MSI test failed */
9812 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
9813 rc = -EOPNOTSUPP;
9814 } else if (ipr_debug)
9815 dev_info(&pdev->dev, "MSI test succeeded.\n");
9816
9817 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9818
f19799f4 9819 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9820 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9821 else
9822 free_irq(pdev->irq, ioa_cfg);
95fecd90
WB
9823
9824 LEAVE;
9825
9826 return rc;
9827}
9828
05a6538a 9829/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
1da177e4
LT
9830 * @pdev: PCI device struct
9831 * @dev_id: PCI device id struct
9832 *
9833 * Return value:
9834 * 0 on success / non-zero on failure
9835 **/
6f039790
GKH
9836static int ipr_probe_ioa(struct pci_dev *pdev,
9837 const struct pci_device_id *dev_id)
1da177e4
LT
9838{
9839 struct ipr_ioa_cfg *ioa_cfg;
9840 struct Scsi_Host *host;
9841 unsigned long ipr_regs_pci;
9842 void __iomem *ipr_regs;
a2a65a3e 9843 int rc = PCIBIOS_SUCCESSFUL;
473b1e8e 9844 volatile u32 mask, uproc, interrupts;
feccada9 9845 unsigned long lock_flags, driver_lock_flags;
1da177e4
LT
9846
9847 ENTER;
9848
1da177e4 9849 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
1da177e4
LT
9850 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9851
9852 if (!host) {
9853 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9854 rc = -ENOMEM;
6270e593 9855 goto out;
1da177e4
LT
9856 }
9857
9858 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9859 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8d8e7d13 9860 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
1da177e4 9861
1be7bd82 9862 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
1da177e4 9863
1be7bd82 9864 if (!ioa_cfg->ipr_chip) {
1da177e4
LT
9865 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9866 dev_id->vendor, dev_id->device);
9867 goto out_scsi_host_put;
9868 }
9869
a32c055f
WB
9870 /* set SIS 32 or SIS 64 */
9871 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
1be7bd82 9872 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
7dd21308 9873 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
89aad428 9874 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
1be7bd82 9875
5469cb5b
BK
9876 if (ipr_transop_timeout)
9877 ioa_cfg->transop_timeout = ipr_transop_timeout;
9878 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9879 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9880 else
9881 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9882
44c10138 9883 ioa_cfg->revid = pdev->revision;
463fc696 9884
6270e593
BK
9885 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9886
1da177e4
LT
9887 ipr_regs_pci = pci_resource_start(pdev, 0);
9888
9889 rc = pci_request_regions(pdev, IPR_NAME);
9890 if (rc < 0) {
9891 dev_err(&pdev->dev,
9892 "Couldn't register memory range of registers\n");
9893 goto out_scsi_host_put;
9894 }
9895
6270e593
BK
9896 rc = pci_enable_device(pdev);
9897
9898 if (rc || pci_channel_offline(pdev)) {
9899 if (pci_channel_offline(pdev)) {
9900 ipr_wait_for_pci_err_recovery(ioa_cfg);
9901 rc = pci_enable_device(pdev);
9902 }
9903
9904 if (rc) {
9905 dev_err(&pdev->dev, "Cannot enable adapter\n");
9906 ipr_wait_for_pci_err_recovery(ioa_cfg);
9907 goto out_release_regions;
9908 }
9909 }
9910
25729a7f 9911 ipr_regs = pci_ioremap_bar(pdev, 0);
1da177e4
LT
9912
9913 if (!ipr_regs) {
9914 dev_err(&pdev->dev,
9915 "Couldn't map memory range of registers\n");
9916 rc = -ENOMEM;
6270e593 9917 goto out_disable;
1da177e4
LT
9918 }
9919
9920 ioa_cfg->hdw_dma_regs = ipr_regs;
9921 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9922 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9923
6270e593 9924 ipr_init_regs(ioa_cfg);
1da177e4 9925
a32c055f 9926 if (ioa_cfg->sis64) {
869404cb 9927 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
a32c055f 9928 if (rc < 0) {
869404cb
AB
9929 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9930 rc = dma_set_mask_and_coherent(&pdev->dev,
9931 DMA_BIT_MASK(32));
a32c055f 9932 }
a32c055f 9933 } else
869404cb 9934 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
a32c055f 9935
1da177e4 9936 if (rc < 0) {
869404cb 9937 dev_err(&pdev->dev, "Failed to set DMA mask\n");
1da177e4
LT
9938 goto cleanup_nomem;
9939 }
9940
9941 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9942 ioa_cfg->chip_cfg->cache_line_size);
9943
9944 if (rc != PCIBIOS_SUCCESSFUL) {
9945 dev_err(&pdev->dev, "Write of cache line size failed\n");
6270e593 9946 ipr_wait_for_pci_err_recovery(ioa_cfg);
1da177e4
LT
9947 rc = -EIO;
9948 goto cleanup_nomem;
9949 }
9950
6270e593
BK
9951 /* Issue MMIO read to ensure card is not in EEH */
9952 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9953 ipr_wait_for_pci_err_recovery(ioa_cfg);
9954
05a6538a 9955 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9956 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9957 IPR_MAX_MSIX_VECTORS);
9958 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9959 }
9960
9961 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
56d6aa33 9962 ipr_enable_msix(ioa_cfg) == 0)
05a6538a 9963 ioa_cfg->intr_flag = IPR_USE_MSIX;
9964 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
56d6aa33 9965 ipr_enable_msi(ioa_cfg) == 0)
05a6538a 9966 ioa_cfg->intr_flag = IPR_USE_MSI;
9967 else {
9968 ioa_cfg->intr_flag = IPR_USE_LSI;
9969 ioa_cfg->nvectors = 1;
9970 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9971 }
9972
6270e593
BK
9973 pci_set_master(pdev);
9974
9975 if (pci_channel_offline(pdev)) {
9976 ipr_wait_for_pci_err_recovery(ioa_cfg);
9977 pci_set_master(pdev);
9978 if (pci_channel_offline(pdev)) {
9979 rc = -EIO;
9980 goto out_msi_disable;
9981 }
9982 }
9983
05a6538a 9984 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9985 ioa_cfg->intr_flag == IPR_USE_MSIX) {
95fecd90 9986 rc = ipr_test_msi(ioa_cfg, pdev);
05a6538a 9987 if (rc == -EOPNOTSUPP) {
6270e593 9988 ipr_wait_for_pci_err_recovery(ioa_cfg);
05a6538a 9989 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9990 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9991 pci_disable_msi(pdev);
9992 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9993 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9994 pci_disable_msix(pdev);
9995 }
9996
9997 ioa_cfg->intr_flag = IPR_USE_LSI;
9998 ioa_cfg->nvectors = 1;
9999 }
95fecd90
WB
10000 else if (rc)
10001 goto out_msi_disable;
05a6538a 10002 else {
10003 if (ioa_cfg->intr_flag == IPR_USE_MSI)
10004 dev_info(&pdev->dev,
10005 "Request for %d MSIs succeeded with starting IRQ: %d\n",
10006 ioa_cfg->nvectors, pdev->irq);
10007 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10008 dev_info(&pdev->dev,
10009 "Request for %d MSIXs succeeded.",
10010 ioa_cfg->nvectors);
10011 }
10012 }
10013
10014 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10015 (unsigned int)num_online_cpus(),
10016 (unsigned int)IPR_MAX_HRRQ_NUM);
95fecd90 10017
1da177e4 10018 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
f170c684 10019 goto out_msi_disable;
1da177e4
LT
10020
10021 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
f170c684 10022 goto out_msi_disable;
1da177e4
LT
10023
10024 rc = ipr_alloc_mem(ioa_cfg);
10025 if (rc < 0) {
10026 dev_err(&pdev->dev,
10027 "Couldn't allocate enough memory for device driver!\n");
f170c684 10028 goto out_msi_disable;
1da177e4
LT
10029 }
10030
6270e593
BK
10031 /* Save away PCI config space for use following IOA reset */
10032 rc = pci_save_state(pdev);
10033
10034 if (rc != PCIBIOS_SUCCESSFUL) {
10035 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10036 rc = -EIO;
10037 goto cleanup_nolog;
10038 }
10039
ce155cce 10040 /*
10041 * If HRRQ updated interrupt is not masked, or reset alert is set,
10042 * the card is in an unknown state and needs a hard reset
10043 */
214777ba
WB
10044 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10045 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10046 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
ce155cce 10047 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10048 ioa_cfg->needs_hard_reset = 1;
5d7c20b7 10049 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
473b1e8e
BK
10050 ioa_cfg->needs_hard_reset = 1;
10051 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10052 ioa_cfg->ioa_unit_checked = 1;
ce155cce 10053
56d6aa33 10054 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1da177e4 10055 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
56d6aa33 10056 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4 10057
05a6538a 10058 if (ioa_cfg->intr_flag == IPR_USE_MSI
10059 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
10060 name_msi_vectors(ioa_cfg);
10061 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
10062 0,
10063 ioa_cfg->vectors_info[0].desc,
10064 &ioa_cfg->hrrq[0]);
10065 if (!rc)
10066 rc = ipr_request_other_msi_irqs(ioa_cfg);
10067 } else {
10068 rc = request_irq(pdev->irq, ipr_isr,
10069 IRQF_SHARED,
10070 IPR_NAME, &ioa_cfg->hrrq[0]);
10071 }
1da177e4
LT
10072 if (rc) {
10073 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10074 pdev->irq, rc);
10075 goto cleanup_nolog;
10076 }
10077
	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;

		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
								WQ_MEM_RECLAIM, host->host_no);

		if (!ioa_cfg->reset_work_q) {
			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
			rc = -ENOMEM;	/* otherwise probe would return 0 on this failure */
			goto out_free_irq;
		}
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	LEAVE;
out:
	return rc;

out_free_irq:
	ipr_free_irqs(ioa_cfg);
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	ipr_wait_for_pci_err_recovery(ioa_cfg);
	if (ioa_cfg->intr_flag == IPR_USE_MSI)
		pci_disable_msi(pdev);
	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		pci_disable_msix(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
	goto out;
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
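	/*
	 * removing_ioa is set under each HRRQ lock and the write barrier
	 * publishes it before the bringdown below starts, so the command
	 * submission paths stop taking new work for this adapter.
	 */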
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	if (ioa_cfg->reset_work_q)
		flush_workqueue(ioa_cfg->reset_work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id entry
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

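	/*
	 * On SIS-64 adapters with more than one vector, completions on the
	 * secondary HRRQs are processed through blk_iopoll, so bursts of
	 * completions are polled with the configured weight instead of
	 * being handled one interrupt at a time.
	 */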
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
		}
	}

	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

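	/*
	 * When ipr_fast_reboot is set, SIS-64 adapters are only quiesced on
	 * a restart rather than fully shut down, and their IRQs and PCI
	 * device are released right away to shorten the reboot path.
	 */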
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
		shutdown_type = IPR_SHUTDOWN_QUIESCE;

	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
		ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}

static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	notifier event (restart, halt, or power off)
 * @buf:	notifier data (unused)
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}

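/*
 * Reboot notifier: ipr_halt() runs on restart, halt and power-off and sends
 * a "shutdown prepare for normal" to every adapter that still accepts
 * commands.  It is registered from ipr_init() below.
 */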
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);