ipr: Fix incorrect trace indexing
drivers/scsi/ipr.c

/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
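
/*
 * Usage example (values here are illustrative, not recommendations): the
 * options above are standard module parameters and can be set at load
 * time, e.g.:
 *
 *	modprobe ipr max_speed=2 number_of_msix=4
 */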

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
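	/*
	 * Each entry is { IOASC, flag, log level, message }; the level is
	 * compared against the log_level module parameter when the error
	 * is logged, and the flag appears to select whether the raw IOASA
	 * is dumped with it (field roles inferred from usage; see
	 * struct ipr_error_table_t in ipr.h).
	 */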
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};
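
/*
 * In each compare string below, an 'X' position must match the enclosure's
 * product ID byte-for-byte while other characters (e.g. '*') are ignored;
 * the last field is the backplane's maximum bus speed in MB/s (matching
 * logic lives in ipr_find_ses_entry() later in this file).
 */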
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

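	/*
	 * The driver trace is a ring buffer: atomically advancing
	 * trace_index and masking it with IPR_TRACE_INDEX_MASK keeps the
	 * slot in bounds even when several CPUs trace concurrently; this
	 * is the indexing fix referenced in the commit title.
	 */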
	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

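	/* Preserve the command's HRRQ assignment across the memset of the
	 * command packet below. */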
	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					 temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

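	/* The host lock is dropped while sleeping so the interrupt handler
	 * can complete the command and wake this thread. */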
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

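/*
 * ipr_get_hrrq_index - Select an HRR queue for a new command. HRRQ 0
 * (IPR_INIT_HRRQ) is kept for initialization and internal commands, so
 * when multiple queues exist the atomic counter round-robins ops across
 * queues 1 .. hrrq_num - 1.
 */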
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	if (ioa_cfg->hrrq_num == 1)
		return 0;
	else
		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 *	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	};
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

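/* Illustrative example: a res_path beginning { 0x00, 0x28, 0xff } formats
 * as "00-28"; each byte prints as two-digit hex joined by '-', and 0xff
 * terminates the path. */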

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (buffer - p));
	return buffer;
}

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *	none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 *	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

1453/**
1454 * ipr_process_ccn - Op done function for a CCN.
1455 * @ipr_cmd: ipr command struct
1456 *
1457 * This function is the op done function for a configuration
1458 * change notification host controlled async from the adapter.
1459 *
1460 * Return value:
1461 * none
1462 **/
1463static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1464{
1465 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1466 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 1467 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
1468
1469 list_del(&hostrcb->queue);
05a6538a 1470 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
1471
1472 if (ioasc) {
4fdd7c7a
BK
1473 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1474 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1475 dev_err(&ioa_cfg->pdev->dev,
1476 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1477
1478 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1479 } else {
1480 ipr_handle_config_change(ioa_cfg, hostrcb);
1481 }
1482}
1483
1484/**
1485 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1486 * @i: index into buffer
1487 * @buf: string to modify
1488 *
1489 * This function will strip all trailing whitespace, pad the end
1490 * of the string with a single space, and NULL terminate the string.
1491 *
1492 * Return value:
1493 * new length of string
1494 **/
1495static int strip_and_pad_whitespace(int i, char *buf)
1496{
1497 while (i && buf[i] == ' ')
1498 i--;
1499 buf[i+1] = ' ';
1500 buf[i+2] = '\0';
1501 return i + 2;
1502}
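/*
 * Illustrative sketch (not driver code): chaining the helper above to
 * build a compact string from fixed-width inquiry fields, the way
 * ipr_log_vpd_compact() below does. Field contents are invented for
 * the example.
 */
static void __maybe_unused ipr_example_compact_fields(void)
{
	char buf[16];
	int i;

	memcpy(buf, "IBM     ", 8);		/* fixed-width vendor field */
	i = strip_and_pad_whitespace(7, buf);	/* buf = "IBM ", i = 4 */
	memcpy(&buf[i], "5702", 4);		/* next field follows the pad */
	buf[i + 4] = '\0';			/* buf = "IBM 5702" */
}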
1503
1504/**
1505 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1506 * @prefix: string to print at start of printk
1507 * @hostrcb: hostrcb pointer
1508 * @vpd: vendor/product id/sn struct
1509 *
1510 * Return value:
1511 * none
1512 **/
1513static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1514 struct ipr_vpd *vpd)
1515{
1516 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1517 int i = 0;
1518
1519 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1520 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1521
1522 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1523 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1524
1525 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1526 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1527
1528 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1529}
1530
1531/**
1532 * ipr_log_vpd - Log the passed VPD to the error log.
1533 * @vpd: vendor/product id/sn struct
1534 *
1535 * Return value:
1536 * none
1537 **/
1538static void ipr_log_vpd(struct ipr_vpd *vpd)
1539{
1540 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1541 + IPR_SERIAL_NUM_LEN];
1542
1543 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1544 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1545 IPR_PROD_ID_LEN);
1546 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1547 ipr_err("Vendor/Product ID: %s\n", buffer);
1548
1549 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1550 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1551 ipr_err(" Serial Number: %s\n", buffer);
1552}
1553
1554/**
1555 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1556 * @prefix: string to print at start of printk
1557 * @hostrcb: hostrcb pointer
1558 * @vpd: vendor/product id/sn/wwn struct
1559 *
1560 * Return value:
1561 * none
1562 **/
1563static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1564 struct ipr_ext_vpd *vpd)
1565{
1566 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1567 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1568 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1569}
1570
1571/**
1572 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1573 * @vpd: vendor/product id/sn/wwn struct
1574 *
1575 * Return value:
1576 * none
1577 **/
1578static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1579{
1580 ipr_log_vpd(&vpd->vpd);
1581 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1582 be32_to_cpu(vpd->wwid[1]));
1583}
1584
1585/**
1586 * ipr_log_enhanced_cache_error - Log a cache error.
1587 * @ioa_cfg: ioa config struct
1588 * @hostrcb: hostrcb struct
1589 *
1590 * Return value:
1591 * none
1592 **/
1593static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1594 struct ipr_hostrcb *hostrcb)
1595{
1596 struct ipr_hostrcb_type_12_error *error;
1597
1598 if (ioa_cfg->sis64)
1599 error = &hostrcb->hcam.u.error64.u.type_12_error;
1600 else
1601 error = &hostrcb->hcam.u.error.u.type_12_error;
1602
1603 ipr_err("-----Current Configuration-----\n");
1604 ipr_err("Cache Directory Card Information:\n");
1605 ipr_log_ext_vpd(&error->ioa_vpd);
1606 ipr_err("Adapter Card Information:\n");
1607 ipr_log_ext_vpd(&error->cfc_vpd);
1608
1609 ipr_err("-----Expected Configuration-----\n");
1610 ipr_err("Cache Directory Card Information:\n");
1611 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1612 ipr_err("Adapter Card Information:\n");
1613 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1614
1615 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1616 be32_to_cpu(error->ioa_data[0]),
1617 be32_to_cpu(error->ioa_data[1]),
1618 be32_to_cpu(error->ioa_data[2]));
1619}
1620
1621/**
1622 * ipr_log_cache_error - Log a cache error.
1623 * @ioa_cfg: ioa config struct
1624 * @hostrcb: hostrcb struct
1625 *
1626 * Return value:
1627 * none
1628 **/
1629static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1630 struct ipr_hostrcb *hostrcb)
1631{
1632 struct ipr_hostrcb_type_02_error *error =
1633 &hostrcb->hcam.u.error.u.type_02_error;
1634
1635 ipr_err("-----Current Configuration-----\n");
1636 ipr_err("Cache Directory Card Information:\n");
1637 ipr_log_vpd(&error->ioa_vpd);
1638 ipr_err("Adapter Card Information:\n");
1639 ipr_log_vpd(&error->cfc_vpd);
1640
1641 ipr_err("-----Expected Configuration-----\n");
1642 ipr_err("Cache Directory Card Information:\n");
1643 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1644 ipr_err("Adapter Card Information:\n");
1645 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1646
1647 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1648 be32_to_cpu(error->ioa_data[0]),
1649 be32_to_cpu(error->ioa_data[1]),
1650 be32_to_cpu(error->ioa_data[2]));
1651}
1652
1653/**
1654 * ipr_log_enhanced_config_error - Log a configuration error.
1655 * @ioa_cfg: ioa config struct
1656 * @hostrcb: hostrcb struct
1657 *
1658 * Return value:
1659 * none
1660 **/
1661static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1662 struct ipr_hostrcb *hostrcb)
1663{
1664 int errors_logged, i;
1665 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1666 struct ipr_hostrcb_type_13_error *error;
1667
1668 error = &hostrcb->hcam.u.error.u.type_13_error;
1669 errors_logged = be32_to_cpu(error->errors_logged);
1670
1671 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1672 be32_to_cpu(error->errors_detected), errors_logged);
1673
1674 dev_entry = error->dev;
1675
1676 for (i = 0; i < errors_logged; i++, dev_entry++) {
1677 ipr_err_separator;
1678
1679 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1680 ipr_log_ext_vpd(&dev_entry->vpd);
1681
1682 ipr_err("-----New Device Information-----\n");
1683 ipr_log_ext_vpd(&dev_entry->new_vpd);
1684
1685 ipr_err("Cache Directory Card Information:\n");
1686 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1687
1688 ipr_err("Adapter Card Information:\n");
1689 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1690 }
1691}
1692
1693/**
1694 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1695 * @ioa_cfg: ioa config struct
1696 * @hostrcb: hostrcb struct
1697 *
1698 * Return value:
1699 * none
1700 **/
1701static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1702 struct ipr_hostrcb *hostrcb)
1703{
1704 int errors_logged, i;
1705 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1706 struct ipr_hostrcb_type_23_error *error;
1707 char buffer[IPR_MAX_RES_PATH_LENGTH];
1708
1709 error = &hostrcb->hcam.u.error64.u.type_23_error;
1710 errors_logged = be32_to_cpu(error->errors_logged);
1711
1712 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1713 be32_to_cpu(error->errors_detected), errors_logged);
1714
1715 dev_entry = error->dev;
1716
1717 for (i = 0; i < errors_logged; i++, dev_entry++) {
1718 ipr_err_separator;
1719
1720 ipr_err("Device %d : %s", i + 1,
1721 __ipr_format_res_path(dev_entry->res_path,
1722 buffer, sizeof(buffer)));
1723 ipr_log_ext_vpd(&dev_entry->vpd);
1724
1725 ipr_err("-----New Device Information-----\n");
1726 ipr_log_ext_vpd(&dev_entry->new_vpd);
1727
1728 ipr_err("Cache Directory Card Information:\n");
1729 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1730
1731 ipr_err("Adapter Card Information:\n");
1732 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1733 }
1734}
1735
1736/**
1737 * ipr_log_config_error - Log a configuration error.
1738 * @ioa_cfg: ioa config struct
1739 * @hostrcb: hostrcb struct
1740 *
1741 * Return value:
1742 * none
1743 **/
1744static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1745 struct ipr_hostrcb *hostrcb)
1746{
1747 int errors_logged, i;
1748 struct ipr_hostrcb_device_data_entry *dev_entry;
1749 struct ipr_hostrcb_type_03_error *error;
1750
1751 error = &hostrcb->hcam.u.error.u.type_03_error;
1752 errors_logged = be32_to_cpu(error->errors_logged);
1753
1754 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1755 be32_to_cpu(error->errors_detected), errors_logged);
1756
1757 dev_entry = error->dev;
1758
1759 for (i = 0; i < errors_logged; i++, dev_entry++) {
1760 ipr_err_separator;
1761
1762 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1763 ipr_log_vpd(&dev_entry->vpd);
1764
1765 ipr_err("-----New Device Information-----\n");
1766 ipr_log_vpd(&dev_entry->new_vpd);
1767
1768 ipr_err("Cache Directory Card Information:\n");
1769 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1770
1771 ipr_err("Adapter Card Information:\n");
1772 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1773
1774 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1775 be32_to_cpu(dev_entry->ioa_data[0]),
1776 be32_to_cpu(dev_entry->ioa_data[1]),
1777 be32_to_cpu(dev_entry->ioa_data[2]),
1778 be32_to_cpu(dev_entry->ioa_data[3]),
1779 be32_to_cpu(dev_entry->ioa_data[4]));
1780 }
1781}
1782
1783/**
1784 * ipr_log_enhanced_array_error - Log an array configuration error.
1785 * @ioa_cfg: ioa config struct
1786 * @hostrcb: hostrcb struct
1787 *
1788 * Return value:
1789 * none
1790 **/
1791static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1792 struct ipr_hostrcb *hostrcb)
1793{
1794 int i, num_entries;
1795 struct ipr_hostrcb_type_14_error *error;
1796 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1797 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1798
1799 error = &hostrcb->hcam.u.error.u.type_14_error;
1800
1801 ipr_err_separator;
1802
1803 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1804 error->protection_level,
1805 ioa_cfg->host->host_no,
1806 error->last_func_vset_res_addr.bus,
1807 error->last_func_vset_res_addr.target,
1808 error->last_func_vset_res_addr.lun);
1809
1810 ipr_err_separator;
1811
1812 array_entry = error->array_member;
1813 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1814 ARRAY_SIZE(error->array_member));
1815
1816 for (i = 0; i < num_entries; i++, array_entry++) {
1817 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1818 continue;
1819
1820 if (be32_to_cpu(error->exposed_mode_adn) == i)
1821 ipr_err("Exposed Array Member %d:\n", i);
1822 else
1823 ipr_err("Array Member %d:\n", i);
1824
1825 ipr_log_ext_vpd(&array_entry->vpd);
1826 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1827 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1828 "Expected Location");
1829
1830 ipr_err_separator;
1831 }
1832}
1833
1834/**
1835 * ipr_log_array_error - Log an array configuration error.
1836 * @ioa_cfg: ioa config struct
1837 * @hostrcb: hostrcb struct
1838 *
1839 * Return value:
1840 * none
1841 **/
1842static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1843 struct ipr_hostrcb *hostrcb)
1844{
1845 int i;
1846 struct ipr_hostrcb_type_04_error *error;
1847 struct ipr_hostrcb_array_data_entry *array_entry;
1848 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1849
1850 error = &hostrcb->hcam.u.error.u.type_04_error;
1851
1852 ipr_err_separator;
1853
1854 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1855 error->protection_level,
1856 ioa_cfg->host->host_no,
1857 error->last_func_vset_res_addr.bus,
1858 error->last_func_vset_res_addr.target,
1859 error->last_func_vset_res_addr.lun);
1860
1861 ipr_err_separator;
1862
1863 array_entry = error->array_member;
1864
1865 for (i = 0; i < 18; i++) {
1866 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1867 continue;
1867 continue;
1868
1869 if (be32_to_cpu(error->exposed_mode_adn) == i)
1870 ipr_err("Exposed Array Member %d:\n", i);
1871 else
1872 ipr_err("Array Member %d:\n", i);
1873
1874 ipr_log_vpd(&array_entry->vpd);
1875
1876 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1877 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1878 "Expected Location");
1879
1880 ipr_err_separator;
1881
1882 if (i == 9)
1883 array_entry = error->array_member2;
1884 else
1885 array_entry++;
1886 }
1887}
1888
1889/**
1890 * ipr_log_hex_data - Log additional hex IOA error data.
1891 * @ioa_cfg: ioa config struct
1892 * @data: IOA error data
1893 * @len: data length
1894 *
1895 * Return value:
1896 * none
1897 **/
1898static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1899{
1900 int i;
1901
1902 if (len == 0)
1903 return;
1904
1905 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1906 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1907
1908 for (i = 0; i < len / 4; i += 4) {
1909 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1910 be32_to_cpu(data[i]),
1911 be32_to_cpu(data[i+1]),
1912 be32_to_cpu(data[i+2]),
1913 be32_to_cpu(data[i+3]));
1914 }
1915}
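/*
 * Output sketch for the function above: a 32-byte buffer is printed as
 * two lines of four big-endian words each, with the byte offset (the
 * word index i scaled by 4) in the leading column:
 *
 *	00000000: 00000000 00000001 00000002 00000003
 *	00000010: 00000004 00000005 00000006 00000007
 */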
1916
ee0f05b8 1917/**
1918 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1919 * @ioa_cfg: ioa config struct
1920 * @hostrcb: hostrcb struct
1921 *
1922 * Return value:
1923 * none
1924 **/
1925static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1926 struct ipr_hostrcb *hostrcb)
1927{
1928 struct ipr_hostrcb_type_17_error *error;
1929
1930 if (ioa_cfg->sis64)
1931 error = &hostrcb->hcam.u.error64.u.type_17_error;
1932 else
1933 error = &hostrcb->hcam.u.error.u.type_17_error;
1934
1935 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1936 strim(error->failure_reason);
1937
1938 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1939 be32_to_cpu(hostrcb->hcam.u.error.prc));
1940 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1941 ipr_log_hex_data(ioa_cfg, error->data,
1942 be32_to_cpu(hostrcb->hcam.length) -
1943 (offsetof(struct ipr_hostrcb_error, u) +
1944 offsetof(struct ipr_hostrcb_type_17_error, data)));
1945}
1946
b0df54bb 1947/**
1948 * ipr_log_dual_ioa_error - Log a dual adapter error.
1949 * @ioa_cfg: ioa config struct
1950 * @hostrcb: hostrcb struct
1951 *
1952 * Return value:
1953 * none
1954 **/
1955static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1956 struct ipr_hostrcb *hostrcb)
1957{
1958 struct ipr_hostrcb_type_07_error *error;
1959
1960 error = &hostrcb->hcam.u.error.u.type_07_error;
1961 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1962 strim(error->failure_reason);
1963
1964 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1965 be32_to_cpu(hostrcb->hcam.u.error.prc));
1966 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1967 ipr_log_hex_data(ioa_cfg, error->data,
1968 be32_to_cpu(hostrcb->hcam.length) -
1969 (offsetof(struct ipr_hostrcb_error, u) +
1970 offsetof(struct ipr_hostrcb_type_07_error, data)));
1971}
1972
1973static const struct {
1974 u8 active;
1975 char *desc;
1976} path_active_desc[] = {
1977 { IPR_PATH_NO_INFO, "Path" },
1978 { IPR_PATH_ACTIVE, "Active path" },
1979 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1980};
1981
1982static const struct {
1983 u8 state;
1984 char *desc;
1985} path_state_desc[] = {
1986 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1987 { IPR_PATH_HEALTHY, "is healthy" },
1988 { IPR_PATH_DEGRADED, "is degraded" },
1989 { IPR_PATH_FAILED, "is failed" }
1990};
1991
1992/**
1993 * ipr_log_fabric_path - Log a fabric path error
1994 * @hostrcb: hostrcb struct
1995 * @fabric: fabric descriptor
1996 *
1997 * Return value:
1998 * none
1999 **/
2000static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2001 struct ipr_hostrcb_fabric_desc *fabric)
2002{
2003 int i, j;
2004 u8 path_state = fabric->path_state;
2005 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2006 u8 state = path_state & IPR_PATH_STATE_MASK;
2007
2008 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2009 if (path_active_desc[i].active != active)
2010 continue;
2011
2012 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2013 if (path_state_desc[j].state != state)
2014 continue;
2015
2016 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2017 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2018 path_active_desc[i].desc, path_state_desc[j].desc,
2019 fabric->ioa_port);
2020 } else if (fabric->cascaded_expander == 0xff) {
2021 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2022 path_active_desc[i].desc, path_state_desc[j].desc,
2023 fabric->ioa_port, fabric->phy);
2024 } else if (fabric->phy == 0xff) {
2025 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2026 path_active_desc[i].desc, path_state_desc[j].desc,
2027 fabric->ioa_port, fabric->cascaded_expander);
2028 } else {
2029 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2030 path_active_desc[i].desc, path_state_desc[j].desc,
2031 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2032 }
2033 return;
2034 }
2035 }
2036
2037 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2038 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2039}
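/*
 * Note on the branches above: 0xff in cascaded_expander or phy is the
 * adapter's "not applicable" sentinel, so those fields are omitted
 * from the message rather than being printed literally.
 */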
2040
2041/**
2042 * ipr_log64_fabric_path - Log a fabric path error
2043 * @hostrcb: hostrcb struct
2044 * @fabric: fabric descriptor
2045 *
2046 * Return value:
2047 * none
2048 **/
2049static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2050 struct ipr_hostrcb64_fabric_desc *fabric)
2051{
2052 int i, j;
2053 u8 path_state = fabric->path_state;
2054 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2055 u8 state = path_state & IPR_PATH_STATE_MASK;
2056 char buffer[IPR_MAX_RES_PATH_LENGTH];
2057
2058 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2059 if (path_active_desc[i].active != active)
2060 continue;
2061
2062 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2063 if (path_state_desc[j].state != state)
2064 continue;
2065
2066 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2067 path_active_desc[i].desc, path_state_desc[j].desc,
2068 ipr_format_res_path(hostrcb->ioa_cfg,
2069 fabric->res_path,
2070 buffer, sizeof(buffer)));
2071 return;
2072 }
2073 }
2074
2075 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2076 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2077 buffer, sizeof(buffer)));
2078}
2079
2080static const struct {
2081 u8 type;
2082 char *desc;
2083} path_type_desc[] = {
2084 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2085 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2086 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2087 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2088};
2089
2090static const struct {
2091 u8 status;
2092 char *desc;
2093} path_status_desc[] = {
2094 { IPR_PATH_CFG_NO_PROB, "Functional" },
2095 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2096 { IPR_PATH_CFG_FAILED, "Failed" },
2097 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2098 { IPR_PATH_NOT_DETECTED, "Missing" },
2099 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2100};
2101
2102static const char *link_rate[] = {
2103 "unknown",
2104 "disabled",
2105 "phy reset problem",
2106 "spinup hold",
2107 "port selector",
2108 "unknown",
2109 "unknown",
2110 "unknown",
2111 "1.5Gbps",
2112 "3.0Gbps",
2113 "unknown",
2114 "unknown",
2115 "unknown",
2116 "unknown",
2117 "unknown",
2118 "unknown"
2119};
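/*
 * Indexing note: cfg->link_rate is masked with IPR_PHY_LINK_RATE_MASK
 * before use, keeping only the low-order bits of the SAS link rate
 * field, so the table above needs an entry for each of the 16 possible
 * values; 0x8 and 0x9 carry the defined 1.5Gbps and 3.0Gbps rates.
 */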
2120
2121/**
2122 * ipr_log_path_elem - Log a fabric path element.
2123 * @hostrcb: hostrcb struct
2124 * @cfg: fabric path element struct
2125 *
2126 * Return value:
2127 * none
2128 **/
2129static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2130 struct ipr_hostrcb_config_element *cfg)
2131{
2132 int i, j;
2133 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2134 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2135
2136 if (type == IPR_PATH_CFG_NOT_EXIST)
2137 return;
2138
2139 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2140 if (path_type_desc[i].type != type)
2141 continue;
2142
2143 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2144 if (path_status_desc[j].status != status)
2145 continue;
2146
2147 if (type == IPR_PATH_CFG_IOA_PORT) {
2148 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2149 path_status_desc[j].desc, path_type_desc[i].desc,
2150 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2151 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2152 } else {
2153 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2154 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2155 path_status_desc[j].desc, path_type_desc[i].desc,
2156 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2157 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2158 } else if (cfg->cascaded_expander == 0xff) {
2159 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2160 "WWN=%08X%08X\n", path_status_desc[j].desc,
2161 path_type_desc[i].desc, cfg->phy,
2162 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2163 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2164 } else if (cfg->phy == 0xff) {
2165 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2166 "WWN=%08X%08X\n", path_status_desc[j].desc,
2167 path_type_desc[i].desc, cfg->cascaded_expander,
2168 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2169 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2170 } else {
2171 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2172 "WWN=%08X%08X\n", path_status_desc[j].desc,
2173 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2174 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2175 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2176 }
2177 }
2178 return;
2179 }
2180 }
2181
2182 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2183 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2184 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2185 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2186}
2187
4565e370
WB
2188/**
2189 * ipr_log64_path_elem - Log a fabric path element.
2190 * @hostrcb: hostrcb struct
2191 * @cfg: fabric path element struct
2192 *
2193 * Return value:
2194 * none
2195 **/
2196static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2197 struct ipr_hostrcb64_config_element *cfg)
2198{
2199 int i, j;
2200 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2201 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2202 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2203 char buffer[IPR_MAX_RES_PATH_LENGTH];
2204
2205 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2206 return;
2207
2208 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2209 if (path_type_desc[i].type != type)
2210 continue;
2211
2212 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2213 if (path_status_desc[j].status != status)
2214 continue;
2215
2216 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2217 path_status_desc[j].desc, path_type_desc[i].desc,
2218 ipr_format_res_path(hostrcb->ioa_cfg,
2219 cfg->res_path, buffer, sizeof(buffer)),
2220 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2221 be32_to_cpu(cfg->wwid[0]),
2222 be32_to_cpu(cfg->wwid[1]));
2223 return;
2224 }
2225 }
2226 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2227 "WWN=%08X%08X\n", cfg->type_status,
2228 ipr_format_res_path(hostrcb->ioa_cfg,
2229 cfg->res_path, buffer, sizeof(buffer)),
2230 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2231 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2232}
2233
2234/**
2235 * ipr_log_fabric_error - Log a fabric error.
2236 * @ioa_cfg: ioa config struct
2237 * @hostrcb: hostrcb struct
2238 *
2239 * Return value:
2240 * none
2241 **/
2242static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2243 struct ipr_hostrcb *hostrcb)
2244{
2245 struct ipr_hostrcb_type_20_error *error;
2246 struct ipr_hostrcb_fabric_desc *fabric;
2247 struct ipr_hostrcb_config_element *cfg;
2248 int i, add_len;
2249
2250 error = &hostrcb->hcam.u.error.u.type_20_error;
2251 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2252 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2253
2254 add_len = be32_to_cpu(hostrcb->hcam.length) -
2255 (offsetof(struct ipr_hostrcb_error, u) +
2256 offsetof(struct ipr_hostrcb_type_20_error, desc));
2257
2258 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2259 ipr_log_fabric_path(hostrcb, fabric);
2260 for_each_fabric_cfg(fabric, cfg)
2261 ipr_log_path_elem(hostrcb, cfg);
2262
2263 add_len -= be16_to_cpu(fabric->length);
2264 fabric = (struct ipr_hostrcb_fabric_desc *)
2265 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2266 }
2267
2268 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2269}
2270
2271/**
2272 * ipr_log_sis64_array_error - Log a sis64 array error.
2273 * @ioa_cfg: ioa config struct
2274 * @hostrcb: hostrcb struct
2275 *
2276 * Return value:
2277 * none
2278 **/
2279static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2280 struct ipr_hostrcb *hostrcb)
2281{
2282 int i, num_entries;
2283 struct ipr_hostrcb_type_24_error *error;
2284 struct ipr_hostrcb64_array_data_entry *array_entry;
2285 char buffer[IPR_MAX_RES_PATH_LENGTH];
2286 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2287
2288 error = &hostrcb->hcam.u.error64.u.type_24_error;
2289
2290 ipr_err_separator;
2291
2292 ipr_err("RAID %s Array Configuration: %s\n",
2293 error->protection_level,
2294 ipr_format_res_path(ioa_cfg, error->last_res_path,
2295 buffer, sizeof(buffer)));
2296
2297 ipr_err_separator;
2298
2299 array_entry = error->array_member;
2300 num_entries = min_t(u32, error->num_entries,
2301 ARRAY_SIZE(error->array_member));
2302
2303 for (i = 0; i < num_entries; i++, array_entry++) {
2304
2305 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2306 continue;
2307
2308 if (error->exposed_mode_adn == i)
2309 ipr_err("Exposed Array Member %d:\n", i);
2310 else
2311 ipr_err("Array Member %d:\n", i);
2312
2313 ipr_err("Array Member %d:\n", i);
2314 ipr_log_ext_vpd(&array_entry->vpd);
2315 ipr_err("Current Location: %s\n",
2316 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2317 buffer, sizeof(buffer)));
2318 ipr_err("Expected Location: %s\n",
2319 ipr_format_res_path(ioa_cfg,
2320 array_entry->expected_res_path,
2321 buffer, sizeof(buffer)));
2322
2323 ipr_err_separator;
2324 }
2325}
2326
2327/**
2328 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2329 * @ioa_cfg: ioa config struct
2330 * @hostrcb: hostrcb struct
2331 *
2332 * Return value:
2333 * none
2334 **/
2335static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2336 struct ipr_hostrcb *hostrcb)
2337{
2338 struct ipr_hostrcb_type_30_error *error;
2339 struct ipr_hostrcb64_fabric_desc *fabric;
2340 struct ipr_hostrcb64_config_element *cfg;
2341 int i, add_len;
2342
2343 error = &hostrcb->hcam.u.error64.u.type_30_error;
2344
2345 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2346 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2347
2348 add_len = be32_to_cpu(hostrcb->hcam.length) -
2349 (offsetof(struct ipr_hostrcb64_error, u) +
2350 offsetof(struct ipr_hostrcb_type_30_error, desc));
2351
2352 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2353 ipr_log64_fabric_path(hostrcb, fabric);
2354 for_each_fabric_cfg(fabric, cfg)
2355 ipr_log64_path_elem(hostrcb, cfg);
2356
2357 add_len -= be16_to_cpu(fabric->length);
2358 fabric = (struct ipr_hostrcb64_fabric_desc *)
2359 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2360 }
2361
2362 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2363}
2364
2365/**
2366 * ipr_log_generic_error - Log an adapter error.
2367 * @ioa_cfg: ioa config struct
2368 * @hostrcb: hostrcb struct
2369 *
2370 * Return value:
2371 * none
2372 **/
2373static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2374 struct ipr_hostrcb *hostrcb)
2375{
2376 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2377 be32_to_cpu(hostrcb->hcam.length));
2378}
2379
2380/**
2381 * ipr_log_sis64_device_error - Log a sis64 device error.
2382 * @ioa_cfg: ioa config struct
2383 * @hostrcb: hostrcb struct
2384 *
2385 * Return value:
2386 * none
2387 **/
2388static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2389 struct ipr_hostrcb *hostrcb)
2390{
2391 struct ipr_hostrcb_type_21_error *error;
2392 char buffer[IPR_MAX_RES_PATH_LENGTH];
2393
2394 error = &hostrcb->hcam.u.error64.u.type_21_error;
2395
2396 ipr_err("-----Failing Device Information-----\n");
2397 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2398 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2399 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2400 ipr_err("Device Resource Path: %s\n",
2401 __ipr_format_res_path(error->res_path,
2402 buffer, sizeof(buffer)));
2403 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2404 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2405 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2406 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2407 ipr_err("SCSI Sense Data:\n");
2408 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2409 ipr_err("SCSI Command Descriptor Block: \n");
2410 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2411
2412 ipr_err("Additional IOA Data:\n");
2413 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2414}
2415
2416/**
2417 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2418 * @ioasc: IOASC
2419 *
2420 * This function will return the index into the ipr_error_table
2421 * for the specified IOASC. If the IOASC is not in the table,
2422 * 0 will be returned, which points to the entry used for unknown errors.
2423 *
2424 * Return value:
2425 * index into the ipr_error_table
2426 **/
2427static u32 ipr_get_error(u32 ioasc)
2428{
2429 int i;
2430
2431 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2432 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2433 return i;
2434
2435 return 0;
2436}
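/*
 * Usage sketch (illustrative only): the lookup masks off the low-order
 * qualifier bits via IPR_IOASC_IOASC_MASK, so related IOASCs resolve
 * to one table entry, and index 0 is the catch-all "unknown" entry.
 * The IOASC value below is invented for the example.
 */
static void __maybe_unused ipr_example_error_lookup(void)
{
	u32 index = ipr_get_error(0x01080000);	/* hypothetical IOASC */

	if (ipr_error_table[index].log_hcam)
		ipr_err("%s\n", ipr_error_table[index].error);
}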
2437
2438/**
2439 * ipr_handle_log_data - Log an adapter error.
2440 * @ioa_cfg: ioa config struct
2441 * @hostrcb: hostrcb struct
2442 *
2443 * This function logs an adapter error to the system.
2444 *
2445 * Return value:
2446 * none
2447 **/
2448static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2449 struct ipr_hostrcb *hostrcb)
2450{
2451 u32 ioasc;
2452 int error_index;
2453 struct ipr_hostrcb_type_21_error *error;
2454
2455 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2456 return;
2457
2458 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2459 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2460
2461 if (ioa_cfg->sis64)
2462 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2463 else
2464 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2465
2466 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2467 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2468 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2469 scsi_report_bus_reset(ioa_cfg->host,
2470 hostrcb->hcam.u.error.fd_res_addr.bus);
2471 }
2472
2473 error_index = ipr_get_error(ioasc);
2474
2475 if (!ipr_error_table[error_index].log_hcam)
2476 return;
2477
2478 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2479 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2480 error = &hostrcb->hcam.u.error64.u.type_21_error;
2481
2482 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2483 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2484 return;
2485 }
2486
2487 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2488
2489 /* Set indication we have logged an error */
2490 ioa_cfg->errors_logged++;
2491
2492 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2493 return;
2494 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2495 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2496
2497 switch (hostrcb->hcam.overlay_id) {
2498 case IPR_HOST_RCB_OVERLAY_ID_2:
2499 ipr_log_cache_error(ioa_cfg, hostrcb);
2500 break;
2501 case IPR_HOST_RCB_OVERLAY_ID_3:
2502 ipr_log_config_error(ioa_cfg, hostrcb);
2503 break;
2504 case IPR_HOST_RCB_OVERLAY_ID_4:
2505 case IPR_HOST_RCB_OVERLAY_ID_6:
2506 ipr_log_array_error(ioa_cfg, hostrcb);
2507 break;
2508 case IPR_HOST_RCB_OVERLAY_ID_7:
2509 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2510 break;
2511 case IPR_HOST_RCB_OVERLAY_ID_12:
2512 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2513 break;
2514 case IPR_HOST_RCB_OVERLAY_ID_13:
2515 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2516 break;
2517 case IPR_HOST_RCB_OVERLAY_ID_14:
2518 case IPR_HOST_RCB_OVERLAY_ID_16:
2519 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2520 break;
2521 case IPR_HOST_RCB_OVERLAY_ID_17:
2522 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2523 break;
2524 case IPR_HOST_RCB_OVERLAY_ID_20:
2525 ipr_log_fabric_error(ioa_cfg, hostrcb);
2526 break;
2527 case IPR_HOST_RCB_OVERLAY_ID_21:
2528 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2529 break;
2530 case IPR_HOST_RCB_OVERLAY_ID_23:
2531 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2532 break;
2533 case IPR_HOST_RCB_OVERLAY_ID_24:
2534 case IPR_HOST_RCB_OVERLAY_ID_26:
2535 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2536 break;
2537 case IPR_HOST_RCB_OVERLAY_ID_30:
2538 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2539 break;
2540 case IPR_HOST_RCB_OVERLAY_ID_1:
2541 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2542 default:
2543 ipr_log_generic_error(ioa_cfg, hostrcb);
2544 break;
2545 }
2546}
2547
2548/**
2549 * ipr_process_error - Op done function for an adapter error log.
2550 * @ipr_cmd: ipr command struct
2551 *
2552 * This function is the op done function for an error log host
2553 * controlled async from the adapter. It will log the error and
2554 * send the HCAM back to the adapter.
2555 *
2556 * Return value:
2557 * none
2558 **/
2559static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2560{
2561 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2562 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2563 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2564 u32 fd_ioasc;
2565
2566 if (ioa_cfg->sis64)
2567 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2568 else
2569 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2570
2571 list_del(&hostrcb->queue);
2572 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2573
2574 if (!ioasc) {
2575 ipr_handle_log_data(ioa_cfg, hostrcb);
2576 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2577 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2578 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2579 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2580 dev_err(&ioa_cfg->pdev->dev,
2581 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2582 }
2583
2584 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2585}
2586
2587/**
2588 * ipr_timeout - An internally generated op has timed out.
2589 * @ipr_cmd: ipr command struct
2590 *
2591 * This function blocks host requests and initiates an
2592 * adapter reset.
2593 *
2594 * Return value:
2595 * none
2596 **/
2597static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2598{
2599 unsigned long lock_flags = 0;
2600 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2601
2602 ENTER;
2603 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2604
2605 ioa_cfg->errors_logged++;
2606 dev_err(&ioa_cfg->pdev->dev,
2607 "Adapter being reset due to command timeout.\n");
2608
2609 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2610 ioa_cfg->sdt_state = GET_DUMP;
2611
2612 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2613 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2614
2615 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2616 LEAVE;
2617}
2618
2619/**
2620 * ipr_oper_timeout - Adapter timed out transitioning to operational
2621 * @ipr_cmd: ipr command struct
2622 *
2623 * This function blocks host requests and initiates an
2624 * adapter reset.
2625 *
2626 * Return value:
2627 * none
2628 **/
2629static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2630{
2631 unsigned long lock_flags = 0;
2632 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2633
2634 ENTER;
2635 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2636
2637 ioa_cfg->errors_logged++;
2638 dev_err(&ioa_cfg->pdev->dev,
2639 "Adapter timed out transitioning to operational.\n");
2640
2641 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2642 ioa_cfg->sdt_state = GET_DUMP;
2643
2644 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2645 if (ipr_fastfail)
2646 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2647 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2648 }
2649
2650 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2651 LEAVE;
2652}
2653
2654/**
2655 * ipr_find_ses_entry - Find matching SES in SES table
2656 * @res: resource entry struct of SES
2657 *
2658 * Return value:
2659 * pointer to SES table entry / NULL on failure
2660 **/
2661static const struct ipr_ses_table_entry *
2662ipr_find_ses_entry(struct ipr_resource_entry *res)
2663{
2664 int i, j, matches;
2665 struct ipr_std_inq_vpids *vpids;
2666 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2667
2668 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2669 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2670 if (ste->compare_product_id_byte[j] == 'X') {
2671 vpids = &res->std_inq_data.vpids;
2672 if (vpids->product_id[j] == ste->product_id[j])
2673 matches++;
2674 else
2675 break;
2676 } else
2677 matches++;
2678 }
2679
2680 if (matches == IPR_PROD_ID_LEN)
2681 return ste;
2682 }
2683
2684 return NULL;
2685}
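/*
 * Standalone sketch of the compare used above: a literal 'X' in
 * compare_product_id_byte[] selects positions that must match the
 * inquiry data; every other position is a don't-care. This helper is
 * illustrative only, not part of the driver.
 */
static bool __maybe_unused ipr_example_masked_match(const char *mask,
						    const char *id,
						    const char *inq)
{
	int j;

	for (j = 0; j < IPR_PROD_ID_LEN; j++)
		if (mask[j] == 'X' && id[j] != inq[j])
			return false;
	return true;
}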
2686
2687/**
2688 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2689 * @ioa_cfg: ioa config struct
2690 * @bus: SCSI bus
2691 * @bus_width: bus width
2692 *
2693 * Return value:
2694 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2695 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2696 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2697 * max 160MHz = max 320MB/sec).
2698 **/
2699static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2700{
2701 struct ipr_resource_entry *res;
2702 const struct ipr_ses_table_entry *ste;
2703 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2704
2705 /* Loop through each config table entry in the config table buffer */
2706 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2707 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2708 continue;
2709
2710 if (bus != res->bus)
2711 continue;
2712
2713 if (!(ste = ipr_find_ses_entry(res)))
2714 continue;
2715
2716 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2717 }
2718
2719 return max_xfer_rate;
2720}
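/*
 * Worked example for the math above: a table limit of 320 on a 16-bit
 * wide bus gives (320 * 10) / (16 / 8) = 1600, i.e. 160 MHz in the
 * 100 KHz units described in the function header, matching the
 * wide-bus 320 MB/sec case noted above.
 */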
2721
2722/**
2723 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2724 * @ioa_cfg: ioa config struct
2725 * @max_delay: max delay in micro-seconds to wait
2726 *
2727 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2728 *
2729 * Return value:
2730 * 0 on success / other on failure
2731 **/
2732static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2733{
2734 volatile u32 pcii_reg;
2735 int delay = 1;
2736
2737 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2738 while (delay < max_delay) {
2739 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2740
2741 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2742 return 0;
2743
2744 /* udelay cannot be used if delay is more than a few milliseconds */
2745 if ((delay / 1000) > MAX_UDELAY_MS)
2746 mdelay(delay / 1000);
2747 else
2748 udelay(delay);
2749
2750 delay += delay;
2751 }
2752 return -EIO;
2753}
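/*
 * The poll interval above doubles each pass (delay += delay), so the
 * waits run 1, 2, 4, ... microseconds and the total busy-wait time is
 * bounded by roughly twice max_delay before -EIO is returned.
 */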
2754
2755/**
2756 * ipr_get_sis64_dump_data_section - Dump IOA memory
2757 * @ioa_cfg: ioa config struct
2758 * @start_addr: adapter address to dump
2759 * @dest: destination kernel buffer
2760 * @length_in_words: length to dump in 4 byte words
2761 *
2762 * Return value:
2763 * 0 on success
2764 **/
2765static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2766 u32 start_addr,
2767 __be32 *dest, u32 length_in_words)
2768{
2769 int i;
2770
2771 for (i = 0; i < length_in_words; i++) {
2772 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2773 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2774 dest++;
2775 }
2776
2777 return 0;
2778}
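/*
 * Access pattern above: each 32-bit word is fetched by writing its
 * adapter address to dump_addr_reg and reading the value back through
 * dump_data_reg, i.e. a simple address/data register window rather
 * than a mapped buffer.
 */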
2779
2780/**
2781 * ipr_get_ldump_data_section - Dump IOA memory
2782 * @ioa_cfg: ioa config struct
2783 * @start_addr: adapter address to dump
2784 * @dest: destination kernel buffer
2785 * @length_in_words: length to dump in 4 byte words
2786 *
2787 * Return value:
2788 * 0 on success / -EIO on failure
2789 **/
2790static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2791 u32 start_addr,
2792 __be32 *dest, u32 length_in_words)
2793{
2794 volatile u32 temp_pcii_reg;
2795 int i, delay = 0;
2796
2797 if (ioa_cfg->sis64)
2798 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2799 dest, length_in_words);
2800
2801 /* Write IOA interrupt reg starting LDUMP state */
2802 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2803 ioa_cfg->regs.set_uproc_interrupt_reg32);
2804
2805 /* Wait for IO debug acknowledge */
2806 if (ipr_wait_iodbg_ack(ioa_cfg,
2807 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2808 dev_err(&ioa_cfg->pdev->dev,
2809 "IOA dump long data transfer timeout\n");
2810 return -EIO;
2811 }
2812
2813 /* Signal LDUMP interlocked - clear IO debug ack */
2814 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2815 ioa_cfg->regs.clr_interrupt_reg);
2816
2817 /* Write Mailbox with starting address */
2818 writel(start_addr, ioa_cfg->ioa_mailbox);
2819
2820 /* Signal address valid - clear IOA Reset alert */
2821 writel(IPR_UPROCI_RESET_ALERT,
2822 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2823
2824 for (i = 0; i < length_in_words; i++) {
2825 /* Wait for IO debug acknowledge */
2826 if (ipr_wait_iodbg_ack(ioa_cfg,
2827 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2828 dev_err(&ioa_cfg->pdev->dev,
2829 "IOA dump short data transfer timeout\n");
2830 return -EIO;
2831 }
2832
2833 /* Read data from mailbox and increment destination pointer */
2834 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2835 dest++;
2836
2837 /* For all but the last word of data, signal data received */
2838 if (i < (length_in_words - 1)) {
2839 /* Signal dump data received - Clear IO debug Ack */
2840 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2841 ioa_cfg->regs.clr_interrupt_reg);
2842 }
2843 }
2844
2845 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2846 writel(IPR_UPROCI_RESET_ALERT,
2847 ioa_cfg->regs.set_uproc_interrupt_reg32);
2848
2849 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2850 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2851
2852 /* Signal dump data received - Clear IO debug Ack */
2853 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2854 ioa_cfg->regs.clr_interrupt_reg);
2855
2856 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2857 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2858 temp_pcii_reg =
2859 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2860
2861 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2862 return 0;
2863
2864 udelay(10);
2865 delay += 10;
2866 }
2867
2868 return 0;
2869}
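/*
 * Handshake summary for the fmt2 path above (simplified): raise reset
 * alert and IO debug alert, wait for the ack, write the start address
 * to the mailbox, then read one word per ack from the mailbox,
 * clearing the ack after every word but the last; finally re-raise
 * reset alert and poll for the IOA to clear it, marking LDUMP exit.
 */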
2870
2871#ifdef CONFIG_SCSI_IPR_DUMP
2872/**
2873 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2874 * @ioa_cfg: ioa config struct
2875 * @pci_address: adapter address
2876 * @length: length of data to copy
2877 *
2878 * Copy data from PCI adapter to kernel buffer.
2879 * Note: length MUST be a 4 byte multiple
2880 * Return value:
2881 * 0 on success / other on failure
2882 **/
2883static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2884 unsigned long pci_address, u32 length)
2885{
2886 int bytes_copied = 0;
2887 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2888 __be32 *page;
2889 unsigned long lock_flags = 0;
2890 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2891
2892 if (ioa_cfg->sis64)
2893 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2894 else
2895 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2896
2897 while (bytes_copied < length &&
2898 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2899 if (ioa_dump->page_offset >= PAGE_SIZE ||
2900 ioa_dump->page_offset == 0) {
2901 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2902
2903 if (!page) {
2904 ipr_trace;
2905 return bytes_copied;
2906 }
2907
2908 ioa_dump->page_offset = 0;
2909 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2910 ioa_dump->next_page_index++;
2911 } else
2912 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2913
2914 rem_len = length - bytes_copied;
2915 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2916 cur_len = min(rem_len, rem_page_len);
2917
2918 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2919 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2920 rc = -EIO;
2921 } else {
2922 rc = ipr_get_ldump_data_section(ioa_cfg,
2923 pci_address + bytes_copied,
2924 &page[ioa_dump->page_offset / 4],
2925 (cur_len / sizeof(u32)));
2926 }
2927 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2928
2929 if (!rc) {
2930 ioa_dump->page_offset += cur_len;
2931 bytes_copied += cur_len;
2932 } else {
2933 ipr_trace;
2934 break;
2935 }
2936 schedule();
2937 }
2938
2939 return bytes_copied;
2940}
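/*
 * Chunking sketch: each pass above copies min(bytes remaining, bytes
 * left in the current page), so a 10 KB region with 4 KB pages is
 * fetched as 4096 + 4096 + 2048 bytes into three freshly allocated
 * pages.
 */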
2941
2942/**
2943 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2944 * @hdr: dump entry header struct
2945 *
2946 * Return value:
2947 * nothing
2948 **/
2949static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2950{
2951 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2952 hdr->num_elems = 1;
2953 hdr->offset = sizeof(*hdr);
2954 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2955}
2956
2957/**
2958 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2959 * @ioa_cfg: ioa config struct
2960 * @driver_dump: driver dump struct
2961 *
2962 * Return value:
2963 * nothing
2964 **/
2965static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2966 struct ipr_driver_dump *driver_dump)
2967{
2968 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2969
2970 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2971 driver_dump->ioa_type_entry.hdr.len =
2972 sizeof(struct ipr_dump_ioa_type_entry) -
2973 sizeof(struct ipr_dump_entry_header);
2974 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2975 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2976 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2977 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2978 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2979 ucode_vpd->minor_release[1];
2980 driver_dump->hdr.num_entries++;
2981}
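/*
 * Packing sketch: fw_version above is assembled as
 *	(major << 24) | (card_type << 16) | (minor[0] << 8) | minor[1]
 * so major 0x02, card type 0x05 and minor bytes 0x01 0x04 encode to
 * 0x02050104.
 */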
2982
2983/**
2984 * ipr_dump_version_data - Fill in the driver version in the dump.
2985 * @ioa_cfg: ioa config struct
2986 * @driver_dump: driver dump struct
2987 *
2988 * Return value:
2989 * nothing
2990 **/
2991static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2992 struct ipr_driver_dump *driver_dump)
2993{
2994 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2995 driver_dump->version_entry.hdr.len =
2996 sizeof(struct ipr_dump_version_entry) -
2997 sizeof(struct ipr_dump_entry_header);
2998 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2999 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3000 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3001 driver_dump->hdr.num_entries++;
3002}
3003
3004/**
3005 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3006 * @ioa_cfg: ioa config struct
3007 * @driver_dump: driver dump struct
3008 *
3009 * Return value:
3010 * nothing
3011 **/
3012static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3013 struct ipr_driver_dump *driver_dump)
3014{
3015 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3016 driver_dump->trace_entry.hdr.len =
3017 sizeof(struct ipr_dump_trace_entry) -
3018 sizeof(struct ipr_dump_entry_header);
3019 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3020 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3021 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3022 driver_dump->hdr.num_entries++;
3023}
3024
3025/**
3026 * ipr_dump_location_data - Fill in the IOA location in the dump.
3027 * @ioa_cfg: ioa config struct
3028 * @driver_dump: driver dump struct
3029 *
3030 * Return value:
3031 * nothing
3032 **/
3033static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3034 struct ipr_driver_dump *driver_dump)
3035{
3036 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3037 driver_dump->location_entry.hdr.len =
3038 sizeof(struct ipr_dump_location_entry) -
3039 sizeof(struct ipr_dump_entry_header);
3040 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3041 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3042 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3043 driver_dump->hdr.num_entries++;
3044}
3045
3046/**
3047 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3048 * @ioa_cfg: ioa config struct
3049 * @dump: dump struct
3050 *
3051 * Return value:
3052 * nothing
3053 **/
3054static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3055{
3056 unsigned long start_addr, sdt_word;
3057 unsigned long lock_flags = 0;
3058 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3059 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3060 u32 num_entries, max_num_entries, start_off, end_off;
3061 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3062 struct ipr_sdt *sdt;
3063 int valid = 1;
3064 int i;
3065
3066 ENTER;
3067
3068 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3069
3070 if (ioa_cfg->sdt_state != READ_DUMP) {
3071 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3072 return;
3073 }
3074
3075 if (ioa_cfg->sis64) {
3076 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3077 ssleep(IPR_DUMP_DELAY_SECONDS);
3078 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3079 }
3080
3081 start_addr = readl(ioa_cfg->ioa_mailbox);
3082
3083 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3084 dev_err(&ioa_cfg->pdev->dev,
3085 "Invalid dump table format: %lx\n", start_addr);
3086 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3087 return;
3088 }
3089
3090 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3091
3092 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3093
3094 /* Initialize the overall dump header */
3095 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3096 driver_dump->hdr.num_entries = 1;
3097 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3098 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3099 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3100 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3101
3102 ipr_dump_version_data(ioa_cfg, driver_dump);
3103 ipr_dump_location_data(ioa_cfg, driver_dump);
3104 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3105 ipr_dump_trace_data(ioa_cfg, driver_dump);
3106
3107 /* Update dump_header */
3108 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3109
3110 /* IOA Dump entry */
3111 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3112 ioa_dump->hdr.len = 0;
3113 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3114 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3115
3116 /* First entries in sdt are actually a list of dump addresses and
3117 lengths to gather the real dump data. sdt represents the pointer
3118 to the ioa generated dump table. Dump data will be extracted based
3119 on entries in this table */
3120 sdt = &ioa_dump->sdt;
3121
3122 if (ioa_cfg->sis64) {
3123 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3124 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3125 } else {
3126 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3127 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3128 }
3129
3130 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3131 (max_num_entries * sizeof(struct ipr_sdt_entry));
3132 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3133 bytes_to_copy / sizeof(__be32));
3134
3135 /* Smart Dump table is ready to use and the first entry is valid */
3136 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3137 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3138 dev_err(&ioa_cfg->pdev->dev,
3139 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3140 rc, be32_to_cpu(sdt->hdr.state));
3141 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3142 ioa_cfg->sdt_state = DUMP_OBTAINED;
3143 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3144 return;
3145 }
3146
3147 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3148
3149 if (num_entries > max_num_entries)
3150 num_entries = max_num_entries;
3151
3152 /* Update dump length to the actual data to be copied */
3153 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3154 if (ioa_cfg->sis64)
3155 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3156 else
3157 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3158
3159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3160
3161 for (i = 0; i < num_entries; i++) {
3162 if (ioa_dump->hdr.len > max_dump_size) {
3163 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3164 break;
3165 }
3166
3167 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3168 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3169 if (ioa_cfg->sis64)
3170 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3171 else {
3172 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3173 end_off = be32_to_cpu(sdt->entry[i].end_token);
3174
3175 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3176 bytes_to_copy = end_off - start_off;
3177 else
3178 valid = 0;
3179 }
3180 if (valid) {
3181 if (bytes_to_copy > max_dump_size) {
3182 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3183 continue;
3184 }
3185
3186 /* Copy data from adapter to driver buffers */
3187 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3188 bytes_to_copy);
3189
3190 ioa_dump->hdr.len += bytes_copied;
3191
3192 if (bytes_copied != bytes_to_copy) {
3193 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3194 break;
3195 }
3196 }
3197 }
3198 }
3199
3200 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3201
3202 /* Update dump_header */
3203 driver_dump->hdr.len += ioa_dump->hdr.len;
3204 wmb();
3205 ioa_cfg->sdt_state = DUMP_OBTAINED;
3206 LEAVE;
3207}
3208
3209#else
203fa3fe 3210#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3211#endif
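/*
 * Editor's sketch (illustrative, not part of the driver): how the SDT
 * clamping and dump-length accounting above behave. The struct and
 * names below are stand-ins for the real ipr definitions.
 */
#include <stddef.h>

struct sketch_sdt_entry { unsigned int start_token, end_token; };

static size_t sketch_dump_len(int sis64, size_t num_entries_used,
			      size_t max_num_entries, size_t hdr_len)
{
	size_t num = num_entries_used > max_num_entries ?
		     max_num_entries : num_entries_used;

	/* sis64 accounts only for the entries actually used; the older
	 * format always reserves room for the full table. */
	return hdr_len + (sis64 ? num : max_num_entries) *
			 sizeof(struct sketch_sdt_entry);
}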
3212
3213/**
3214 * ipr_release_dump - Free adapter dump memory
3215 * @kref: kref struct
3216 *
3217 * Return value:
3218 * nothing
3219 **/
3220static void ipr_release_dump(struct kref *kref)
3221{
203fa3fe 3222 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3223 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3224 unsigned long lock_flags = 0;
3225 int i;
3226
3227 ENTER;
3228 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3229 ioa_cfg->dump = NULL;
3230 ioa_cfg->sdt_state = INACTIVE;
3231 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3232
3233 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3234 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3235
4d4dd706 3236 vfree(dump->ioa_dump.ioa_data);
1da177e4
LT
3237 kfree(dump);
3238 LEAVE;
3239}
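/*
 * Editor's sketch (illustrative, not part of the driver): the kref
 * pattern ipr_release_dump participates in. A reader takes a reference
 * under the host lock, drops the lock while using the dump, and the
 * final kref_put() runs the release callback exactly once.
 */
#include <linux/kref.h>
#include <linux/slab.h>

struct sketch_dump { struct kref kref; /* ... dump payload ... */ };

static void sketch_release(struct kref *kref)
{
	kfree(container_of(kref, struct sketch_dump, kref));
}

/* usage: kref_get(&d->kref); ...work unlocked...; kref_put(&d->kref, sketch_release); */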
3240
3241/**
3242 * ipr_worker_thread - Worker thread
c4028958 3243 * @work: work struct (embedded in the ioa config struct)
3244 *
3245 * Called at task level from a work thread. This function takes care
3246 * of adding and removing devices from the mid-layer as configuration
3247 * changes are detected by the adapter.
3248 *
3249 * Return value:
3250 * nothing
3251 **/
c4028958 3252static void ipr_worker_thread(struct work_struct *work)
3253{
3254 unsigned long lock_flags;
3255 struct ipr_resource_entry *res;
3256 struct scsi_device *sdev;
3257 struct ipr_dump *dump;
3258 struct ipr_ioa_cfg *ioa_cfg =
3259 container_of(work, struct ipr_ioa_cfg, work_q);
3260 u8 bus, target, lun;
3261 int did_work;
3262
3263 ENTER;
3264 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3265
41e9a696 3266 if (ioa_cfg->sdt_state == READ_DUMP) {
3267 dump = ioa_cfg->dump;
3268 if (!dump) {
3269 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3270 return;
3271 }
3272 kref_get(&dump->kref);
3273 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3274 ipr_get_ioa_dump(ioa_cfg, dump);
3275 kref_put(&dump->kref, ipr_release_dump);
3276
3277 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4c647e90 3278 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3279 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3280 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3281 return;
3282 }
3283
3284restart:
3285 do {
3286 did_work = 0;
f688f96d 3287 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3289 return;
3290 }
3291
3292 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3293 if (res->del_from_ml && res->sdev) {
3294 did_work = 1;
3295 sdev = res->sdev;
3296 if (!scsi_device_get(sdev)) {
3297 if (!res->add_to_ml)
3298 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3299 else
3300 res->del_from_ml = 0;
3301 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3302 scsi_remove_device(sdev);
3303 scsi_device_put(sdev);
3304 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3305 }
3306 break;
3307 }
3308 }
203fa3fe 3309 } while (did_work);
3310
3311 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3312 if (res->add_to_ml) {
3313 bus = res->bus;
3314 target = res->target;
3315 lun = res->lun;
1121b794 3316 res->add_to_ml = 0;
3317 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3318 scsi_add_device(ioa_cfg->host, bus, target, lun);
3319 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3320 goto restart;
3321 }
3322 }
3323
f688f96d 3324 ioa_cfg->scan_done = 1;
1da177e4 3325 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ee959b00 3326 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3327 LEAVE;
3328}
3329
3330#ifdef CONFIG_SCSI_IPR_TRACE
3331/**
3332 * ipr_read_trace - Dump the adapter trace
2c3c8bea 3333 * @filp: open sysfs file
1da177e4 3334 * @kobj: kobject struct
91a69029 3335 * @bin_attr: bin_attribute struct
3336 * @buf: buffer
3337 * @off: offset
3338 * @count: buffer size
3339 *
3340 * Return value:
3341 * number of bytes printed to buffer
3342 **/
2c3c8bea 3343static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3344 struct bin_attribute *bin_attr,
3345 char *buf, loff_t off, size_t count)
1da177e4 3346{
3347 struct device *dev = container_of(kobj, struct device, kobj);
3348 struct Scsi_Host *shost = class_to_shost(dev);
3349 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3350 unsigned long lock_flags = 0;
d777aaf3 3351 ssize_t ret;
3352
3353 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3354 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3355 IPR_TRACE_SIZE);
1da177e4 3356 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3357
3358 return ret;
3359}
3360
3361static struct bin_attribute ipr_trace_attr = {
3362 .attr = {
3363 .name = "trace",
3364 .mode = S_IRUGO,
3365 },
3366 .size = 0,
3367 .read = ipr_read_trace,
3368};
3369#endif
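/*
 * Editor's sketch (illustrative, not part of the driver): what
 * memory_read_from_buffer() does for ipr_read_trace above, written
 * out long-hand for a plain byte buffer.
 */
#include <errno.h>
#include <string.h>

static long sketch_read_from_buffer(void *to, size_t count, long *ppos,
				    const void *from, size_t available)
{
	long pos = *ppos;

	if (pos < 0)
		return -EINVAL;
	if ((size_t)pos >= available || !count)
		return 0;			/* at or past EOF */
	if (count > available - pos)
		count = available - pos;	/* clamp to what remains */
	memcpy(to, (const char *)from + pos, count);
	*ppos = pos + count;
	return (long)count;
}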
3370
3371/**
3372 * ipr_show_fw_version - Show the firmware version
3373 * @dev: class device struct
3374 * @buf: buffer
3375 *
3376 * Return value:
3377 * number of bytes printed to buffer
3378 **/
ee959b00
TJ
3379static ssize_t ipr_show_fw_version(struct device *dev,
3380 struct device_attribute *attr, char *buf)
1da177e4 3381{
ee959b00 3382 struct Scsi_Host *shost = class_to_shost(dev);
3383 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3384 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3385 unsigned long lock_flags = 0;
3386 int len;
3387
3388 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3389 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3390 ucode_vpd->major_release, ucode_vpd->card_type,
3391 ucode_vpd->minor_release[0],
3392 ucode_vpd->minor_release[1]);
3393 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3394 return len;
3395}
3396
ee959b00 3397static struct device_attribute ipr_fw_version_attr = {
3398 .attr = {
3399 .name = "fw_version",
3400 .mode = S_IRUGO,
3401 },
3402 .show = ipr_show_fw_version,
3403};
3404
3405/**
3406 * ipr_show_log_level - Show the adapter's error logging level
3407 * @dev: class device struct
3408 * @buf: buffer
3409 *
3410 * Return value:
3411 * number of bytes printed to buffer
3412 **/
3413static ssize_t ipr_show_log_level(struct device *dev,
3414 struct device_attribute *attr, char *buf)
1da177e4 3415{
ee959b00 3416 struct Scsi_Host *shost = class_to_shost(dev);
3417 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3418 unsigned long lock_flags = 0;
3419 int len;
3420
3421 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3422 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3423 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3424 return len;
3425}
3426
3427/**
3428 * ipr_store_log_level - Change the adapter's error logging level
3429 * @dev: class device struct
3430 * @buf: buffer
3431 *
3432 * Return value:
3433 * number of bytes consumed on success
3434 **/
ee959b00 3435static ssize_t ipr_store_log_level(struct device *dev,
203fa3fe 3436 struct device_attribute *attr,
3437 const char *buf, size_t count)
3438{
ee959b00 3439 struct Scsi_Host *shost = class_to_shost(dev);
3440 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3441 unsigned long lock_flags = 0;
3442
3443 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3444 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3445 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3446 return strlen(buf);
3447}
3448
ee959b00 3449static struct device_attribute ipr_log_level_attr = {
3450 .attr = {
3451 .name = "log_level",
3452 .mode = S_IRUGO | S_IWUSR,
3453 },
3454 .show = ipr_show_log_level,
3455 .store = ipr_store_log_level
3456};
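/*
 * Editor's note (illustrative, not the driver's code): simple_strtoul()
 * in the store routine above silently ignores trailing garbage. A
 * stricter parse, sketched here, would reject malformed input with the
 * checking kstrtou32() helper instead.
 */
#include <linux/kernel.h>

static int sketch_parse_log_level(const char *buf, u32 *level)
{
	return kstrtou32(buf, 10, level);   /* -EINVAL/-ERANGE on bad input */
}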
3457
3458/**
3459 * ipr_store_diagnostics - IOA Diagnostics interface
3460 * @dev: device struct
3461 * @buf: buffer
3462 * @count: buffer size
3463 *
3464 * This function will reset the adapter and wait a reasonable
3465 * amount of time for any errors that the adapter might log.
3466 *
3467 * Return value:
3468 * count on success / other on failure
3469 **/
3470static ssize_t ipr_store_diagnostics(struct device *dev,
3471 struct device_attribute *attr,
3472 const char *buf, size_t count)
3473{
ee959b00 3474 struct Scsi_Host *shost = class_to_shost(dev);
3475 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3476 unsigned long lock_flags = 0;
3477 int rc = count;
3478
3479 if (!capable(CAP_SYS_ADMIN))
3480 return -EACCES;
3481
1da177e4 3482 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 3483 while (ioa_cfg->in_reset_reload) {
3484 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3485 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3486 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3487 }
3488
3489 ioa_cfg->errors_logged = 0;
3490 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3491
3492 if (ioa_cfg->in_reset_reload) {
3493 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3494 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3495
3496 /* Wait for a second for any errors to be logged */
3497 msleep(1000);
3498 } else {
3499 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3500 return -EIO;
3501 }
3502
3503 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3504 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3505 rc = -EIO;
3506 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3507
3508 return rc;
3509}
3510
ee959b00 3511static struct device_attribute ipr_diagnostics_attr = {
3512 .attr = {
3513 .name = "run_diagnostics",
3514 .mode = S_IWUSR,
3515 },
3516 .store = ipr_store_diagnostics
3517};
3518
f37eb54b 3519/**
3520 * ipr_show_adapter_state - Show the adapter's state
3521 * @dev: device struct
3522 * @buf: buffer
f37eb54b 3523 *
3524 * Return value:
3525 * number of bytes printed to buffer
3526 **/
3527static ssize_t ipr_show_adapter_state(struct device *dev,
3528 struct device_attribute *attr, char *buf)
f37eb54b 3529{
ee959b00 3530 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b 3531 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3532 unsigned long lock_flags = 0;
3533 int len;
3534
3535 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
56d6aa33 3536 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
f37eb54b 3537 len = snprintf(buf, PAGE_SIZE, "offline\n");
3538 else
3539 len = snprintf(buf, PAGE_SIZE, "online\n");
3540 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3541 return len;
3542}
3543
3544/**
3545 * ipr_store_adapter_state - Change adapter state
3546 * @dev: device struct
3547 * @buf: buffer
3548 * @count: buffer size
f37eb54b 3549 *
3550 * This function will change the adapter's state.
3551 *
3552 * Return value:
3553 * count on success / other on failure
3554 **/
3555static ssize_t ipr_store_adapter_state(struct device *dev,
3556 struct device_attribute *attr,
f37eb54b 3557 const char *buf, size_t count)
3558{
ee959b00 3559 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b 3560 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3561 unsigned long lock_flags;
56d6aa33 3562 int result = count, i;
f37eb54b 3563
3564 if (!capable(CAP_SYS_ADMIN))
3565 return -EACCES;
3566
3567 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
56d6aa33 3568 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3569 !strncmp(buf, "online", 6)) {
3570 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3571 spin_lock(&ioa_cfg->hrrq[i]._lock);
3572 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3573 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3574 }
3575 wmb();
f37eb54b 3576 ioa_cfg->reset_retries = 0;
3577 ioa_cfg->in_ioa_bringdown = 0;
3578 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3579 }
3580 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3581 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3582
3583 return result;
3584}
3585
ee959b00 3586static struct device_attribute ipr_ioa_state_attr = {
f37eb54b 3587 .attr = {
49dd0961 3588 .name = "online_state",
f37eb54b 3589 .mode = S_IRUGO | S_IWUSR,
3590 },
3591 .show = ipr_show_adapter_state,
3592 .store = ipr_store_adapter_state
3593};
3594
3595/**
3596 * ipr_store_reset_adapter - Reset the adapter
3597 * @dev: device struct
3598 * @buf: buffer
3599 * @count: buffer size
3600 *
3601 * This function will reset the adapter.
3602 *
3603 * Return value:
3604 * count on success / other on failure
3605 **/
3606static ssize_t ipr_store_reset_adapter(struct device *dev,
3607 struct device_attribute *attr,
3608 const char *buf, size_t count)
3609{
ee959b00 3610 struct Scsi_Host *shost = class_to_shost(dev);
3611 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3612 unsigned long lock_flags;
3613 int result = count;
3614
3615 if (!capable(CAP_SYS_ADMIN))
3616 return -EACCES;
3617
3618 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3619 if (!ioa_cfg->in_reset_reload)
3620 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3621 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3622 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3623
3624 return result;
3625}
3626
ee959b00 3627static struct device_attribute ipr_ioa_reset_attr = {
3628 .attr = {
3629 .name = "reset_host",
3630 .mode = S_IWUSR,
3631 },
3632 .store = ipr_store_reset_adapter
3633};
3634
b53d124a 3635static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3636/**
3637 * ipr_show_iopoll_weight - Show ipr polling mode
3638 * @dev: class device struct
3639 * @buf: buffer
3640 *
3641 * Return value:
3642 * number of bytes printed to buffer
3643 **/
3644static ssize_t ipr_show_iopoll_weight(struct device *dev,
3645 struct device_attribute *attr, char *buf)
3646{
3647 struct Scsi_Host *shost = class_to_shost(dev);
3648 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3649 unsigned long lock_flags = 0;
3650 int len;
3651
3652 spin_lock_irqsave(shost->host_lock, lock_flags);
3653 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3654 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3655
3656 return len;
3657}
3658
3659/**
3660 * ipr_store_iopoll_weight - Change the adapter's polling mode
3661 * @dev: class device struct
3662 * @buf: buffer
3663 *
3664 * Return value:
3665 * number of bytes consumed on success / -EINVAL on failure
3666 **/
3667static ssize_t ipr_store_iopoll_weight(struct device *dev,
3668 struct device_attribute *attr,
3669 const char *buf, size_t count)
3670{
3671 struct Scsi_Host *shost = class_to_shost(dev);
3672 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3673 unsigned long user_iopoll_weight;
3674 unsigned long lock_flags = 0;
3675 int i;
3676
3677 if (!ioa_cfg->sis64) {
3678 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3679 return -EINVAL;
3680 }
3681 if (kstrtoul(buf, 10, &user_iopoll_weight))
3682 return -EINVAL;
3683
3684 if (user_iopoll_weight > 256) {
3685 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
3686 return -EINVAL;
3687 }
3688
3689 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3690 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll weight unchanged: that weight is already set\n");
3691 return strlen(buf);
3692 }
3693
89f8b33c 3694 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 3695 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3696 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3697 }
3698
3699 spin_lock_irqsave(shost->host_lock, lock_flags);
3700 ioa_cfg->iopoll_weight = user_iopoll_weight;
89f8b33c 3701 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 3702 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3703 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3704 ioa_cfg->iopoll_weight, ipr_iopoll);
3705 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3706 }
3707 }
3708 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3709
3710 return strlen(buf);
3711}
3712
3713static struct device_attribute ipr_iopoll_weight_attr = {
3714 .attr = {
3715 .name = "iopoll_weight",
3716 .mode = S_IRUGO | S_IWUSR,
3717 },
3718 .show = ipr_show_iopoll_weight,
3719 .store = ipr_store_iopoll_weight
3720};
3721
3722/**
3723 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3724 * @buf_len: buffer length
3725 *
3726 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3727 * list to use for microcode download
3728 *
3729 * Return value:
3730 * pointer to sglist / NULL on failure
3731 **/
3732static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3733{
3734 int sg_size, order, bsize_elem, num_elem, i, j;
3735 struct ipr_sglist *sglist;
3736 struct scatterlist *scatterlist;
3737 struct page *page;
3738
3739 /* Get the minimum size per scatter/gather element */
3740 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3741
3742 /* Get the actual size per element */
3743 order = get_order(sg_size);
3744
3745 /* Determine the actual number of bytes per element */
3746 bsize_elem = PAGE_SIZE * (1 << order);
3747
3748 /* Determine the actual number of sg entries needed */
3749 if (buf_len % bsize_elem)
3750 num_elem = (buf_len / bsize_elem) + 1;
3751 else
3752 num_elem = buf_len / bsize_elem;
3753
3754 /* Allocate a scatter/gather list for the DMA */
0bc42e35 3755 sglist = kzalloc(sizeof(struct ipr_sglist) +
3756 (sizeof(struct scatterlist) * (num_elem - 1)),
3757 GFP_KERNEL);
3758
3759 if (sglist == NULL) {
3760 ipr_trace;
3761 return NULL;
3762 }
3763
1da177e4 3764 scatterlist = sglist->scatterlist;
45711f1a 3765 sg_init_table(scatterlist, num_elem);
3766
3767 sglist->order = order;
3768 sglist->num_sg = num_elem;
3769
3770 /* Allocate a bunch of sg elements */
3771 for (i = 0; i < num_elem; i++) {
3772 page = alloc_pages(GFP_KERNEL, order);
3773 if (!page) {
3774 ipr_trace;
3775
3776 /* Free up what we already allocated */
3777 for (j = i - 1; j >= 0; j--)
45711f1a 3778 __free_pages(sg_page(&scatterlist[j]), order);
3779 kfree(sglist);
3780 return NULL;
3781 }
3782
642f1490 3783 sg_set_page(&scatterlist[i], page, 0, 0);
3784 }
3785
3786 return sglist;
3787}
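/*
 * Editor's worked example (illustrative): the sizing math above for an
 * assumed buf_len of 1 MiB with PAGE_SIZE = 4096 and IPR_MAX_SGLIST = 64:
 *   sg_size    = 1048576 / 63 = 16644     (minimum bytes per element)
 *   order      = get_order(16644) = 3     (4096 << 3 = 32768 >= 16644)
 *   bsize_elem = 4096 * (1 << 3) = 32768  (actual bytes per element)
 *   num_elem   = 1048576 / 32768 = 32     (no remainder, so no +1)
 */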
3788
3789/**
3790 * ipr_free_ucode_buffer - Frees a microcode download buffer
3791 * @p_dnld: scatter/gather list pointer
3792 *
3793 * Free a DMA'able ucode download buffer previously allocated with
3794 * ipr_alloc_ucode_buffer
3795 *
3796 * Return value:
3797 * nothing
3798 **/
3799static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3800{
3801 int i;
3802
3803 for (i = 0; i < sglist->num_sg; i++)
45711f1a 3804 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3805
3806 kfree(sglist);
3807}
3808
3809/**
3810 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3811 * @sglist: scatter/gather list pointer
3812 * @buffer: buffer pointer
3813 * @len: buffer length
3814 *
3815 * Copy a microcode image from a user buffer into a buffer allocated by
3816 * ipr_alloc_ucode_buffer
3817 *
3818 * Return value:
3819 * 0 on success / other on failure
3820 **/
3821static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3822 u8 *buffer, u32 len)
3823{
3824 int bsize_elem, i, result = 0;
3825 struct scatterlist *scatterlist;
3826 void *kaddr;
3827
3828 /* Determine the actual number of bytes per element */
3829 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3830
3831 scatterlist = sglist->scatterlist;
3832
3833 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3834 struct page *page = sg_page(&scatterlist[i]);
3835
3836 kaddr = kmap(page);
1da177e4 3837 memcpy(kaddr, buffer, bsize_elem);
45711f1a 3838 kunmap(page);
3839
3840 scatterlist[i].length = bsize_elem;
3841
3842 if (result != 0) {
3843 ipr_trace;
3844 return result;
3845 }
3846 }
3847
3848 if (len % bsize_elem) {
3849 struct page *page = sg_page(&scatterlist[i]);
3850
3851 kaddr = kmap(page);
1da177e4 3852 memcpy(kaddr, buffer, len % bsize_elem);
45711f1a 3853 kunmap(page);
3854
3855 scatterlist[i].length = len % bsize_elem;
3856 }
3857
3858 sglist->buffer_len = len;
3859 return result;
3860}
3861
3862/**
3863 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3864 * @ipr_cmd: ipr command struct
3865 * @sglist: scatter/gather list
3866 *
3867 * Builds a microcode download IOA data list (IOADL).
3868 *
3869 **/
3870static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3871 struct ipr_sglist *sglist)
3872{
3873 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3874 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3875 struct scatterlist *scatterlist = sglist->scatterlist;
3876 int i;
3877
3878 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3879 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3880 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3881
3882 ioarcb->ioadl_len =
3883 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3884 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3885 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3886 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3887 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3888 }
3889
3890 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3891}
3892
1da177e4 3893/**
12baa420 3894 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3895 * @ipr_cmd: ipr command struct
3896 * @sglist: scatter/gather list
1da177e4 3897 *
12baa420 3898 * Builds a microcode download IOA data list (IOADL).
1da177e4 3899 *
1da177e4 3900 **/
12baa420 3901static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3902 struct ipr_sglist *sglist)
1da177e4 3903{
1da177e4 3904 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 3905 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3906 struct scatterlist *scatterlist = sglist->scatterlist;
3907 int i;
3908
12baa420 3909 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 3910 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3911 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3912
3913 ioarcb->ioadl_len =
3914 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3915
3916 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3917 ioadl[i].flags_and_data_len =
3918 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3919 ioadl[i].address =
3920 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3921 }
3922
12baa420 3923 ioadl[i-1].flags_and_data_len |=
3924 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3925}
3926
3927/**
3928 * ipr_update_ioa_ucode - Update IOA's microcode
3929 * @ioa_cfg: ioa config struct
3930 * @sglist: scatter/gather list
3931 *
3932 * Initiate an adapter reset to update the IOA's microcode
3933 *
3934 * Return value:
3935 * 0 on success / -EIO on failure
3936 **/
3937static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3938 struct ipr_sglist *sglist)
3939{
3940 unsigned long lock_flags;
3941
3942 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 3943 while (ioa_cfg->in_reset_reload) {
3944 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3945 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3946 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3947 }
12baa420 3948
3949 if (ioa_cfg->ucode_sglist) {
3950 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3951 dev_err(&ioa_cfg->pdev->dev,
3952 "Microcode download already in progress\n");
3953 return -EIO;
1da177e4 3954 }
12baa420 3955
3956 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3957 sglist->scatterlist, sglist->num_sg,
3958 DMA_TO_DEVICE);
12baa420 3959
3960 if (!sglist->num_dma_sg) {
3961 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3962 dev_err(&ioa_cfg->pdev->dev,
3963 "Failed to map microcode download buffer!\n");
3964 return -EIO;
3965 }
3966
12baa420 3967 ioa_cfg->ucode_sglist = sglist;
3968 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3969 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3970 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3971
3972 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3973 ioa_cfg->ucode_sglist = NULL;
3974 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3975 return 0;
3976}
3977
3978/**
3979 * ipr_store_update_fw - Update the firmware on the adapter
3980 * @dev: device struct
3981 * @buf: buffer
3982 * @count: buffer size
3983 *
3984 * This function will update the firmware on the adapter.
3985 *
3986 * Return value:
3987 * count on success / other on failure
3988 **/
3989static ssize_t ipr_store_update_fw(struct device *dev,
3990 struct device_attribute *attr,
3991 const char *buf, size_t count)
1da177e4 3992{
ee959b00 3993 struct Scsi_Host *shost = class_to_shost(dev);
3994 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3995 struct ipr_ucode_image_header *image_hdr;
3996 const struct firmware *fw_entry;
3997 struct ipr_sglist *sglist;
3998 char fname[100];
3999 char *src;
4000 int len, result, dnld_size;
4001
4002 if (!capable(CAP_SYS_ADMIN))
4003 return -EACCES;
4004
4005 len = snprintf(fname, 99, "%s", buf);
4006 fname[len-1] = '\0';
4007
203fa3fe 4008 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4009 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4010 return -EIO;
4011 }
4012
4013 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4014
4015 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4016 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4017 sglist = ipr_alloc_ucode_buffer(dnld_size);
4018
4019 if (!sglist) {
4020 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4021 release_firmware(fw_entry);
4022 return -ENOMEM;
4023 }
4024
4025 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4026
4027 if (result) {
4028 dev_err(&ioa_cfg->pdev->dev,
4029 "Microcode buffer copy to DMA buffer failed\n");
12baa420 4030 goto out;
4031 }
4032
4033 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4034
12baa420 4035 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 4036
12baa420 4037 if (!result)
4038 result = count;
4039out:
4040 ipr_free_ucode_buffer(sglist);
4041 release_firmware(fw_entry);
12baa420 4042 return result;
4043}
4044
ee959b00 4045static struct device_attribute ipr_update_fw_attr = {
4046 .attr = {
4047 .name = "update_fw",
4048 .mode = S_IWUSR,
4049 },
4050 .store = ipr_store_update_fw
4051};
4052
4053/**
4054 * ipr_show_fw_type - Show the adapter's firmware type.
4055 * @dev: class device struct
4056 * @buf: buffer
4057 *
4058 * Return value:
4059 * number of bytes printed to buffer
4060 **/
4061static ssize_t ipr_show_fw_type(struct device *dev,
4062 struct device_attribute *attr, char *buf)
4063{
4064 struct Scsi_Host *shost = class_to_shost(dev);
4065 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4066 unsigned long lock_flags = 0;
4067 int len;
4068
4069 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4070 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4071 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4072 return len;
4073}
4074
4075static struct device_attribute ipr_ioa_fw_type_attr = {
4076 .attr = {
4077 .name = "fw_type",
4078 .mode = S_IRUGO,
4079 },
4080 .show = ipr_show_fw_type
4081};
4082
ee959b00 4083static struct device_attribute *ipr_ioa_attrs[] = {
4084 &ipr_fw_version_attr,
4085 &ipr_log_level_attr,
4086 &ipr_diagnostics_attr,
f37eb54b 4087 &ipr_ioa_state_attr,
1da177e4
LT
4088 &ipr_ioa_reset_attr,
4089 &ipr_update_fw_attr,
75576bb9 4090 &ipr_ioa_fw_type_attr,
b53d124a 4091 &ipr_iopoll_weight_attr,
4092 NULL,
4093};
4094
4095#ifdef CONFIG_SCSI_IPR_DUMP
4096/**
4097 * ipr_read_dump - Dump the adapter
2c3c8bea 4098 * @filp: open sysfs file
1da177e4 4099 * @kobj: kobject struct
91a69029 4100 * @bin_attr: bin_attribute struct
1da177e4
LT
4101 * @buf: buffer
4102 * @off: offset
4103 * @count: buffer size
4104 *
4105 * Return value:
4106 * number of bytes printed to buffer
4107 **/
2c3c8bea 4108static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4109 struct bin_attribute *bin_attr,
4110 char *buf, loff_t off, size_t count)
1da177e4 4111{
ee959b00 4112 struct device *cdev = container_of(kobj, struct device, kobj);
4113 struct Scsi_Host *shost = class_to_shost(cdev);
4114 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4115 struct ipr_dump *dump;
4116 unsigned long lock_flags = 0;
4117 char *src;
4d4dd706 4118 int len, sdt_end;
4119 size_t rc = count;
4120
4121 if (!capable(CAP_SYS_ADMIN))
4122 return -EACCES;
4123
4124 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4125 dump = ioa_cfg->dump;
4126
4127 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4128 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4129 return 0;
4130 }
4131 kref_get(&dump->kref);
4132 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4133
4134 if (off > dump->driver_dump.hdr.len) {
4135 kref_put(&dump->kref, ipr_release_dump);
4136 return 0;
4137 }
4138
4139 if (off + count > dump->driver_dump.hdr.len) {
4140 count = dump->driver_dump.hdr.len - off;
4141 rc = count;
4142 }
4143
4144 if (count && off < sizeof(dump->driver_dump)) {
4145 if (off + count > sizeof(dump->driver_dump))
4146 len = sizeof(dump->driver_dump) - off;
4147 else
4148 len = count;
4149 src = (u8 *)&dump->driver_dump + off;
4150 memcpy(buf, src, len);
4151 buf += len;
4152 off += len;
4153 count -= len;
4154 }
4155
4156 off -= sizeof(dump->driver_dump);
4157
4158 if (ioa_cfg->sis64)
4159 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4160 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4161 sizeof(struct ipr_sdt_entry));
4162 else
4163 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4164 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4165
4166 if (count && off < sdt_end) {
4167 if (off + count > sdt_end)
4168 len = sdt_end - off;
4169 else
4170 len = count;
4171 src = (u8 *)&dump->ioa_dump + off;
4172 memcpy(buf, src, len);
4173 buf += len;
4174 off += len;
4175 count -= len;
4176 }
4177
4d4dd706 4178 off -= sdt_end;
4179
4180 while (count) {
4181 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4182 len = PAGE_ALIGN(off) - off;
4183 else
4184 len = count;
4185 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4186 src += off & ~PAGE_MASK;
4187 memcpy(buf, src, len);
4188 buf += len;
4189 off += len;
4190 count -= len;
4191 }
4192
4193 kref_put(&dump->kref, ipr_release_dump);
4194 return rc;
4195}
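/*
 * Editor's worked example (illustrative): ipr_read_dump() above walks
 * three back-to-back regions (driver dump header, SDT, paged IOA data)
 * with one (off, count) cursor. Assume sizeof(driver_dump) = 0x1000,
 * sdt_end = 0x2000, and a read with off = 0x1800, count = 0x1000:
 *   - header window: off >= 0x1000, nothing copied; off rebased to 0x800
 *   - SDT window: 0x1000 bytes copied from SDT offset 0x800; count = 0
 *   - page loop: count is exhausted, so it never executes
 */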
4196
4197/**
4198 * ipr_alloc_dump - Prepare for adapter dump
4199 * @ioa_cfg: ioa config struct
4200 *
4201 * Return value:
4202 * 0 on success / other on failure
4203 **/
4204static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4205{
4206 struct ipr_dump *dump;
4d4dd706 4207 __be32 **ioa_data;
4208 unsigned long lock_flags = 0;
4209
0bc42e35 4210 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4211
4212 if (!dump) {
4213 ipr_err("Dump memory allocation failed\n");
4214 return -ENOMEM;
4215 }
4216
4217 if (ioa_cfg->sis64)
4218 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4219 else
4220 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4221
4222 if (!ioa_data) {
4223 ipr_err("Dump memory allocation failed\n");
4224 kfree(dump);
4225 return -ENOMEM;
4226 }
4227
4228 dump->ioa_dump.ioa_data = ioa_data;
4229
4230 kref_init(&dump->kref);
4231 dump->ioa_cfg = ioa_cfg;
4232
4233 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4234
4235 if (INACTIVE != ioa_cfg->sdt_state) {
4236 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4d4dd706 4237 vfree(dump->ioa_dump.ioa_data);
4238 kfree(dump);
4239 return 0;
4240 }
4241
4242 ioa_cfg->dump = dump;
4243 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
56d6aa33 4244 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4245 ioa_cfg->dump_taken = 1;
4246 schedule_work(&ioa_cfg->work_q);
4247 }
4248 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4249
4250 return 0;
4251}
4252
4253/**
4254 * ipr_free_dump - Free adapter dump memory
4255 * @ioa_cfg: ioa config struct
4256 *
4257 * Return value:
4258 * 0 on success / other on failure
4259 **/
4260static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4261{
4262 struct ipr_dump *dump;
4263 unsigned long lock_flags = 0;
4264
4265 ENTER;
4266
4267 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4268 dump = ioa_cfg->dump;
4269 if (!dump) {
4270 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4271 return 0;
4272 }
4273
4274 ioa_cfg->dump = NULL;
4275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4276
4277 kref_put(&dump->kref, ipr_release_dump);
4278
4279 LEAVE;
4280 return 0;
4281}
4282
4283/**
4284 * ipr_write_dump - Setup dump state of adapter
2c3c8bea 4285 * @filp: open sysfs file
1da177e4 4286 * @kobj: kobject struct
91a69029 4287 * @bin_attr: bin_attribute struct
4288 * @buf: buffer
4289 * @off: offset
4290 * @count: buffer size
4291 *
4292 * Return value:
4293 * count on success / other on failure
4294 **/
2c3c8bea 4295static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4296 struct bin_attribute *bin_attr,
4297 char *buf, loff_t off, size_t count)
1da177e4 4298{
ee959b00 4299 struct device *cdev = container_of(kobj, struct device, kobj);
1da177e4
LT
4300 struct Scsi_Host *shost = class_to_shost(cdev);
4301 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4302 int rc;
4303
4304 if (!capable(CAP_SYS_ADMIN))
4305 return -EACCES;
4306
4307 if (buf[0] == '1')
4308 rc = ipr_alloc_dump(ioa_cfg);
4309 else if (buf[0] == '0')
4310 rc = ipr_free_dump(ioa_cfg);
4311 else
4312 return -EINVAL;
4313
4314 if (rc)
4315 return rc;
4316 else
4317 return count;
4318}
4319
4320static struct bin_attribute ipr_dump_attr = {
4321 .attr = {
4322 .name = "dump",
4323 .mode = S_IRUSR | S_IWUSR,
4324 },
4325 .size = 0,
4326 .read = ipr_read_dump,
4327 .write = ipr_write_dump
4328};
4329#else
4330static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4331#endif
4332
4333/**
4334 * ipr_change_queue_depth - Change the device's queue depth
4335 * @sdev: scsi device struct
4336 * @qdepth: depth to set
4338 *
4339 * Return value:
4340 * actual depth set
4341 **/
db5ed4df 4342static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
1da177e4 4343{
4344 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4345 struct ipr_resource_entry *res;
4346 unsigned long lock_flags = 0;
4347
4348 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4349 res = (struct ipr_resource_entry *)sdev->hostdata;
4350
4351 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4352 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4353 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4354
db5ed4df 4355 scsi_change_queue_depth(sdev, qdepth);
4356 return sdev->queue_depth;
4357}
4358
4359/**
4360 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4361 * @dev: device struct
46d74563 4362 * @attr: device attribute structure
1da177e4
LT
4363 * @buf: buffer
4364 *
4365 * Return value:
4366 * number of bytes printed to buffer
4367 **/
10523b3b 4368static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4369{
4370 struct scsi_device *sdev = to_scsi_device(dev);
4371 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4372 struct ipr_resource_entry *res;
4373 unsigned long lock_flags = 0;
4374 ssize_t len = -ENXIO;
4375
4376 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4377 res = (struct ipr_resource_entry *)sdev->hostdata;
4378 if (res)
3e7ebdfa 4379 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4380 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4381 return len;
4382}
4383
4384static struct device_attribute ipr_adapter_handle_attr = {
4385 .attr = {
4386 .name = "adapter_handle",
4387 .mode = S_IRUSR,
4388 },
4389 .show = ipr_show_adapter_handle
4390};
4391
3e7ebdfa 4392/**
4393 * ipr_show_resource_path - Show the resource path or the resource address for
4394 * this device.
3e7ebdfa 4395 * @dev: device struct
46d74563 4396 * @attr: device attribute structure
4397 * @buf: buffer
4398 *
4399 * Return value:
4400 * number of bytes printed to buffer
4401 **/
4402static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4403{
4404 struct scsi_device *sdev = to_scsi_device(dev);
4405 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4406 struct ipr_resource_entry *res;
4407 unsigned long lock_flags = 0;
4408 ssize_t len = -ENXIO;
4409 char buffer[IPR_MAX_RES_PATH_LENGTH];
4410
4411 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4412 res = (struct ipr_resource_entry *)sdev->hostdata;
5adcbeb3 4413 if (res && ioa_cfg->sis64)
3e7ebdfa 4414 len = snprintf(buf, PAGE_SIZE, "%s\n",
4415 __ipr_format_res_path(res->res_path, buffer,
4416 sizeof(buffer)));
4417 else if (res)
4418 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4419 res->bus, res->target, res->lun);
4420
4421 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4422 return len;
4423}
4424
4425static struct device_attribute ipr_resource_path_attr = {
4426 .attr = {
4427 .name = "resource_path",
75576bb9 4428 .mode = S_IRUGO,
3e7ebdfa
WB
4429 },
4430 .show = ipr_show_resource_path
4431};
4432
4433/**
4434 * ipr_show_device_id - Show the device_id for this device.
4435 * @dev: device struct
4436 * @attr: device attribute structure
4437 * @buf: buffer
4438 *
4439 * Return value:
4440 * number of bytes printed to buffer
4441 **/
4442static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4443{
4444 struct scsi_device *sdev = to_scsi_device(dev);
4445 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4446 struct ipr_resource_entry *res;
4447 unsigned long lock_flags = 0;
4448 ssize_t len = -ENXIO;
4449
4450 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4451 res = (struct ipr_resource_entry *)sdev->hostdata;
4452 if (res && ioa_cfg->sis64)
4453 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4454 else if (res)
4455 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4456
4457 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4458 return len;
4459}
4460
4461static struct device_attribute ipr_device_id_attr = {
4462 .attr = {
4463 .name = "device_id",
4464 .mode = S_IRUGO,
4465 },
4466 .show = ipr_show_device_id
4467};
4468
4469/**
4470 * ipr_show_resource_type - Show the resource type for this device.
4471 * @dev: device struct
46d74563 4472 * @attr: device attribute structure
4473 * @buf: buffer
4474 *
4475 * Return value:
4476 * number of bytes printed to buffer
4477 **/
4478static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4479{
4480 struct scsi_device *sdev = to_scsi_device(dev);
4481 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4482 struct ipr_resource_entry *res;
4483 unsigned long lock_flags = 0;
4484 ssize_t len = -ENXIO;
4485
4486 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4487 res = (struct ipr_resource_entry *)sdev->hostdata;
4488
4489 if (res)
4490 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4491
4492 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4493 return len;
4494}
4495
4496static struct device_attribute ipr_resource_type_attr = {
4497 .attr = {
4498 .name = "resource_type",
4499 .mode = S_IRUGO,
4500 },
4501 .show = ipr_show_resource_type
4502};
4503
4504/**
4505 * ipr_show_raw_mode - Show the device's raw mode
4506 * @dev: class device struct
4507 * @buf: buffer
4508 *
4509 * Return value:
4510 * number of bytes printed to buffer
4511 **/
4512static ssize_t ipr_show_raw_mode(struct device *dev,
4513 struct device_attribute *attr, char *buf)
4514{
4515 struct scsi_device *sdev = to_scsi_device(dev);
4516 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4517 struct ipr_resource_entry *res;
4518 unsigned long lock_flags = 0;
4519 ssize_t len;
4520
4521 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4522 res = (struct ipr_resource_entry *)sdev->hostdata;
4523 if (res)
4524 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4525 else
4526 len = -ENXIO;
4527 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4528 return len;
4529}
4530
4531/**
4532 * ipr_store_raw_mode - Change the device's raw mode
4533 * @dev: class device struct
4534 * @buf: buffer
4535 *
4536 * Return value:
4537 * number of bytes consumed on success / negative errno on failure
4538 **/
4539static ssize_t ipr_store_raw_mode(struct device *dev,
4540 struct device_attribute *attr,
4541 const char *buf, size_t count)
4542{
4543 struct scsi_device *sdev = to_scsi_device(dev);
4544 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4545 struct ipr_resource_entry *res;
4546 unsigned long lock_flags = 0;
4547 ssize_t len;
4548
4549 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4550 res = (struct ipr_resource_entry *)sdev->hostdata;
4551 if (res) {
4552 if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) {
4553 res->raw_mode = simple_strtoul(buf, NULL, 10);
4554 len = strlen(buf);
4555 if (res->sdev)
4556 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4557 res->raw_mode ? "enabled" : "disabled");
4558 } else
4559 len = -EINVAL;
4560 } else
4561 len = -ENXIO;
4562 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4563 return len;
4564}
4565
4566static struct device_attribute ipr_raw_mode_attr = {
4567 .attr = {
4568 .name = "raw_mode",
4569 .mode = S_IRUGO | S_IWUSR,
4570 },
4571 .show = ipr_show_raw_mode,
4572 .store = ipr_store_raw_mode
4573};
4574
4575static struct device_attribute *ipr_dev_attrs[] = {
4576 &ipr_adapter_handle_attr,
3e7ebdfa 4577 &ipr_resource_path_attr,
46d74563 4578 &ipr_device_id_attr,
75576bb9 4579 &ipr_resource_type_attr,
f8ee25d7 4580 &ipr_raw_mode_attr,
4581 NULL,
4582};
4583
4584/**
4585 * ipr_biosparam - Return the HSC mapping
4586 * @sdev: scsi device struct
4587 * @block_device: block device pointer
4588 * @capacity: capacity of the device
4589 * @parm: Array containing returned HSC values.
4590 *
4591 * This function generates the HSC parms that fdisk uses.
4592 * We want to make sure we return something that places partitions
4593 * on 4k boundaries for best performance with the IOA.
4594 *
4595 * Return value:
4596 * 0 on success
4597 **/
4598static int ipr_biosparam(struct scsi_device *sdev,
4599 struct block_device *block_device,
4600 sector_t capacity, int *parm)
4601{
4602 int heads, sectors;
4603 sector_t cylinders;
4604
4605 heads = 128;
4606 sectors = 32;
4607
4608 cylinders = capacity;
4609 sector_div(cylinders, (128 * 32));
4610
4611 /* return result */
4612 parm[0] = heads;
4613 parm[1] = sectors;
4614 parm[2] = cylinders;
4615
4616 return 0;
4617}
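/*
 * Editor's worked example (illustrative): with heads = 128 and
 * sectors = 32, one cylinder spans 128 * 32 = 4096 sectors, so
 * cylinder-aligned partitions start on 4096-sector multiples (and thus
 * on 4k boundaries). For an assumed capacity of 143374744 sectors:
 *   cylinders = 143374744 / 4096 = 35003  (sector_div truncates)
 */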
4618
4619/**
4620 * ipr_find_starget - Find target based on bus/target.
4621 * @starget: scsi target struct
4622 *
4623 * Return value:
4624 * resource entry pointer if found / NULL if not found
4625 **/
4626static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4627{
4628 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4629 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4630 struct ipr_resource_entry *res;
4631
4632 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 4633 if ((res->bus == starget->channel) &&
0ee1d714 4634 (res->target == starget->id)) {
4635 return res;
4636 }
4637 }
4638
4639 return NULL;
4640}
4641
4642static struct ata_port_info sata_port_info;
4643
4644/**
4645 * ipr_target_alloc - Prepare for commands to a SCSI target
4646 * @starget: scsi target struct
4647 *
4648 * If the device is a SATA device, this function allocates an
4649 * ATA port with libata, else it does nothing.
4650 *
4651 * Return value:
4652 * 0 on success / non-0 on failure
4653 **/
4654static int ipr_target_alloc(struct scsi_target *starget)
4655{
4656 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4657 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4658 struct ipr_sata_port *sata_port;
4659 struct ata_port *ap;
4660 struct ipr_resource_entry *res;
4661 unsigned long lock_flags;
4662
4663 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4664 res = ipr_find_starget(starget);
4665 starget->hostdata = NULL;
4666
4667 if (res && ipr_is_gata(res)) {
4668 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4669 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4670 if (!sata_port)
4671 return -ENOMEM;
4672
4673 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4674 if (ap) {
4675 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4676 sata_port->ioa_cfg = ioa_cfg;
4677 sata_port->ap = ap;
4678 sata_port->res = res;
4679
4680 res->sata_port = sata_port;
4681 ap->private_data = sata_port;
4682 starget->hostdata = sata_port;
4683 } else {
4684 kfree(sata_port);
4685 return -ENOMEM;
4686 }
4687 }
4688 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4689
4690 return 0;
4691}
4692
4693/**
4694 * ipr_target_destroy - Destroy a SCSI target
4695 * @starget: scsi target struct
4696 *
4697 * If the device was a SATA device, this function frees the libata
4698 * ATA port, else it does nothing.
4699 *
4700 **/
4701static void ipr_target_destroy(struct scsi_target *starget)
4702{
4703 struct ipr_sata_port *sata_port = starget->hostdata;
4704 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4705 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4706
4707 if (ioa_cfg->sis64) {
4708 if (!ipr_find_starget(starget)) {
4709 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4710 clear_bit(starget->id, ioa_cfg->array_ids);
4711 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4712 clear_bit(starget->id, ioa_cfg->vset_ids);
4713 else if (starget->channel == 0)
4714 clear_bit(starget->id, ioa_cfg->target_ids);
4715 }
3e7ebdfa 4716 }
4717
4718 if (sata_port) {
4719 starget->hostdata = NULL;
4720 ata_sas_port_destroy(sata_port->ap);
4721 kfree(sata_port);
4722 }
4723}
4724
4725/**
4726 * ipr_find_sdev - Find device based on bus/target/lun.
4727 * @sdev: scsi device struct
4728 *
4729 * Return value:
4730 * resource entry pointer if found / NULL if not found
4731 **/
4732static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4733{
4734 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4735 struct ipr_resource_entry *res;
4736
4737 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4738 if ((res->bus == sdev->channel) &&
4739 (res->target == sdev->id) &&
4740 (res->lun == sdev->lun))
4741 return res;
4742 }
4743
4744 return NULL;
4745}
4746
4747/**
4748 * ipr_slave_destroy - Unconfigure a SCSI device
4749 * @sdev: scsi device struct
4750 *
4751 * Return value:
4752 * nothing
4753 **/
4754static void ipr_slave_destroy(struct scsi_device *sdev)
4755{
4756 struct ipr_resource_entry *res;
4757 struct ipr_ioa_cfg *ioa_cfg;
4758 unsigned long lock_flags = 0;
4759
4760 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4761
4762 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4763 res = (struct ipr_resource_entry *) sdev->hostdata;
4764 if (res) {
35a39691 4765 if (res->sata_port)
3e4ec344 4766 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4767 sdev->hostdata = NULL;
4768 res->sdev = NULL;
35a39691 4769 res->sata_port = NULL;
4770 }
4771 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4772}
4773
4774/**
4775 * ipr_slave_configure - Configure a SCSI device
4776 * @sdev: scsi device struct
4777 *
4778 * This function configures the specified scsi device.
4779 *
4780 * Return value:
4781 * 0 on success
4782 **/
4783static int ipr_slave_configure(struct scsi_device *sdev)
4784{
4785 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4786 struct ipr_resource_entry *res;
dd406ef8 4787 struct ata_port *ap = NULL;
1da177e4 4788 unsigned long lock_flags = 0;
3e7ebdfa 4789 char buffer[IPR_MAX_RES_PATH_LENGTH];
4790
4791 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4792 res = sdev->hostdata;
4793 if (res) {
4794 if (ipr_is_af_dasd_device(res))
4795 sdev->type = TYPE_RAID;
0726ce26 4796 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 4797 sdev->scsi_level = 4;
0726ce26 4798 sdev->no_uld_attach = 1;
4799 }
1da177e4 4800 if (ipr_is_vset_device(res)) {
60654e25 4801 sdev->scsi_level = SCSI_SPC_3;
242f9dcb
JA
4802 blk_queue_rq_timeout(sdev->request_queue,
4803 IPR_VSET_RW_TIMEOUT);
086fa5ff 4804 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
1da177e4 4805 }
4806 if (ipr_is_gata(res) && res->sata_port)
4807 ap = res->sata_port->ap;
4808 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4809
4810 if (ap) {
db5ed4df 4811 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
dd406ef8 4812 ata_sas_slave_configure(sdev, ap);
4813 }
4814
4815 if (ioa_cfg->sis64)
4816 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4817 ipr_format_res_path(ioa_cfg,
4818 res->res_path, buffer, sizeof(buffer)));
dd406ef8 4819 return 0;
4820 }
4821 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4822 return 0;
4823}
4824
4825/**
4826 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4827 * @sdev: scsi device struct
4828 *
4829 * This function initializes an ATA port so that future commands
4830 * sent through queuecommand will work.
4831 *
4832 * Return value:
4833 * 0 on success
4834 **/
4835static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4836{
4837 struct ipr_sata_port *sata_port = NULL;
4838 int rc = -ENXIO;
4839
4840 ENTER;
4841 if (sdev->sdev_target)
4842 sata_port = sdev->sdev_target->hostdata;
b2024459 4843 if (sata_port) {
35a39691 4844 rc = ata_sas_port_init(sata_port->ap);
4845 if (rc == 0)
4846 rc = ata_sas_sync_probe(sata_port->ap);
4847 }
4848
4849 if (rc)
4850 ipr_slave_destroy(sdev);
4851
4852 LEAVE;
4853 return rc;
4854}
4855
4856/**
4857 * ipr_slave_alloc - Prepare for commands to a device.
4858 * @sdev: scsi device struct
4859 *
4860 * This function saves a pointer to the resource entry
4861 * in the scsi device struct if the device exists. We
4862 * can then use this pointer in ipr_queuecommand when
4863 * handling new commands.
4864 *
4865 * Return value:
692aebfc 4866 * 0 on success / -ENXIO if device does not exist
1da177e4
LT
4867 **/
4868static int ipr_slave_alloc(struct scsi_device *sdev)
4869{
4870 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4871 struct ipr_resource_entry *res;
4872 unsigned long lock_flags;
692aebfc 4873 int rc = -ENXIO;
4874
4875 sdev->hostdata = NULL;
4876
4877 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4878
4879 res = ipr_find_sdev(sdev);
4880 if (res) {
4881 res->sdev = sdev;
4882 res->add_to_ml = 0;
4883 res->in_erp = 0;
4884 sdev->hostdata = res;
4885 if (!ipr_is_naca_model(res))
4886 res->needs_sync_complete = 1;
4887 rc = 0;
4888 if (ipr_is_gata(res)) {
4889 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4890 return ipr_ata_slave_alloc(sdev);
4891 }
4892 }
4893
4894 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4895
692aebfc 4896 return rc;
4897}
4898
4899/**
4900 * ipr_match_lun - Match function for specified LUN
4901 * @ipr_cmd: ipr command struct
4902 * @device: device to match (sdev)
4903 *
4904 * Returns:
4905 * 1 if command matches sdev / 0 if command does not match sdev
4906 **/
4907static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4908{
4909 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4910 return 1;
4911 return 0;
4912}
4913
4914/**
4915 * ipr_wait_for_ops - Wait for matching commands to complete
4916 * @ipr_cmd: ipr command struct
4917 * @device: device to match (sdev)
4918 * @match: match function to use
4919 *
4920 * Returns:
4921 * SUCCESS / FAILED
4922 **/
4923static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4924 int (*match)(struct ipr_cmnd *, void *))
4925{
4926 struct ipr_cmnd *ipr_cmd;
4927 int wait;
4928 unsigned long flags;
4929 struct ipr_hrr_queue *hrrq;
4930 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4931 DECLARE_COMPLETION_ONSTACK(comp);
4932
4933 ENTER;
4934 do {
4935 wait = 0;
4936
4937 for_each_hrrq(hrrq, ioa_cfg) {
4938 spin_lock_irqsave(hrrq->lock, flags);
4939 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4940 if (match(ipr_cmd, device)) {
4941 ipr_cmd->eh_comp = &comp;
4942 wait++;
4943 }
4944 }
4945 spin_unlock_irqrestore(hrrq->lock, flags);
4946 }
4947
4948 if (wait) {
4949 timeout = wait_for_completion_timeout(&comp, timeout);
4950
4951 if (!timeout) {
4952 wait = 0;
4953
4954 for_each_hrrq(hrrq, ioa_cfg) {
4955 spin_lock_irqsave(hrrq->lock, flags);
4956 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4957 if (match(ipr_cmd, device)) {
4958 ipr_cmd->eh_comp = NULL;
4959 wait++;
4960 }
4961 }
4962 spin_unlock_irqrestore(hrrq->lock, flags);
4963 }
4964
4965 if (wait)
4966 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4967 LEAVE;
4968 return wait ? FAILED : SUCCESS;
4969 }
4970 }
4971 } while (wait);
4972
4973 LEAVE;
4974 return SUCCESS;
4975}
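/*
 * Editor's sketch (illustrative, not part of the driver): the on-stack
 * completion idiom used by ipr_wait_for_ops(). Matching in-flight ops
 * are handed a pointer to the same completion; the waiter sleeps with
 * a timeout, where a zero return means the timeout expired.
 */
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int sketch_wait_for_tagged_ops(void)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	unsigned long timeout = msecs_to_jiffies(30000);	/* assumed timeout */

	/* ... publish &comp to each matching op under the queue lock ... */
	timeout = wait_for_completion_timeout(&comp, timeout);
	return timeout ? 0 : -ETIMEDOUT;
}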
4976
70233ac5 4977static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4978{
4979 struct ipr_ioa_cfg *ioa_cfg;
70233ac5 4980 unsigned long lock_flags = 0;
4981 int rc = SUCCESS;
4982
4983 ENTER;
70233ac5 4984 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4985 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1da177e4 4986
96b04db9 4987 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
70233ac5 4988 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4989 dev_err(&ioa_cfg->pdev->dev,
4990 "Adapter being reset as a result of error recovery.\n");
1da177e4 4991
4992 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4993 ioa_cfg->sdt_state = GET_DUMP;
4994 }
1da177e4 4995
70233ac5 4996 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4997 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4998 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
df0ae249 4999
70233ac5 5000 /* If we got hit with a host reset while we were already resetting
5001 the adapter for some reason, and that reset failed, fail this one too. */
5002 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5003 ipr_trace;
5004 rc = FAILED;
5005 }
df0ae249 5006
70233ac5 5007 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5008 LEAVE;
5009 return rc;
5010}
5011
5012/**
5013 * ipr_device_reset - Reset the device
5014 * @ioa_cfg: ioa config struct
5015 * @res: resource entry struct
5016 *
5017 * This function issues a device reset to the affected device.
5018 * If the device is a SCSI device, a LUN reset will be sent
5019 * to the device first. If that does not work, a target reset
5020 * will be sent. If the device is a SATA device, a PHY reset will
5021 * be sent.
5022 *
5023 * Return value:
5024 * 0 on success / non-zero on failure
5025 **/
5026static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5027 struct ipr_resource_entry *res)
5028{
5029 struct ipr_cmnd *ipr_cmd;
5030 struct ipr_ioarcb *ioarcb;
5031 struct ipr_cmd_pkt *cmd_pkt;
35a39691 5032 struct ipr_ioarcb_ata_regs *regs;
5033 u32 ioasc;
5034
5035 ENTER;
5036 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5037 ioarcb = &ipr_cmd->ioarcb;
5038 cmd_pkt = &ioarcb->cmd_pkt;
5039
5040 if (ipr_cmd->ioa_cfg->sis64) {
5041 regs = &ipr_cmd->i.ata_ioadl.regs;
5042 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5043 } else
5044 regs = &ioarcb->u.add_data.u.regs;
c6513096 5045
3e7ebdfa 5046 ioarcb->res_handle = res->res_handle;
5047 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5048 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5049 if (ipr_is_gata(res)) {
5050 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
a32c055f 5051 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5052 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5053 }
5054
5055 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
96d21f00 5056 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
05a6538a 5057 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5058 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5059 if (ipr_cmd->ioa_cfg->sis64)
5060 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5061 sizeof(struct ipr_ioasa_gata));
5062 else
5063 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5064 sizeof(struct ipr_ioasa_gata));
5065 }
5066
5067 LEAVE;
203fa3fe 5068 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5069}
5070
5071/**
5072 * ipr_sata_reset - Reset the SATA port
cc0680a5 5073 * @link: SATA link to reset
5074 * @classes: class of the attached device
5075 *
cc0680a5 5076 * This function issues a SATA phy reset to the affected ATA link.
5077 *
5078 * Return value:
5079 * 0 on success / non-zero on failure
5080 **/
cc0680a5 5081static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
120bda35 5082 unsigned long deadline)
35a39691 5083{
cc0680a5 5084 struct ipr_sata_port *sata_port = link->ap->private_data;
5085 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5086 struct ipr_resource_entry *res;
5087 unsigned long lock_flags = 0;
5088 int rc = -ENXIO;
5089
5090 ENTER;
5091 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 5092 while (ioa_cfg->in_reset_reload) {
5093 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5094 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5095 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5096 }
5097
5098 res = sata_port->res;
5099 if (res) {
5100 rc = ipr_device_reset(ioa_cfg, res);
3e7ebdfa 5101 *classes = res->ata_class;
5102 }
5103
5104 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5105 LEAVE;
5106 return rc;
5107}
5108
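/*
 * The while (ioa_cfg->in_reset_reload) loop above is a recurring idiom
 * in this driver: drop the host lock, sleep until any reset/reload
 * finishes, then retake the lock and re-check. A minimal sketch of the
 * idiom; ipr_wait_reset_quiesce() is a hypothetical name, not driver code:
 */
static void ipr_wait_reset_quiesce(struct ipr_ioa_cfg *ioa_cfg,
				   unsigned long *lock_flags)
{
	while (ioa_cfg->in_reset_reload) {
		/* Cannot sleep while holding host_lock; release it first */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, *lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		/* Retake the lock and loop in case another reset started */
		spin_lock_irqsave(ioa_cfg->host->host_lock, *lock_flags);
	}
}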
5109/**
 5110 * __ipr_eh_dev_reset - Reset the device
5111 * @scsi_cmd: scsi command struct
5112 *
5113 * This function issues a device reset to the affected device.
5114 * A LUN reset will be sent to the device first. If that does
5115 * not work, a target reset will be sent.
5116 *
5117 * Return value:
5118 * SUCCESS / FAILED
5119 **/
203fa3fe 5120static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5121{
5122 struct ipr_cmnd *ipr_cmd;
5123 struct ipr_ioa_cfg *ioa_cfg;
5124 struct ipr_resource_entry *res;
5125 struct ata_port *ap;
5126 int rc = 0;
05a6538a 5127 struct ipr_hrr_queue *hrrq;
5128
5129 ENTER;
5130 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5131 res = scsi_cmd->device->hostdata;
5132
eeb88307 5133 if (!res)
5134 return FAILED;
5135
5136 /*
5137 * If we are currently going through reset/reload, return failed. This will force the
5138 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
 5139 * reset to complete.
5140 */
5141 if (ioa_cfg->in_reset_reload)
5142 return FAILED;
56d6aa33 5143 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5144 return FAILED;
5145
05a6538a 5146 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5147 spin_lock(&hrrq->_lock);
05a6538a 5148 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5149 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5150 if (ipr_cmd->scsi_cmd)
5151 ipr_cmd->done = ipr_scsi_eh_done;
5152 if (ipr_cmd->qc)
5153 ipr_cmd->done = ipr_sata_eh_done;
5154 if (ipr_cmd->qc &&
5155 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5156 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5157 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5158 }
7402ecef 5159 }
1da177e4 5160 }
56d6aa33 5161 spin_unlock(&hrrq->_lock);
1da177e4 5162 }
1da177e4 5163 res->resetting_device = 1;
fb3ed3cb 5164 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5165
5166 if (ipr_is_gata(res) && res->sata_port) {
5167 ap = res->sata_port->ap;
5168 spin_unlock_irq(scsi_cmd->device->host->host_lock);
a1efdaba 5169 ata_std_error_handler(ap);
35a39691 5170 spin_lock_irq(scsi_cmd->device->host->host_lock);
5af23d26 5171
05a6538a 5172 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5173 spin_lock(&hrrq->_lock);
05a6538a 5174 list_for_each_entry(ipr_cmd,
5175 &hrrq->hrrq_pending_q, queue) {
5176 if (ipr_cmd->ioarcb.res_handle ==
5177 res->res_handle) {
5178 rc = -EIO;
5179 break;
5180 }
5af23d26 5181 }
56d6aa33 5182 spin_unlock(&hrrq->_lock);
5af23d26 5183 }
5184 } else
5185 rc = ipr_device_reset(ioa_cfg, res);
1da177e4 5186 res->resetting_device = 0;
0b1f8d44 5187 res->reset_occurred = 1;
1da177e4 5188
1da177e4 5189 LEAVE;
203fa3fe 5190 return rc ? FAILED : SUCCESS;
5191}
5192
203fa3fe 5193static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5194{
5195 int rc;
5196 struct ipr_ioa_cfg *ioa_cfg;
5197
5198 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5199
5200 spin_lock_irq(cmd->device->host->host_lock);
5201 rc = __ipr_eh_dev_reset(cmd);
5202 spin_unlock_irq(cmd->device->host->host_lock);
5203
5204 if (rc == SUCCESS)
5205 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5206
5207 return rc;
5208}
5209
5210/**
5211 * ipr_bus_reset_done - Op done function for bus reset.
5212 * @ipr_cmd: ipr command struct
5213 *
5214 * This function is the op done function for a bus reset
5215 *
5216 * Return value:
5217 * none
5218 **/
5219static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5220{
5221 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5222 struct ipr_resource_entry *res;
5223
5224 ENTER;
5225 if (!ioa_cfg->sis64)
5226 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5227 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5228 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5229 break;
5230 }
1da177e4 5231 }
5232
5233 /*
5234 * If abort has not completed, indicate the reset has, else call the
5235 * abort's done function to wake the sleeping eh thread
5236 */
5237 if (ipr_cmd->sibling->sibling)
5238 ipr_cmd->sibling->sibling = NULL;
5239 else
5240 ipr_cmd->sibling->done(ipr_cmd->sibling);
5241
05a6538a 5242 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5243 LEAVE;
5244}
5245
5246/**
5247 * ipr_abort_timeout - An abort task has timed out
5248 * @ipr_cmd: ipr command struct
5249 *
 5250 * This function handles the case in which an abort task times out. If this
5251 * happens we issue a bus reset since we have resources tied
5252 * up that must be freed before returning to the midlayer.
5253 *
5254 * Return value:
5255 * none
5256 **/
5257static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5258{
5259 struct ipr_cmnd *reset_cmd;
5260 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5261 struct ipr_cmd_pkt *cmd_pkt;
5262 unsigned long lock_flags = 0;
5263
5264 ENTER;
5265 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5266 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5267 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5268 return;
5269 }
5270
fb3ed3cb 5271 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5272 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5273 ipr_cmd->sibling = reset_cmd;
5274 reset_cmd->sibling = ipr_cmd;
5275 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5276 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5277 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5278 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5279 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5280
5281 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5282 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5283 LEAVE;
5284}
5285
5286/**
5287 * ipr_cancel_op - Cancel specified op
5288 * @scsi_cmd: scsi command struct
5289 *
5290 * This function cancels specified op.
5291 *
5292 * Return value:
5293 * SUCCESS / FAILED
5294 **/
203fa3fe 5295static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5296{
5297 struct ipr_cmnd *ipr_cmd;
5298 struct ipr_ioa_cfg *ioa_cfg;
5299 struct ipr_resource_entry *res;
5300 struct ipr_cmd_pkt *cmd_pkt;
a92fa25c 5301 u32 ioasc, int_reg;
1da177e4 5302 int op_found = 0;
05a6538a 5303 struct ipr_hrr_queue *hrrq;
5304
5305 ENTER;
5306 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5307 res = scsi_cmd->device->hostdata;
5308
5309 /* If we are currently going through reset/reload, return failed.
5310 * This will force the mid-layer to call ipr_eh_host_reset,
 5311 * which will then go to sleep and wait for the reset to complete.
5312 */
56d6aa33 5313 if (ioa_cfg->in_reset_reload ||
5314 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8fa728a2 5315 return FAILED;
5316 if (!res)
5317 return FAILED;
5318
5319 /*
5320 * If we are aborting a timed out op, chances are that the timeout was caused
5321 * by a still not detected EEH error. In such cases, reading a register will
5322 * trigger the EEH recovery infrastructure.
5323 */
5324 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5325
5326 if (!ipr_is_gscsi(res))
5327 return FAILED;
5328
05a6538a 5329 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5330 spin_lock(&hrrq->_lock);
05a6538a 5331 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5332 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5333 ipr_cmd->done = ipr_scsi_eh_done;
5334 op_found = 1;
5335 break;
5336 }
1da177e4 5337 }
56d6aa33 5338 spin_unlock(&hrrq->_lock);
5339 }
5340
5341 if (!op_found)
5342 return SUCCESS;
5343
5344 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3e7ebdfa 5345 ipr_cmd->ioarcb.res_handle = res->res_handle;
5346 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5347 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5348 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5349 ipr_cmd->u.sdev = scsi_cmd->device;
5350
5351 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5352 scsi_cmd->cmnd[0]);
1da177e4 5353 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
96d21f00 5354 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5355
5356 /*
5357 * If the abort task timed out and we sent a bus reset, we will get
 5358 * one of the following responses to the abort:
5359 */
5360 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5361 ioasc = 0;
5362 ipr_trace;
5363 }
5364
c4ee22a3 5365 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ee0a90fa 5366 if (!ipr_is_naca_model(res))
5367 res->needs_sync_complete = 1;
5368
5369 LEAVE;
203fa3fe 5370 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5371}
5372
5373/**
 5374 * ipr_scan_finished - Report whether the device scan has finished
 5375 * @shost: scsi host struct
 5376 * @elapsed_time: time in jiffies since the scan started
 *
5377 * Return value:
5378 * 0 if scan in progress / 1 if scan is complete
5379 **/
5380static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5381{
5382 unsigned long lock_flags;
5383 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5384 int rc = 0;
5385
5386 spin_lock_irqsave(shost->host_lock, lock_flags);
5387 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5388 rc = 1;
5389 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5390 rc = 1;
5391 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5392 return rc;
5393}
5394
5395/**
 5396 * ipr_eh_abort - Abort a single op
5397 * @scsi_cmd: scsi command struct
5398 *
5399 * Return value:
5400 * SUCCESS / FAILED
5401 **/
203fa3fe 5402static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
1da177e4 5403{
5404 unsigned long flags;
5405 int rc;
6cdb0817 5406 struct ipr_ioa_cfg *ioa_cfg;
5407
5408 ENTER;
1da177e4 5409
5410 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5411
5412 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5413 rc = ipr_cancel_op(scsi_cmd);
5414 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
1da177e4 5415
5416 if (rc == SUCCESS)
5417 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
1da177e4 5418 LEAVE;
8fa728a2 5419 return rc;
5420}
5421
5422/**
5423 * ipr_handle_other_interrupt - Handle "other" interrupts
5424 * @ioa_cfg: ioa config struct
634651fa 5425 * @int_reg: interrupt register
5426 *
5427 * Return value:
5428 * IRQ_NONE / IRQ_HANDLED
5429 **/
634651fa 5430static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
630ad831 5431 u32 int_reg)
5432{
5433 irqreturn_t rc = IRQ_HANDLED;
7dacb64f 5434 u32 int_mask_reg;
56d6aa33 5435
5436 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5437 int_reg &= ~int_mask_reg;
5438
5439 /* If an interrupt on the adapter did not occur, ignore it.
5440 * Or in the case of SIS 64, check for a stage change interrupt.
5441 */
5442 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5443 if (ioa_cfg->sis64) {
5444 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5445 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5446 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5447
5448 /* clear stage change */
5449 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5450 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5451 list_del(&ioa_cfg->reset_cmd->queue);
5452 del_timer(&ioa_cfg->reset_cmd->timer);
5453 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5454 return IRQ_HANDLED;
5455 }
5456 }
5457
5458 return IRQ_NONE;
5459 }
5460
5461 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5462 /* Mask the interrupt */
5463 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5464 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5465
5466 list_del(&ioa_cfg->reset_cmd->queue);
5467 del_timer(&ioa_cfg->reset_cmd->timer);
5468 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
7dacb64f 5469 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5470 if (ioa_cfg->clear_isr) {
5471 if (ipr_debug && printk_ratelimit())
5472 dev_err(&ioa_cfg->pdev->dev,
5473 "Spurious interrupt detected. 0x%08X\n", int_reg);
5474 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5475 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5476 return IRQ_NONE;
5477 }
5478 } else {
5479 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5480 ioa_cfg->ioa_unit_checked = 1;
05a6538a 5481 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5482 dev_err(&ioa_cfg->pdev->dev,
5483 "No Host RRQ. 0x%08X\n", int_reg);
5484 else
5485 dev_err(&ioa_cfg->pdev->dev,
5486 "Permanent IOA failure. 0x%08X\n", int_reg);
5487
5488 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5489 ioa_cfg->sdt_state = GET_DUMP;
5490
5491 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5492 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5493 }
56d6aa33 5494
5495 return rc;
5496}
5497
5498/**
5499 * ipr_isr_eh - Interrupt service routine error handler
5500 * @ioa_cfg: ioa config struct
 5501 * @msg: message to log
 * @number: number logged along with the message
5502 *
5503 * Return value:
5504 * none
5505 **/
05a6538a 5506static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5507{
5508 ioa_cfg->errors_logged++;
05a6538a 5509 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5510
5511 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5512 ioa_cfg->sdt_state = GET_DUMP;
5513
5514 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5515}
5516
b53d124a 5517static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
05a6538a 5518 struct list_head *doneq)
5519{
5520 u32 ioasc;
5521 u16 cmd_index;
5522 struct ipr_cmnd *ipr_cmd;
5523 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5524 int num_hrrq = 0;
5525
5526 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5527 if (!hrr_queue->allow_interrupts)
05a6538a 5528 return 0;
5529
5530 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5531 hrr_queue->toggle_bit) {
5532
5533 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5534 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5535 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5536
5537 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5538 cmd_index < hrr_queue->min_cmd_id)) {
5539 ipr_isr_eh(ioa_cfg,
5540 "Invalid response handle from IOA: ",
5541 cmd_index);
5542 break;
5543 }
5544
5545 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5546 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5547
5548 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5549
5550 list_move_tail(&ipr_cmd->queue, doneq);
5551
5552 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5553 hrr_queue->hrrq_curr++;
5554 } else {
5555 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5556 hrr_queue->toggle_bit ^= 1u;
5557 }
5558 num_hrrq++;
b53d124a 5559 if (budget > 0 && num_hrrq >= budget)
5560 break;
05a6538a 5561 }
b53d124a 5562
05a6538a 5563 return num_hrrq;
5564}
b53d124a 5565
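/*
 * ipr_process_hrrq() above consumes a circular host response queue guarded
 * by a toggle bit: the IOA flips the bit it writes on every wrap, so an
 * entry is valid only while its toggle bit matches the value the host
 * expects. A self-contained sketch of the wrap handling follows; struct
 * toggle_queue and toggle_queue_pop() are illustrative simplifications,
 * not the driver's types.
 */
struct toggle_queue {
	__be32 *curr;		/* next entry to consume */
	__be32 *start, *end;	/* first and last entry of the ring */
	u32 toggle_bit;		/* toggle value expected from the producer */
};

static int toggle_queue_pop(struct toggle_queue *q, u32 *entry)
{
	if ((be32_to_cpu(*q->curr) & IPR_HRRQ_TOGGLE_BIT) != q->toggle_bit)
		return 0;	/* producer has not written this slot yet */
	*entry = be32_to_cpu(*q->curr);
	if (q->curr < q->end) {
		q->curr++;
	} else {
		q->curr = q->start;	/* wrap around ... */
		q->toggle_bit ^= 1u;	/* ... and expect the flipped bit */
	}
	return 1;
}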
5566static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5567{
5568 struct ipr_ioa_cfg *ioa_cfg;
5569 struct ipr_hrr_queue *hrrq;
5570 struct ipr_cmnd *ipr_cmd, *temp;
5571 unsigned long hrrq_flags;
5572 int completed_ops;
5573 LIST_HEAD(doneq);
5574
5575 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5576 ioa_cfg = hrrq->ioa_cfg;
5577
5578 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5579 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5580
5581 if (completed_ops < budget)
5582 blk_iopoll_complete(iop);
5583 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5584
5585 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5586 list_del(&ipr_cmd->queue);
5587 del_timer(&ipr_cmd->timer);
5588 ipr_cmd->fast_done(ipr_cmd);
5589 }
5590
5591 return completed_ops;
5592}
5593
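/*
 * ipr_iopoll() above (and ipr_isr() below) complete commands in two
 * phases: finished entries are moved to a private doneq while the HRRQ
 * lock is held, and the done callbacks run only after the lock is
 * dropped, keeping lock hold times short. A distilled sketch of the
 * pattern; ipr_reap_hrrq() is a hypothetical name, not driver code:
 */
static void ipr_reap_hrrq(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	unsigned long flags;
	LIST_HEAD(doneq);	/* private list, needs no locking */

	spin_lock_irqsave(hrrq->lock, flags);
	ipr_process_hrrq(hrrq, -1, &doneq);	/* -1: no budget limit */
	spin_unlock_irqrestore(hrrq->lock, flags);

	/* Run the completions with the queue lock released */
	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
}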
5594/**
5595 * ipr_isr - Interrupt service routine
5596 * @irq: irq number
 5597 * @devp: pointer to the hrr queue struct
5598 *
5599 * Return value:
5600 * IRQ_NONE / IRQ_HANDLED
5601 **/
7d12e780 5602static irqreturn_t ipr_isr(int irq, void *devp)
1da177e4 5603{
05a6538a 5604 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5605 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
56d6aa33 5606 unsigned long hrrq_flags = 0;
7dacb64f 5607 u32 int_reg = 0;
3feeb89d 5608 int num_hrrq = 0;
7dacb64f 5609 int irq_none = 0;
172cd6e1 5610 struct ipr_cmnd *ipr_cmd, *temp;
1da177e4 5611 irqreturn_t rc = IRQ_NONE;
172cd6e1 5612 LIST_HEAD(doneq);
1da177e4 5613
56d6aa33 5614 spin_lock_irqsave(hrrq->lock, hrrq_flags);
1da177e4 5615 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5616 if (!hrrq->allow_interrupts) {
5617 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5618 return IRQ_NONE;
5619 }
5620
1da177e4 5621 while (1) {
b53d124a 5622 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5623 rc = IRQ_HANDLED;
1da177e4 5624
b53d124a 5625 if (!ioa_cfg->clear_isr)
5626 break;
7dd21308 5627
1da177e4 5628 /* Clear the PCI interrupt */
a5442ba4 5629 num_hrrq = 0;
3feeb89d 5630 do {
b53d124a 5631 writel(IPR_PCII_HRRQ_UPDATED,
5632 ioa_cfg->regs.clr_interrupt_reg32);
7dacb64f 5633 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
3feeb89d 5634 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
b53d124a 5635 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
3feeb89d 5636
5637 } else if (rc == IRQ_NONE && irq_none == 0) {
5638 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5639 irq_none++;
5640 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5641 int_reg & IPR_PCII_HRRQ_UPDATED) {
b53d124a 5642 ipr_isr_eh(ioa_cfg,
5643 "Error clearing HRRQ: ", num_hrrq);
172cd6e1 5644 rc = IRQ_HANDLED;
b53d124a 5645 break;
5646 } else
5647 break;
5648 }
5649
5650 if (unlikely(rc == IRQ_NONE))
634651fa 5651 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
1da177e4 5652
56d6aa33 5653 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5654 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5655 list_del(&ipr_cmd->queue);
5656 del_timer(&ipr_cmd->timer);
5657 ipr_cmd->fast_done(ipr_cmd);
5658 }
05a6538a 5659 return rc;
5660}
5661
5662/**
 5663 * ipr_isr_mhrrq - Interrupt service routine for secondary HRRQ vectors
 5664 * @irq: irq number
 5665 * @devp: pointer to the hrr queue struct
5666 *
5667 * Return value:
5668 * IRQ_NONE / IRQ_HANDLED
5669 **/
5670static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5671{
5672 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
b53d124a 5673 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
56d6aa33 5674 unsigned long hrrq_flags = 0;
05a6538a 5675 struct ipr_cmnd *ipr_cmd, *temp;
5676 irqreturn_t rc = IRQ_NONE;
5677 LIST_HEAD(doneq);
172cd6e1 5678
56d6aa33 5679 spin_lock_irqsave(hrrq->lock, hrrq_flags);
05a6538a 5680
5681 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5682 if (!hrrq->allow_interrupts) {
5683 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5684 return IRQ_NONE;
5685 }
5686
89f8b33c 5687 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 5688 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5689 hrrq->toggle_bit) {
5690 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5691 blk_iopoll_sched(&hrrq->iopoll);
5692 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5693 return IRQ_HANDLED;
5694 }
5695 } else {
5696 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5697 hrrq->toggle_bit)
05a6538a 5698
b53d124a 5699 if (ipr_process_hrrq(hrrq, -1, &doneq))
5700 rc = IRQ_HANDLED;
5701 }
05a6538a 5702
56d6aa33 5703 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5704
5705 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5706 list_del(&ipr_cmd->queue);
5707 del_timer(&ipr_cmd->timer);
5708 ipr_cmd->fast_done(ipr_cmd);
5709 }
5710 return rc;
5711}
5712
5713/**
5714 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5715 * @ioa_cfg: ioa config struct
5716 * @ipr_cmd: ipr command struct
5717 *
5718 * Return value:
5719 * 0 on success / -1 on failure
5720 **/
5721static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5722 struct ipr_cmnd *ipr_cmd)
5723{
5724 int i, nseg;
5725 struct scatterlist *sg;
5726 u32 length;
5727 u32 ioadl_flags = 0;
5728 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5729 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5730 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5731
5732 length = scsi_bufflen(scsi_cmd);
5733 if (!length)
5734 return 0;
5735
5736 nseg = scsi_dma_map(scsi_cmd);
5737 if (nseg < 0) {
51f52a47 5738 if (printk_ratelimit())
d73341bf 5739 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5740 return -1;
5741 }
5742
5743 ipr_cmd->dma_use_sg = nseg;
5744
438b0331 5745 ioarcb->data_transfer_length = cpu_to_be32(length);
b8803b1c
WB
5746 ioarcb->ioadl_len =
5747 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
438b0331 5748
5749 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5750 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5751 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5752 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5753 ioadl_flags = IPR_IOADL_FLAGS_READ;
5754
5755 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5756 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5757 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5758 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5759 }
5760
5761 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5762 return 0;
5763}
5764
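/*
 * Both IOADL builders terminate the descriptor list by OR-ing
 * IPR_IOADL_FLAGS_LAST into the final element rather than relying on a
 * trailing count. A minimal sketch of that idiom over a plain descriptor
 * array; fill_ioadl64() is illustrative, not driver code:
 */
static void fill_ioadl64(struct ipr_ioadl64_desc *ioadl64,
			 struct scatterlist *sglist, int nseg, u32 flags)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nseg, i) {
		ioadl64[i].flags = cpu_to_be32(flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

	/* Flag the final descriptor so the IOA knows where the list ends */
	ioadl64[nseg - 1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}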
5765/**
5766 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5767 * @ioa_cfg: ioa config struct
5768 * @ipr_cmd: ipr command struct
5769 *
5770 * Return value:
5771 * 0 on success / -1 on failure
5772 **/
5773static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5774 struct ipr_cmnd *ipr_cmd)
5775{
5776 int i, nseg;
5777 struct scatterlist *sg;
5778 u32 length;
5779 u32 ioadl_flags = 0;
5780 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5781 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5782 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4 5783
5784 length = scsi_bufflen(scsi_cmd);
5785 if (!length)
5786 return 0;
5787
5788 nseg = scsi_dma_map(scsi_cmd);
5789 if (nseg < 0) {
d73341bf 5790 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5791 return -1;
5792 }
51b1c7e1 5793
5794 ipr_cmd->dma_use_sg = nseg;
5795
5796 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5797 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5798 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5799 ioarcb->data_transfer_length = cpu_to_be32(length);
5800 ioarcb->ioadl_len =
5801 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5802 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5803 ioadl_flags = IPR_IOADL_FLAGS_READ;
5804 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5805 ioarcb->read_ioadl_len =
5806 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5807 }
1da177e4 5808
5809 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5810 ioadl = ioarcb->u.add_data.u.ioadl;
5811 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5812 offsetof(struct ipr_ioarcb, u.add_data));
5813 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5814 }
1da177e4 5815
5816 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5817 ioadl[i].flags_and_data_len =
5818 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5819 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5820 }
5821
5822 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5823 return 0;
5824}
5825
5826/**
5827 * ipr_erp_done - Process completion of ERP for a device
5828 * @ipr_cmd: ipr command struct
5829 *
5830 * This function copies the sense buffer into the scsi_cmd
5831 * struct and pushes the scsi_done function.
5832 *
5833 * Return value:
5834 * nothing
5835 **/
5836static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5837{
5838 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5839 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 5840 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5841
5842 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5843 scsi_cmd->result |= (DID_ERROR << 16);
5844 scmd_printk(KERN_ERR, scsi_cmd,
5845 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5846 } else {
5847 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5848 SCSI_SENSE_BUFFERSIZE);
5849 }
5850
5851 if (res) {
ee0a90fa 5852 if (!ipr_is_naca_model(res))
5853 res->needs_sync_complete = 1;
5854 res->in_erp = 0;
5855 }
63015bc9 5856 scsi_dma_unmap(ipr_cmd->scsi_cmd);
05a6538a 5857 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5858 scsi_cmd->scsi_done(scsi_cmd);
5859}
5860
5861/**
5862 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5863 * @ipr_cmd: ipr command struct
5864 *
5865 * Return value:
5866 * none
5867 **/
5868static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5869{
51b1c7e1 5870 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
96d21f00 5871 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
a32c055f 5872 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5873
5874 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 5875 ioarcb->data_transfer_length = 0;
1da177e4 5876 ioarcb->read_data_transfer_length = 0;
a32c055f 5877 ioarcb->ioadl_len = 0;
1da177e4 5878 ioarcb->read_ioadl_len = 0;
5879 ioasa->hdr.ioasc = 0;
5880 ioasa->hdr.residual_data_len = 0;
5881
5882 if (ipr_cmd->ioa_cfg->sis64)
5883 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5884 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5885 else {
5886 ioarcb->write_ioadl_addr =
5887 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5888 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5889 }
5890}
5891
5892/**
5893 * ipr_erp_request_sense - Send request sense to a device
5894 * @ipr_cmd: ipr command struct
5895 *
5896 * This function sends a request sense to a device as a result
5897 * of a check condition.
5898 *
5899 * Return value:
5900 * nothing
5901 **/
5902static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5903{
5904 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
96d21f00 5905 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5906
5907 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5908 ipr_erp_done(ipr_cmd);
5909 return;
5910 }
5911
5912 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5913
5914 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5915 cmd_pkt->cdb[0] = REQUEST_SENSE;
5916 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5917 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5918 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5919 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5920
5921 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5922 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5923
5924 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5925 IPR_REQUEST_SENSE_TIMEOUT * 2);
5926}
5927
5928/**
5929 * ipr_erp_cancel_all - Send cancel all to a device
5930 * @ipr_cmd: ipr command struct
5931 *
5932 * This function sends a cancel all to a device to clear the
5933 * queue. If we are running TCQ on the device, QERR is set to 1,
5934 * which means all outstanding ops have been dropped on the floor.
5935 * Cancel all will return them to us.
5936 *
5937 * Return value:
5938 * nothing
5939 **/
5940static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5941{
5942 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5943 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5944 struct ipr_cmd_pkt *cmd_pkt;
5945
5946 res->in_erp = 1;
5947
5948 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5949
17ea0126 5950 if (!scsi_cmd->device->simple_tags) {
5951 ipr_erp_request_sense(ipr_cmd);
5952 return;
5953 }
5954
5955 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5956 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5957 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5958
5959 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5960 IPR_CANCEL_ALL_TIMEOUT);
5961}
5962
5963/**
5964 * ipr_dump_ioasa - Dump contents of IOASA
5965 * @ioa_cfg: ioa config struct
5966 * @ipr_cmd: ipr command struct
fe964d0a 5967 * @res: resource entry struct
5968 *
5969 * This function is invoked by the interrupt handler when ops
5970 * fail. It will log the IOASA if appropriate. Only called
5971 * for GPDD ops.
5972 *
5973 * Return value:
5974 * none
5975 **/
5976static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 5977 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5978{
5979 int i;
5980 u16 data_len;
b0692dd4 5981 u32 ioasc, fd_ioasc;
96d21f00 5982 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5983 __be32 *ioasa_data = (__be32 *)ioasa;
5984 int error_index;
5985
5986 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5987 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5988
5989 if (0 == ioasc)
5990 return;
5991
5992 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5993 return;
5994
5995 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5996 error_index = ipr_get_error(fd_ioasc);
5997 else
5998 error_index = ipr_get_error(ioasc);
5999
6000 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6001 /* Don't log an error if the IOA already logged one */
96d21f00 6002 if (ioasa->hdr.ilid != 0)
6003 return;
6004
6005 if (!ipr_is_gscsi(res))
6006 return;
6007
6008 if (ipr_error_table[error_index].log_ioasa == 0)
6009 return;
6010 }
6011
fe964d0a 6012 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4 6013
6014 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6015 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6016 data_len = sizeof(struct ipr_ioasa64);
6017 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
1da177e4 6018 data_len = sizeof(struct ipr_ioasa);
6019
6020 ipr_err("IOASA Dump:\n");
6021
6022 for (i = 0; i < data_len / 4; i += 4) {
6023 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6024 be32_to_cpu(ioasa_data[i]),
6025 be32_to_cpu(ioasa_data[i+1]),
6026 be32_to_cpu(ioasa_data[i+2]),
6027 be32_to_cpu(ioasa_data[i+3]));
6028 }
6029}
6030
6031/**
6032 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 6033 * @ipr_cmd: ipr command struct (the generated sense data is written
 6034 * into ipr_cmd->scsi_cmd->sense_buffer)
6035 *
6036 * Return value:
6037 * none
6038 **/
6039static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6040{
6041 u32 failing_lba;
6042 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6043 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6044 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6045 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6046
6047 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6048
6049 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6050 return;
6051
6052 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6053
6054 if (ipr_is_vset_device(res) &&
6055 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6056 ioasa->u.vset.failing_lba_hi != 0) {
6057 sense_buf[0] = 0x72;
6058 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6059 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6060 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6061
6062 sense_buf[7] = 12;
6063 sense_buf[8] = 0;
6064 sense_buf[9] = 0x0A;
6065 sense_buf[10] = 0x80;
6066
6067 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6068
6069 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6070 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6071 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6072 sense_buf[15] = failing_lba & 0x000000ff;
6073
6074 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6075
6076 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6077 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6078 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6079 sense_buf[19] = failing_lba & 0x000000ff;
6080 } else {
6081 sense_buf[0] = 0x70;
6082 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6083 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6084 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6085
6086 /* Illegal request */
6087 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
96d21f00 6088 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6089 sense_buf[7] = 10; /* additional length */
6090
6091 /* IOARCB was in error */
6092 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6093 sense_buf[15] = 0xC0;
6094 else /* Parameter data was invalid */
6095 sense_buf[15] = 0x80;
6096
6097 sense_buf[16] =
6098 ((IPR_FIELD_POINTER_MASK &
96d21f00 6099 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6100 sense_buf[17] =
6101 (IPR_FIELD_POINTER_MASK &
96d21f00 6102 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6103 } else {
6104 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6105 if (ipr_is_vset_device(res))
6106 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6107 else
6108 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6109
6110 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6111 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6112 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6113 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6114 sense_buf[6] = failing_lba & 0x000000ff;
6115 }
6116
6117 sense_buf[7] = 6; /* additional length */
6118 }
6119 }
6120}
6121
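/*
 * Worked example for the fixed-format (0x70) branch above, with an
 * illustrative failing_lba of 0x0012AB34 and an IOASC of
 * IPR_IOASC_MED_DO_NOT_REALLOC: ipr_gen_sense() stores the LBA
 * big-endian in the Information field and sets the Valid bit, giving
 *
 *	sense_buf[0] = 0x70 | 0x80	(fixed format, Valid bit)
 *	sense_buf[3] = 0x00, sense_buf[4] = 0x12,
 *	sense_buf[5] = 0xAB, sense_buf[6] = 0x34
 *
 * The descriptor-format (0x72) branch instead emits a 12-byte
 * information descriptor (type 0x00, Valid bit 0x80 in byte 10) that
 * carries the full 64-bit vset failing LBA in bytes 12-19.
 */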
ee0a90fa 6122/**
6123 * ipr_get_autosense - Copy autosense data to sense buffer
6124 * @ipr_cmd: ipr command struct
6125 *
6126 * This function copies the autosense buffer to the buffer
6127 * in the scsi_cmd, if there is autosense available.
6128 *
6129 * Return value:
6130 * 1 if autosense was available / 0 if not
6131 **/
6132static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6133{
6134 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6135 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
ee0a90fa 6136
96d21f00 6137 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
ee0a90fa 6138 return 0;
6139
6140 if (ipr_cmd->ioa_cfg->sis64)
6141 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6142 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6143 SCSI_SENSE_BUFFERSIZE));
6144 else
6145 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6146 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6147 SCSI_SENSE_BUFFERSIZE));
ee0a90fa 6148 return 1;
6149}
6150
6151/**
6152 * ipr_erp_start - Process an error response for a SCSI op
6153 * @ioa_cfg: ioa config struct
6154 * @ipr_cmd: ipr command struct
6155 *
6156 * This function determines whether or not to initiate ERP
6157 * on the affected device.
6158 *
6159 * Return value:
6160 * nothing
6161 **/
6162static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6163 struct ipr_cmnd *ipr_cmd)
6164{
6165 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6166 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 6167 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8a048994 6168 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6169
6170 if (!res) {
6171 ipr_scsi_eh_done(ipr_cmd);
6172 return;
6173 }
6174
8a048994 6175 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6176 ipr_gen_sense(ipr_cmd);
6177
6178 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6179
8a048994 6180 switch (masked_ioasc) {
1da177e4 6181 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa 6182 if (ipr_is_naca_model(res))
6183 scsi_cmd->result |= (DID_ABORT << 16);
6184 else
6185 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6186 break;
6187 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 6188 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
1da177e4
LT
6189 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6190 break;
6191 case IPR_IOASC_HW_SEL_TIMEOUT:
6192 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa 6193 if (!ipr_is_naca_model(res))
6194 res->needs_sync_complete = 1;
6195 break;
6196 case IPR_IOASC_SYNC_REQUIRED:
6197 if (!res->in_erp)
6198 res->needs_sync_complete = 1;
6199 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6200 break;
6201 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 6202 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6203 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6204 break;
6205 case IPR_IOASC_BUS_WAS_RESET:
6206 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6207 /*
6208 * Report the bus reset and ask for a retry. The device
6209 * will give CC/UA the next command.
6210 */
6211 if (!res->resetting_device)
6212 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6213 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 6214 if (!ipr_is_naca_model(res))
6215 res->needs_sync_complete = 1;
6216 break;
6217 case IPR_IOASC_HW_DEV_BUS_STATUS:
6218 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6219 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa 6220 if (!ipr_get_autosense(ipr_cmd)) {
6221 if (!ipr_is_naca_model(res)) {
6222 ipr_erp_cancel_all(ipr_cmd);
6223 return;
6224 }
6225 }
1da177e4 6226 }
ee0a90fa 6227 if (!ipr_is_naca_model(res))
6228 res->needs_sync_complete = 1;
6229 break;
6230 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6231 break;
6232 case IPR_IOASC_IR_NON_OPTIMIZED:
6233 if (res->raw_mode) {
6234 res->raw_mode = 0;
6235 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6236 } else
6237 scsi_cmd->result |= (DID_ERROR << 16);
6238 break;
1da177e4 6239 default:
6240 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6241 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 6242 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6243 res->needs_sync_complete = 1;
6244 break;
6245 }
6246
63015bc9 6247 scsi_dma_unmap(ipr_cmd->scsi_cmd);
05a6538a 6248 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6249 scsi_cmd->scsi_done(scsi_cmd);
6250}
6251
6252/**
6253 * ipr_scsi_done - mid-layer done function
6254 * @ipr_cmd: ipr command struct
6255 *
6256 * This function is invoked by the interrupt handler for
6257 * ops generated by the SCSI mid-layer
6258 *
6259 * Return value:
6260 * none
6261 **/
6262static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6263{
6264 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6265 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
96d21f00 6266 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
36b8e180 6267 unsigned long lock_flags;
1da177e4 6268
96d21f00 6269 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6270
6271 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6272 scsi_dma_unmap(scsi_cmd);
6273
36b8e180 6274 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
05a6538a 6275 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4 6276 scsi_cmd->scsi_done(scsi_cmd);
36b8e180 6277 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
172cd6e1 6278 } else {
6279 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6280 spin_lock(&ipr_cmd->hrrq->_lock);
1da177e4 6281 ipr_erp_start(ioa_cfg, ipr_cmd);
6282 spin_unlock(&ipr_cmd->hrrq->_lock);
6283 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
172cd6e1 6284 }
6285}
6286
6287/**
6288 * ipr_queuecommand - Queue a mid-layer request
00bfef2c 6289 * @shost: scsi host struct
1da177e4 6290 * @scsi_cmd: scsi command struct
6291 *
6292 * This function queues a request generated by the mid-layer.
6293 *
6294 * Return value:
6295 * 0 on success
6296 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6297 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6298 **/
6299static int ipr_queuecommand(struct Scsi_Host *shost,
6300 struct scsi_cmnd *scsi_cmd)
6301{
6302 struct ipr_ioa_cfg *ioa_cfg;
6303 struct ipr_resource_entry *res;
6304 struct ipr_ioarcb *ioarcb;
6305 struct ipr_cmnd *ipr_cmd;
56d6aa33 6306 unsigned long hrrq_flags, lock_flags;
d12f1576 6307 int rc;
05a6538a 6308 struct ipr_hrr_queue *hrrq;
6309 int hrrq_id;
1da177e4 6310
6311 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6312
1da177e4 6313 scsi_cmd->result = (DID_OK << 16);
00bfef2c 6314 res = scsi_cmd->device->hostdata;
56d6aa33 6315
6316 if (ipr_is_gata(res) && res->sata_port) {
6317 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6318 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6319 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6320 return rc;
6321 }
6322
05a6538a 6323 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6324 hrrq = &ioa_cfg->hrrq[hrrq_id];
1da177e4 6325
56d6aa33 6326 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6327 /*
 6328 * We are currently blocking all devices due to a host reset.
 6329 * We have told the mid-layer to stop giving us new requests, but
6330 * ERP ops don't count. FIXME
6331 */
bfae7820 6332 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
56d6aa33 6333 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6334 return SCSI_MLQUEUE_HOST_BUSY;
00bfef2c 6335 }
6336
6337 /*
6338 * FIXME - Create scsi_set_host_offline interface
6339 * and the ioa_is_dead check can be removed
6340 */
bfae7820 6341 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
56d6aa33 6342 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c 6343 goto err_nodev;
6344 }
6345
05a6538a 6346 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6347 if (ipr_cmd == NULL) {
56d6aa33 6348 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 6349 return SCSI_MLQUEUE_HOST_BUSY;
6350 }
56d6aa33 6351 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
35a39691 6352
172cd6e1 6353 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
1da177e4 6354 ioarcb = &ipr_cmd->ioarcb;
6355
6356 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6357 ipr_cmd->scsi_cmd = scsi_cmd;
172cd6e1 6358 ipr_cmd->done = ipr_scsi_eh_done;
6359
6360 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6361 if (scsi_cmd->underflow == 0)
6362 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6363
1da177e4 6364 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6365 if (ipr_is_gscsi(res) && res->reset_occurred) {
6366 res->reset_occurred = 0;
ab6c10b1 6367 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
0b1f8d44 6368 }
1da177e4 6369 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6370 if (scsi_cmd->flags & SCMD_TAGGED)
6371 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6372 else
6373 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6374 }
6375
6376 if (scsi_cmd->cmnd[0] >= 0xC0 &&
05a6538a 6377 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
1da177e4 6378 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
05a6538a 6379 }
6380 if (res->raw_mode && ipr_is_af_dasd_device(res))
6381 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
1da177e4 6382
6383 if (ioa_cfg->sis64)
6384 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6385 else
6386 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
1da177e4 6387
56d6aa33 6388 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6389 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
05a6538a 6390 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6391 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6392 if (!rc)
6393 scsi_dma_unmap(scsi_cmd);
a5fb407e 6394 return SCSI_MLQUEUE_HOST_BUSY;
6395 }
6396
56d6aa33 6397 if (unlikely(hrrq->ioa_is_dead)) {
05a6538a 6398 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6399 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6400 scsi_dma_unmap(scsi_cmd);
6401 goto err_nodev;
6402 }
6403
6404 ioarcb->res_handle = res->res_handle;
6405 if (res->needs_sync_complete) {
6406 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6407 res->needs_sync_complete = 0;
6408 }
05a6538a 6409 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
00bfef2c 6410 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
a5fb407e 6411 ipr_send_command(ipr_cmd);
56d6aa33 6412 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6413 return 0;
1da177e4 6414
00bfef2c 6415err_nodev:
56d6aa33 6416 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6417 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6418 scsi_cmd->result = (DID_NO_CONNECT << 16);
6419 scsi_cmd->scsi_done(scsi_cmd);
56d6aa33 6420 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6421 return 0;
6422}
f281233d 6423
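/*
 * ipr_queuecommand() above spreads commands across the HRRQs via
 * ipr_get_hrrq_index(), which is defined elsewhere in this file. The
 * sketch below shows one plausible round-robin selector; it is an
 * assumption for illustration only, not the driver's implementation
 * (which also reserves the first HRRQ for initialization traffic), and
 * it assumes an ioa_cfg->hrrq_num queue count.
 */
static int example_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	static atomic_t cursor = ATOMIC_INIT(0);

	/* Cycle through 0 .. hrrq_num - 1 to spread the queueing load */
	return atomic_add_return(1, &cursor) % ioa_cfg->hrrq_num;
}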
6424/**
6425 * ipr_ioctl - IOCTL handler
6426 * @sdev: scsi device struct
6427 * @cmd: IOCTL cmd
6428 * @arg: IOCTL arg
6429 *
6430 * Return value:
6431 * 0 on success / other on failure
6432 **/
bd705f2d 6433static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6434{
6435 struct ipr_resource_entry *res;
6436
6437 res = (struct ipr_resource_entry *)sdev->hostdata;
6438 if (res && ipr_is_gata(res)) {
6439 if (cmd == HDIO_GET_IDENTITY)
6440 return -ENOTTY;
94be9a58 6441 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
0ce3a7e5 6442 }
6443
6444 return -EINVAL;
6445}
6446
6447/**
 6448 * ipr_ioa_info - Get information about the card/driver
 6449 * @host: scsi host struct
6450 *
6451 * Return value:
6452 * pointer to buffer with description string
6453 **/
203fa3fe 6454static const char *ipr_ioa_info(struct Scsi_Host *host)
6455{
6456 static char buffer[512];
6457 struct ipr_ioa_cfg *ioa_cfg;
6458 unsigned long lock_flags = 0;
6459
6460 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6461
6462 spin_lock_irqsave(host->host_lock, lock_flags);
6463 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6464 spin_unlock_irqrestore(host->host_lock, lock_flags);
6465
6466 return buffer;
6467}
6468
6469static struct scsi_host_template driver_template = {
6470 .module = THIS_MODULE,
6471 .name = "IPR",
6472 .info = ipr_ioa_info,
35a39691 6473 .ioctl = ipr_ioctl,
6474 .queuecommand = ipr_queuecommand,
6475 .eh_abort_handler = ipr_eh_abort,
6476 .eh_device_reset_handler = ipr_eh_dev_reset,
6477 .eh_host_reset_handler = ipr_eh_host_reset,
6478 .slave_alloc = ipr_slave_alloc,
6479 .slave_configure = ipr_slave_configure,
6480 .slave_destroy = ipr_slave_destroy,
f688f96d 6481 .scan_finished = ipr_scan_finished,
6482 .target_alloc = ipr_target_alloc,
6483 .target_destroy = ipr_target_destroy,
1da177e4 6484 .change_queue_depth = ipr_change_queue_depth,
6485 .bios_param = ipr_biosparam,
6486 .can_queue = IPR_MAX_COMMANDS,
6487 .this_id = -1,
6488 .sg_tablesize = IPR_MAX_SGLIST,
6489 .max_sectors = IPR_IOA_MAX_SECTORS,
6490 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6491 .use_clustering = ENABLE_CLUSTERING,
6492 .shost_attrs = ipr_ioa_attrs,
6493 .sdev_attrs = ipr_dev_attrs,
54b2b50c 6494 .proc_name = IPR_NAME,
2ecb204d 6495 .use_blk_tags = 1,
6496};
6497
6498/**
6499 * ipr_ata_phy_reset - libata phy_reset handler
6500 * @ap: ata port to reset
6501 *
6502 **/
6503static void ipr_ata_phy_reset(struct ata_port *ap)
6504{
6505 unsigned long flags;
6506 struct ipr_sata_port *sata_port = ap->private_data;
6507 struct ipr_resource_entry *res = sata_port->res;
6508 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6509 int rc;
6510
6511 ENTER;
6512 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6513 while (ioa_cfg->in_reset_reload) {
6514 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6515 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6516 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6517 }
6518
56d6aa33 6519 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6520 goto out_unlock;
6521
6522 rc = ipr_device_reset(ioa_cfg, res);
6523
6524 if (rc) {
3e4ec344 6525 ap->link.device[0].class = ATA_DEV_NONE;
6526 goto out_unlock;
6527 }
6528
6529 ap->link.device[0].class = res->ata_class;
6530 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
3e4ec344 6531 ap->link.device[0].class = ATA_DEV_NONE;
6532
6533out_unlock:
6534 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6535 LEAVE;
6536}
6537
6538/**
6539 * ipr_ata_post_internal - Cleanup after an internal command
6540 * @qc: ATA queued command
6541 *
6542 * Return value:
6543 * none
6544 **/
6545static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6546{
6547 struct ipr_sata_port *sata_port = qc->ap->private_data;
6548 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6549 struct ipr_cmnd *ipr_cmd;
05a6538a 6550 struct ipr_hrr_queue *hrrq;
6551 unsigned long flags;
6552
6553 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6554 while (ioa_cfg->in_reset_reload) {
6555 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6556 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6557 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6558 }
6559
05a6538a 6560 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 6561 spin_lock(&hrrq->_lock);
05a6538a 6562 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6563 if (ipr_cmd->qc == qc) {
6564 ipr_device_reset(ioa_cfg, sata_port->res);
6565 break;
6566 }
35a39691 6567 }
56d6aa33 6568 spin_unlock(&hrrq->_lock);
6569 }
6570 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6571}
6572
6573/**
6574 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6575 * @regs: destination
6576 * @tf: source ATA taskfile
6577 *
6578 * Return value:
6579 * none
6580 **/
6581static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6582 struct ata_taskfile *tf)
6583{
6584 regs->feature = tf->feature;
6585 regs->nsect = tf->nsect;
6586 regs->lbal = tf->lbal;
6587 regs->lbam = tf->lbam;
6588 regs->lbah = tf->lbah;
6589 regs->device = tf->device;
6590 regs->command = tf->command;
6591 regs->hob_feature = tf->hob_feature;
6592 regs->hob_nsect = tf->hob_nsect;
6593 regs->hob_lbal = tf->hob_lbal;
6594 regs->hob_lbam = tf->hob_lbam;
6595 regs->hob_lbah = tf->hob_lbah;
6596 regs->ctl = tf->ctl;
6597}
6598
6599/**
6600 * ipr_sata_done - done function for SATA commands
6601 * @ipr_cmd: ipr command struct
6602 *
6603 * This function is invoked by the interrupt handler for
6604 * ops generated by the SCSI mid-layer to SATA devices
6605 *
6606 * Return value:
6607 * none
6608 **/
6609static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6610{
6611 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6612 struct ata_queued_cmd *qc = ipr_cmd->qc;
6613 struct ipr_sata_port *sata_port = qc->ap->private_data;
6614 struct ipr_resource_entry *res = sata_port->res;
96d21f00 6615 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
35a39691 6616
56d6aa33 6617 spin_lock(&ipr_cmd->hrrq->_lock);
6618 if (ipr_cmd->ioa_cfg->sis64)
6619 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6620 sizeof(struct ipr_ioasa_gata));
6621 else
6622 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6623 sizeof(struct ipr_ioasa_gata));
6624 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6625
96d21f00 6626 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
3e7ebdfa 6627 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6628
6629 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
96d21f00 6630 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
35a39691 6631 else
96d21f00 6632 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
05a6538a 6633 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
56d6aa33 6634 spin_unlock(&ipr_cmd->hrrq->_lock);
6635 ata_qc_complete(qc);
6636}
6637
6638/**
6639 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6640 * @ipr_cmd: ipr command struct
6641 * @qc: ATA queued command
6642 *
6643 **/
6644static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6645 struct ata_queued_cmd *qc)
6646{
6647 u32 ioadl_flags = 0;
6648 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1ac7c26d 6649 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6650 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6651 int len = qc->nbytes;
6652 struct scatterlist *sg;
6653 unsigned int si;
6654 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6655
6656 if (len == 0)
6657 return;
6658
6659 if (qc->dma_dir == DMA_TO_DEVICE) {
6660 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6661 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6662 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6663 ioadl_flags = IPR_IOADL_FLAGS_READ;
6664
6665 ioarcb->data_transfer_length = cpu_to_be32(len);
6666 ioarcb->ioadl_len =
6667 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6668 ioarcb->u.sis64_addr_data.data_ioadl_addr =
1ac7c26d 6669 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
a32c055f
WB
6670
6671 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6672 ioadl64->flags = cpu_to_be32(ioadl_flags);
6673 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6674 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6675
6676 last_ioadl64 = ioadl64;
6677 ioadl64++;
6678 }
6679
6680 if (likely(last_ioadl64))
6681 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6682}
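/*
 * Illustrative sketch (not part of the driver): the trailing-pointer
 * idiom used above. Tracking the most recently written descriptor lets
 * the LAST flag be applied once, after the loop, instead of testing for
 * the final element on every iteration. The types and flag value here
 * are placeholders.
 */
struct example_sg_desc {
	u32 flags;
	u32 len;
};

static void example_mark_last_desc(struct example_sg_desc *desc, int n,
				   u32 last_flag)
{
	struct example_sg_desc *last = NULL;
	int i;

	for (i = 0; i < n; i++) {
		/* per-element setup would happen here */
		last = &desc[i];
	}

	if (last)	/* n == 0: nothing was written, nothing to mark */
		last->flags |= last_flag;
}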
6683
35a39691
BK
6684/**
6685 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6686 * @ipr_cmd: ipr command struct
6687 * @qc: ATA queued command
6688 *
6689 **/
6690static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6691 struct ata_queued_cmd *qc)
6692{
6693 u32 ioadl_flags = 0;
6694 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 6695 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3be6cbd7 6696 struct ipr_ioadl_desc *last_ioadl = NULL;
dde20207 6697 int len = qc->nbytes;
35a39691 6698 struct scatterlist *sg;
ff2aeb1e 6699 unsigned int si;
35a39691
BK
6700
6701 if (len == 0)
6702 return;
6703
6704 if (qc->dma_dir == DMA_TO_DEVICE) {
6705 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6706 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
6707 ioarcb->data_transfer_length = cpu_to_be32(len);
6708 ioarcb->ioadl_len =
35a39691
BK
6709 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6710 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6711 ioadl_flags = IPR_IOADL_FLAGS_READ;
6712 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6713 ioarcb->read_ioadl_len =
6714 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6715 }
6716
ff2aeb1e 6717 for_each_sg(qc->sg, sg, qc->n_elem, si) {
35a39691
BK
6718 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6719 ioadl->address = cpu_to_be32(sg_dma_address(sg));
3be6cbd7
JG
6720
6721 last_ioadl = ioadl;
6722 ioadl++;
35a39691 6723 }
3be6cbd7
JG
6724
6725 if (likely(last_ioadl))
6726 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
35a39691
BK
6727}
6728
56d6aa33 6729/**
6730 * ipr_qc_defer - Get a free ipr_cmd
6731 * @qc: queued command
6732 *
6733 * Return value:
6734 * 0 if success
6735 **/
6736static int ipr_qc_defer(struct ata_queued_cmd *qc)
6737{
6738 struct ata_port *ap = qc->ap;
6739 struct ipr_sata_port *sata_port = ap->private_data;
6740 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6741 struct ipr_cmnd *ipr_cmd;
6742 struct ipr_hrr_queue *hrrq;
6743 int hrrq_id;
6744
6745 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6746 hrrq = &ioa_cfg->hrrq[hrrq_id];
6747
6748 qc->lldd_task = NULL;
6749 spin_lock(&hrrq->_lock);
6750 if (unlikely(hrrq->ioa_is_dead)) {
6751 spin_unlock(&hrrq->_lock);
6752 return 0;
6753 }
6754
6755 if (unlikely(!hrrq->allow_cmds)) {
6756 spin_unlock(&hrrq->_lock);
6757 return ATA_DEFER_LINK;
6758 }
6759
6760 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6761 if (ipr_cmd == NULL) {
6762 spin_unlock(&hrrq->_lock);
6763 return ATA_DEFER_LINK;
6764 }
6765
6766 qc->lldd_task = ipr_cmd;
6767 spin_unlock(&hrrq->_lock);
6768 return 0;
6769}
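/*
 * Illustrative sketch (not part of the driver): the reserve-then-issue
 * hand-off above. ->qc_defer() claims a command block under the per-queue
 * lock and parks it in qc->lldd_task, so ->qc_issue() never has to
 * allocate at a point where ATA_DEFER_LINK can no longer be returned.
 * A plausible shape for the reservation step (the real work is done by
 * __ipr_get_free_ipr_cmnd, whose internals may differ):
 */
static struct ipr_cmnd *example_try_reserve_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	spin_lock(&hrrq->_lock);
	if (!list_empty(&hrrq->hrrq_free_q)) {
		ipr_cmd = list_first_entry(&hrrq->hrrq_free_q,
					   struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}
	spin_unlock(&hrrq->_lock);

	return ipr_cmd;	/* NULL tells the caller to defer */
}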
6770
35a39691
BK
6771/**
6772 * ipr_qc_issue - Issue a SATA qc to a device
6773 * @qc: queued command
6774 *
6775 * Return value:
6776 * 0 if success
6777 **/
6778static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6779{
6780 struct ata_port *ap = qc->ap;
6781 struct ipr_sata_port *sata_port = ap->private_data;
6782 struct ipr_resource_entry *res = sata_port->res;
6783 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6784 struct ipr_cmnd *ipr_cmd;
6785 struct ipr_ioarcb *ioarcb;
6786 struct ipr_ioarcb_ata_regs *regs;
6787
56d6aa33 6788 if (qc->lldd_task == NULL)
6789 ipr_qc_defer(qc);
6790
6791 ipr_cmd = qc->lldd_task;
6792 if (ipr_cmd == NULL)
0feeed82 6793 return AC_ERR_SYSTEM;
35a39691 6794
56d6aa33 6795 qc->lldd_task = NULL;
6796 spin_lock(&ipr_cmd->hrrq->_lock);
6797 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6798 ipr_cmd->hrrq->ioa_is_dead)) {
6799 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6800 spin_unlock(&ipr_cmd->hrrq->_lock);
6801 return AC_ERR_SYSTEM;
6802 }
6803
05a6538a 6804 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
35a39691 6805 ioarcb = &ipr_cmd->ioarcb;
35a39691 6806
a32c055f
WB
6807 if (ioa_cfg->sis64) {
6808 regs = &ipr_cmd->i.ata_ioadl.regs;
6809 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6810 } else
6811 regs = &ioarcb->u.add_data.u.regs;
6812
6813 memset(regs, 0, sizeof(*regs));
6814 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
35a39691 6815
56d6aa33 6816 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
35a39691
BK
6817 ipr_cmd->qc = qc;
6818 ipr_cmd->done = ipr_sata_done;
3e7ebdfa 6819 ipr_cmd->ioarcb.res_handle = res->res_handle;
35a39691
BK
6820 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6821 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6822 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
dde20207 6823 ipr_cmd->dma_use_sg = qc->n_elem;
35a39691 6824
a32c055f
WB
6825 if (ioa_cfg->sis64)
6826 ipr_build_ata_ioadl64(ipr_cmd, qc);
6827 else
6828 ipr_build_ata_ioadl(ipr_cmd, qc);
6829
35a39691
BK
6830 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6831 ipr_copy_sata_tf(regs, &qc->tf);
6832 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
3e7ebdfa 6833 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
35a39691
BK
6834
6835 switch (qc->tf.protocol) {
6836 case ATA_PROT_NODATA:
6837 case ATA_PROT_PIO:
6838 break;
6839
6840 case ATA_PROT_DMA:
6841 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6842 break;
6843
0dc36888
TH
6844 case ATAPI_PROT_PIO:
6845 case ATAPI_PROT_NODATA:
35a39691
BK
6846 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6847 break;
6848
0dc36888 6849 case ATAPI_PROT_DMA:
35a39691
BK
6850 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6851 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6852 break;
6853
6854 default:
6855 WARN_ON(1);
56d6aa33 6856 spin_unlock(&ipr_cmd->hrrq->_lock);
0feeed82 6857 return AC_ERR_INVALID;
35a39691
BK
6858 }
6859
a32c055f 6860 ipr_send_command(ipr_cmd);
56d6aa33 6861 spin_unlock(&ipr_cmd->hrrq->_lock);
a32c055f 6862
35a39691
BK
6863 return 0;
6864}
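/*
 * Illustrative sketch (not part of the driver): the protocol switch above
 * sets two independent bits - PACKET_CMD for ATAPI protocols and
 * XFER_TYPE_DMA for DMA protocols - so the same mapping could be
 * expressed with libata's protocol predicates:
 */
static u32 example_ata_proto_to_flags(u8 protocol)
{
	u32 flags = 0;

	if (ata_is_atapi(protocol))
		flags |= IPR_ATA_FLAG_PACKET_CMD;
	if (ata_is_dma(protocol))
		flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;

	return flags;
}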
6865
4c9bf4e7
TH
6866/**
6867 * ipr_qc_fill_rtf - Read result TF
6868 * @qc: ATA queued command
6869 *
6870 * Return value:
6871 * true
6872 **/
6873static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6874{
6875 struct ipr_sata_port *sata_port = qc->ap->private_data;
6876 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6877 struct ata_taskfile *tf = &qc->result_tf;
6878
6879 tf->feature = g->error;
6880 tf->nsect = g->nsect;
6881 tf->lbal = g->lbal;
6882 tf->lbam = g->lbam;
6883 tf->lbah = g->lbah;
6884 tf->device = g->device;
6885 tf->command = g->status;
6886 tf->hob_nsect = g->hob_nsect;
6887 tf->hob_lbal = g->hob_lbal;
6888 tf->hob_lbam = g->hob_lbam;
6889 tf->hob_lbah = g->hob_lbah;
4c9bf4e7
TH
6890
6891 return true;
6892}
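/*
 * Illustrative sketch (not part of the driver): in the result taskfile
 * filled in above, the device's status register is reported through
 * tf->command and its error register through tf->feature, so a caller
 * checking for a device-level failure might test the ATA_ERR status bit:
 */
static bool example_result_tf_failed(const struct ata_taskfile *tf)
{
	return (tf->command & ATA_ERR) != 0;	/* ATA_ERR: bit 0 of status */
}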
6893
35a39691 6894static struct ata_port_operations ipr_sata_ops = {
35a39691 6895 .phy_reset = ipr_ata_phy_reset,
a1efdaba 6896 .hardreset = ipr_sata_reset,
35a39691 6897 .post_internal_cmd = ipr_ata_post_internal,
35a39691 6898 .qc_prep = ata_noop_qc_prep,
56d6aa33 6899 .qc_defer = ipr_qc_defer,
35a39691 6900 .qc_issue = ipr_qc_issue,
4c9bf4e7 6901 .qc_fill_rtf = ipr_qc_fill_rtf,
35a39691
BK
6902 .port_start = ata_sas_port_start,
6903 .port_stop = ata_sas_port_stop
6904};
6905
6906static struct ata_port_info sata_port_info = {
5067c046
SL
6907 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
6908 ATA_FLAG_SAS_HOST,
0f2e0330
SS
6909 .pio_mask = ATA_PIO4_ONLY,
6910 .mwdma_mask = ATA_MWDMA2,
6911 .udma_mask = ATA_UDMA6,
35a39691
BK
6912 .port_ops = &ipr_sata_ops
6913};
6914
1da177e4
LT
6915#ifdef CONFIG_PPC_PSERIES
6916static const u16 ipr_blocked_processors[] = {
d3dbeef6
ME
6917 PVR_NORTHSTAR,
6918 PVR_PULSAR,
6919 PVR_POWER4,
6920 PVR_ICESTAR,
6921 PVR_SSTAR,
6922 PVR_POWER4p,
6923 PVR_630,
6924 PVR_630p
1da177e4
LT
6925};
6926
6927/**
6928 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6929 * @ioa_cfg: ioa cfg struct
6930 *
6931 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6932 * certain pSeries hardware. This function determines if the given
6933 * adapter is in one of these configurations or not.
6934 *
6935 * Return value:
6936 * 1 if adapter is not supported / 0 if adapter is supported
6937 **/
6938static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6939{
1da177e4
LT
6940 int i;
6941
44c10138 6942 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
203fa3fe 6943 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
d3dbeef6 6944 if (pvr_version_is(ipr_blocked_processors[i]))
44c10138 6945 return 1;
1da177e4
LT
6946 }
6947 }
6948 return 0;
6949}
6950#else
6951#define ipr_invalid_adapter(ioa_cfg) 0
6952#endif
6953
6954/**
6955 * ipr_ioa_bringdown_done - IOA bring down completion.
6956 * @ipr_cmd: ipr command struct
6957 *
6958 * This function processes the completion of an adapter bring down.
6959 * It wakes any reset sleepers.
6960 *
6961 * Return value:
6962 * IPR_RC_JOB_RETURN
6963 **/
6964static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6965{
6966 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96b04db9 6967 int i;
1da177e4
LT
6968
6969 ENTER;
bfae7820
BK
6970 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6971 ipr_trace;
6972 spin_unlock_irq(ioa_cfg->host->host_lock);
6973 scsi_unblock_requests(ioa_cfg->host);
6974 spin_lock_irq(ioa_cfg->host->host_lock);
6975 }
6976
1da177e4
LT
6977 ioa_cfg->in_reset_reload = 0;
6978 ioa_cfg->reset_retries = 0;
96b04db9 6979 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6980 spin_lock(&ioa_cfg->hrrq[i]._lock);
6981 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6982 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6983 }
6984 wmb();
6985
05a6538a 6986 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4 6987 wake_up_all(&ioa_cfg->reset_wait_q);
1da177e4
LT
6988 LEAVE;
6989
6990 return IPR_RC_JOB_RETURN;
6991}
6992
6993/**
6994 * ipr_ioa_reset_done - IOA reset completion.
6995 * @ipr_cmd: ipr command struct
6996 *
6997 * This function processes the completion of an adapter reset.
6998 * It schedules any necessary mid-layer add/removes and
6999 * wakes any reset sleepers.
7000 *
7001 * Return value:
7002 * IPR_RC_JOB_RETURN
7003 **/
7004static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7005{
7006 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7007 struct ipr_resource_entry *res;
7008 struct ipr_hostrcb *hostrcb, *temp;
56d6aa33 7009 int i = 0, j;
1da177e4
LT
7010
7011 ENTER;
7012 ioa_cfg->in_reset_reload = 0;
56d6aa33 7013 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7014 spin_lock(&ioa_cfg->hrrq[j]._lock);
7015 ioa_cfg->hrrq[j].allow_cmds = 1;
7016 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7017 }
7018 wmb();
1da177e4 7019 ioa_cfg->reset_cmd = NULL;
3d1d0da6 7020 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
7021
7022 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
f688f96d 7023 if (res->add_to_ml || res->del_from_ml) {
1da177e4
LT
7024 ipr_trace;
7025 break;
7026 }
7027 }
7028 schedule_work(&ioa_cfg->work_q);
7029
7030 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
7031 list_del(&hostrcb->queue);
7032 if (i++ < IPR_NUM_LOG_HCAMS)
7033 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
7034 else
7035 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
7036 }
7037
6bb04170 7038 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
1da177e4
LT
7039 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7040
7041 ioa_cfg->reset_retries = 0;
05a6538a 7042 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
7043 wake_up_all(&ioa_cfg->reset_wait_q);
7044
30237853 7045 spin_unlock(ioa_cfg->host->host_lock);
1da177e4 7046 scsi_unblock_requests(ioa_cfg->host);
30237853 7047 spin_lock(ioa_cfg->host->host_lock);
1da177e4 7048
56d6aa33 7049 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
1da177e4
LT
7050 scsi_block_requests(ioa_cfg->host);
7051
f688f96d 7052 schedule_work(&ioa_cfg->work_q);
1da177e4
LT
7053 LEAVE;
7054 return IPR_RC_JOB_RETURN;
7055}
7056
7057/**
7058 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7059 * @supported_dev: supported device struct
7060 * @vpids: vendor product id struct
7061 *
7062 * Return value:
7063 * none
7064 **/
7065static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7066 struct ipr_std_inq_vpids *vpids)
7067{
7068 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7069 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7070 supported_dev->num_records = 1;
7071 supported_dev->data_length =
7072 cpu_to_be16(sizeof(struct ipr_supported_device));
7073 supported_dev->reserved = 0;
7074}
7075
7076/**
7077 * ipr_set_supported_devs - Send Set Supported Devices for a device
7078 * @ipr_cmd: ipr command struct
7079 *
a32c055f 7080 * This function sends a Set Supported Devices command to the adapter
1da177e4
LT
7081 *
7082 * Return value:
7083 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7084 **/
7085static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7086{
7087 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7088 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
1da177e4
LT
7089 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7090 struct ipr_resource_entry *res = ipr_cmd->u.res;
7091
7092 ipr_cmd->job_step = ipr_ioa_reset_done;
7093
7094 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 7095 if (!ipr_is_scsi_disk(res))
1da177e4
LT
7096 continue;
7097
7098 ipr_cmd->u.res = res;
3e7ebdfa 7099 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
1da177e4
LT
7100
7101 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7102 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7103 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7104
7105 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 7106 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
1da177e4
LT
7107 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7108 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7109
a32c055f
WB
7110 ipr_init_ioadl(ipr_cmd,
7111 ioa_cfg->vpd_cbs_dma +
7112 offsetof(struct ipr_misc_cbs, supp_dev),
7113 sizeof(struct ipr_supported_device),
7114 IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
7115
7116 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7117 IPR_SET_SUP_DEVICE_TIMEOUT);
7118
3e7ebdfa
WB
7119 if (!ioa_cfg->sis64)
7120 ipr_cmd->job_step = ipr_set_supported_devs;
05a6538a 7121 LEAVE;
1da177e4
LT
7122 return IPR_RC_JOB_RETURN;
7123 }
7124
05a6538a 7125 LEAVE;
1da177e4
LT
7126 return IPR_RC_JOB_CONTINUE;
7127}
7128
7129/**
7130 * ipr_get_mode_page - Locate specified mode page
7131 * @mode_pages: mode page buffer
7132 * @page_code: page code to find
7133 * @len: minimum required length for mode page
7134 *
7135 * Return value:
7136 * pointer to mode page / NULL on failure
7137 **/
7138static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7139 u32 page_code, u32 len)
7140{
7141 struct ipr_mode_page_hdr *mode_hdr;
7142 u32 page_length;
7143 u32 length;
7144
7145 if (!mode_pages || (mode_pages->hdr.length == 0))
7146 return NULL;
7147
7148 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7149 mode_hdr = (struct ipr_mode_page_hdr *)
7150 (mode_pages->data + mode_pages->hdr.block_desc_len);
7151
7152 while (length) {
7153 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7154 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7155 return mode_hdr;
7156 break;
7157 } else {
7158 page_length = (sizeof(struct ipr_mode_page_hdr) +
7159 mode_hdr->page_length);
7160 length -= page_length;
7161 mode_hdr = (struct ipr_mode_page_hdr *)
7162 ((unsigned long)mode_hdr + page_length);
7163 }
7164 }
7165 return NULL;
7166}
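/*
 * Illustrative usage sketch (not part of the driver): callers ask for a
 * page by code plus the minimum length they need, e.g. page 0x28 as used
 * during bring-up below:
 */
static struct ipr_mode_page28 *example_find_page28(struct ipr_mode_pages *pages)
{
	return ipr_get_mode_page(pages, 0x28,
				 sizeof(struct ipr_mode_page28));
}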
7167
7168/**
7169 * ipr_check_term_power - Check for term power errors
7170 * @ioa_cfg: ioa config struct
7171 * @mode_pages: IOAFP mode pages buffer
7172 *
7173 * Check the IOAFP's mode page 28 for term power errors
7174 *
7175 * Return value:
7176 * nothing
7177 **/
7178static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7179 struct ipr_mode_pages *mode_pages)
7180{
7181 int i;
7182 int entry_length;
7183 struct ipr_dev_bus_entry *bus;
7184 struct ipr_mode_page28 *mode_page;
7185
7186 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7187 sizeof(struct ipr_mode_page28));
7188
7189 entry_length = mode_page->entry_length;
7190
7191 bus = mode_page->bus;
7192
7193 for (i = 0; i < mode_page->num_entries; i++) {
7194 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7195 dev_err(&ioa_cfg->pdev->dev,
7196 "Term power is absent on scsi bus %d\n",
7197 bus->res_addr.bus);
7198 }
7199
7200 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7201 }
7202}
7203
7204/**
7205 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7206 * @ioa_cfg: ioa config struct
7207 *
7208 * Looks through the config table for SES devices. If an SES
7209 * device has an entry in the SES table specifying a maximum
7210 * SCSI bus speed, the bus speed is limited accordingly.
7211 *
7212 * Return value:
7213 * none
7214 **/
7215static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7216{
7217 u32 max_xfer_rate;
7218 int i;
7219
7220 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7221 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7222 ioa_cfg->bus_attr[i].bus_width);
7223
7224 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7225 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7226 }
7227}
7228
7229/**
7230 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7231 * @ioa_cfg: ioa config struct
7232 * @mode_pages: mode page 28 buffer
7233 *
7234 * Updates mode page 28 based on driver configuration
7235 *
7236 * Return value:
7237 * none
7238 **/
7239static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
203fa3fe 7240 struct ipr_mode_pages *mode_pages)
1da177e4
LT
7241{
7242 int i, entry_length;
7243 struct ipr_dev_bus_entry *bus;
7244 struct ipr_bus_attributes *bus_attr;
7245 struct ipr_mode_page28 *mode_page;
7246
7247 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7248 sizeof(struct ipr_mode_page28));
7249
7250 entry_length = mode_page->entry_length;
7251
7252 /* Loop for each device bus entry */
7253 for (i = 0, bus = mode_page->bus;
7254 i < mode_page->num_entries;
7255 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7256 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7257 dev_err(&ioa_cfg->pdev->dev,
7258 "Invalid resource address reported: 0x%08X\n",
7259 IPR_GET_PHYS_LOC(bus->res_addr));
7260 continue;
7261 }
7262
7263 bus_attr = &ioa_cfg->bus_attr[i];
7264 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7265 bus->bus_width = bus_attr->bus_width;
7266 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7267 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7268 if (bus_attr->qas_enabled)
7269 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7270 else
7271 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7272 }
7273}
7274
7275/**
7276 * ipr_build_mode_select - Build a mode select command
7277 * @ipr_cmd: ipr command struct
7278 * @res_handle: resource handle to send command to
7279 * @parm: Byte 1 of the Mode Select CDB
7280 * @dma_addr: DMA buffer address
7281 * @xfer_len: data transfer length
7282 *
7283 * Return value:
7284 * none
7285 **/
7286static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
a32c055f
WB
7287 __be32 res_handle, u8 parm,
7288 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7289{
1da177e4
LT
7290 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7291
7292 ioarcb->res_handle = res_handle;
7293 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7294 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7295 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7296 ioarcb->cmd_pkt.cdb[1] = parm;
7297 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7298
a32c055f 7299 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
7300}
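/*
 * Illustrative sketch (not part of the driver): the 6-byte MODE SELECT
 * CDB built above follows the standard SPC MODE SELECT(6) layout. The
 * parm value 0x11 passed by the callers below decodes as PF (0x10, page
 * format) plus SP (0x01, save pages):
 */
static void example_mode_select6_cdb(u8 cdb[6], u8 parm, u8 param_list_len)
{
	memset(cdb, 0, 6);
	cdb[0] = MODE_SELECT;		/* opcode 0x15 */
	cdb[1] = parm;			/* PF and SP bits */
	cdb[4] = param_list_len;	/* parameter list length */
}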
7301
7302/**
7303 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7304 * @ipr_cmd: ipr command struct
7305 *
7306 * This function sets up the SCSI bus attributes and sends
7307 * a Mode Select for Page 28 to activate them.
7308 *
7309 * Return value:
7310 * IPR_RC_JOB_RETURN
7311 **/
7312static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7313{
7314 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7315 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7316 int length;
7317
7318 ENTER;
4733804c
BK
7319 ipr_scsi_bus_speed_limit(ioa_cfg);
7320 ipr_check_term_power(ioa_cfg, mode_pages);
7321 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7322 length = mode_pages->hdr.length + 1;
7323 mode_pages->hdr.length = 0;
1da177e4
LT
7324
7325 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7326 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7327 length);
7328
f72919ec
WB
7329 ipr_cmd->job_step = ipr_set_supported_devs;
7330 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7331 struct ipr_resource_entry, queue);
1da177e4
LT
7332 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7333
7334 LEAVE;
7335 return IPR_RC_JOB_RETURN;
7336}
7337
7338/**
7339 * ipr_build_mode_sense - Builds a mode sense command
7340 * @ipr_cmd: ipr command struct
7341 * @res_handle: resource handle to send command to
7342 * @parm: Byte 2 of mode sense command
7343 * @dma_addr: DMA address of mode sense buffer
7344 * @xfer_len: Size of DMA buffer
7345 *
7346 * Return value:
7347 * none
7348 **/
7349static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7350 __be32 res_handle,
a32c055f 7351 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7352{
1da177e4
LT
7353 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7354
7355 ioarcb->res_handle = res_handle;
7356 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7357 ioarcb->cmd_pkt.cdb[2] = parm;
7358 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7359 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7360
a32c055f 7361 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7362}
7363
dfed823e 7364/**
7365 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7366 * @ipr_cmd: ipr command struct
7367 *
7368 * This function handles the failure of an IOA bringup command.
7369 *
7370 * Return value:
7371 * IPR_RC_JOB_RETURN
7372 **/
7373static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7374{
7375 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7376 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e 7377
7378 dev_err(&ioa_cfg->pdev->dev,
7379 "0x%02X failed with IOASC: 0x%08X\n",
7380 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7381
7382 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
05a6538a 7383 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
dfed823e 7384 return IPR_RC_JOB_RETURN;
7385}
7386
7387/**
7388 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7389 * @ipr_cmd: ipr command struct
7390 *
7391 * This function handles the failure of a Mode Sense to the IOAFP.
7392 * Some adapters do not handle all mode pages.
7393 *
7394 * Return value:
7395 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7396 **/
7397static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7398{
f72919ec 7399 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7400 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e 7401
7402 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
f72919ec
WB
7403 ipr_cmd->job_step = ipr_set_supported_devs;
7404 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7405 struct ipr_resource_entry, queue);
dfed823e 7406 return IPR_RC_JOB_CONTINUE;
7407 }
7408
7409 return ipr_reset_cmd_failed(ipr_cmd);
7410}
7411
1da177e4
LT
7412/**
7413 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7414 * @ipr_cmd: ipr command struct
7415 *
7416 * This function sends a Page 28 mode sense to the IOA to
7417 * retrieve SCSI bus attributes.
7418 *
7419 * Return value:
7420 * IPR_RC_JOB_RETURN
7421 **/
7422static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7423{
7424 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7425
7426 ENTER;
7427 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7428 0x28, ioa_cfg->vpd_cbs_dma +
7429 offsetof(struct ipr_misc_cbs, mode_pages),
7430 sizeof(struct ipr_mode_pages));
7431
7432 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 7433 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
7434
7435 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7436
7437 LEAVE;
7438 return IPR_RC_JOB_RETURN;
7439}
7440
ac09c349
BK
7441/**
7442 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7443 * @ipr_cmd: ipr command struct
7444 *
7445 * This function enables dual IOA RAID support if possible.
7446 *
7447 * Return value:
7448 * IPR_RC_JOB_RETURN
7449 **/
7450static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7451{
7452 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7453 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7454 struct ipr_mode_page24 *mode_page;
7455 int length;
7456
7457 ENTER;
7458 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7459 sizeof(struct ipr_mode_page24));
7460
7461 if (mode_page)
7462 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7463
7464 length = mode_pages->hdr.length + 1;
7465 mode_pages->hdr.length = 0;
7466
7467 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7468 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7469 length);
7470
7471 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7472 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7473
7474 LEAVE;
7475 return IPR_RC_JOB_RETURN;
7476}
7477
7478/**
7479 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7480 * @ipr_cmd: ipr command struct
7481 *
7482 * This function handles the failure of a Mode Sense to the IOAFP.
7483 * Some adapters do not handle all mode pages.
7484 *
7485 * Return value:
7486 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7487 **/
7488static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7489{
96d21f00 7490 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
ac09c349
BK
7491
7492 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7493 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7494 return IPR_RC_JOB_CONTINUE;
7495 }
7496
7497 return ipr_reset_cmd_failed(ipr_cmd);
7498}
7499
7500/**
7501 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7502 * @ipr_cmd: ipr command struct
7503 *
7504 * This function sends a mode sense to the IOA to retrieve
7505 * the IOA Advanced Function Control mode page.
7506 *
7507 * Return value:
7508 * IPR_RC_JOB_RETURN
7509 **/
7510static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7511{
7512 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7513
7514 ENTER;
7515 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7516 0x24, ioa_cfg->vpd_cbs_dma +
7517 offsetof(struct ipr_misc_cbs, mode_pages),
7518 sizeof(struct ipr_mode_pages));
7519
7520 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7521 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7522
7523 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7524
7525 LEAVE;
7526 return IPR_RC_JOB_RETURN;
7527}
7528
1da177e4
LT
7529/**
7530 * ipr_init_res_table - Initialize the resource table
7531 * @ipr_cmd: ipr command struct
7532 *
7533 * This function looks through the existing resource table, comparing
7534 * it with the config table. It takes care of old and new
7535 * devices, scheduling their addition to or removal from the
7536 * mid-layer as appropriate.
7537 *
7538 * Return value:
7539 * IPR_RC_JOB_CONTINUE
7540 **/
7541static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7542{
7543 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7544 struct ipr_resource_entry *res, *temp;
3e7ebdfa
WB
7545 struct ipr_config_table_entry_wrapper cfgtew;
7546 int entries, found, flag, i;
1da177e4
LT
7547 LIST_HEAD(old_res);
7548
7549 ENTER;
3e7ebdfa
WB
7550 if (ioa_cfg->sis64)
7551 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7552 else
7553 flag = ioa_cfg->u.cfg_table->hdr.flags;
7554
7555 if (flag & IPR_UCODE_DOWNLOAD_REQ)
1da177e4
LT
7556 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7557
7558 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7559 list_move_tail(&res->queue, &old_res);
7560
3e7ebdfa 7561 if (ioa_cfg->sis64)
438b0331 7562 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
3e7ebdfa
WB
7563 else
7564 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7565
7566 for (i = 0; i < entries; i++) {
7567 if (ioa_cfg->sis64)
7568 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7569 else
7570 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
1da177e4
LT
7571 found = 0;
7572
7573 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 7574 if (ipr_is_same_device(res, &cfgtew)) {
1da177e4
LT
7575 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7576 found = 1;
7577 break;
7578 }
7579 }
7580
7581 if (!found) {
7582 if (list_empty(&ioa_cfg->free_res_q)) {
7583 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7584 break;
7585 }
7586
7587 found = 1;
7588 res = list_entry(ioa_cfg->free_res_q.next,
7589 struct ipr_resource_entry, queue);
7590 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 7591 ipr_init_res_entry(res, &cfgtew);
1da177e4 7592 res->add_to_ml = 1;
56115598
WB
7593 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7594 res->sdev->allow_restart = 1;
1da177e4
LT
7595
7596 if (found)
3e7ebdfa 7597 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
7598 }
7599
7600 list_for_each_entry_safe(res, temp, &old_res, queue) {
7601 if (res->sdev) {
7602 res->del_from_ml = 1;
3e7ebdfa 7603 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 7604 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
1da177e4
LT
7605 }
7606 }
7607
3e7ebdfa
WB
7608 list_for_each_entry_safe(res, temp, &old_res, queue) {
7609 ipr_clear_res_target(res);
7610 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7611 }
7612
ac09c349
BK
7613 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7614 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7615 else
7616 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
1da177e4
LT
7617
7618 LEAVE;
7619 return IPR_RC_JOB_CONTINUE;
7620}
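/*
 * Illustrative sketch (not part of the driver): the reconcile pattern
 * used above - park every known resource on a scratch list, move each
 * one that still appears in the new table back onto the live list, and
 * whatever remains on the scratch list afterwards has vanished and gets
 * torn down. Generic shape, with an integer id standing in for the
 * device-identity comparison:
 */
struct example_res {
	struct list_head queue;
	int id;
};

static bool example_id_in_table(const int *table, int n, int id)
{
	int i;

	for (i = 0; i < n; i++)
		if (table[i] == id)
			return true;
	return false;
}

static void example_reconcile(struct list_head *live, const int *table, int n)
{
	struct example_res *res, *tmp;
	LIST_HEAD(old_res);

	list_splice_init(live, &old_res);	/* park everything */

	list_for_each_entry_safe(res, tmp, &old_res, queue)
		if (example_id_in_table(table, n, res->id))
			list_move_tail(&res->queue, live);	/* still here */

	list_for_each_entry_safe(res, tmp, &old_res, queue)
		list_del(&res->queue);		/* gone: tear down */
}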
7621
7622/**
7623 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7624 * @ipr_cmd: ipr command struct
7625 *
7626 * This function sends a Query IOA Configuration command
7627 * to the adapter to retrieve the IOA configuration table.
7628 *
7629 * Return value:
7630 * IPR_RC_JOB_RETURN
7631 **/
7632static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7633{
7634 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7635 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4 7636 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 7637 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
1da177e4
LT
7638
7639 ENTER;
ac09c349
BK
7640 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7641 ioa_cfg->dual_raid = 1;
1da177e4
LT
7642 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7643 ucode_vpd->major_release, ucode_vpd->card_type,
7644 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7645 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7646 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7647
7648 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
438b0331 7649 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
3e7ebdfa
WB
7650 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7651 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
1da177e4 7652
3e7ebdfa 7653 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
a32c055f 7654 IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7655
7656 ipr_cmd->job_step = ipr_init_res_table;
7657
7658 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7659
7660 LEAVE;
7661 return IPR_RC_JOB_RETURN;
7662}
7663
7664/**
7665 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7666 * @ipr_cmd: ipr command struct
7667 *
7668 * This utility function sends an inquiry to the adapter.
7669 *
7670 * Return value:
7671 * none
7672 **/
7673static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
a32c055f 7674 dma_addr_t dma_addr, u8 xfer_len)
1da177e4
LT
7675{
7676 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
7677
7678 ENTER;
7679 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7680 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7681
7682 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7683 ioarcb->cmd_pkt.cdb[1] = flags;
7684 ioarcb->cmd_pkt.cdb[2] = page;
7685 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7686
a32c055f 7687 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7688
7689 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7690 LEAVE;
7691}
7692
62275040 7693/**
7694 * ipr_inquiry_page_supported - Is the given inquiry page supported
7695 * @page0: inquiry page 0 buffer
7696 * @page: page code.
7697 *
7698 * This function determines if the specified inquiry page is supported.
7699 *
7700 * Return value:
7701 * 1 if page is supported / 0 if not
7702 **/
7703static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7704{
7705 int i;
7706
7707 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7708 if (page0->page[i] == page)
7709 return 1;
7710
7711 return 0;
7712}
7713
ac09c349
BK
7714/**
7715 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7716 * @ipr_cmd: ipr command struct
7717 *
7718 * This function sends a Page 0xD0 inquiry to the adapter
7719 * to retrieve adapter capabilities.
7720 *
7721 * Return value:
7722 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7723 **/
7724static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7725{
7726 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7727 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7728 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7729
7730 ENTER;
7731 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7732 memset(cap, 0, sizeof(*cap));
7733
7734 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7735 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7736 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7737 sizeof(struct ipr_inquiry_cap));
7738 return IPR_RC_JOB_RETURN;
7739 }
7740
7741 LEAVE;
7742 return IPR_RC_JOB_CONTINUE;
7743}
7744
1da177e4
LT
7745/**
7746 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7747 * @ipr_cmd: ipr command struct
7748 *
7749 * This function sends a Page 3 inquiry to the adapter
7750 * to retrieve software VPD information.
7751 *
7752 * Return value:
7753 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7754 **/
7755static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040 7756{
7757 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
62275040 7758
7759 ENTER;
7760
ac09c349 7761 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
62275040 7762
7763 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7764 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7765 sizeof(struct ipr_inquiry_page3));
7766
7767 LEAVE;
7768 return IPR_RC_JOB_RETURN;
7769}
7770
7771/**
7772 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7773 * @ipr_cmd: ipr command struct
7774 *
7775 * This function sends a Page 0 inquiry to the adapter
7776 * to retrieve supported inquiry pages.
7777 *
7778 * Return value:
7779 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7780 **/
7781static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
7782{
7783 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7784 char type[5];
7785
7786 ENTER;
7787
7788 /* Grab the type out of the VPD and store it away */
7789 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7790 type[4] = '\0';
7791 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7792
f688f96d
BK
7793 if (ipr_invalid_adapter(ioa_cfg)) {
7794 dev_err(&ioa_cfg->pdev->dev,
7795 "Adapter not supported in this hardware configuration.\n");
7796
7797 if (!ipr_testmode) {
7798 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7799 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7800 list_add_tail(&ipr_cmd->queue,
7801 &ioa_cfg->hrrq->hrrq_free_q);
7802 return IPR_RC_JOB_RETURN;
7803 }
7804 }
7805
62275040 7806 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 7807
62275040 7808 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7809 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7810 sizeof(struct ipr_inquiry_page0));
1da177e4
LT
7811
7812 LEAVE;
7813 return IPR_RC_JOB_RETURN;
7814}
7815
7816/**
7817 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7818 * @ipr_cmd: ipr command struct
7819 *
7820 * This function sends a standard inquiry to the adapter.
7821 *
7822 * Return value:
7823 * IPR_RC_JOB_RETURN
7824 **/
7825static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7826{
7827 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7828
7829 ENTER;
62275040 7830 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
1da177e4
LT
7831
7832 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7833 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7834 sizeof(struct ipr_ioa_vpd));
7835
7836 LEAVE;
7837 return IPR_RC_JOB_RETURN;
7838}
7839
7840/**
214777ba 7841 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
1da177e4
LT
7842 * @ipr_cmd: ipr command struct
7843 *
7844 * This function sends an Identify Host Request Response Queue
7845 * command to establish the HRRQ with the adapter.
7846 *
7847 * Return value:
7848 * IPR_RC_JOB_RETURN
7849 **/
214777ba 7850static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
7851{
7852 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7853 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
05a6538a 7854 struct ipr_hrr_queue *hrrq;
1da177e4
LT
7855
7856 ENTER;
05a6538a 7857 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
1da177e4
LT
7858 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7859
56d6aa33 7860 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7861 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
1da177e4 7862
05a6538a 7863 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7864 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1da177e4 7865
05a6538a 7866 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7867 if (ioa_cfg->sis64)
7868 ioarcb->cmd_pkt.cdb[1] = 0x1;
214777ba 7869
05a6538a 7870 if (ioa_cfg->nvectors == 1)
7871 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7872 else
7873 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7874
7875 ioarcb->cmd_pkt.cdb[2] =
7876 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7877 ioarcb->cmd_pkt.cdb[3] =
7878 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7879 ioarcb->cmd_pkt.cdb[4] =
7880 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7881 ioarcb->cmd_pkt.cdb[5] =
7882 ((u64) hrrq->host_rrq_dma) & 0xff;
7883 ioarcb->cmd_pkt.cdb[7] =
7884 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7885 ioarcb->cmd_pkt.cdb[8] =
7886 (sizeof(u32) * hrrq->size) & 0xff;
7887
7888 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 7889 ioarcb->cmd_pkt.cdb[9] =
7890 ioa_cfg->identify_hrrq_index;
1da177e4 7891
05a6538a 7892 if (ioa_cfg->sis64) {
7893 ioarcb->cmd_pkt.cdb[10] =
7894 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7895 ioarcb->cmd_pkt.cdb[11] =
7896 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7897 ioarcb->cmd_pkt.cdb[12] =
7898 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7899 ioarcb->cmd_pkt.cdb[13] =
7900 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7901 }
7902
7903 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 7904 ioarcb->cmd_pkt.cdb[14] =
7905 ioa_cfg->identify_hrrq_index;
05a6538a 7906
7907 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7908 IPR_INTERNAL_TIMEOUT);
7909
56d6aa33 7910 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7911 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
05a6538a 7912
7913 LEAVE;
7914 return IPR_RC_JOB_RETURN;
05a6538a 7915 }
7916
1da177e4 7917 LEAVE;
05a6538a 7918 return IPR_RC_JOB_CONTINUE;
1da177e4
LT
7919}
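/*
 * Illustrative sketch (not part of the driver): how the HRRQ DMA address
 * is scattered across the Identify Host RR Q CDB above. The low 32 bits
 * occupy bytes 2-5 (most significant byte first); on SIS-64 adapters the
 * high 32 bits occupy bytes 10-13, again MSB first:
 */
static void example_pack_hrrq_addr(u8 *cdb, u64 dma, int sis64)
{
	cdb[2] = (dma >> 24) & 0xff;
	cdb[3] = (dma >> 16) & 0xff;
	cdb[4] = (dma >> 8) & 0xff;
	cdb[5] = dma & 0xff;

	if (sis64) {
		cdb[10] = (dma >> 56) & 0xff;
		cdb[11] = (dma >> 48) & 0xff;
		cdb[12] = (dma >> 40) & 0xff;
		cdb[13] = (dma >> 32) & 0xff;
	}
}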
7920
7921/**
7922 * ipr_reset_timer_done - Adapter reset timer function
7923 * @ipr_cmd: ipr command struct
7924 *
7925 * Description: This function is used in adapter reset processing
7926 * for timing events. If the reset_cmd pointer in the IOA
7927 * config struct does not point to this command, we are doing nested
7928 * resets and fail_all_ops will take care of freeing the
7929 * command block.
7930 *
7931 * Return value:
7932 * none
7933 **/
7934static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7935{
7936 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7937 unsigned long lock_flags = 0;
7938
7939 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7940
7941 if (ioa_cfg->reset_cmd == ipr_cmd) {
7942 list_del(&ipr_cmd->queue);
7943 ipr_cmd->done(ipr_cmd);
7944 }
7945
7946 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7947}
7948
7949/**
7950 * ipr_reset_start_timer - Start a timer for adapter reset job
7951 * @ipr_cmd: ipr command struct
7952 * @timeout: timeout value
7953 *
7954 * Description: This function is used in adapter reset processing
7955 * for timing events. If the reset_cmd pointer in the IOA
7956 * config struct does not point to this command, we are doing nested
7957 * resets and fail_all_ops will take care of freeing the
7958 * command block.
7959 *
7960 * Return value:
7961 * none
7962 **/
7963static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7964 unsigned long timeout)
7965{
05a6538a 7966
7967 ENTER;
7968 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
7969 ipr_cmd->done = ipr_reset_ioa_job;
7970
7971 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7972 ipr_cmd->timer.expires = jiffies + timeout;
7973 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7974 add_timer(&ipr_cmd->timer);
7975}
7976
7977/**
7978 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7979 * @ioa_cfg: ioa cfg struct
7980 *
7981 * Return value:
7982 * nothing
7983 **/
7984static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7985{
05a6538a 7986 struct ipr_hrr_queue *hrrq;
1da177e4 7987
05a6538a 7988 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 7989 spin_lock(&hrrq->_lock);
05a6538a 7990 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7991
7992 /* Initialize Host RRQ pointers */
7993 hrrq->hrrq_start = hrrq->host_rrq;
7994 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7995 hrrq->hrrq_curr = hrrq->hrrq_start;
7996 hrrq->toggle_bit = 1;
56d6aa33 7997 spin_unlock(&hrrq->_lock);
05a6538a 7998 }
56d6aa33 7999 wmb();
05a6538a 8000
56d6aa33 8001 ioa_cfg->identify_hrrq_index = 0;
8002 if (ioa_cfg->hrrq_num == 1)
8003 atomic_set(&ioa_cfg->hrrq_index, 0);
8004 else
8005 atomic_set(&ioa_cfg->hrrq_index, 1);
1da177e4
LT
8006
8007 /* Zero out config table */
3e7ebdfa 8008 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
1da177e4
LT
8009}
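/*
 * Illustrative sketch (not part of the driver): the toggle_bit primed to
 * 1 above flips every time the queue wraps, which lets the host tell
 * fresh responses from stale ones without re-zeroing the ring. A consumer
 * loop might look like this (the bit position is a placeholder; the real
 * driver uses its own HRRQ toggle mask):
 */
#define EXAMPLE_HRRQ_TOGGLE_BIT 0x1u

static void example_consume_hrrq(struct ipr_hrr_queue *hrrq)
{
	while ((be32_to_cpu(*hrrq->hrrq_curr) & EXAMPLE_HRRQ_TOGGLE_BIT) ==
	       hrrq->toggle_bit) {
		/* ... handle the response *hrrq->hrrq_curr refers to ... */

		if (hrrq->hrrq_curr < hrrq->hrrq_end) {
			hrrq->hrrq_curr++;
		} else {
			hrrq->hrrq_curr = hrrq->hrrq_start;
			hrrq->toggle_bit ^= 1u;	/* wrapped: flip expectation */
		}
	}
}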
8010
214777ba
WB
8011/**
8012 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8013 * @ipr_cmd: ipr command struct
8014 *
8015 * Return value:
8016 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8017 **/
8018static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8019{
8020 unsigned long stage, stage_time;
8021 u32 feedback;
8022 volatile u32 int_reg;
8023 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8024 u64 maskval = 0;
8025
8026 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8027 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8028 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8029
8030 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8031
8032 /* sanity check the stage_time value */
438b0331
WB
8033 if (stage_time == 0)
8034 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8035 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
214777ba
WB
8036 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8037 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8038 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8039
8040 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8041 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8042 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8043 stage_time = ioa_cfg->transop_timeout;
8044 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8045 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
1df79ca4
WB
8046 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8047 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8048 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8049 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8050 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8051 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8052 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8053 return IPR_RC_JOB_CONTINUE;
8054 }
214777ba
WB
8055 }
8056
8057 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8058 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8059 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8060 ipr_cmd->done = ipr_reset_ioa_job;
8061 add_timer(&ipr_cmd->timer);
05a6538a 8062
8063 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
214777ba
WB
8064
8065 return IPR_RC_JOB_RETURN;
8066}
8067
1da177e4
LT
8068/**
8069 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8070 * @ipr_cmd: ipr command struct
8071 *
8072 * This function reinitializes some control blocks and
8073 * enables destructive diagnostics on the adapter.
8074 *
8075 * Return value:
8076 * IPR_RC_JOB_RETURN
8077 **/
8078static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8079{
8080 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8081 volatile u32 int_reg;
7be96900 8082 volatile u64 maskval;
56d6aa33 8083 int i;
1da177e4
LT
8084
8085 ENTER;
214777ba 8086 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
1da177e4
LT
8087 ipr_init_ioa_mem(ioa_cfg);
8088
56d6aa33 8089 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8090 spin_lock(&ioa_cfg->hrrq[i]._lock);
8091 ioa_cfg->hrrq[i].allow_interrupts = 1;
8092 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8093 }
8094 wmb();
8701f185
WB
8095 if (ioa_cfg->sis64) {
8096 /* Set the adapter to the correct endian mode. */
8097 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8098 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8099 }
8100
7be96900 8101 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
1da177e4
LT
8102
8103 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8104 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
214777ba 8105 ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4
LT
8106 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8107 return IPR_RC_JOB_CONTINUE;
8108 }
8109
8110 /* Enable destructive diagnostics on IOA */
214777ba
WB
8111 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8112
7be96900
WB
8113 if (ioa_cfg->sis64) {
8114 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8115 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8116 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8117 } else
8118 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4 8119
1da177e4
LT
8120 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8121
8122 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8123
214777ba
WB
8124 if (ioa_cfg->sis64) {
8125 ipr_cmd->job_step = ipr_reset_next_stage;
8126 return IPR_RC_JOB_CONTINUE;
8127 }
8128
1da177e4 8129 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5469cb5b 8130 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
1da177e4
LT
8131 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8132 ipr_cmd->done = ipr_reset_ioa_job;
8133 add_timer(&ipr_cmd->timer);
05a6538a 8134 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
8135
8136 LEAVE;
8137 return IPR_RC_JOB_RETURN;
8138}
8139
8140/**
8141 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8142 * @ipr_cmd: ipr command struct
8143 *
8144 * This function is invoked when an adapter dump has run out
8145 * of processing time.
8146 *
8147 * Return value:
8148 * IPR_RC_JOB_CONTINUE
8149 **/
8150static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8151{
8152 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8153
8154 if (ioa_cfg->sdt_state == GET_DUMP)
41e9a696
BK
8155 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8156 else if (ioa_cfg->sdt_state == READ_DUMP)
1da177e4
LT
8157 ioa_cfg->sdt_state = ABORT_DUMP;
8158
4c647e90 8159 ioa_cfg->dump_timeout = 1;
1da177e4
LT
8160 ipr_cmd->job_step = ipr_reset_alert;
8161
8162 return IPR_RC_JOB_CONTINUE;
8163}
8164
8165/**
8166 * ipr_unit_check_no_data - Log a unit check/no data error log
8167 * @ioa_cfg: ioa config struct
8168 *
8169 * Logs an error indicating the adapter unit checked, but for some
8170 * reason, we were unable to fetch the unit check buffer.
8171 *
8172 * Return value:
8173 * nothing
8174 **/
8175static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8176{
8177 ioa_cfg->errors_logged++;
8178 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8179}
8180
8181/**
8182 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8183 * @ioa_cfg: ioa config struct
8184 *
8185 * Fetches the unit check buffer from the adapter by clocking the data
8186 * through the mailbox register.
8187 *
8188 * Return value:
8189 * nothing
8190 **/
8191static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8192{
8193 unsigned long mailbox;
8194 struct ipr_hostrcb *hostrcb;
8195 struct ipr_uc_sdt sdt;
8196 int rc, length;
65f56475 8197 u32 ioasc;
1da177e4
LT
8198
8199 mailbox = readl(ioa_cfg->ioa_mailbox);
8200
dcbad00e 8201 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
1da177e4
LT
8202 ipr_unit_check_no_data(ioa_cfg);
8203 return;
8204 }
8205
8206 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8207 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8208 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8209
dcbad00e
WB
8210 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8211 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8212 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
8213 ipr_unit_check_no_data(ioa_cfg);
8214 return;
8215 }
8216
8217 /* Find length of the first sdt entry (UC buffer) */
dcbad00e
WB
8218 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8219 length = be32_to_cpu(sdt.entry[0].end_token);
8220 else
8221 length = (be32_to_cpu(sdt.entry[0].end_token) -
8222 be32_to_cpu(sdt.entry[0].start_token)) &
8223 IPR_FMT2_MBX_ADDR_MASK;
1da177e4
LT
8224
8225 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8226 struct ipr_hostrcb, queue);
8227 list_del(&hostrcb->queue);
8228 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8229
8230 rc = ipr_get_ldump_data_section(ioa_cfg,
dcbad00e 8231 be32_to_cpu(sdt.entry[0].start_token),
1da177e4
LT
8232 (__be32 *)&hostrcb->hcam,
8233 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8234
65f56475 8235 if (!rc) {
1da177e4 8236 ipr_handle_log_data(ioa_cfg, hostrcb);
4565e370 8237 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
65f56475
BK
8238 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8239 ioa_cfg->sdt_state == GET_DUMP)
8240 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8241 } else
1da177e4
LT
8242 ipr_unit_check_no_data(ioa_cfg);
8243
8244 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8245}
8246
110def85
WB
8247/**
8248 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8249 * @ipr_cmd: ipr command struct
8250 *
8251 * Description: This function retrieves the unit check buffer from the IOA.
8252 *
8253 * Return value:
8254 * IPR_RC_JOB_RETURN
8255 **/
8256static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8257{
8258 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8259
8260 ENTER;
8261 ioa_cfg->ioa_unit_checked = 0;
8262 ipr_get_unit_check_buffer(ioa_cfg);
8263 ipr_cmd->job_step = ipr_reset_alert;
8264 ipr_reset_start_timer(ipr_cmd, 0);
8265
8266 LEAVE;
8267 return IPR_RC_JOB_RETURN;
8268}
8269
1da177e4
LT
8270/**
8271 * ipr_reset_restore_cfg_space - Restore PCI config space.
8272 * @ipr_cmd: ipr command struct
8273 *
8274 * Description: This function restores the saved PCI config space of
8275 * the adapter, fails all outstanding ops back to the callers, and
8276 * fetches the dump/unit check if applicable to this reset.
8277 *
8278 * Return value:
8279 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8280 **/
8281static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8282{
8283 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
630ad831 8284 u32 int_reg;
1da177e4
LT
8285
8286 ENTER;
99c965dd 8287 ioa_cfg->pdev->state_saved = true;
1d3c16a8 8288 pci_restore_state(ioa_cfg->pdev);
1da177e4
LT
8289
8290 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
96d21f00 8291 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
1da177e4
LT
8292 return IPR_RC_JOB_CONTINUE;
8293 }
8294
8295 ipr_fail_all_ops(ioa_cfg);
8296
8701f185
WB
8297 if (ioa_cfg->sis64) {
8298 /* Set the adapter to the correct endian mode. */
8299 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8300 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8301 }
8302
1da177e4 8303 if (ioa_cfg->ioa_unit_checked) {
110def85
WB
8304 if (ioa_cfg->sis64) {
8305 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8306 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8307 return IPR_RC_JOB_RETURN;
8308 } else {
8309 ioa_cfg->ioa_unit_checked = 0;
8310 ipr_get_unit_check_buffer(ioa_cfg);
8311 ipr_cmd->job_step = ipr_reset_alert;
8312 ipr_reset_start_timer(ipr_cmd, 0);
8313 return IPR_RC_JOB_RETURN;
8314 }
1da177e4
LT
8315 }
8316
8317 if (ioa_cfg->in_ioa_bringdown) {
8318 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8319 } else {
8320 ipr_cmd->job_step = ipr_reset_enable_ioa;
8321
8322 if (GET_DUMP == ioa_cfg->sdt_state) {
41e9a696 8323 ioa_cfg->sdt_state = READ_DUMP;
4c647e90 8324 ioa_cfg->dump_timeout = 0;
4d4dd706
KSS
8325 if (ioa_cfg->sis64)
8326 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8327 else
8328 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
1da177e4
LT
8329 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8330 schedule_work(&ioa_cfg->work_q);
8331 return IPR_RC_JOB_RETURN;
8332 }
8333 }
8334
438b0331 8335 LEAVE;
1da177e4
LT
8336 return IPR_RC_JOB_CONTINUE;
8337}
8338
e619e1a7
BK
8339/**
8340 * ipr_reset_bist_done - BIST has completed on the adapter.
8341 * @ipr_cmd: ipr command struct
8342 *
8343 * Description: Unblock config space and resume the reset process.
8344 *
8345 * Return value:
8346 * IPR_RC_JOB_CONTINUE
8347 **/
8348static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8349{
fb51ccbf
JK
8350 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8351
e619e1a7 8352 ENTER;
fb51ccbf
JK
8353 if (ioa_cfg->cfg_locked)
8354 pci_cfg_access_unlock(ioa_cfg->pdev);
8355 ioa_cfg->cfg_locked = 0;
e619e1a7
BK
8356 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8357 LEAVE;
8358 return IPR_RC_JOB_CONTINUE;
8359}
8360
1da177e4
LT
8361/**
8362 * ipr_reset_start_bist - Run BIST on the adapter.
8363 * @ipr_cmd: ipr command struct
8364 *
8365 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8366 *
8367 * Return value:
8368 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8369 **/
8370static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8371{
8372 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
cb237ef7 8373 int rc = PCIBIOS_SUCCESSFUL;
1da177e4
LT
8374
8375 ENTER;
cb237ef7
WB
8376 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8377 writel(IPR_UPROCI_SIS64_START_BIST,
8378 ioa_cfg->regs.set_uproc_interrupt_reg32);
8379 else
8380 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8381
8382 if (rc == PCIBIOS_SUCCESSFUL) {
e619e1a7 8383 ipr_cmd->job_step = ipr_reset_bist_done;
1da177e4
LT
8384 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8385 rc = IPR_RC_JOB_RETURN;
cb237ef7 8386 } else {
fb51ccbf
JK
8387 if (ioa_cfg->cfg_locked)
8388 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8389 ioa_cfg->cfg_locked = 0;
cb237ef7
WB
8390 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8391 rc = IPR_RC_JOB_CONTINUE;
1da177e4
LT
8392 }
8393
8394 LEAVE;
8395 return rc;
8396}
8397
463fc696
BK
8398/**
8399 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8400 * @ipr_cmd: ipr command struct
8401 *
8402 * Description: This clears PCI reset to the adapter and delays two seconds.
8403 *
8404 * Return value:
8405 * IPR_RC_JOB_RETURN
8406 **/
8407static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8408{
8409 ENTER;
463fc696
BK
8410 ipr_cmd->job_step = ipr_reset_bist_done;
8411 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8412 LEAVE;
8413 return IPR_RC_JOB_RETURN;
8414}
8415
2796ca5e
BK
8416/**
8417 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8418 * @work: work struct
8419 *
 8421 * Description: This pulses a PCIe warm reset to the adapter's slot.
8421 *
8422 **/
8423static void ipr_reset_reset_work(struct work_struct *work)
8424{
8425 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8426 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8427 struct pci_dev *pdev = ioa_cfg->pdev;
8428 unsigned long lock_flags = 0;
8429
8430 ENTER;
8431 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8432 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8433 pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8434
8435 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8436 if (ioa_cfg->reset_cmd == ipr_cmd)
8437 ipr_reset_ioa_job(ipr_cmd);
8438 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8439 LEAVE;
8440}
8441
463fc696
BK
8442/**
8443 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8444 * @ipr_cmd: ipr command struct
8445 *
8446 * Description: This asserts PCI reset to the adapter.
8447 *
8448 * Return value:
8449 * IPR_RC_JOB_RETURN
8450 **/
8451static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8452{
8453 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
463fc696
BK
8454
8455 ENTER;
2796ca5e
BK
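	/* The warm-reset pulse in ipr_reset_reset_work() sleeps in msleep(),
	 * which is not allowed here under the host lock, so the pulse is
	 * deferred to the dedicated ordered reset workqueue. */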
8456 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8457 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
463fc696 8458 ipr_cmd->job_step = ipr_reset_slot_reset_done;
463fc696
BK
8459 LEAVE;
8460 return IPR_RC_JOB_RETURN;
8461}
8462
fb51ccbf
JK
8463/**
8464 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8465 * @ipr_cmd: ipr command struct
8466 *
8467 * Description: This attempts to block config access to the IOA.
8468 *
8469 * Return value:
8470 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8471 **/
8472static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8473{
8474 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8475 int rc = IPR_RC_JOB_CONTINUE;
8476
8477 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8478 ioa_cfg->cfg_locked = 1;
8479 ipr_cmd->job_step = ioa_cfg->reset;
8480 } else {
8481 if (ipr_cmd->u.time_left) {
8482 rc = IPR_RC_JOB_RETURN;
8483 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8484 ipr_reset_start_timer(ipr_cmd,
8485 IPR_CHECK_FOR_RESET_TIMEOUT);
8486 } else {
8487 ipr_cmd->job_step = ioa_cfg->reset;
8488 dev_err(&ioa_cfg->pdev->dev,
8489 "Timed out waiting to lock config access. Resetting anyway.\n");
8490 }
8491 }
8492
8493 return rc;
8494}
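/*
 * The step above and its companion below implement a timer-driven retry
 * of pci_cfg_access_trylock(). A synchronous equivalent is sketched here
 * (hypothetical and illustrative only -- the reset job runs under the
 * host lock and must not sleep, which is why the driver re-arms
 * ipr_reset_start_timer() instead):
 */
static int example_lock_cfg_access(struct pci_dev *pdev, unsigned long timeout)
{
	while (!pci_cfg_access_trylock(pdev)) {
		if (!timeout)
			return -ETIMEDOUT;	/* caller resets anyway */
		msleep(jiffies_to_msecs(IPR_CHECK_FOR_RESET_TIMEOUT));
		timeout -= min(timeout,
			       (unsigned long)IPR_CHECK_FOR_RESET_TIMEOUT);
	}
	return 0;
}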
8495
8496/**
8497 * ipr_reset_block_config_access - Block config access to the IOA
8498 * @ipr_cmd: ipr command struct
8499 *
8500 * Description: This attempts to block config access to the IOA
8501 *
8502 * Return value:
8503 * IPR_RC_JOB_CONTINUE
8504 **/
8505static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8506{
8507 ipr_cmd->ioa_cfg->cfg_locked = 0;
8508 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8509 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8510 return IPR_RC_JOB_CONTINUE;
8511}
8512
1da177e4
LT
8513/**
8514 * ipr_reset_allowed - Query whether or not IOA can be reset
8515 * @ioa_cfg: ioa config struct
8516 *
8517 * Return value:
8518 * 0 if reset not allowed / non-zero if reset is allowed
8519 **/
8520static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8521{
8522 volatile u32 temp_reg;
8523
8524 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8525 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8526}
8527
8528/**
8529 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8530 * @ipr_cmd: ipr command struct
8531 *
8532 * Description: This function waits for adapter permission to run BIST,
8533 * then runs BIST. If the adapter does not give permission after a
8534 * reasonable time, we will reset the adapter anyway. The impact of
8535 * resetting the adapter without warning the adapter is the risk of
8536 * losing the persistent error log on the adapter. If the adapter is
8537 * reset while it is writing to the flash on the adapter, the flash
8538 * segment will have bad ECC and be zeroed.
8539 *
8540 * Return value:
8541 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8542 **/
8543static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8544{
8545 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8546 int rc = IPR_RC_JOB_RETURN;
8547
8548 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8549 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8550 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8551 } else {
fb51ccbf 8552 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8553 rc = IPR_RC_JOB_CONTINUE;
8554 }
8555
8556 return rc;
8557}
8558
8559/**
8701f185 8560 * ipr_reset_alert - Alert the adapter of a pending reset
1da177e4
LT
8561 * @ipr_cmd: ipr command struct
8562 *
8563 * Description: This function alerts the adapter that it will be reset.
8564 * If memory space is not currently enabled, proceed directly
8565 * to running BIST on the adapter. The timer must always be started
8566 * so we guarantee we do not run BIST from ipr_isr.
8567 *
8568 * Return value:
8569 * IPR_RC_JOB_RETURN
8570 **/
8571static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8572{
8573 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8574 u16 cmd_reg;
8575 int rc;
8576
8577 ENTER;
8578 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8579
8580 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8581 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
214777ba 8582 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
8583 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8584 } else {
fb51ccbf 8585 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8586 }
8587
8588 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8589 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8590
8591 LEAVE;
8592 return IPR_RC_JOB_RETURN;
8593}
8594
4fdd7c7a
BK
8595/**
8596 * ipr_reset_quiesce_done - Complete IOA disconnect
8597 * @ipr_cmd: ipr command struct
8598 *
8599 * Description: Freeze the adapter to complete quiesce processing
8600 *
8601 * Return value:
8602 * IPR_RC_JOB_CONTINUE
8603 **/
8604static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8605{
8606 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8607
8608 ENTER;
8609 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8610 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8611 LEAVE;
8612 return IPR_RC_JOB_CONTINUE;
8613}
8614
8615/**
8616 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8617 * @ipr_cmd: ipr command struct
8618 *
8619 * Description: Ensure nothing is outstanding to the IOA and
8620 * proceed with IOA disconnect. Otherwise reset the IOA.
8621 *
8622 * Return value:
8623 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8624 **/
8625static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8626{
8627 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8628 struct ipr_cmnd *loop_cmd;
8629 struct ipr_hrr_queue *hrrq;
8630 int rc = IPR_RC_JOB_CONTINUE;
8631 int count = 0;
8632
8633 ENTER;
8634 ipr_cmd->job_step = ipr_reset_quiesce_done;
8635
8636 for_each_hrrq(hrrq, ioa_cfg) {
8637 spin_lock(&hrrq->_lock);
8638 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8639 count++;
8640 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8641 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8642 rc = IPR_RC_JOB_RETURN;
8643 break;
8644 }
8645 spin_unlock(&hrrq->_lock);
8646
8647 if (count)
8648 break;
8649 }
8650
8651 LEAVE;
8652 return rc;
8653}
8654
8655/**
8656 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8657 * @ipr_cmd: ipr command struct
8658 *
 8659 * Description: Cancel any outstanding HCAMs to the IOA.
8660 *
8661 * Return value:
8662 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8663 **/
8664static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8665{
8666 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8667 int rc = IPR_RC_JOB_CONTINUE;
8668 struct ipr_cmd_pkt *cmd_pkt;
8669 struct ipr_cmnd *hcam_cmd;
8670 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8671
8672 ENTER;
8673 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8674
8675 if (!hrrq->ioa_is_dead) {
8676 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8677 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8678 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8679 continue;
8680
8681 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8682 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8683 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
8684 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8685 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8686 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
8687 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8688 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8689 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8690 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8691 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8692 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8693 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8694 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8695
8696 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8697 IPR_CANCEL_TIMEOUT);
8698
8699 rc = IPR_RC_JOB_RETURN;
8700 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8701 break;
8702 }
8703 }
8704 } else
8705 ipr_cmd->job_step = ipr_reset_alert;
8706
8707 LEAVE;
8708 return rc;
8709}
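/*
 * Sketch of the CDB layout built above: the 64-bit IOARCB bus address is
 * scattered big-endian into the Cancel Request CDB, low word in bytes
 * 2..5 and high word in bytes 10..13. A hypothetical helper doing the
 * same packing with the <asm/unaligned.h> accessors:
 */
static void example_pack_cancel_cdb(u8 *cdb, u64 addr)
{
	put_unaligned_be32(upper_32_bits(addr), &cdb[10]);	/* bits 63..32 */
	put_unaligned_be32(lower_32_bits(addr), &cdb[2]);	/* bits 31..0 */
}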
8710
1da177e4
LT
8711/**
8712 * ipr_reset_ucode_download_done - Microcode download completion
8713 * @ipr_cmd: ipr command struct
8714 *
8715 * Description: This function unmaps the microcode download buffer.
8716 *
8717 * Return value:
8718 * IPR_RC_JOB_CONTINUE
8719 **/
8720static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8721{
8722 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8723 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8724
d73341bf 8725 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
1da177e4
LT
8726 sglist->num_sg, DMA_TO_DEVICE);
8727
8728 ipr_cmd->job_step = ipr_reset_alert;
8729 return IPR_RC_JOB_CONTINUE;
8730}
8731
8732/**
8733 * ipr_reset_ucode_download - Download microcode to the adapter
8734 * @ipr_cmd: ipr command struct
8735 *
 8736 * Description: This function checks to see if there is microcode
8737 * to download to the adapter. If there is, a download is performed.
8738 *
8739 * Return value:
8740 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8741 **/
8742static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8743{
8744 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8745 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8746
8747 ENTER;
8748 ipr_cmd->job_step = ipr_reset_alert;
8749
8750 if (!sglist)
8751 return IPR_RC_JOB_CONTINUE;
8752
8753 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8754 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8755 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8756 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8757 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8758 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8759 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8760
a32c055f
WB
8761 if (ioa_cfg->sis64)
8762 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8763 else
8764 ipr_build_ucode_ioadl(ipr_cmd, sglist);
1da177e4
LT
8765 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8766
8767 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8768 IPR_WRITE_BUFFER_TIMEOUT);
8769
8770 LEAVE;
8771 return IPR_RC_JOB_RETURN;
8772}
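/*
 * The WRITE BUFFER CDB above carries the image length as a 24-bit
 * big-endian field in bytes 6..8; a 1 MiB image (0x100000 bytes), for
 * example, packs as cdb[6] = 0x10, cdb[7] = 0x00, cdb[8] = 0x00. An
 * equivalent hypothetical helper:
 */
static void example_pack_write_buffer_len(u8 *cdb, u32 len)
{
	cdb[6] = (len >> 16) & 0xff;
	cdb[7] = (len >> 8) & 0xff;
	cdb[8] = len & 0xff;
}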
8773
8774/**
8775 * ipr_reset_shutdown_ioa - Shutdown the adapter
8776 * @ipr_cmd: ipr command struct
8777 *
8778 * Description: This function issues an adapter shutdown of the
8779 * specified type to the specified adapter as part of the
8780 * adapter reset job.
8781 *
8782 * Return value:
8783 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8784 **/
8785static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8786{
8787 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8788 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8789 unsigned long timeout;
8790 int rc = IPR_RC_JOB_CONTINUE;
8791
8792 ENTER;
4fdd7c7a
BK
8793 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
8794 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8795 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
56d6aa33 8796 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
1da177e4
LT
8797 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8798 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8799 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8800 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8801
ac09c349
BK
8802 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8803 timeout = IPR_SHUTDOWN_TIMEOUT;
1da177e4
LT
8804 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8805 timeout = IPR_INTERNAL_TIMEOUT;
ac09c349
BK
8806 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8807 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
1da177e4 8808 else
ac09c349 8809 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
1da177e4
LT
8810
8811 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8812
8813 rc = IPR_RC_JOB_RETURN;
8814 ipr_cmd->job_step = ipr_reset_ucode_download;
8815 } else
8816 ipr_cmd->job_step = ipr_reset_alert;
8817
8818 LEAVE;
8819 return rc;
8820}
8821
8822/**
8823 * ipr_reset_ioa_job - Adapter reset job
8824 * @ipr_cmd: ipr command struct
8825 *
8826 * Description: This function is the job router for the adapter reset job.
8827 *
8828 * Return value:
8829 * none
8830 **/
8831static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8832{
8833 u32 rc, ioasc;
1da177e4
LT
8834 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8835
8836 do {
96d21f00 8837 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
8838
8839 if (ioa_cfg->reset_cmd != ipr_cmd) {
8840 /*
8841 * We are doing nested adapter resets and this is
8842 * not the current reset job.
8843 */
05a6538a 8844 list_add_tail(&ipr_cmd->queue,
8845 &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
8846 return;
8847 }
8848
8849 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e 8850 rc = ipr_cmd->job_step_failed(ipr_cmd);
8851 if (rc == IPR_RC_JOB_RETURN)
8852 return;
1da177e4
LT
8853 }
8854
8855 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 8856 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
1da177e4 8857 rc = ipr_cmd->job_step(ipr_cmd);
203fa3fe 8858 } while (rc == IPR_RC_JOB_CONTINUE);
1da177e4
LT
8859}
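/*
 * Note on the loop above: a job step that returns IPR_RC_JOB_CONTINUE
 * has already pointed ipr_cmd->job_step at its successor, which is then
 * invoked synchronously on the next pass; a step that returns
 * IPR_RC_JOB_RETURN has instead arranged to be re-entered asynchronously
 * (via ipr_reset_start_timer() or a command completion routed back to
 * ipr_reset_ioa_job()), so the loop simply unwinds.
 */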
8860
8861/**
8862 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8863 * @ioa_cfg: ioa config struct
8864 * @job_step: first job step of reset job
8865 * @shutdown_type: shutdown type
8866 *
8867 * Description: This function will initiate the reset of the given adapter
8868 * starting at the selected job step.
8869 * If the caller needs to wait on the completion of the reset,
8870 * the caller must sleep on the reset_wait_q.
8871 *
8872 * Return value:
8873 * none
8874 **/
8875static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8876 int (*job_step) (struct ipr_cmnd *),
8877 enum ipr_shutdown_type shutdown_type)
8878{
8879 struct ipr_cmnd *ipr_cmd;
56d6aa33 8880 int i;
1da177e4
LT
8881
8882 ioa_cfg->in_reset_reload = 1;
56d6aa33 8883 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8884 spin_lock(&ioa_cfg->hrrq[i]._lock);
8885 ioa_cfg->hrrq[i].allow_cmds = 0;
8886 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8887 }
8888 wmb();
bfae7820
BK
8889 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8890 scsi_block_requests(ioa_cfg->host);
1da177e4
LT
8891
8892 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8893 ioa_cfg->reset_cmd = ipr_cmd;
8894 ipr_cmd->job_step = job_step;
8895 ipr_cmd->u.shutdown_type = shutdown_type;
8896
8897 ipr_reset_ioa_job(ipr_cmd);
8898}
8899
8900/**
8901 * ipr_initiate_ioa_reset - Initiate an adapter reset
8902 * @ioa_cfg: ioa config struct
8903 * @shutdown_type: shutdown type
8904 *
8905 * Description: This function will initiate the reset of the given adapter.
8906 * If the caller needs to wait on the completion of the reset,
8907 * the caller must sleep on the reset_wait_q.
8908 *
8909 * Return value:
8910 * none
8911 **/
8912static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8913 enum ipr_shutdown_type shutdown_type)
8914{
56d6aa33 8915 int i;
8916
8917 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
1da177e4
LT
8918 return;
8919
41e9a696
BK
8920 if (ioa_cfg->in_reset_reload) {
8921 if (ioa_cfg->sdt_state == GET_DUMP)
8922 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8923 else if (ioa_cfg->sdt_state == READ_DUMP)
8924 ioa_cfg->sdt_state = ABORT_DUMP;
8925 }
1da177e4
LT
8926
8927 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8928 dev_err(&ioa_cfg->pdev->dev,
8929 "IOA taken offline - error recovery failed\n");
8930
8931 ioa_cfg->reset_retries = 0;
56d6aa33 8932 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8933 spin_lock(&ioa_cfg->hrrq[i]._lock);
8934 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8935 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8936 }
8937 wmb();
1da177e4
LT
8938
8939 if (ioa_cfg->in_ioa_bringdown) {
8940 ioa_cfg->reset_cmd = NULL;
8941 ioa_cfg->in_reset_reload = 0;
8942 ipr_fail_all_ops(ioa_cfg);
8943 wake_up_all(&ioa_cfg->reset_wait_q);
8944
bfae7820
BK
8945 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8946 spin_unlock_irq(ioa_cfg->host->host_lock);
8947 scsi_unblock_requests(ioa_cfg->host);
8948 spin_lock_irq(ioa_cfg->host->host_lock);
8949 }
1da177e4
LT
8950 return;
8951 } else {
8952 ioa_cfg->in_ioa_bringdown = 1;
8953 shutdown_type = IPR_SHUTDOWN_NONE;
8954 }
8955 }
8956
8957 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8958 shutdown_type);
8959}
8960
f8a88b19
LV
8961/**
8962 * ipr_reset_freeze - Hold off all I/O activity
8963 * @ipr_cmd: ipr command struct
8964 *
8965 * Description: If the PCI slot is frozen, hold off all I/O
8966 * activity; then, as soon as the slot is available again,
8967 * initiate an adapter reset.
8968 */
8969static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8970{
56d6aa33 8971 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8972 int i;
8973
f8a88b19 8974 /* Disallow new interrupts, avoid loop */
56d6aa33 8975 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8976 spin_lock(&ioa_cfg->hrrq[i]._lock);
8977 ioa_cfg->hrrq[i].allow_interrupts = 0;
8978 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8979 }
8980 wmb();
05a6538a 8981 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
f8a88b19
LV
8982 ipr_cmd->done = ipr_reset_ioa_job;
8983 return IPR_RC_JOB_RETURN;
8984}
8985
6270e593
BK
8986/**
8987 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8988 * @pdev: PCI device struct
8989 *
8990 * Description: This routine is called to tell us that the MMIO
8991 * access to the IOA has been restored
8992 */
8993static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8994{
8995 unsigned long flags = 0;
8996 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8997
8998 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8999 if (!ioa_cfg->probe_done)
9000 pci_save_state(pdev);
9001 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9002 return PCI_ERS_RESULT_NEED_RESET;
9003}
9004
f8a88b19
LV
9005/**
9006 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9007 * @pdev: PCI device struct
9008 *
9009 * Description: This routine is called to tell us that the PCI bus
9010 * is down. Can't do anything here, except put the device driver
9011 * into a holding pattern, waiting for the PCI bus to come back.
9012 */
9013static void ipr_pci_frozen(struct pci_dev *pdev)
9014{
9015 unsigned long flags = 0;
9016 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9017
9018 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
9019 if (ioa_cfg->probe_done)
9020 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
f8a88b19
LV
9021 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9022}
9023
9024/**
9025 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9026 * @pdev: PCI device struct
9027 *
9028 * Description: This routine is called by the pci error recovery
9029 * code after the PCI slot has been reset, just before we
9030 * should resume normal operations.
9031 */
9032static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9033{
9034 unsigned long flags = 0;
9035 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9036
9037 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
9038 if (ioa_cfg->probe_done) {
9039 if (ioa_cfg->needs_warm_reset)
9040 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9041 else
9042 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9043 IPR_SHUTDOWN_NONE);
9044 } else
9045 wake_up_all(&ioa_cfg->eeh_wait_q);
f8a88b19
LV
9046 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9047 return PCI_ERS_RESULT_RECOVERED;
9048}
9049
9050/**
9051 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9052 * @pdev: PCI device struct
9053 *
9054 * Description: This routine is called when the PCI bus has
9055 * permanently failed.
9056 */
9057static void ipr_pci_perm_failure(struct pci_dev *pdev)
9058{
9059 unsigned long flags = 0;
9060 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
56d6aa33 9061 int i;
f8a88b19
LV
9062
9063 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
9064 if (ioa_cfg->probe_done) {
9065 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9066 ioa_cfg->sdt_state = ABORT_DUMP;
9067 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9068 ioa_cfg->in_ioa_bringdown = 1;
9069 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9070 spin_lock(&ioa_cfg->hrrq[i]._lock);
9071 ioa_cfg->hrrq[i].allow_cmds = 0;
9072 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9073 }
9074 wmb();
9075 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9076 } else
9077 wake_up_all(&ioa_cfg->eeh_wait_q);
f8a88b19
LV
9078 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9079}
9080
9081/**
9082 * ipr_pci_error_detected - Called when a PCI error is detected.
9083 * @pdev: PCI device struct
9084 * @state: PCI channel state
9085 *
9086 * Description: Called when a PCI error is detected.
9087 *
9088 * Return value:
 9089 * PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9090 */
9091static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9092 pci_channel_state_t state)
9093{
9094 switch (state) {
9095 case pci_channel_io_frozen:
9096 ipr_pci_frozen(pdev);
6270e593 9097 return PCI_ERS_RESULT_CAN_RECOVER;
f8a88b19
LV
9098 case pci_channel_io_perm_failure:
9099 ipr_pci_perm_failure(pdev);
9100 return PCI_ERS_RESULT_DISCONNECT;
9102 default:
9103 break;
9104 }
9105 return PCI_ERS_RESULT_NEED_RESET;
9106}
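/*
 * These callbacks reach the driver through the PCI error recovery core;
 * a sketch of the wiring, consistent with the handlers above (the table
 * is referenced from the pci_driver definition; resume omitted):
 */
static const struct pci_error_handlers example_ipr_err_handler = {
	.error_detected	= ipr_pci_error_detected,
	.mmio_enabled	= ipr_pci_mmio_enabled,
	.slot_reset	= ipr_pci_slot_reset,
};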
9107
1da177e4
LT
9108/**
9109 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9110 * @ioa_cfg: ioa cfg struct
9111 *
 9112 * Description: This is the second phase of adapter initialization.
 9113 * This function takes care of initializing the adapter to the point
 9114 * where it can accept new commands.
 9115 *
9116 * Return value:
b1c11812 9117 * 0 on success / -EIO on failure
1da177e4 9118 **/
6f039790 9119static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9120{
9121 int rc = 0;
9122 unsigned long host_lock_flags = 0;
9123
9124 ENTER;
9125 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9126 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
6270e593 9127 ioa_cfg->probe_done = 1;
ce155cce 9128 if (ioa_cfg->needs_hard_reset) {
9129 ioa_cfg->needs_hard_reset = 0;
9130 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9131 } else
9132 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9133 IPR_SHUTDOWN_NONE);
1da177e4 9134 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
1da177e4
LT
9135
9136 LEAVE;
9137 return rc;
9138}
9139
9140/**
9141 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9142 * @ioa_cfg: ioa config struct
9143 *
9144 * Return value:
9145 * none
9146 **/
9147static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9148{
9149 int i;
9150
a65e8f12
BK
9151 if (ioa_cfg->ipr_cmnd_list) {
9152 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9153 if (ioa_cfg->ipr_cmnd_list[i])
9154 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9155 ioa_cfg->ipr_cmnd_list[i],
9156 ioa_cfg->ipr_cmnd_list_dma[i]);
1da177e4 9157
a65e8f12
BK
9158 ioa_cfg->ipr_cmnd_list[i] = NULL;
9159 }
1da177e4
LT
9160 }
9161
9162 if (ioa_cfg->ipr_cmd_pool)
d73341bf 9163 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
1da177e4 9164
89aad428
BK
9165 kfree(ioa_cfg->ipr_cmnd_list);
9166 kfree(ioa_cfg->ipr_cmnd_list_dma);
9167 ioa_cfg->ipr_cmnd_list = NULL;
9168 ioa_cfg->ipr_cmnd_list_dma = NULL;
1da177e4
LT
9169 ioa_cfg->ipr_cmd_pool = NULL;
9170}
9171
9172/**
9173 * ipr_free_mem - Frees memory allocated for an adapter
9174 * @ioa_cfg: ioa cfg struct
9175 *
9176 * Return value:
9177 * nothing
9178 **/
9179static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9180{
9181 int i;
9182
9183 kfree(ioa_cfg->res_entries);
d73341bf
AB
9184 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9185 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
1da177e4 9186 ipr_free_cmd_blks(ioa_cfg);
05a6538a 9187
9188 for (i = 0; i < ioa_cfg->hrrq_num; i++)
d73341bf
AB
9189 dma_free_coherent(&ioa_cfg->pdev->dev,
9190 sizeof(u32) * ioa_cfg->hrrq[i].size,
9191 ioa_cfg->hrrq[i].host_rrq,
9192 ioa_cfg->hrrq[i].host_rrq_dma);
05a6538a 9193
d73341bf
AB
9194 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9195 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
1da177e4
LT
9196
9197 for (i = 0; i < IPR_NUM_HCAMS; i++) {
d73341bf
AB
9198 dma_free_coherent(&ioa_cfg->pdev->dev,
9199 sizeof(struct ipr_hostrcb),
9200 ioa_cfg->hostrcb[i],
9201 ioa_cfg->hostrcb_dma[i]);
1da177e4
LT
9202 }
9203
9204 ipr_free_dump(ioa_cfg);
1da177e4
LT
9205 kfree(ioa_cfg->trace);
9206}
9207
9208/**
2796ca5e
BK
9209 * ipr_free_irqs - Free all allocated IRQs for the adapter.
 9210 * @ioa_cfg: ioa config struct
1da177e4 9211 *
2796ca5e 9212 * This function frees all allocated IRQs for the
1da177e4
LT
9213 * specified adapter.
9214 *
9215 * Return value:
9216 * none
9217 **/
2796ca5e 9218static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9219{
9220 struct pci_dev *pdev = ioa_cfg->pdev;
9221
05a6538a 9222 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9223 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9224 int i;
9225 for (i = 0; i < ioa_cfg->nvectors; i++)
9226 free_irq(ioa_cfg->vectors_info[i].vec,
2796ca5e 9227 &ioa_cfg->hrrq[i]);
05a6538a 9228 } else
9229 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
9230
56d6aa33 9231 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
05a6538a 9232 pci_disable_msi(pdev);
56d6aa33 9233 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9234 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
05a6538a 9235 pci_disable_msix(pdev);
56d6aa33 9236 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9237 }
2796ca5e 9238}
05a6538a 9239
2796ca5e
BK
9240/**
9241 * ipr_free_all_resources - Free all allocated resources for an adapter.
 9242 * @ioa_cfg: ioa config struct
9243 *
9244 * This function frees all allocated resources for the
9245 * specified adapter.
9246 *
9247 * Return value:
9248 * none
9249 **/
9250static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9251{
9252 struct pci_dev *pdev = ioa_cfg->pdev;
05a6538a 9253
2796ca5e
BK
9254 ENTER;
9255 ipr_free_irqs(ioa_cfg);
9256 if (ioa_cfg->reset_work_q)
9257 destroy_workqueue(ioa_cfg->reset_work_q);
1da177e4
LT
9258 iounmap(ioa_cfg->hdw_dma_regs);
9259 pci_release_regions(pdev);
9260 ipr_free_mem(ioa_cfg);
9261 scsi_host_put(ioa_cfg->host);
9262 pci_disable_device(pdev);
9263 LEAVE;
9264}
9265
9266/**
9267 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9268 * @ioa_cfg: ioa config struct
9269 *
9270 * Return value:
9271 * 0 on success / -ENOMEM on allocation failure
9272 **/
6f039790 9273static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9274{
9275 struct ipr_cmnd *ipr_cmd;
9276 struct ipr_ioarcb *ioarcb;
9277 dma_addr_t dma_addr;
05a6538a 9278 int i, entries_each_hrrq, hrrq_id = 0;
1da177e4 9279
d73341bf 9280 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
203fa3fe 9281 sizeof(struct ipr_cmnd), 512, 0);
1da177e4
LT
9282
9283 if (!ioa_cfg->ipr_cmd_pool)
9284 return -ENOMEM;
9285
89aad428
BK
9286 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9287 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9288
9289 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9290 ipr_free_cmd_blks(ioa_cfg);
9291 return -ENOMEM;
9292 }
9293
05a6538a 9294 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9295 if (ioa_cfg->hrrq_num > 1) {
9296 if (i == 0) {
9297 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9298 ioa_cfg->hrrq[i].min_cmd_id = 0;
9299 ioa_cfg->hrrq[i].max_cmd_id =
9300 (entries_each_hrrq - 1);
9301 } else {
9302 entries_each_hrrq =
9303 IPR_NUM_BASE_CMD_BLKS/
9304 (ioa_cfg->hrrq_num - 1);
9305 ioa_cfg->hrrq[i].min_cmd_id =
9306 IPR_NUM_INTERNAL_CMD_BLKS +
9307 (i - 1) * entries_each_hrrq;
9308 ioa_cfg->hrrq[i].max_cmd_id =
9309 (IPR_NUM_INTERNAL_CMD_BLKS +
9310 i * entries_each_hrrq - 1);
9311 }
9312 } else {
9313 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9314 ioa_cfg->hrrq[i].min_cmd_id = 0;
9315 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9316 }
9317 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9318 }
9319
9320 BUG_ON(ioa_cfg->hrrq_num == 0);
9321
9322 i = IPR_NUM_CMD_BLKS -
9323 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9324 if (i > 0) {
9325 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9326 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9327 }
9328
1da177e4 9329 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
d73341bf 9330 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
1da177e4
LT
9331
9332 if (!ipr_cmd) {
9333 ipr_free_cmd_blks(ioa_cfg);
9334 return -ENOMEM;
9335 }
9336
9337 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9338 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9339 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9340
9341 ioarcb = &ipr_cmd->ioarcb;
a32c055f
WB
9342 ipr_cmd->dma_addr = dma_addr;
9343 if (ioa_cfg->sis64)
9344 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9345 else
9346 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9347
1da177e4 9348 ioarcb->host_response_handle = cpu_to_be32(i << 2);
a32c055f
WB
9349 if (ioa_cfg->sis64) {
9350 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9351 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9352 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
96d21f00 9353 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
a32c055f
WB
9354 } else {
9355 ioarcb->write_ioadl_addr =
9356 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9357 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9358 ioarcb->ioasa_host_pci_addr =
96d21f00 9359 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
a32c055f 9360 }
1da177e4
LT
9361 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9362 ipr_cmd->cmd_index = i;
9363 ipr_cmd->ioa_cfg = ioa_cfg;
9364 ipr_cmd->sense_buffer_dma = dma_addr +
9365 offsetof(struct ipr_cmnd, sense_buffer);
9366
05a6538a 9367 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9368 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9369 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9370 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9371 hrrq_id++;
1da177e4
LT
9372 }
9373
9374 return 0;
9375}
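/*
 * Worked example of the partitioning above (symbolic, since the block
 * counts come from ipr.h): with hrrq_num = 3, hrrq[0] owns command ids
 * 0 .. IPR_NUM_INTERNAL_CMD_BLKS - 1, the IPR_NUM_BASE_CMD_BLKS
 * remaining ids are split evenly between hrrq[1] and hrrq[2], and any
 * ids left over by the integer division are folded into the last queue,
 * so all IPR_NUM_CMD_BLKS ids end up owned by exactly one queue.
 */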
9376
9377/**
9378 * ipr_alloc_mem - Allocate memory for an adapter
9379 * @ioa_cfg: ioa config struct
9380 *
9381 * Return value:
9382 * 0 on success / non-zero for error
9383 **/
6f039790 9384static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9385{
9386 struct pci_dev *pdev = ioa_cfg->pdev;
9387 int i, rc = -ENOMEM;
9388
9389 ENTER;
0bc42e35 9390 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
3e7ebdfa 9391 ioa_cfg->max_devs_supported, GFP_KERNEL);
1da177e4
LT
9392
9393 if (!ioa_cfg->res_entries)
9394 goto out;
9395
3e7ebdfa 9396 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
1da177e4 9397 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
3e7ebdfa
WB
9398 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9399 }
1da177e4 9400
d73341bf
AB
9401 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9402 sizeof(struct ipr_misc_cbs),
9403 &ioa_cfg->vpd_cbs_dma,
9404 GFP_KERNEL);
1da177e4
LT
9405
9406 if (!ioa_cfg->vpd_cbs)
9407 goto out_free_res_entries;
9408
9409 if (ipr_alloc_cmd_blks(ioa_cfg))
9410 goto out_free_vpd_cbs;
9411
05a6538a 9412 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
d73341bf 9413 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
05a6538a 9414 sizeof(u32) * ioa_cfg->hrrq[i].size,
d73341bf
AB
9415 &ioa_cfg->hrrq[i].host_rrq_dma,
9416 GFP_KERNEL);
05a6538a 9417
9418 if (!ioa_cfg->hrrq[i].host_rrq) {
 9419 while (--i >= 0)
d73341bf 9420 dma_free_coherent(&pdev->dev,
05a6538a 9421 sizeof(u32) * ioa_cfg->hrrq[i].size,
9422 ioa_cfg->hrrq[i].host_rrq,
9423 ioa_cfg->hrrq[i].host_rrq_dma);
9424 goto out_ipr_free_cmd_blocks;
9425 }
9426 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9427 }
1da177e4 9428
d73341bf
AB
9429 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9430 ioa_cfg->cfg_table_size,
9431 &ioa_cfg->cfg_table_dma,
9432 GFP_KERNEL);
1da177e4 9433
3e7ebdfa 9434 if (!ioa_cfg->u.cfg_table)
1da177e4
LT
9435 goto out_free_host_rrq;
9436
9437 for (i = 0; i < IPR_NUM_HCAMS; i++) {
d73341bf
AB
9438 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9439 sizeof(struct ipr_hostrcb),
9440 &ioa_cfg->hostrcb_dma[i],
9441 GFP_KERNEL);
1da177e4
LT
9442
9443 if (!ioa_cfg->hostrcb[i])
9444 goto out_free_hostrcb_dma;
9445
9446 ioa_cfg->hostrcb[i]->hostrcb_dma =
9447 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
49dc6a18 9448 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
1da177e4
LT
9449 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9450 }
9451
0bc42e35 9452 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
1da177e4
LT
9453 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9454
9455 if (!ioa_cfg->trace)
9456 goto out_free_hostrcb_dma;
9457
1da177e4
LT
9458 rc = 0;
9459out:
9460 LEAVE;
9461 return rc;
9462
9463out_free_hostrcb_dma:
9464 while (i-- > 0) {
d73341bf
AB
9465 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9466 ioa_cfg->hostrcb[i],
9467 ioa_cfg->hostrcb_dma[i]);
1da177e4 9468 }
d73341bf
AB
9469 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9470 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
1da177e4 9471out_free_host_rrq:
05a6538a 9472 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
d73341bf
AB
9473 dma_free_coherent(&pdev->dev,
9474 sizeof(u32) * ioa_cfg->hrrq[i].size,
9475 ioa_cfg->hrrq[i].host_rrq,
9476 ioa_cfg->hrrq[i].host_rrq_dma);
05a6538a 9477 }
1da177e4
LT
9478out_ipr_free_cmd_blocks:
9479 ipr_free_cmd_blks(ioa_cfg);
9480out_free_vpd_cbs:
d73341bf
AB
9481 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9482 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
1da177e4
LT
9483out_free_res_entries:
9484 kfree(ioa_cfg->res_entries);
9485 goto out;
9486}
9487
9488/**
9489 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9490 * @ioa_cfg: ioa config struct
9491 *
9492 * Return value:
9493 * none
9494 **/
6f039790 9495static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9496{
9497 int i;
9498
9499 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9500 ioa_cfg->bus_attr[i].bus = i;
9501 ioa_cfg->bus_attr[i].qas_enabled = 0;
9502 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9503 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9504 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9505 else
9506 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9507 }
9508}
9509
6270e593
BK
9510/**
9511 * ipr_init_regs - Initialize IOA registers
9512 * @ioa_cfg: ioa config struct
9513 *
9514 * Return value:
9515 * none
9516 **/
9517static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9518{
9519 const struct ipr_interrupt_offsets *p;
9520 struct ipr_interrupts *t;
9521 void __iomem *base;
9522
9523 p = &ioa_cfg->chip_cfg->regs;
9524 t = &ioa_cfg->regs;
9525 base = ioa_cfg->hdw_dma_regs;
9526
9527 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9528 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9529 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9530 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9531 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9532 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9533 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9534 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9535 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9536 t->ioarrin_reg = base + p->ioarrin_reg;
9537 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9538 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9539 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9540 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9541 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9542 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9543
9544 if (ioa_cfg->sis64) {
9545 t->init_feedback_reg = base + p->init_feedback_reg;
9546 t->dump_addr_reg = base + p->dump_addr_reg;
9547 t->dump_data_reg = base + p->dump_data_reg;
9548 t->endian_swap_reg = base + p->endian_swap_reg;
9549 }
9550}
9551
1da177e4
LT
9552/**
9553 * ipr_init_ioa_cfg - Initialize IOA config struct
9554 * @ioa_cfg: ioa config struct
9555 * @host: scsi host struct
9556 * @pdev: PCI dev struct
9557 *
9558 * Return value:
9559 * none
9560 **/
6f039790
GKH
9561static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9562 struct Scsi_Host *host, struct pci_dev *pdev)
1da177e4 9563{
6270e593 9564 int i;
1da177e4
LT
9565
9566 ioa_cfg->host = host;
9567 ioa_cfg->pdev = pdev;
9568 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 9569 ioa_cfg->doorbell = IPR_DOORBELL;
1da177e4
LT
9570 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9571 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
1da177e4
LT
9572 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9573 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9574 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9575 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9576
1da177e4
LT
9577 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9578 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9579 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9580 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
c4028958 9581 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
1da177e4 9582 init_waitqueue_head(&ioa_cfg->reset_wait_q);
95fecd90 9583 init_waitqueue_head(&ioa_cfg->msi_wait_q);
6270e593 9584 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
1da177e4
LT
9585 ioa_cfg->sdt_state = INACTIVE;
9586
9587 ipr_initialize_bus_attr(ioa_cfg);
3e7ebdfa 9588 ioa_cfg->max_devs_supported = ipr_max_devs;
1da177e4 9589
3e7ebdfa
WB
9590 if (ioa_cfg->sis64) {
9591 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9592 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9593 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9594 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
6270e593
BK
9595 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9596 + ((sizeof(struct ipr_config_table_entry64)
9597 * ioa_cfg->max_devs_supported)));
3e7ebdfa
WB
9598 } else {
9599 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9600 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9601 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9602 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
6270e593
BK
9603 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9604 + ((sizeof(struct ipr_config_table_entry)
9605 * ioa_cfg->max_devs_supported)));
3e7ebdfa 9606 }
6270e593 9607
f688f96d 9608 host->max_channel = IPR_VSET_BUS;
1da177e4
LT
9609 host->unique_id = host->host_no;
9610 host->max_cmd_len = IPR_MAX_CDB_LEN;
89aad428 9611 host->can_queue = ioa_cfg->max_cmds;
1da177e4
LT
9612 pci_set_drvdata(pdev, ioa_cfg);
9613
6270e593
BK
9614 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9615 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9616 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9617 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9618 if (i == 0)
9619 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9620 else
9621 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
dcbad00e 9622 }
1da177e4
LT
9623}
9624
9625/**
1be7bd82 9626 * ipr_get_chip_info - Find adapter chip information
1da177e4
LT
9627 * @dev_id: PCI device id struct
9628 *
9629 * Return value:
1be7bd82 9630 * ptr to chip information on success / NULL on failure
1da177e4 9631 **/
6f039790 9632static const struct ipr_chip_t *
1be7bd82 9633ipr_get_chip_info(const struct pci_device_id *dev_id)
1da177e4
LT
9634{
9635 int i;
9636
1da177e4
LT
9637 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9638 if (ipr_chip[i].vendor == dev_id->vendor &&
9639 ipr_chip[i].device == dev_id->device)
1be7bd82 9640 return &ipr_chip[i];
1da177e4
LT
9641 return NULL;
9642}
9643
6270e593
BK
9644/**
9645 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9646 * during probe time
9647 * @ioa_cfg: ioa config struct
9648 *
9649 * Return value:
9650 * None
9651 **/
9652static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9653{
9654 struct pci_dev *pdev = ioa_cfg->pdev;
9655
9656 if (pci_channel_offline(pdev)) {
9657 wait_event_timeout(ioa_cfg->eeh_wait_q,
9658 !pci_channel_offline(pdev),
9659 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9660 pci_restore_state(pdev);
9661 }
9662}
9663
05a6538a 9664static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9665{
9666 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
60e76b77 9667 int i, vectors;
05a6538a 9668
9669 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9670 entries[i].entry = i;
9671
60e76b77
AG
9672 vectors = pci_enable_msix_range(ioa_cfg->pdev,
9673 entries, 1, ipr_number_of_msix);
9674 if (vectors < 0) {
6270e593 9675 ipr_wait_for_pci_err_recovery(ioa_cfg);
60e76b77 9676 return vectors;
05a6538a 9677 }
9678
60e76b77
AG
9679 for (i = 0; i < vectors; i++)
9680 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9681 ioa_cfg->nvectors = vectors;
05a6538a 9682
60e76b77 9683 return 0;
05a6538a 9684}
9685
9686static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9687{
60e76b77 9688 int i, vectors;
05a6538a 9689
60e76b77
AG
9690 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9691 if (vectors < 0) {
6270e593 9692 ipr_wait_for_pci_err_recovery(ioa_cfg);
60e76b77 9693 return vectors;
05a6538a 9694 }
9695
60e76b77
AG
9696 for (i = 0; i < vectors; i++)
9697 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9698 ioa_cfg->nvectors = vectors;
05a6538a 9699
60e76b77 9700 return 0;
05a6538a 9701}
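/*
 * The probe code further down tries these helpers in order. A condensed
 * sketch of that fallback policy (hypothetical helper; the real probe
 * path also checks the chip's intr_type first):
 */
static int example_setup_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
	/* prefer MSI-X, then MSI, then a single legacy (LSI) interrupt */
	if (ipr_enable_msix(ioa_cfg) == 0)
		return IPR_USE_MSIX;
	if (ipr_enable_msi(ioa_cfg) == 0)
		return IPR_USE_MSI;
	ioa_cfg->nvectors = 1;
	return IPR_USE_LSI;
}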
9702
9703static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9704{
9705 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9706
9707 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9708 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9709 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9710 ioa_cfg->vectors_info[vec_idx].
9711 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9712 }
9713}
9714
9715static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9716{
9717 int i, rc;
9718
9719 for (i = 1; i < ioa_cfg->nvectors; i++) {
9720 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9721 ipr_isr_mhrrq,
9722 0,
9723 ioa_cfg->vectors_info[i].desc,
9724 &ioa_cfg->hrrq[i]);
9725 if (rc) {
9726 while (--i >= 0)
9727 free_irq(ioa_cfg->vectors_info[i].vec,
9728 &ioa_cfg->hrrq[i]);
9729 return rc;
9730 }
9731 }
9732 return 0;
9733}
9734
95fecd90
WB
9735/**
9736 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 9737 * @irq: interrupt number
 * @devp: pointer to the ioa config struct
9738 *
9739 * Description: Simply set the msi_received flag to 1 indicating that
9740 * Message Signaled Interrupts are supported.
9741 *
9742 * Return value:
 9743 * IRQ_HANDLED
9744 **/
6f039790 9745static irqreturn_t ipr_test_intr(int irq, void *devp)
95fecd90
WB
9746{
9747 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9748 unsigned long lock_flags = 0;
9749 irqreturn_t rc = IRQ_HANDLED;
9750
05a6538a 9751 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
95fecd90
WB
9752 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9753
9754 ioa_cfg->msi_received = 1;
9755 wake_up(&ioa_cfg->msi_wait_q);
9756
9757 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9758 return rc;
9759}
9760
9761/**
9762 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 9763 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
9764 *
60e76b77 9765 * Description: The return value from pci_enable_msi_range() cannot always be
95fecd90
WB
9766 * trusted. This routine sets up and initiates a test interrupt to determine
9767 * if the interrupt is received via the ipr_test_intr() service routine.
 9768 * If the test fails, the driver will fall back to LSI.
9769 *
9770 * Return value:
9771 * 0 on success / non-zero on failure
9772 **/
6f039790 9773static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
95fecd90
WB
9774{
9775 int rc;
9776 volatile u32 int_reg;
9777 unsigned long lock_flags = 0;
9778
9779 ENTER;
9780
9781 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9782 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9783 ioa_cfg->msi_received = 0;
9784 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
214777ba 9785 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
95fecd90
WB
9786 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9787 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9788
f19799f4 9789 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9790 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9791 else
9792 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
95fecd90
WB
9793 if (rc) {
9794 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9795 return rc;
9796 } else if (ipr_debug)
9797 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9798
214777ba 9799 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
95fecd90
WB
9800 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9801 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
56d6aa33 9802 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
95fecd90
WB
9803 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9804
95fecd90
WB
9805 if (!ioa_cfg->msi_received) {
9806 /* MSI test failed */
9807 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
9808 rc = -EOPNOTSUPP;
9809 } else if (ipr_debug)
9810 dev_info(&pdev->dev, "MSI test succeeded.\n");
9811
9812 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9813
f19799f4 9814 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9815 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9816 else
9817 free_irq(pdev->irq, ioa_cfg);
95fecd90
WB
9818
9819 LEAVE;
9820
9821 return rc;
9822}
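/*
 * The routine above follows a generic "self-test an interrupt" recipe.
 * Condensed sketch (illustrative; locking, the LSI fallback and the
 * MSI-X vector case are elided):
 */
static bool example_msi_works(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->msi_received = 0;
	/* 1. unmask one benign interrupt source */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_mask_reg32);
	/* 2. fire it */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.sense_interrupt_reg32);
	/* 3. ipr_test_intr() sets msi_received and wakes us up */
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	return ioa_cfg->msi_received != 0;
}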
9823
05a6538a 9824/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
1da177e4
LT
9825 * @pdev: PCI device struct
9826 * @dev_id: PCI device id struct
9827 *
9828 * Return value:
9829 * 0 on success / non-zero on failure
9830 **/
6f039790
GKH
9831static int ipr_probe_ioa(struct pci_dev *pdev,
9832 const struct pci_device_id *dev_id)
1da177e4
LT
9833{
9834 struct ipr_ioa_cfg *ioa_cfg;
9835 struct Scsi_Host *host;
9836 unsigned long ipr_regs_pci;
9837 void __iomem *ipr_regs;
a2a65a3e 9838 int rc = PCIBIOS_SUCCESSFUL;
473b1e8e 9839 volatile u32 mask, uproc, interrupts;
feccada9 9840 unsigned long lock_flags, driver_lock_flags;
1da177e4
LT
9841
9842 ENTER;
9843
1da177e4 9844 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
1da177e4
LT
9845 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9846
9847 if (!host) {
9848 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9849 rc = -ENOMEM;
6270e593 9850 goto out;
1da177e4
LT
9851 }
9852
9853 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9854 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8d8e7d13 9855 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
1da177e4 9856
1be7bd82 9857 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
1da177e4 9858
1be7bd82 9859 if (!ioa_cfg->ipr_chip) {
1da177e4
LT
9860 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9861 dev_id->vendor, dev_id->device);
9862 goto out_scsi_host_put;
9863 }
9864
a32c055f
WB
9865 /* set SIS 32 or SIS 64 */
9866 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
1be7bd82 9867 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
7dd21308 9868 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
89aad428 9869 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
1be7bd82 9870
5469cb5b
BK
9871 if (ipr_transop_timeout)
9872 ioa_cfg->transop_timeout = ipr_transop_timeout;
9873 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9874 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9875 else
9876 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9877
44c10138 9878 ioa_cfg->revid = pdev->revision;
463fc696 9879
6270e593
BK
9880 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9881
1da177e4
LT
9882 ipr_regs_pci = pci_resource_start(pdev, 0);
9883
9884 rc = pci_request_regions(pdev, IPR_NAME);
9885 if (rc < 0) {
9886 dev_err(&pdev->dev,
9887 "Couldn't register memory range of registers\n");
9888 goto out_scsi_host_put;
9889 }
9890
6270e593
BK
9891 rc = pci_enable_device(pdev);
9892
9893 if (rc || pci_channel_offline(pdev)) {
9894 if (pci_channel_offline(pdev)) {
9895 ipr_wait_for_pci_err_recovery(ioa_cfg);
9896 rc = pci_enable_device(pdev);
9897 }
9898
9899 if (rc) {
9900 dev_err(&pdev->dev, "Cannot enable adapter\n");
9901 ipr_wait_for_pci_err_recovery(ioa_cfg);
9902 goto out_release_regions;
9903 }
9904 }
9905
25729a7f 9906 ipr_regs = pci_ioremap_bar(pdev, 0);
1da177e4
LT
9907
9908 if (!ipr_regs) {
9909 dev_err(&pdev->dev,
9910 "Couldn't map memory range of registers\n");
9911 rc = -ENOMEM;
6270e593 9912 goto out_disable;
1da177e4
LT
9913 }
9914
9915 ioa_cfg->hdw_dma_regs = ipr_regs;
9916 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9917 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9918
6270e593 9919 ipr_init_regs(ioa_cfg);
1da177e4 9920
a32c055f 9921 if (ioa_cfg->sis64) {
869404cb 9922 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
a32c055f 9923 if (rc < 0) {
869404cb
AB
9924 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9925 rc = dma_set_mask_and_coherent(&pdev->dev,
9926 DMA_BIT_MASK(32));
a32c055f 9927 }
a32c055f 9928 } else
869404cb 9929 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
a32c055f 9930
1da177e4 9931 if (rc < 0) {
869404cb 9932 dev_err(&pdev->dev, "Failed to set DMA mask\n");
1da177e4
LT
9933 goto cleanup_nomem;
9934 }
9935
9936 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9937 ioa_cfg->chip_cfg->cache_line_size);
9938
9939 if (rc != PCIBIOS_SUCCESSFUL) {
9940 dev_err(&pdev->dev, "Write of cache line size failed\n");
6270e593 9941 ipr_wait_for_pci_err_recovery(ioa_cfg);
1da177e4
LT
9942 rc = -EIO;
9943 goto cleanup_nomem;
9944 }
9945
6270e593
BK
9946 /* Issue MMIO read to ensure card is not in EEH */
9947 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9948 ipr_wait_for_pci_err_recovery(ioa_cfg);
9949
05a6538a 9950 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9951 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9952 IPR_MAX_MSIX_VECTORS);
9953 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9954 }
9955
9956 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
56d6aa33 9957 ipr_enable_msix(ioa_cfg) == 0)
05a6538a 9958 ioa_cfg->intr_flag = IPR_USE_MSIX;
9959 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
56d6aa33 9960 ipr_enable_msi(ioa_cfg) == 0)
05a6538a 9961 ioa_cfg->intr_flag = IPR_USE_MSI;
9962 else {
9963 ioa_cfg->intr_flag = IPR_USE_LSI;
9964 ioa_cfg->nvectors = 1;
9965 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9966 }
9967
6270e593
BK
9968 pci_set_master(pdev);
9969
9970 if (pci_channel_offline(pdev)) {
9971 ipr_wait_for_pci_err_recovery(ioa_cfg);
9972 pci_set_master(pdev);
9973 if (pci_channel_offline(pdev)) {
9974 rc = -EIO;
9975 goto out_msi_disable;
9976 }
9977 }
9978
05a6538a 9979 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9980 ioa_cfg->intr_flag == IPR_USE_MSIX) {
95fecd90 9981 rc = ipr_test_msi(ioa_cfg, pdev);
05a6538a 9982 if (rc == -EOPNOTSUPP) {
6270e593 9983 ipr_wait_for_pci_err_recovery(ioa_cfg);
05a6538a 9984 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9985 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9986 pci_disable_msi(pdev);
9987 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9988 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9989 pci_disable_msix(pdev);
9990 }
9991
9992 ioa_cfg->intr_flag = IPR_USE_LSI;
9993 ioa_cfg->nvectors = 1;
9994 }
95fecd90
WB
9995 else if (rc)
9996 goto out_msi_disable;
05a6538a 9997 else {
9998 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9999 dev_info(&pdev->dev,
10000 "Request for %d MSIs succeeded with starting IRQ: %d\n",
10001 ioa_cfg->nvectors, pdev->irq);
10002 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10003 dev_info(&pdev->dev,
10004 "Request for %d MSIXs succeeded.",
10005 ioa_cfg->nvectors);
10006 }
10007 }
10008
10009 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10010 (unsigned int)num_online_cpus(),
10011 (unsigned int)IPR_MAX_HRRQ_NUM);
95fecd90 10012
1da177e4 10013 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
f170c684 10014 goto out_msi_disable;
1da177e4
LT
10015
10016 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
f170c684 10017 goto out_msi_disable;
1da177e4
LT
10018
10019 rc = ipr_alloc_mem(ioa_cfg);
10020 if (rc < 0) {
10021 dev_err(&pdev->dev,
10022 "Couldn't allocate enough memory for device driver!\n");
f170c684 10023 goto out_msi_disable;
1da177e4
LT
10024 }
10025
6270e593
BK
10026 /* Save away PCI config space for use following IOA reset */
10027 rc = pci_save_state(pdev);
10028
10029 if (rc != PCIBIOS_SUCCESSFUL) {
10030 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10031 rc = -EIO;
10032 goto cleanup_nolog;
10033 }
10034
ce155cce 10035 /*
10036 * If HRRQ updated interrupt is not masked, or reset alert is set,
10037 * the card is in an unknown state and needs a hard reset
10038 */
214777ba
WB
10039 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10040 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10041 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
ce155cce 10042 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10043 ioa_cfg->needs_hard_reset = 1;
5d7c20b7 10044 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
473b1e8e
BK
10045 ioa_cfg->needs_hard_reset = 1;
10046 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10047 ioa_cfg->ioa_unit_checked = 1;
ce155cce 10048
56d6aa33 10049 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1da177e4 10050 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
56d6aa33 10051 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4 10052
05a6538a 10053 if (ioa_cfg->intr_flag == IPR_USE_MSI
10054 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
10055 name_msi_vectors(ioa_cfg);
10056 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
10057 0,
10058 ioa_cfg->vectors_info[0].desc,
10059 &ioa_cfg->hrrq[0]);
10060 if (!rc)
10061 rc = ipr_request_other_msi_irqs(ioa_cfg);
10062 } else {
10063 rc = request_irq(pdev->irq, ipr_isr,
10064 IRQF_SHARED,
10065 IPR_NAME, &ioa_cfg->hrrq[0]);
10066 }
1da177e4
LT
10067 if (rc) {
10068 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10069 pdev->irq, rc);
10070 goto cleanup_nolog;
10071 }
10072
463fc696
BK
10073 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10074 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10075 ioa_cfg->needs_warm_reset = 1;
10076 ioa_cfg->reset = ipr_reset_slot_reset;
2796ca5e
BK
10077
10078 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10079 WQ_MEM_RECLAIM, host->host_no);
10080
10081 if (!ioa_cfg->reset_work_q) {
10082 dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10083 goto out_free_irq;
10084 }
463fc696
BK
10085 } else
10086 ioa_cfg->reset = ipr_reset_start_bist;
10087
feccada9 10088 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
1da177e4 10089 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
feccada9 10090 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
1da177e4
LT
10091
10092 LEAVE;
10093out:
10094 return rc;
10095
2796ca5e
BK
10096out_free_irq:
10097 ipr_free_irqs(ioa_cfg);
1da177e4
LT
10098cleanup_nolog:
10099 ipr_free_mem(ioa_cfg);
95fecd90 10100out_msi_disable:
6270e593 10101 ipr_wait_for_pci_err_recovery(ioa_cfg);
05a6538a 10102 if (ioa_cfg->intr_flag == IPR_USE_MSI)
10103 pci_disable_msi(pdev);
10104 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10105 pci_disable_msix(pdev);
f170c684
JL
10106cleanup_nomem:
10107 iounmap(ipr_regs);
6270e593
BK
10108out_disable:
10109 pci_disable_device(pdev);
1da177e4
LT
10110out_release_regions:
10111 pci_release_regions(pdev);
10112out_scsi_host_put:
10113 scsi_host_put(host);
1da177e4
LT
10114 goto out;
10115}
10116
1da177e4
LT
10117/**
10118 * ipr_initiate_ioa_bringdown - Bring down an adapter
10119 * @ioa_cfg: ioa config struct
10120 * @shutdown_type: shutdown type
10121 *
10122 * Description: This function will initiate bringing down the adapter.
10123 * This consists of issuing an IOA shutdown to the adapter
10124 * to flush the cache, and running BIST.
10125 * If the caller needs to wait on the completion of the reset,
10126 * the caller must sleep on the reset_wait_q.
10127 *
10128 * Return value:
10129 * none
10130 **/
10131static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10132 enum ipr_shutdown_type shutdown_type)
10133{
10134 ENTER;
10135 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10136 ioa_cfg->sdt_state = ABORT_DUMP;
10137 ioa_cfg->reset_retries = 0;
10138 ioa_cfg->in_ioa_bringdown = 1;
10139 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10140 LEAVE;
10141}
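/*
 * Illustrative only: a caller that must block until the bringdown
 * completes follows the pattern used by __ipr_remove() below -- initiate
 * the reset under the host lock, then sleep on reset_wait_q:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */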
10142
10143/**
10144 * __ipr_remove - Remove a single adapter
10145 * @pdev: pci device struct
10146 *
10147 * Adapter hot plug remove entry point.
10148 *
10149 * Return value:
10150 * none
10151 **/
10152static void __ipr_remove(struct pci_dev *pdev)
10153{
10154 unsigned long host_lock_flags = 0;
10155 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
bfae7820 10156 int i;
feccada9 10157 unsigned long driver_lock_flags;
1da177e4
LT
10158 ENTER;
10159
10160 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
203fa3fe 10161 while (ioa_cfg->in_reset_reload) {
970ea294
BK
10162 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10163 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10164 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10165 }
10166
bfae7820
BK
10167 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10168 spin_lock(&ioa_cfg->hrrq[i]._lock);
10169 ioa_cfg->hrrq[i].removing_ioa = 1;
10170 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10171 }
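	/* Ensure the removing_ioa flags are visible to the interrupt
	 * handlers before the bringdown is initiated */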
10172 wmb();
1da177e4
LT
10173 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10174
10175 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10176 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
43829731 10177 flush_work(&ioa_cfg->work_q);
2796ca5e
BK
10178 if (ioa_cfg->reset_work_q)
10179 flush_workqueue(ioa_cfg->reset_work_q);
9077a944 10180 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
1da177e4
LT
10181 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10182
feccada9 10183 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
1da177e4 10184 list_del(&ioa_cfg->queue);
feccada9 10185 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
1da177e4
LT
10186
10187 if (ioa_cfg->sdt_state == ABORT_DUMP)
10188 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10189 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10190
10191 ipr_free_all_resources(ioa_cfg);
10192
10193 LEAVE;
10194}
10195
10196/**
10197 * ipr_remove - IOA hot plug remove entry point
10198 * @pdev: pci device struct
10199 *
10200 * Adapter hot plug remove entry point.
10201 *
10202 * Return value:
10203 * none
10204 **/
6f039790 10205static void ipr_remove(struct pci_dev *pdev)
1da177e4
LT
10206{
10207 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10208
10209 ENTER;
10210
ee959b00 10211 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4 10212 &ipr_trace_attr);
ee959b00 10213 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
10214 &ipr_dump_attr);
10215 scsi_remove_host(ioa_cfg->host);
10216
10217 __ipr_remove(pdev);
10218
10219 LEAVE;
10220}
10221
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
6f039790 10228static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
1da177e4
LT
10229{
10230 struct ipr_ioa_cfg *ioa_cfg;
b53d124a 10231 int rc, i;
1da177e4
LT
10232
10233 rc = ipr_probe_ioa(pdev, dev_id);
10234
10235 if (rc)
10236 return rc;
10237
10238 ioa_cfg = pci_get_drvdata(pdev);
10239 rc = ipr_probe_ioa_part2(ioa_cfg);
10240
10241 if (rc) {
10242 __ipr_remove(pdev);
10243 return rc;
10244 }
10245
10246 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10247
10248 if (rc) {
10249 __ipr_remove(pdev);
10250 return rc;
10251 }
10252
ee959b00 10253 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
10254 &ipr_trace_attr);
10255
10256 if (rc) {
10257 scsi_remove_host(ioa_cfg->host);
10258 __ipr_remove(pdev);
10259 return rc;
10260 }
10261
ee959b00 10262 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
10263 &ipr_dump_attr);
10264
10265 if (rc) {
ee959b00 10266 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
10267 &ipr_trace_attr);
10268 scsi_remove_host(ioa_cfg->host);
10269 __ipr_remove(pdev);
10270 return rc;
10271 }
10272
10273 scsi_scan_host(ioa_cfg->host);
b53d124a 10274 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10275
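	/*
	 * On SIS-64 adapters with multiple vectors, service the secondary
	 * HRRQs via blk-iopoll; HRRQ 0 continues to be handled directly in
	 * the interrupt handler.
	 */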
89f8b33c 10276 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 10277 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10278 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
10279 ioa_cfg->iopoll_weight, ipr_iopoll);
10280 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
10281 }
10282 }
10283
1da177e4
LT
10284 schedule_work(&ioa_cfg->work_q);
10285 return 0;
10286}
10287
10288/**
10289 * ipr_shutdown - Shutdown handler.
d18c3db5 10290 * @pdev: pci device struct
1da177e4
LT
10291 *
10292 * This function is invoked upon system shutdown/reboot. It will issue
10293 * an adapter shutdown to the adapter to flush the write cache.
10294 *
10295 * Return value:
10296 * none
10297 **/
d18c3db5 10298static void ipr_shutdown(struct pci_dev *pdev)
1da177e4 10299{
d18c3db5 10300 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
1da177e4 10301 unsigned long lock_flags = 0;
4fdd7c7a 10302 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
b53d124a 10303 int i;
1da177e4
LT
10304
10305 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
89f8b33c 10306 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 10307 ioa_cfg->iopoll_weight = 0;
10308 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10309 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
10310 }
10311
203fa3fe 10312 while (ioa_cfg->in_reset_reload) {
970ea294
BK
10313 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10314 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10315 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10316 }
10317
4fdd7c7a
BK
10318 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10319 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10320
10321 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
1da177e4
LT
10322 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10323 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4fdd7c7a 10324 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
2796ca5e 10325 ipr_free_irqs(ioa_cfg);
4fdd7c7a
BK
10326 pci_disable_device(ioa_cfg->pdev);
10327 }
1da177e4
LT
10328}
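/*
 * Note: with the ipr_fast_reboot module parameter set, SIS-64 adapters
 * take the quiesce path above on restart -- a lighter-weight shutdown in
 * place of the normal cache-flushing one -- and release their IRQs and
 * PCI device immediately.
 */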
10329
static const struct pci_device_id ipr_pci_table[] = {
1da177e4 10331 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 10332 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
1da177e4 10333 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 10334 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
1da177e4 10335 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 10336 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
1da177e4 10337 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 10338 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
1da177e4 10339 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 10340 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
1da177e4 10341 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 10342 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
1da177e4 10343 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 10344 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
86f51436 10345 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
5469cb5b
BK
10346 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10347 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 10348 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6d84c944 10349 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 10350 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
22d2e402
BK
10351 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10352 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 10353 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
5469cb5b
BK
10354 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10355 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 10356 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6d84c944 10357 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 10358 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
22d2e402
BK
10359 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10360 IPR_USE_LONG_TRANSOP_TIMEOUT},
60e7486b 10361 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
5469cb5b
BK
10362 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10363 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c 10364 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
22d2e402
BK
10365 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10366 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c
BK
10367 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10368 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
b0f56d3d
WB
10369 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10370 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
60e7486b 10371 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
5469cb5b 10372 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
463fc696 10373 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
1da177e4 10374 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6d84c944 10375 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
1da177e4 10376 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6d84c944 10377 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
86f51436 10378 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
10379 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10380 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 10381 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
10382 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10383 IPR_USE_LONG_TRANSOP_TIMEOUT },
d7b4627f
WB
10384 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10385 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10386 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10387 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10388 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10389 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
b8d5d568 10390 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10391 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
5a918353
WB
10392 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10393 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
32622bde
WB
10394 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10395 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
cd9b3d04 10396 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10397 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
cd9b3d04 10398 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10399 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
cd9b3d04 10400 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10401 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
cd9b3d04
WB
10402 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10403 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10404 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10405 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
b8d5d568 10406 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10407 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10408 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10409 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10410 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10411 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10412 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10413 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
43c5fdaf 10414 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10415 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10416 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
f94d9964
WX
10417 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10418 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
43c5fdaf 10419 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10420 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10421 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10422 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10423 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10424 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10425 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10426 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10427 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10428 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10429 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
5eeac3e9
WX
10430 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10431 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10432 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10433 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10434 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10435 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
1da177e4
LT
10436 { }
10437};
10438MODULE_DEVICE_TABLE(pci, ipr_pci_table);
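/*
 * Illustrative only: support for a new subsystem ID would be added with one
 * more entry before the terminator; IPR_SUBS_DEV_ID_NEW is a hypothetical
 * placeholder, and the final field carries IPR_USE_* driver_data flags:
 *
 *	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 *		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_NEW, 0, 0, 0 },
 */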
10439
a55b2d21 10440static const struct pci_error_handlers ipr_err_handler = {
f8a88b19 10441 .error_detected = ipr_pci_error_detected,
6270e593 10442 .mmio_enabled = ipr_pci_mmio_enabled,
f8a88b19
LV
10443 .slot_reset = ipr_pci_slot_reset,
10444};
10445
1da177e4
LT
10446static struct pci_driver ipr_driver = {
10447 .name = IPR_NAME,
10448 .id_table = ipr_pci_table,
10449 .probe = ipr_probe,
6f039790 10450 .remove = ipr_remove,
d18c3db5 10451 .shutdown = ipr_shutdown,
f8a88b19 10452 .err_handler = &ipr_err_handler,
1da177e4
LT
10453};
10454
f72919ec
WB
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
10461static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10462{
05a6538a 10463 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
f72919ec
WB
10464}
10465
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:	Notifier block
 * @event:	Notifier event
 * @buf:	Notifier data (unused)
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
10472static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10473{
10474 struct ipr_cmnd *ipr_cmd;
10475 struct ipr_ioa_cfg *ioa_cfg;
feccada9 10476 unsigned long flags = 0, driver_lock_flags;
f72919ec
WB
10477
10478 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10479 return NOTIFY_DONE;
10480
feccada9 10481 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
f72919ec
WB
10482
10483 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10484 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4fdd7c7a
BK
10485 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10486 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
f72919ec
WB
10487 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10488 continue;
10489 }
10490
10491 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10492 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10493 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10494 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10495 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10496
10497 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10498 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10499 }
feccada9 10500 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
f72919ec
WB
10501
10502 return NOTIFY_OK;
10503}
10504
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};
10508
1da177e4
LT
10509/**
10510 * ipr_init - Module entry point
10511 *
10512 * Return value:
10513 * 0 on success / negative value on failure
10514 **/
static int __init ipr_init(void)
{
	int rc;

	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	rc = pci_register_driver(&ipr_driver);
	if (rc)
		unregister_reboot_notifier(&ipr_notifier);

	return rc;
}
10523
10524/**
10525 * ipr_exit - Module unload
10526 *
10527 * Module unload entry point.
10528 *
10529 * Return value:
10530 * none
10531 **/
10532static void __exit ipr_exit(void)
10533{
f72919ec 10534 unregister_reboot_notifier(&ipr_notifier);
1da177e4
LT
10535 pci_unregister_driver(&ipr_driver);
10536}
10537
10538module_init(ipr_init);
10539module_exit(ipr_exit);