Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw
[deliverable/linux.git] / drivers / scsi / dpt_i2o.c
1 /***************************************************************************
2 dpti.c - description
3 -------------------
4 begin : Thu Sep 7 2000
5 copyright : (C) 2000 by Adaptec
6
7 July 30, 2001 First version being submitted
8 for inclusion in the kernel. V2.4
9
10 See Documentation/scsi/dpti.txt for history, notes, license info
11 and credits
12 ***************************************************************************/
13
14 /***************************************************************************
15 * *
16 * This program is free software; you can redistribute it and/or modify *
17 * it under the terms of the GNU General Public License as published by *
18 * the Free Software Foundation; either version 2 of the License, or *
19 * (at your option) any later version. *
20 * *
21 ***************************************************************************/
22 /***************************************************************************
23 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
24 - Support 2.6 kernel and DMA-mapping
25 - ioctl fix for raid tools
26 - use schedule_timeout in long long loop
27 **************************************************************************/
28
29 /*#define DEBUG 1 */
30 /*#define UARTDELAY 1 */
31
32 /* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
33 high pages. Keep the macro around because of the broken unmerged ia64 tree */
34
35 #define ADDR32 (0)
36
37 #include <linux/module.h>
38
39 MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
40 MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
41
42 ////////////////////////////////////////////////////////////////
43
44 #include <linux/ioctl.h> /* For SCSI-Passthrough */
45 #include <asm/uaccess.h>
46
47 #include <linux/stat.h>
48 #include <linux/slab.h> /* for kmalloc() */
49 #include <linux/pci.h> /* for PCI support */
50 #include <linux/proc_fs.h>
51 #include <linux/blkdev.h>
52 #include <linux/delay.h> /* for udelay */
53 #include <linux/interrupt.h>
54 #include <linux/kernel.h> /* for printk */
55 #include <linux/sched.h>
56 #include <linux/reboot.h>
57 #include <linux/spinlock.h>
58 #include <linux/smp_lock.h>
59 #include <linux/dma-mapping.h>
60
61 #include <linux/timer.h>
62 #include <linux/string.h>
63 #include <linux/ioport.h>
64 #include <linux/mutex.h>
65
66 #include <asm/processor.h> /* for boot_cpu_data */
67 #include <asm/pgtable.h>
68 #include <asm/io.h> /* for virt_to_bus, etc. */
69
70 #include <scsi/scsi.h>
71 #include <scsi/scsi_cmnd.h>
72 #include <scsi/scsi_device.h>
73 #include <scsi/scsi_host.h>
74 #include <scsi/scsi_tcq.h>
75
76 #include "dpt/dptsig.h"
77 #include "dpti.h"
78
79 /*============================================================================
80 * Create a binary signature - this is read by dptsig
81 * Needed for our management apps
82 *============================================================================
83 */
/* Binary driver signature consumed by DPT/Adaptec management tools.
 * The processor family/type pair is chosen per build architecture. */
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),	/* unknown processor family/type on other arches */
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
101
102
103
104
105 /*============================================================================
106 * Globals
107 *============================================================================
108 */
109
/* Protects hba_chain / hba_count and the sys_tbl globals below. */
static DEFINE_MUTEX(adpt_configuration_lock);

/* I2O system table shared with the IOPs (see adpt_i2o_build_sys_table) */
static struct i2o_sys_tbl *sys_tbl = NULL;
static int sys_tbl_ind = 0;
static int sys_tbl_len = 0;

static adpt_hba* hba_chain = NULL;	/* singly linked list of installed HBAs */
static int hba_count = 0;		/* number of entries on hba_chain */

/* Entry points for the management/passthrough character device node */
static const struct file_operations adpt_fops = {
	.ioctl = adpt_ioctl,
	.open = adpt_open,
	.release = adpt_close
};

#ifdef REBOOT_NOTIFIER
static struct notifier_block adpt_reboot_notifier =
{
	 adpt_reboot_event,
	 NULL,
	 0
};
#endif

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;				/* completion status filled in by the reply path */
	u32 id;					/* matches the id OR'd into msg[2] by adpt_i2o_post_wait() */
	adpt_wait_queue_head_t *wq;		/* waiter to wake on completion */
	struct adpt_i2o_post_wait_data *next;	/* next pending entry */
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;	/* wraps at 0x7fff in adpt_i2o_post_wait() */
static DEFINE_SPINLOCK(adpt_post_wait_lock);
148
149
150 /*============================================================================
151 * Functions
152 *============================================================================
153 */
154
155 static u8 adpt_read_blink_led(adpt_hba* host)
156 {
157 if(host->FwDebugBLEDflag_P != 0) {
158 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
159 return readb(host->FwDebugBLEDvalue_P);
160 }
161 }
162 return 0;
163 }
164
165 /*============================================================================
166 * Scsi host template interface functions
167 *============================================================================
168 */
169
/* PCI IDs this driver claims: the original DPT/Adaptec I2O controller
 * and the "Raptor" variant; table exported for module autoloading. */
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci,dptids);
176
177 static int adpt_detect(struct scsi_host_template* sht)
178 {
179 struct pci_dev *pDev = NULL;
180 adpt_hba* pHba;
181
182 adpt_init();
183
184 PINFO("Detecting Adaptec I2O RAID controllers...\n");
185
186 /* search for all Adatpec I2O RAID cards */
187 while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
188 if(pDev->device == PCI_DPT_DEVICE_ID ||
189 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
190 if(adpt_install_hba(sht, pDev) ){
191 PERROR("Could not Init an I2O RAID device\n");
192 PERROR("Will not try to detect others.\n");
193 return hba_count-1;
194 }
195 pci_dev_get(pDev);
196 }
197 }
198
199 /* In INIT state, Activate IOPs */
200 for (pHba = hba_chain; pHba; pHba = pHba->next) {
201 // Activate does get status , init outbound, and get hrt
202 if (adpt_i2o_activate_hba(pHba) < 0) {
203 adpt_i2o_delete_hba(pHba);
204 }
205 }
206
207
208 /* Active IOPs in HOLD state */
209
210 rebuild_sys_tab:
211 if (hba_chain == NULL)
212 return 0;
213
214 /*
215 * If build_sys_table fails, we kill everything and bail
216 * as we can't init the IOPs w/o a system table
217 */
218 if (adpt_i2o_build_sys_table() < 0) {
219 adpt_i2o_sys_shutdown();
220 return 0;
221 }
222
223 PDEBUG("HBA's in HOLD state\n");
224
225 /* If IOP don't get online, we need to rebuild the System table */
226 for (pHba = hba_chain; pHba; pHba = pHba->next) {
227 if (adpt_i2o_online_hba(pHba) < 0) {
228 adpt_i2o_delete_hba(pHba);
229 goto rebuild_sys_tab;
230 }
231 }
232
233 /* Active IOPs now in OPERATIONAL state */
234 PDEBUG("HBA's in OPERATIONAL state\n");
235
236 printk("dpti: If you have a lot of devices this could take a few minutes.\n");
237 for (pHba = hba_chain; pHba; pHba = pHba->next) {
238 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
239 if (adpt_i2o_lct_get(pHba) < 0){
240 adpt_i2o_delete_hba(pHba);
241 continue;
242 }
243
244 if (adpt_i2o_parse_lct(pHba) < 0){
245 adpt_i2o_delete_hba(pHba);
246 continue;
247 }
248 adpt_inquiry(pHba);
249 }
250
251 for (pHba = hba_chain; pHba; pHba = pHba->next) {
252 if( adpt_scsi_register(pHba,sht) < 0){
253 adpt_i2o_delete_hba(pHba);
254 continue;
255 }
256 pHba->initialized = TRUE;
257 pHba->state &= ~DPTI_STATE_RESET;
258 }
259
260 // Register our control device node
261 // nodes will need to be created in /dev to access this
262 // the nodes can not be created from within the driver
263 if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
264 adpt_i2o_sys_shutdown();
265 return 0;
266 }
267 return hba_count;
268 }
269
270
/*
 * scsi_unregister will be called AFTER we return.
 */
/* scsi_host_template->release: tear down the HBA bound to this
 * Scsi_Host, then drop the host registration itself. */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}
282
283
/* Send a SCSI INQUIRY to the adapter itself (HOST_TID -> ADAPTER_TID,
 * interpret mode) and use the 36-byte reply to build the human-readable
 * pHba->detail string ("Vendor: ... Model: ... FW: ...").  Called at
 * init time; errors are logged and a fallback string is used. */
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[14];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = kmalloc(80,GFP_KERNEL|ADDR32);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;		/* standard INQUIRY allocation length */
	direction = 0x00000000;
	scsidir =0x40000000; // DATA IN (iop<--dev)

	reqlen = 14; // SINGLE SGE
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;	/* allocation length, must match len above */
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;	/* 16 bytes of CDB = 4 message words */
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	/* single SGE: last/end-of-buffer flags (0xD0000000) | length,
	 * followed by the bus address of the data buffer */
	*mptr++ = 0xD0000000|direction|len;
	*mptr++ = virt_to_bus(buf);

	// Send it on it's way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		/* On timeout/interrupt the buffer is NOT freed —
		 * NOTE(review): presumably because the controller may still
		 * DMA into it after we gave up waiting; confirm. */
		if (rcode != -ETIME && rcode != -EINTR)
			kfree(buf);
	} else {
		/* splice vendor/model/firmware out of the INQUIRY data */
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		kfree(buf);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
365
366
367 static int adpt_slave_configure(struct scsi_device * device)
368 {
369 struct Scsi_Host *host = device->host;
370 adpt_hba* pHba;
371
372 pHba = (adpt_hba *) host->hostdata[0];
373
374 if (host->can_queue && device->tagged_supported) {
375 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
376 host->can_queue - 1);
377 } else {
378 scsi_adjust_queue_depth(device, 0, 1);
379 }
380 return 0;
381 }
382
/* scsi_host_template->queuecommand: translate a midlayer command into
 * an I2O message and post it to the adapter.  Returns 0 when the
 * command was accepted or completed inline, 1 to ask the midlayer to
 * retry while the HBA is in ioctl/reset state, and FAILED otherwise. */
static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);	/* sense already valid, complete inline */
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();	/* pick up the latest pHba->state written by other CPUs */
	/*
	 * TODO: I need to block here if I am processing ioctl cmds
	 * but if the outstanding cmds all finish before the ioctl,
	 * the scsi-core will not know to start sending cmds to me again.
	 * I need to a way to restart the scsi-cores queues or should I block
	 * calling scsi_done on the outstanding cmds instead
	 * for now we don't set the IOCTL state
	 */
	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;	/* midlayer will retry later */
	}

	// TODO if the cmd->device if offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device. Set up a pointer
		 * to the device structure. This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;	/* cache for subsequent commands */
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}
450
451 static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
452 sector_t capacity, int geom[])
453 {
454 int heads=-1;
455 int sectors=-1;
456 int cylinders=-1;
457
458 // *** First lets set the default geometry ****
459
460 // If the capacity is less than ox2000
461 if (capacity < 0x2000 ) { // floppy
462 heads = 18;
463 sectors = 2;
464 }
465 // else if between 0x2000 and 0x20000
466 else if (capacity < 0x20000) {
467 heads = 64;
468 sectors = 32;
469 }
470 // else if between 0x20000 and 0x40000
471 else if (capacity < 0x40000) {
472 heads = 65;
473 sectors = 63;
474 }
475 // else if between 0x4000 and 0x80000
476 else if (capacity < 0x80000) {
477 heads = 128;
478 sectors = 63;
479 }
480 // else if greater than 0x80000
481 else {
482 heads = 255;
483 sectors = 63;
484 }
485 cylinders = sector_div(capacity, heads * sectors);
486
487 // Special case if CDROM
488 if(sdev->type == 5) { // CDROM
489 heads = 252;
490 sectors = 63;
491 cylinders = 1111;
492 }
493
494 geom[0] = heads;
495 geom[1] = sectors;
496 geom[2] = cylinders;
497
498 PDEBUG("adpt_bios_param: exit\n");
499 return 0;
500 }
501
502
503 static const char *adpt_info(struct Scsi_Host *host)
504 {
505 adpt_hba* pHba;
506
507 pHba = (adpt_hba *) host->hostdata[0];
508 return (char *) (pHba->detail);
509 }
510
/* /proc/scsi read handler: emit driver/adapter information and the
 * per-channel device list into "buffer", honouring the (offset, length)
 * window with the classic begin/pos checkpoint bookkeeping used by
 * old-style proc_info handlers.  Writes (inout == TRUE) are rejected. */
static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
		  int length, int inout)
{
	struct adpt_device* d;
	int id;
	int chan;
	int len = 0;		/* bytes written past "begin" */
	int begin = 0;		/* file offset of buffer[0]'s logical position */
	int pos = 0;		/* logical file position after last write */
	adpt_hba* pHba;
	int unit;

	*start = buffer;
	if (inout == TRUE) {
		/*
		 * The user has done a write and wants us to take the
		 * data in the buffer and do something with it.
		 * proc_scsiwrite calls us with inout = 1
		 *
		 * Read data from buffer (writing to us) - NOT SUPPORTED
		 */
		return -EINVAL;
	}

	/*
	 * inout = 0 means the user has done a read and wants information
	 * returned, so we write information about the cards into the buffer
	 * proc_scsiread() calls us with inout = 0
	 */

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	len = sprintf(buffer , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	len += sprintf(buffer+len, "%s\n", pHba->detail);
	len += sprintf(buffer+len, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	len += sprintf(buffer+len, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	pos = begin + len;

	/* CHECKPOINT */
	if(pos > offset + length) {
		goto stop_output;	/* past the requested window, stop */
	}
	if(pos <= offset) {
		/*
		 * If we haven't even written to where we last left
		 * off (the last time we were called), reset the
		 * beginning pointer.
		 */
		len = 0;
		begin = pos;
	}
	len += sprintf(buffer+len, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d){	/* walk the lun chain at this (chan,id) */
				len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
				len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
				pos = begin + len;


				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				unit = d->pI2o_dev->lct_data.tid;
				len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				pos = begin + len;

				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				d = d->next_lun;
			}
		}
	}

	/*
	 * begin is where we last checked our position with regards to offset
	 * begin is always less than offset.  len is relative to begin.  It
	 * is the number of bytes written past begin
	 *
	 */
stop_output:
	/* stop the output and calculate the correct length */
	*(buffer + len) = '\0';

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if(len > length) {
		len = length;
	} else if(len < 0){
		len = 0;
		**start = '\0';
	}
	return len;
}
635
636
637 /*===========================================================================
638 * Error Handling routines
639 *===========================================================================
640 */
641
/* eh_abort_handler: post an I2O SCSI-abort message for the given
 * command and wait forever for the reply.  Returns SUCCESS/FAILED per
 * the SCSI error-handling contract. */
static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){	/* never actually issued — nothing to abort */
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	/* NOTE(review): the scsi_cmnd pointer is truncated to 32 bits
	 * here, which cannot uniquely identify the command on 64-bit
	 * kernels — confirm how the firmware matches aborts. */
	msg[4] = (u32)cmd;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
	return SUCCESS;
}
681
682
#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
/* eh_device_reset_handler: post an I2O device-reset message for the
 * command's target TID and wait for it.  DPTI_DEV_RESET is set on the
 * device for the duration so adpt_queue() defers new commands to it. */
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	/* hold the host lock over the post; adpt_i2o_post_wait() drops
	 * it internally while sleeping when pHba->host is set */
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}
727
728
#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
/* eh_bus_reset_handler: post an I2O bus-reset message addressed to the
 * TID of the channel the failed command was on, and wait forever. */
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	/* adpt_i2o_post_wait() drops the host lock internally while sleeping */
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}
757
758 // This version of reset is called by the eh_error_handler
759 static int __adpt_reset(struct scsi_cmnd* cmd)
760 {
761 adpt_hba* pHba;
762 int rcode;
763 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
764 printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
765 rcode = adpt_hba_reset(pHba);
766 if(rcode == 0){
767 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
768 return SUCCESS;
769 } else {
770 printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
771 return FAILED;
772 }
773 }
774
/* eh_host_reset_handler: run __adpt_reset() with the host lock held,
 * as the error-handler entry point requires. */
static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
785
// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
/* Re-initialize the IOP from scratch: activate it, rebuild the system
 * table, bring it online, then re-read and re-parse the LCT.  Any step
 * failing deletes the HBA and returns the negative error code; on
 * success previously posted commands are failed back to the midlayer
 * for retry.  Returns 0 on success. */
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	/* DPTI_STATE_RESET makes adpt_queue() hold off new commands */
	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status , init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}
826
827 /*===========================================================================
828 *
829 *===========================================================================
830 */
831
832
833 static void adpt_i2o_sys_shutdown(void)
834 {
835 adpt_hba *pHba, *pNext;
836 struct adpt_i2o_post_wait_data *p1, *old;
837
838 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
839 printk(KERN_INFO" This could take a few minutes if there are many devices attached\n");
840 /* Delete all IOPs from the controller chain */
841 /* They should have already been released by the
842 * scsi-core
843 */
844 for (pHba = hba_chain; pHba; pHba = pNext) {
845 pNext = pHba->next;
846 adpt_i2o_delete_hba(pHba);
847 }
848
849 /* Remove any timedout entries from the wait queue. */
850 // spin_lock_irqsave(&adpt_post_wait_lock, flags);
851 /* Nothing should be outstanding at this point so just
852 * free them
853 */
854 for(p1 = adpt_post_wait_queue; p1;) {
855 old = p1;
856 p1 = p1->next;
857 kfree(old);
858 }
859 // spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
860 adpt_post_wait_queue = NULL;
861
862 printk(KERN_INFO "Adaptec I2O controllers down.\n");
863 }
864
865 /*
866 * reboot/shutdown notification.
867 *
868 * - Quiesce each IOP in the system
869 *
870 */
871
#ifdef REBOOT_NOTIFIER
/* Reboot notifier callback: quiesce all IOPs before the system goes
 * down.  Only restart, halt and power-off events are acted upon. */
static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p)
{

	 if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
		  return NOTIFY_DONE;

	 adpt_i2o_sys_shutdown();

	 return NOTIFY_DONE;
}
#endif
884
885
886 static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
887 {
888
889 adpt_hba* pHba = NULL;
890 adpt_hba* p = NULL;
891 ulong base_addr0_phys = 0;
892 ulong base_addr1_phys = 0;
893 u32 hba_map0_area_size = 0;
894 u32 hba_map1_area_size = 0;
895 void __iomem *base_addr_virt = NULL;
896 void __iomem *msg_addr_virt = NULL;
897
898 int raptorFlag = FALSE;
899
900 if(pci_enable_device(pDev)) {
901 return -EINVAL;
902 }
903
904 if (pci_request_regions(pDev, "dpt_i2o")) {
905 PERROR("dpti: adpt_config_hba: pci request region failed\n");
906 return -EINVAL;
907 }
908
909 pci_set_master(pDev);
910 if (pci_set_dma_mask(pDev, DMA_64BIT_MASK) &&
911 pci_set_dma_mask(pDev, DMA_32BIT_MASK))
912 return -EINVAL;
913
914 base_addr0_phys = pci_resource_start(pDev,0);
915 hba_map0_area_size = pci_resource_len(pDev,0);
916
917 // Check if standard PCI card or single BAR Raptor
918 if(pDev->device == PCI_DPT_DEVICE_ID){
919 if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
920 // Raptor card with this device id needs 4M
921 hba_map0_area_size = 0x400000;
922 } else { // Not Raptor - it is a PCI card
923 if(hba_map0_area_size > 0x100000 ){
924 hba_map0_area_size = 0x100000;
925 }
926 }
927 } else {// Raptor split BAR config
928 // Use BAR1 in this configuration
929 base_addr1_phys = pci_resource_start(pDev,1);
930 hba_map1_area_size = pci_resource_len(pDev,1);
931 raptorFlag = TRUE;
932 }
933
934 base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
935 if (!base_addr_virt) {
936 pci_release_regions(pDev);
937 PERROR("dpti: adpt_config_hba: io remap failed\n");
938 return -EINVAL;
939 }
940
941 if(raptorFlag == TRUE) {
942 msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
943 if (!msg_addr_virt) {
944 PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
945 iounmap(base_addr_virt);
946 pci_release_regions(pDev);
947 return -EINVAL;
948 }
949 } else {
950 msg_addr_virt = base_addr_virt;
951 }
952
953 // Allocate and zero the data structure
954 pHba = kmalloc(sizeof(adpt_hba), GFP_KERNEL);
955 if( pHba == NULL) {
956 if(msg_addr_virt != base_addr_virt){
957 iounmap(msg_addr_virt);
958 }
959 iounmap(base_addr_virt);
960 pci_release_regions(pDev);
961 return -ENOMEM;
962 }
963 memset(pHba, 0, sizeof(adpt_hba));
964
965 mutex_lock(&adpt_configuration_lock);
966
967 if(hba_chain != NULL){
968 for(p = hba_chain; p->next; p = p->next);
969 p->next = pHba;
970 } else {
971 hba_chain = pHba;
972 }
973 pHba->next = NULL;
974 pHba->unit = hba_count;
975 sprintf(pHba->name, "dpti%d", hba_count);
976 hba_count++;
977
978 mutex_unlock(&adpt_configuration_lock);
979
980 pHba->pDev = pDev;
981 pHba->base_addr_phys = base_addr0_phys;
982
983 // Set up the Virtual Base Address of the I2O Device
984 pHba->base_addr_virt = base_addr_virt;
985 pHba->msg_addr_virt = msg_addr_virt;
986 pHba->irq_mask = base_addr_virt+0x30;
987 pHba->post_port = base_addr_virt+0x40;
988 pHba->reply_port = base_addr_virt+0x44;
989
990 pHba->hrt = NULL;
991 pHba->lct = NULL;
992 pHba->lct_size = 0;
993 pHba->status_block = NULL;
994 pHba->post_count = 0;
995 pHba->state = DPTI_STATE_RESET;
996 pHba->pDev = pDev;
997 pHba->devices = NULL;
998
999 // Initializing the spinlocks
1000 spin_lock_init(&pHba->state_lock);
1001 spin_lock_init(&adpt_post_wait_lock);
1002
1003 if(raptorFlag == 0){
1004 printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n",
1005 hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
1006 } else {
1007 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
1008 printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1009 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1010 }
1011
1012 if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
1013 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1014 adpt_i2o_delete_hba(pHba);
1015 return -EINVAL;
1016 }
1017
1018 return 0;
1019 }
1020
1021
/* Unlink an HBA from the global chain and free everything it owns:
 * IRQ, BAR mappings, PCI regions, firmware tables (hrt/lct/status/
 * reply pool), the device lists and the pci_dev reference taken in
 * adpt_detect().  The last HBA to go also unregisters the management
 * character device. */
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiese
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	/* unlink pHba from the singly linked hba_chain */
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);	/* split-BAR (Raptor) mapping */
	}
	kfree(pHba->hrt);
	kfree(pHba->lct);
	kfree(pHba->status_block);
	kfree(pHba->reply_pool);

	/* free the flat i2o_device list... */
	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	/* ...and the per-channel, per-id lun chains */
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);	/* drop the ref taken in adpt_detect() */
	kfree(pHba);

	/* last adapter gone: remove the management chardev too */
	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
	}
}
1086
1087
/* One-time driver init: announce the driver version and, when built
 * with REBOOT_NOTIFIER, hook the reboot notifier so IOPs are shut down
 * cleanly.  Always returns 0. */
static int adpt_init(void)
{
	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
#ifdef REBOOT_NOTIFIER
	register_reboot_notifier(&adpt_reboot_notifier);
#endif

	return 0;
}
1097
1098
1099 static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1100 {
1101 struct adpt_device* d;
1102
1103 if(chan < 0 || chan >= MAX_CHANNEL)
1104 return NULL;
1105
1106 if( pHba->channel[chan].device == NULL){
1107 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1108 return NULL;
1109 }
1110
1111 d = pHba->channel[chan].device[id];
1112 if(!d || d->tid == 0) {
1113 return NULL;
1114 }
1115
1116 /* If it is the only lun at that address then this should match*/
1117 if(d->scsi_lun == lun){
1118 return d;
1119 }
1120
1121 /* else we need to look through all the luns */
1122 for(d=d->next_lun ; d ; d = d->next_lun){
1123 if(d->scsi_lun == lun){
1124 return d;
1125 }
1126 }
1127 return NULL;
1128 }
1129
1130
/*
 * Post an I2O message to the IOP and sleep until its reply arrives or
 * `timeout` (seconds; 0 = wait forever) expires.  The ISR delivers the
 * reply status through adpt_i2o_post_wait_complete() via the global
 * adpt_post_wait_queue list, keyed by a 15-bit id stuffed into msg[2].
 *
 * Returns I2O status on success, -ENOMEM, -ETIMEDOUT (never posted a
 * reply id), -ETIME (posted but reply overdue; resources deliberately
 * NOT freed), or -EOPNOTSUPP.
 */
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	/* id is kept to 15 bits: bit 31 of the context marks "post wait"
	 * and bit 30 marks "ioctl" in the ISR's routing logic. */
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	/* Tag the message's initiator context so the ISR routes the reply
	 * back to adpt_i2o_post_wait_complete(). */
	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		/* Drop the host lock while sleeping so the ISR (which takes
		 * the same lock) can run and complete us. */
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			/* Unlink: fix up predecessor or list head. */
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
1216
1217
1218 static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1219 {
1220
1221 u32 m = EMPTY_QUEUE;
1222 u32 __iomem *msg;
1223 ulong timeout = jiffies + 30*HZ;
1224 do {
1225 rmb();
1226 m = readl(pHba->post_port);
1227 if (m != EMPTY_QUEUE) {
1228 break;
1229 }
1230 if(time_after(jiffies,timeout)){
1231 printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1232 return -ETIMEDOUT;
1233 }
1234 schedule_timeout_uninterruptible(1);
1235 } while(m == EMPTY_QUEUE);
1236
1237 msg = pHba->msg_addr_virt + m;
1238 memcpy_toio(msg, data, len);
1239 wmb();
1240
1241 //post message
1242 writel(m, pHba->post_port);
1243 wmb();
1244
1245 return 0;
1246 }
1247
1248
1249 static void adpt_i2o_post_wait_complete(u32 context, int status)
1250 {
1251 struct adpt_i2o_post_wait_data *p1 = NULL;
1252 /*
1253 * We need to search through the adpt_post_wait
1254 * queue to see if the given message is still
1255 * outstanding. If not, it means that the IOP
1256 * took longer to respond to the message than we
1257 * had allowed and timer has already expired.
1258 * Not much we can do about that except log
1259 * it for debug purposes, increase timeout, and recompile
1260 *
1261 * Lock needed to keep anyone from moving queue pointers
1262 * around while we're looking through them.
1263 */
1264
1265 context &= 0x7fff;
1266
1267 spin_lock(&adpt_post_wait_lock);
1268 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1269 if(p1->id == context) {
1270 p1->status = status;
1271 spin_unlock(&adpt_post_wait_lock);
1272 wake_up_interruptible(p1->wq);
1273 return;
1274 }
1275 }
1276 spin_unlock(&adpt_post_wait_lock);
1277 // If this happens we lose commands that probably really completed
1278 printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1279 printk(KERN_DEBUG" Tasks in wait queue:\n");
1280 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1281 printk(KERN_DEBUG" %d\n",p1->id);
1282 }
1283 return;
1284 }
1285
1286 static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1287 {
1288 u32 msg[8];
1289 u8* status;
1290 u32 m = EMPTY_QUEUE ;
1291 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1292
1293 if(pHba->initialized == FALSE) { // First time reset should be quick
1294 timeout = jiffies + (25*HZ);
1295 } else {
1296 adpt_i2o_quiesce_hba(pHba);
1297 }
1298
1299 do {
1300 rmb();
1301 m = readl(pHba->post_port);
1302 if (m != EMPTY_QUEUE) {
1303 break;
1304 }
1305 if(time_after(jiffies,timeout)){
1306 printk(KERN_WARNING"Timeout waiting for message!\n");
1307 return -ETIMEDOUT;
1308 }
1309 schedule_timeout_uninterruptible(1);
1310 } while (m == EMPTY_QUEUE);
1311
1312 status = kmalloc(4, GFP_KERNEL|ADDR32);
1313 if(status == NULL) {
1314 adpt_send_nop(pHba, m);
1315 printk(KERN_ERR"IOP reset failed - no free memory.\n");
1316 return -ENOMEM;
1317 }
1318 memset(status,0,4);
1319
1320 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1321 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1322 msg[2]=0;
1323 msg[3]=0;
1324 msg[4]=0;
1325 msg[5]=0;
1326 msg[6]=virt_to_bus(status);
1327 msg[7]=0;
1328
1329 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1330 wmb();
1331 writel(m, pHba->post_port);
1332 wmb();
1333
1334 while(*status == 0){
1335 if(time_after(jiffies,timeout)){
1336 printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1337 kfree(status);
1338 return -ETIMEDOUT;
1339 }
1340 rmb();
1341 schedule_timeout_uninterruptible(1);
1342 }
1343
1344 if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1345 PDEBUG("%s: Reset in progress...\n", pHba->name);
1346 // Here we wait for message frame to become available
1347 // indicated that reset has finished
1348 do {
1349 rmb();
1350 m = readl(pHba->post_port);
1351 if (m != EMPTY_QUEUE) {
1352 break;
1353 }
1354 if(time_after(jiffies,timeout)){
1355 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1356 return -ETIMEDOUT;
1357 }
1358 schedule_timeout_uninterruptible(1);
1359 } while (m == EMPTY_QUEUE);
1360 // Flush the offset
1361 adpt_send_nop(pHba, m);
1362 }
1363 adpt_i2o_status_get(pHba);
1364 if(*status == 0x02 ||
1365 pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1366 printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1367 pHba->name);
1368 } else {
1369 PDEBUG("%s: Reset completed.\n", pHba->name);
1370 }
1371
1372 kfree(status);
1373 #ifdef UARTDELAY
1374 // This delay is to allow someone attached to the card through the debug UART to
1375 // set up the dump levels that they want before the rest of the initialization sequence
1376 adpt_delay(20000);
1377 #endif
1378 return 0;
1379 }
1380
1381
1382 static int adpt_i2o_parse_lct(adpt_hba* pHba)
1383 {
1384 int i;
1385 int max;
1386 int tid;
1387 struct i2o_device *d;
1388 i2o_lct *lct = pHba->lct;
1389 u8 bus_no = 0;
1390 s16 scsi_id;
1391 s16 scsi_lun;
1392 u32 buf[10]; // larger than 7, or 8 ...
1393 struct adpt_device* pDev;
1394
1395 if (lct == NULL) {
1396 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1397 return -1;
1398 }
1399
1400 max = lct->table_size;
1401 max -= 3;
1402 max /= 9;
1403
1404 for(i=0;i<max;i++) {
1405 if( lct->lct_entry[i].user_tid != 0xfff){
1406 /*
1407 * If we have hidden devices, we need to inform the upper layers about
1408 * the possible maximum id reference to handle device access when
1409 * an array is disassembled. This code has no other purpose but to
1410 * allow us future access to devices that are currently hidden
1411 * behind arrays, hotspares or have not been configured (JBOD mode).
1412 */
1413 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1414 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1415 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1416 continue;
1417 }
1418 tid = lct->lct_entry[i].tid;
1419 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1420 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1421 continue;
1422 }
1423 bus_no = buf[0]>>16;
1424 scsi_id = buf[1];
1425 scsi_lun = (buf[2]>>8 )&0xff;
1426 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1427 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1428 continue;
1429 }
1430 if (scsi_id >= MAX_ID){
1431 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1432 continue;
1433 }
1434 if(bus_no > pHba->top_scsi_channel){
1435 pHba->top_scsi_channel = bus_no;
1436 }
1437 if(scsi_id > pHba->top_scsi_id){
1438 pHba->top_scsi_id = scsi_id;
1439 }
1440 if(scsi_lun > pHba->top_scsi_lun){
1441 pHba->top_scsi_lun = scsi_lun;
1442 }
1443 continue;
1444 }
1445 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1446 if(d==NULL)
1447 {
1448 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1449 return -ENOMEM;
1450 }
1451
1452 d->controller = pHba;
1453 d->next = NULL;
1454
1455 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1456
1457 d->flags = 0;
1458 tid = d->lct_data.tid;
1459 adpt_i2o_report_hba_unit(pHba, d);
1460 adpt_i2o_install_device(pHba, d);
1461 }
1462 bus_no = 0;
1463 for(d = pHba->devices; d ; d = d->next) {
1464 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
1465 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
1466 tid = d->lct_data.tid;
1467 // TODO get the bus_no from hrt-but for now they are in order
1468 //bus_no =
1469 if(bus_no > pHba->top_scsi_channel){
1470 pHba->top_scsi_channel = bus_no;
1471 }
1472 pHba->channel[bus_no].type = d->lct_data.class_id;
1473 pHba->channel[bus_no].tid = tid;
1474 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1475 {
1476 pHba->channel[bus_no].scsi_id = buf[1];
1477 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1478 }
1479 // TODO remove - this is just until we get from hrt
1480 bus_no++;
1481 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1482 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1483 break;
1484 }
1485 }
1486 }
1487
1488 // Setup adpt_device table
1489 for(d = pHba->devices; d ; d = d->next) {
1490 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1491 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
1492 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1493
1494 tid = d->lct_data.tid;
1495 scsi_id = -1;
1496 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1497 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1498 bus_no = buf[0]>>16;
1499 scsi_id = buf[1];
1500 scsi_lun = (buf[2]>>8 )&0xff;
1501 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1502 continue;
1503 }
1504 if (scsi_id >= MAX_ID) {
1505 continue;
1506 }
1507 if( pHba->channel[bus_no].device[scsi_id] == NULL){
1508 pDev = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
1509 if(pDev == NULL) {
1510 return -ENOMEM;
1511 }
1512 pHba->channel[bus_no].device[scsi_id] = pDev;
1513 memset(pDev,0,sizeof(struct adpt_device));
1514 } else {
1515 for( pDev = pHba->channel[bus_no].device[scsi_id];
1516 pDev->next_lun; pDev = pDev->next_lun){
1517 }
1518 pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
1519 if(pDev->next_lun == NULL) {
1520 return -ENOMEM;
1521 }
1522 memset(pDev->next_lun,0,sizeof(struct adpt_device));
1523 pDev = pDev->next_lun;
1524 }
1525 pDev->tid = tid;
1526 pDev->scsi_channel = bus_no;
1527 pDev->scsi_id = scsi_id;
1528 pDev->scsi_lun = scsi_lun;
1529 pDev->pI2o_dev = d;
1530 d->owner = pDev;
1531 pDev->type = (buf[0])&0xff;
1532 pDev->flags = (buf[0]>>8)&0xff;
1533 if(scsi_id > pHba->top_scsi_id){
1534 pHba->top_scsi_id = scsi_id;
1535 }
1536 if(scsi_lun > pHba->top_scsi_lun){
1537 pHba->top_scsi_lun = scsi_lun;
1538 }
1539 }
1540 if(scsi_id == -1){
1541 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1542 d->lct_data.identity_tag);
1543 }
1544 }
1545 }
1546 return 0;
1547 }
1548
1549
1550 /*
1551 * Each I2O controller has a chain of devices on it - these match
1552 * the useful parts of the LCT of the board.
1553 */
1554
1555 static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1556 {
1557 mutex_lock(&adpt_configuration_lock);
1558 d->controller=pHba;
1559 d->owner=NULL;
1560 d->next=pHba->devices;
1561 d->prev=NULL;
1562 if (pHba->devices != NULL){
1563 pHba->devices->prev=d;
1564 }
1565 pHba->devices=d;
1566 *d->dev_name = 0;
1567
1568 mutex_unlock(&adpt_configuration_lock);
1569 return 0;
1570 }
1571
1572 static int adpt_open(struct inode *inode, struct file *file)
1573 {
1574 int minor;
1575 adpt_hba* pHba;
1576
1577 //TODO check for root access
1578 //
1579 minor = iminor(inode);
1580 if (minor >= hba_count) {
1581 return -ENXIO;
1582 }
1583 mutex_lock(&adpt_configuration_lock);
1584 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1585 if (pHba->unit == minor) {
1586 break; /* found adapter */
1587 }
1588 }
1589 if (pHba == NULL) {
1590 mutex_unlock(&adpt_configuration_lock);
1591 return -ENXIO;
1592 }
1593
1594 // if(pHba->in_use){
1595 // mutex_unlock(&adpt_configuration_lock);
1596 // return -EBUSY;
1597 // }
1598
1599 pHba->in_use = 1;
1600 mutex_unlock(&adpt_configuration_lock);
1601
1602 return 0;
1603 }
1604
1605 static int adpt_close(struct inode *inode, struct file *file)
1606 {
1607 int minor;
1608 adpt_hba* pHba;
1609
1610 minor = iminor(inode);
1611 if (minor >= hba_count) {
1612 return -ENXIO;
1613 }
1614 mutex_lock(&adpt_configuration_lock);
1615 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1616 if (pHba->unit == minor) {
1617 break; /* found adapter */
1618 }
1619 }
1620 mutex_unlock(&adpt_configuration_lock);
1621 if (pHba == NULL) {
1622 return -ENXIO;
1623 }
1624
1625 pHba->in_use = 0;
1626
1627 return 0;
1628 }
1629
1630
/*
 * I2OUSRCMD ioctl: execute a user-supplied I2O message frame.
 * The user frame is copied in, each simple SG element is double-
 * buffered in kernel low memory, the message is posted with post-wait
 * semantics, and the reply plus any device-to-host SG data are copied
 * back to user space.  Returns 0 or a negative errno.
 */
static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void *sg_list[pHba->sg_tablesize];	/* kernel bounce buffers, one per SG element */
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;	/* message size lives in the top half of word 0 */

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kmalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	memset(reply,0,REPLY_FRAME_SIZE*4);
	sg_offset = (msg[0]>>4)&0xf;	/* SG list offset, in words, from frame word 0 */
	msg[2] = 0x40000000; // IOCTL context
	/* NOTE(review): pointer truncated to 32 bits here on 64-bit
	 * platforms - matches the "TODO 64bit fix" notes below. */
	msg[3] = (u32)reply;
	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
	if(sg_offset) {
		// TODO 64bit fix
		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			kfree (reply);
			return -EINVAL;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			/* Only simple (flat) SG elements are supported. */
			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// TODO 64bit fix
				if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			//TODO 64bit fix
			sg[i].addr_bus = (u32)virt_to_bus(p);
		}
	}

	/* Retry the post-wait for as long as it times out. */
	do {
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		// This state stops any new commands from enterring the
		// controller while processing the ioctl
//		pHba->state |= DPTI_STATE_IOCTL;
//	We can't set this now - The scsi subsystem sets host_blocked and
//	the queue empties and stops.  We need a way to restart the queue
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
//		pHba->state &= ~DPTI_STATE_IOCTL;
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
	} while(rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		/* Copy back the Scatter Gather buffers back to user space */
		u32 j;
		// TODO 64bit fix
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO 64bit fix
		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// TODO 64bit fix
				if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	/* On -ETIME/-EINTR the IOP may still DMA into these buffers, so
	 * they are deliberately leaked rather than freed. */
	if (rcode != -ETIME && rcode != -EINTR)
		kfree (reply);
	while(sg_index) {
		if(sg_list[--sg_index]) {
			if (rcode != -ETIME && rcode != -EINTR)
				kfree(sg_list[sg_index]);
		}
	}
	return rcode;
}
1808
1809
1810 /*
1811 * This routine returns information about the system. This does not effect
1812 * any logic and if the info is wrong - it doesn't matter.
1813 */
1814
1815 /* Get all the info we can not get from kernel services */
/*
 * DPT_SYSINFO ioctl helper: fill a sysInfo_S with OS/CPU identification
 * for the DPT management tools and copy it to user space.
 * Returns 0 or -EFAULT.
 */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	/* Full memset (not a field-by-field initializer) so structure
	 * padding is zeroed and no kernel stack bytes leak to user space
	 * via the copy_to_user below. */
	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

/* Pick the per-architecture CPU reporter; unknown arches report 0xff. */
#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff ;
#endif
	if(copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}
1847
#if defined __ia64__
/* Report the CPU type for the DPT_SYSINFO ioctl on ia64. */
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif
1857
1858
#if defined __sparc__
/* Report the CPU type for the DPT_SYSINFO ioctl on sparc. */
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif
1868
#if defined __alpha__
/* Report the CPU type for the DPT_SYSINFO ioctl on alpha. */
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif
1878
#if defined __i386__

/* Report the boot CPU family for the DPT_SYSINFO ioctl on i386. */
static void adpt_i386_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	if (boot_cpu_data.x86 == CPU_386) {
		si->processorType = PROC_386;
	} else if (boot_cpu_data.x86 == CPU_486) {
		si->processorType = PROC_486;
	} else {
		/* CPU_586 and anything unrecognized report as Pentium,
		 * matching the old switch's CPU_586 and default arms. */
		si->processorType = PROC_PENTIUM;
	}
}

#endif
1903
1904
/*
 * Character-device ioctl entry point for the DPT engine: resolves the
 * minor number to an adapter, waits out any in-flight reset, then
 * dispatches signature/info/passthru/reset/rescan commands.
 */
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
	      ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	/* Sleep in 2-jiffy steps until any in-flight adapter reset
	 * clears; the volatile cast forces a re-read of pHba->state
	 * on every iteration. */
	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	// TODO: handle 3 cases
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	case I2ORESETCMD:
		/* Hold the host lock so no new I/O is queued while the
		 * adapter resets. */
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		adpt_hba_reset(pHba);
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
		break;
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}
1988
1989
/*
 * Interrupt handler: drains the outbound reply FIFO while the
 * interrupt-pending bit is set, routing each reply either to the
 * ioctl/post-wait completion path or to SCSI command completion,
 * then returns the reply frame to the controller.
 */
static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status=0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			// Try twice then give up
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){
				// This really should not happen
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				goto out;
			}
		}
		reply = bus_to_virt(m);

		if (readl(reply) & MSG_FAIL) {
			u32 old_m = readl(reply+28);	/* preserved MFA of the failed request */
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m,pHba->reply_port);
				continue;
			}
			// Transaction context is 0 in failed reply frame
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		/* Initiator context: bit 31 = post-wait, bit 30 = ioctl. */
		context = readl(reply+8);
		if(context & 0x40000000){ // IOCTL
			void *p = (void *)readl(reply+12);
			if( p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
			// All IOCTLs will also be post wait
		}
		if(context & 0x80000000){ // Post wait message
			status = readl(reply+16);
			if(status >> 24){
				status &= 0xffff; /* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			if(!(context & 0x40000000)) {
				cmd = (struct scsi_cmnd*) readl(reply+12);
				if(cmd != NULL) {
					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else { // SCSI message
			cmd = (struct scsi_cmnd*) readl(reply+12);
			if(cmd != NULL){
				if(cmd->serial_number != 0) { // If not timedout
					adpt_i2o_to_scsi(reply, cmd);
				}
			}
		}
		/* Hand the processed reply frame back to the controller. */
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
2077
/*
 * Translate a SCSI midlayer command into a private I2O SCSI_EXEC
 * message (CDB + scatter/gather list) and post it to the IOP.
 * Returns 0 on success or the error from adpt_i2o_post_this().
 */
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	s32 rcode;

	memset(msg, 0 , sizeof(msg));
	len = cmd->request_bufflen;
	direction = 0x00000000;

	scsidir = 0x00000000;			// DATA NO XFER
	if(len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 * Note: Do not have to verify index is less than 0 since
		 * cmd->cmnd[0] is an unsigned char
		 */
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir =0x40000000;	// DATA IN  (iop<--dev)
			break;
		case DMA_TO_DEVICE:
			direction=0x04000000;	// SGL OUT
			scsidir =0x80000000;	// DATA OUT (iop-->dev)
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir =0x40000000;	// DATA IN  (iop<--dev)
			// Assume In - and continue;
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
			     pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 0;
		}
	}
	// msg[0] is set later
	// I2O_CMD_SCSI_EXEC
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	/* NOTE(review): pointer stored in a 32-bit word - truncates on
	 * 64-bit, consistent with the other "TODO 64bit fix" spots. */
	msg[3] = (u32)cmd;	/* We want the SCSI control block back */
	// Our cards use the transaction context as the tag for queueing
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr=msg+7;

	// Write SCSI command into the message - always 16 byte block
	memset(mptr, 0, 16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */
	reqlen = 14;		// SINGLE SGE
	/* Now fill in the SGList and command */
	if(cmd->use_sg) {
		struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
		int sg_count = pci_map_sg(pHba->pDev, sg, cmd->use_sg,
				cmd->sc_data_direction);


		len = 0;
		/* One simple SG element per mapped segment. */
		for(i = 0 ; i < sg_count; i++) {
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len+=sg_dma_len(sg);
			*mptr++ = sg_dma_address(sg);
			sg++;
		}
		/* Make this an end of list */
		mptr[-2] = direction|0xD0000000|sg_dma_len(sg-1);
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = cmd->request_bufflen;
		if(len == 0) {
			reqlen = 12;	/* no SG element at all */
		} else {
			/* Single flat buffer: one terminating SG element. */
			*mptr++ = 0xD0000000|direction|cmd->request_bufflen;
			*mptr++ = pci_map_single(pHba->pDev,
				cmd->request_buffer,
				cmd->request_bufflen,
				cmd->sc_data_direction);
		}
	}

	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	// Send it on it's way
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
2193
2194
2195 static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
2196 {
2197 struct Scsi_Host *host = NULL;
2198
2199 host = scsi_register(sht, sizeof(adpt_hba*));
2200 if (host == NULL) {
2201 printk ("%s: scsi_register returned NULL\n",pHba->name);
2202 return -1;
2203 }
2204 host->hostdata[0] = (unsigned long)pHba;
2205 pHba->host = host;
2206
2207 host->irq = pHba->pDev->irq;
2208 /* no IO ports, so don't have to set host->io_port and
2209 * host->n_io_port
2210 */
2211 host->io_port = 0;
2212 host->n_io_port = 0;
2213 /* see comments in scsi_host.h */
2214 host->max_id = 16;
2215 host->max_lun = 256;
2216 host->max_channel = pHba->top_scsi_channel + 1;
2217 host->cmd_per_lun = 1;
2218 host->unique_id = (uint) pHba;
2219 host->sg_tablesize = pHba->sg_tablesize;
2220 host->can_queue = pHba->post_fifo_size;
2221
2222 return 0;
2223 }
2224
2225
2226 static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2227 {
2228 adpt_hba* pHba;
2229 u32 hba_status;
2230 u32 dev_status;
2231 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2232 // I know this would look cleaner if I just read bytes
2233 // but the model I have been using for all the rest of the
2234 // io is in 4 byte words - so I keep that model
2235 u16 detailed_status = readl(reply+16) &0xffff;
2236 dev_status = (detailed_status & 0xff);
2237 hba_status = detailed_status >> 8;
2238
2239 // calculate resid for sg
2240 cmd->resid = cmd->request_bufflen - readl(reply+5);
2241
2242 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2243
2244 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2245
2246 if(!(reply_flags & MSG_FAIL)) {
2247 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2248 case I2O_SCSI_DSC_SUCCESS:
2249 cmd->result = (DID_OK << 16);
2250 // handle underflow
2251 if(readl(reply+5) < cmd->underflow ) {
2252 cmd->result = (DID_ERROR <<16);
2253 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2254 }
2255 break;
2256 case I2O_SCSI_DSC_REQUEST_ABORTED:
2257 cmd->result = (DID_ABORT << 16);
2258 break;
2259 case I2O_SCSI_DSC_PATH_INVALID:
2260 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2261 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2262 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2263 case I2O_SCSI_DSC_NO_ADAPTER:
2264 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2265 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2266 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2267 cmd->result = (DID_TIME_OUT << 16);
2268 break;
2269 case I2O_SCSI_DSC_ADAPTER_BUSY:
2270 case I2O_SCSI_DSC_BUS_BUSY:
2271 cmd->result = (DID_BUS_BUSY << 16);
2272 break;
2273 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2274 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2275 cmd->result = (DID_RESET << 16);
2276 break;
2277 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2278 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2279 cmd->result = (DID_PARITY << 16);
2280 break;
2281 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2282 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2283 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2284 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2285 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2286 case I2O_SCSI_DSC_DATA_OVERRUN:
2287 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2288 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2289 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2290 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2291 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2292 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2293 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2294 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2295 case I2O_SCSI_DSC_INVALID_CDB:
2296 case I2O_SCSI_DSC_LUN_INVALID:
2297 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2298 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2299 case I2O_SCSI_DSC_NO_NEXUS:
2300 case I2O_SCSI_DSC_CDB_RECEIVED:
2301 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2302 case I2O_SCSI_DSC_QUEUE_FROZEN:
2303 case I2O_SCSI_DSC_REQUEST_INVALID:
2304 default:
2305 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2306 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2307 hba_status, dev_status, cmd->cmnd[0]);
2308 cmd->result = (DID_ERROR << 16);
2309 break;
2310 }
2311
2312 // copy over the request sense data if it was a check
2313 // condition status
2314 if(dev_status == 0x02 /*CHECK_CONDITION*/) {
2315 u32 len = sizeof(cmd->sense_buffer);
2316 len = (len > 40) ? 40 : len;
2317 // Copy over the sense data
2318 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2319 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2320 cmd->sense_buffer[2] == DATA_PROTECT ){
2321 /* This is to handle an array failed */
2322 cmd->result = (DID_TIME_OUT << 16);
2323 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2324 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2325 hba_status, dev_status, cmd->cmnd[0]);
2326
2327 }
2328 }
2329 } else {
2330 /* In this condtion we could not talk to the tid
2331 * the card rejected it. We should signal a retry
2332 * for a limitted number of retries.
2333 */
2334 cmd->result = (DID_TIME_OUT << 16);
2335 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2336 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2337 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2338 }
2339
2340 cmd->result |= (dev_status);
2341
2342 if(cmd->scsi_done != NULL){
2343 cmd->scsi_done(cmd);
2344 }
2345 return cmd->result;
2346 }
2347
2348
2349 static s32 adpt_rescan(adpt_hba* pHba)
2350 {
2351 s32 rcode;
2352 ulong flags = 0;
2353
2354 if(pHba->host)
2355 spin_lock_irqsave(pHba->host->host_lock, flags);
2356 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2357 goto out;
2358 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2359 goto out;
2360 rcode = 0;
2361 out: if(pHba->host)
2362 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2363 return rcode;
2364 }
2365
2366
/*
 * Reconcile a freshly read LCT (logical configuration table) with the
 * driver's per-channel device tables:
 *  - new LCT entries get i2o_device/adpt_device structures allocated
 *    and linked in,
 *  - entries we already know are marked online again (and their TID
 *    refreshed if the IOP renumbered them),
 *  - devices that vanished from the LCT are marked offline.
 *
 * Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): called with the host lock held from adpt_rescan(), yet
 * it allocates with GFP_KERNEL - confirm this cannot sleep under the
 * spinlock.
 */
static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10]; // at least 8 u32's
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	/* table_size is in 32-bit words: 3 words of header followed by
	 * 9 words per entry. */
	max = lct->table_size;
	max -= 3;
	max /= 9;

	// Mark each drive as unscanned
	for (d = pHba->devices; d; d = d->next) {
		pDev =(struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);

	for(i=0;i<max;i++) {
		/* Only process entries assigned to the host (user_tid 0xfff) */
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			/* Ask the device for its bus/id/lun address */
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			/* NOTE(review): bus_no/scsi_id come from firmware and
			 * index the channel table here BEFORE the
			 * bus_no < MAX_CHANNEL check further down - confirm the
			 * firmware cannot report an out-of-range address. */
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* da lun */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if(!pDev ) { // Something new add it
				d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
				if(d==NULL)
				{
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
					continue;
				}
				pDev = pHba->channel[bus_no].device[scsi_id];
				if( pDev == NULL){
					/* First LUN on this (bus,id) slot */
					pDev = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					/* Append to the existing LUN chain */
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				memset(pDev,0,sizeof(struct adpt_device));
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				// Too late, SCSI system has made up it's mind, but what the hey ...
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			} // end of new i2o device

			// We found an old device - check it
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
								pHba->name,bus_no,scsi_id,scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) { // something changed
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					// Found it - mark it scanned
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev =(struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		// Drive offline drives that previously existed but could not be found
		// in the LCT table
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}
2525
2526 static void adpt_fail_posted_scbs(adpt_hba* pHba)
2527 {
2528 struct scsi_cmnd* cmd = NULL;
2529 struct scsi_device* d = NULL;
2530
2531 shost_for_each_device(d, pHba->host) {
2532 unsigned long flags;
2533 spin_lock_irqsave(&d->list_lock, flags);
2534 list_for_each_entry(cmd, &d->cmd_list, list) {
2535 if(cmd->serial_number == 0){
2536 continue;
2537 }
2538 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2539 cmd->scsi_done(cmd);
2540 }
2541 spin_unlock_irqrestore(&d->list_lock, flags);
2542 }
2543 }
2544
2545
2546 /*============================================================================
2547 * Routines from i2o subsystem
2548 *============================================================================
2549 */
2550
2551
2552
/*
 * Bring an I2O controller into HOLD state.  See the spec.
 *
 * If the IOP was previously initialized it is re-validated: an
 * unresponsive IOP gets one reset and a re-query; a FAULTED IOP is
 * fatal; an IOP already past RESET is reset back down.  A fresh IOP is
 * reset outright.  The outbound queue is then initialized and the
 * hardware resource table fetched, leaving the IOP in HOLD.
 *
 * Returns 0 on success, -1 or the reset status on failure.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		if (adpt_i2o_status_get(pHba) < 0) {
			/* Unresponsive - try one reset, then re-query */
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		/* Any already-running (or failed) state has to go back
		 * through RESET before we can take the IOP to HOLD. */
		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		/* First activation: always start from a clean reset */
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}

	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */

	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}
2607
2608 /*
2609 * Bring a controller online into OPERATIONAL state.
2610 */
2611
2612 static int adpt_i2o_online_hba(adpt_hba* pHba)
2613 {
2614 if (adpt_i2o_systab_send(pHba) < 0) {
2615 adpt_i2o_delete_hba(pHba);
2616 return -1;
2617 }
2618 /* In READY state */
2619
2620 if (adpt_i2o_enable_hba(pHba) < 0) {
2621 adpt_i2o_delete_hba(pHba);
2622 return -1;
2623 }
2624
2625 /* In OPERATIONAL state */
2626 return 0;
2627 }
2628
2629 static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2630 {
2631 u32 __iomem *msg;
2632 ulong timeout = jiffies + 5*HZ;
2633
2634 while(m == EMPTY_QUEUE){
2635 rmb();
2636 m = readl(pHba->post_port);
2637 if(m != EMPTY_QUEUE){
2638 break;
2639 }
2640 if(time_after(jiffies,timeout)){
2641 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2642 return 2;
2643 }
2644 schedule_timeout_uninterruptible(1);
2645 }
2646 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2647 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2648 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2649 writel( 0,&msg[2]);
2650 wmb();
2651
2652 writel(m, pHba->post_port);
2653 wmb();
2654 return 0;
2655 }
2656
2657 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2658 {
2659 u8 *status;
2660 u32 __iomem *msg = NULL;
2661 int i;
2662 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2663 u32* ptr;
2664 u32 outbound_frame; // This had to be a 32 bit address
2665 u32 m;
2666
2667 do {
2668 rmb();
2669 m = readl(pHba->post_port);
2670 if (m != EMPTY_QUEUE) {
2671 break;
2672 }
2673
2674 if(time_after(jiffies,timeout)){
2675 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2676 return -ETIMEDOUT;
2677 }
2678 schedule_timeout_uninterruptible(1);
2679 } while(m == EMPTY_QUEUE);
2680
2681 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2682
2683 status = kmalloc(4,GFP_KERNEL|ADDR32);
2684 if (status==NULL) {
2685 adpt_send_nop(pHba, m);
2686 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2687 pHba->name);
2688 return -ENOMEM;
2689 }
2690 memset(status, 0, 4);
2691
2692 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2693 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2694 writel(0, &msg[2]);
2695 writel(0x0106, &msg[3]); /* Transaction context */
2696 writel(4096, &msg[4]); /* Host page frame size */
2697 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2698 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2699 writel(virt_to_bus(status), &msg[7]);
2700
2701 writel(m, pHba->post_port);
2702 wmb();
2703
2704 // Wait for the reply status to come back
2705 do {
2706 if (*status) {
2707 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2708 break;
2709 }
2710 }
2711 rmb();
2712 if(time_after(jiffies,timeout)){
2713 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2714 return -ETIMEDOUT;
2715 }
2716 schedule_timeout_uninterruptible(1);
2717 } while (1);
2718
2719 // If the command was successful, fill the fifo with our reply
2720 // message packets
2721 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2722 kfree(status);
2723 return -2;
2724 }
2725 kfree(status);
2726
2727 kfree(pHba->reply_pool);
2728
2729 pHba->reply_pool = kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
2730 if(!pHba->reply_pool){
2731 printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name);
2732 return -1;
2733 }
2734 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2735
2736 ptr = pHba->reply_pool;
2737 for(i = 0; i < pHba->reply_fifo_size; i++) {
2738 outbound_frame = (u32)virt_to_bus(ptr);
2739 writel(outbound_frame, pHba->reply_port);
2740 wmb();
2741 ptr += REPLY_FRAME_SIZE;
2742 }
2743 adpt_i2o_status_get(pHba);
2744 return 0;
2745 }
2746
2747
2748 /*
2749 * I2O System Table. Contains information about
2750 * all the IOPs in the system. Used to inform IOPs
2751 * about each other's existence.
2752 *
2753 * sys_tbl_ver is the CurrentChangeIndicator that is
2754 * used by IOPs to track changes.
2755 */
2756
2757
2758
/*
 * Fetch the IOP status block (STATUS_GET) into pHba->status_block,
 * allocating it 32-bit bus addressable on first use, then size the
 * driver's inbound/outbound FIFOs and scatter/gather table from the
 * values it reports.
 *
 * Completion is detected by polling the final byte of the 88-byte
 * block (status_block[87]) for 0xff.
 *
 * Returns 0 on success, -ENOMEM or -ETIMEDOUT on failure.
 */
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block=NULL;
	ulong status_block_bus;

	if(pHba->status_block == NULL) {
		pHba->status_block = (i2o_status_block*)
			kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory. \n",
			pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	status_block_bus = virt_to_bus(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	/* Claim an inbound message frame */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m==EMPTY_QUEUE);


	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	/* Build the STATUS_GET request in the claimed frame; the IOP
	 * DMAs the status block to status_block_bus. */
	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel(((u32)status_block_bus)&0xffffffff, &msg[6]);
	writel(0, &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes

	//post message
	writel(m, pHba->post_port);
	wmb();

	/* Poll the last byte of the block for completion */
	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	// Set up our number of outbound and inbound messages
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	// Calculate the Scatter Gather list size
	/* inbound_frame_size is in 32-bit words; 40 bytes are reserved
	 * off the top - presumably the fixed message header, confirm. */
	pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}


#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
		case 0x01:
			printk("INIT\n");
			break;
		case 0x02:
			printk("RESET\n");
			break;
		case 0x04:
			printk("HOLD\n");
			break;
		case 0x05:
			printk("READY\n");
			break;
		case 0x08:
			printk("OPERATIONAL\n");
			break;
		case 0x10:
			printk("FAILED\n");
			break;
		case 0x11:
			printk("FAULTED\n");
			break;
		default:
			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}
2870
/*
 * Get the IOP's Logical Configuration Table.
 *
 * Issues LCT_NOTIFY ("report now", all device classes) into a buffer
 * sized from the status block's estimate, growing the buffer and
 * retrying if the IOP reports a larger table.  On success the firmware
 * debug buffer pointers are also located via a scalar query.
 *
 * Returns 0 on success, -ENOMEM or the post_wait status on failure.
 *
 * NOTE(review): the failure printk's format string is missing its
 * closing parenthesis ("(status=%#10x.") - cosmetic only.
 */
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
	u32 msg[8];
	int ret;
	u32 buf[16];

	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
		pHba->lct_size = pHba->status_block->expected_lct_size;
	}
	do {
		if (pHba->lct == NULL) {
			pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32);
			if(pHba->lct == NULL) {
				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
					pHba->name);
				return -ENOMEM;
			}
		}
		memset(pHba->lct, 0, pHba->lct_size);

		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = 0xFFFFFFFF;	/* All devices */
		msg[5] = 0x00000000;	/* Report now */
		msg[6] = 0xD0000000|pHba->lct_size;
		msg[7] = virt_to_bus(pHba->lct);

		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
			printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
				pHba->name, ret);
			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
			return ret;
		}

		/* Table bigger than our buffer?  Grow and retry. */
		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
			pHba->lct_size = pHba->lct->table_size << 2;
			kfree(pHba->lct);
			pHba->lct = NULL;
		}
	} while (pHba->lct == NULL);

	PDEBUG("%s: Hardware resource table read.\n", pHba->name);


	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
	/* Locate the firmware debug buffer relative to the mapped base
	 * address; buf[0]=offset, buf[1]=size, buf[2]=string offset. */
	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
		pHba->FwDebugBufferSize = buf[1];
		pHba->FwDebugBuffer_P = pHba->base_addr_virt + buf[0];
		pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
		pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
		pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
		pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
		pHba->FwDebugBuffer_P += buf[2];
		pHba->FwDebugFlags = 0;
	}

	return 0;
}
2934
/*
 * (Re)build the global I2O system table: one i2o_sys_tbl_entry per HBA
 * on hba_chain, describing its IDs, state and inbound post port, so it
 * can later be pushed to each IOP with adpt_i2o_systab_send().
 *
 * Returns 0 on success, -ENOMEM if the table cannot be allocated.
 */
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = NULL;
	int count = 0;

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
				(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	kfree(sys_tbl);

	sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
	if(!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		// Get updated Status Block so we have the latest information
		if (adpt_i2o_status_get(pHba)) {
			/* Unresponsive HBA: shrink the entry count and skip */
			sys_tbl->num_entries--;
			continue; // try next one
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		/* Bus address of the inbound post port, split into 32-bit
		 * halves for the table */
		sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port);
		sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}
2991
2992
/*
 * Dump the information block associated with a given unit (TID):
 * vendor, device and revision strings queried from the device's
 * parameter group 0xF100, plus class and flag details in DEBUG builds.
 */
static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	/* Field 3: vendor string (16 bytes; NUL-terminate before print) */
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Vendor: %-12.12s", buf);
	}
	/* Field 4: device string */
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Device: %-12.12s", buf);
	}
	/* Field 6: revision string (8 bytes) */
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
	{
		buf[8]=0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	 printk(KERN_INFO "\tFlags: ");

	 if(d->lct_data.device_flags&(1<<0))
		  printk("C");	// ConfigDialog requested
	 if(d->lct_data.device_flags&(1<<1))
		  printk("U");	// Multi-user capable
	 if(!(d->lct_data.device_flags&(1<<4)))
		  printk("P");	// Peer service enabled!
	 if(!(d->lct_data.device_flags&(1<<5)))
		  printk("M");	// Mgmt service enabled!
	 printk("\n");
#endif
}
3035
3036 #ifdef DEBUG
3037 /*
3038 * Do i2o class name lookup
3039 */
3040 static const char *adpt_i2o_get_class_name(int class)
3041 {
3042 int idx = 16;
3043 static char *i2o_class_name[] = {
3044 "Executive",
3045 "Device Driver Module",
3046 "Block Device",
3047 "Tape Device",
3048 "LAN Interface",
3049 "WAN Interface",
3050 "Fibre Channel Port",
3051 "Fibre Channel Device",
3052 "SCSI Device",
3053 "ATE Port",
3054 "ATE Device",
3055 "Floppy Controller",
3056 "Floppy Device",
3057 "Secondary Bus Port",
3058 "Peer Transport Agent",
3059 "Peer Transport",
3060 "Unknown"
3061 };
3062
3063 switch(class&0xFFF) {
3064 case I2O_CLASS_EXECUTIVE:
3065 idx = 0; break;
3066 case I2O_CLASS_DDM:
3067 idx = 1; break;
3068 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3069 idx = 2; break;
3070 case I2O_CLASS_SEQUENTIAL_STORAGE:
3071 idx = 3; break;
3072 case I2O_CLASS_LAN:
3073 idx = 4; break;
3074 case I2O_CLASS_WAN:
3075 idx = 5; break;
3076 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3077 idx = 6; break;
3078 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3079 idx = 7; break;
3080 case I2O_CLASS_SCSI_PERIPHERAL:
3081 idx = 8; break;
3082 case I2O_CLASS_ATE_PORT:
3083 idx = 9; break;
3084 case I2O_CLASS_ATE_PERIPHERAL:
3085 idx = 10; break;
3086 case I2O_CLASS_FLOPPY_CONTROLLER:
3087 idx = 11; break;
3088 case I2O_CLASS_FLOPPY_DEVICE:
3089 idx = 12; break;
3090 case I2O_CLASS_BUS_ADAPTER_PORT:
3091 idx = 13; break;
3092 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3093 idx = 14; break;
3094 case I2O_CLASS_PEER_TRANSPORT:
3095 idx = 15; break;
3096 }
3097 return i2o_class_name[idx];
3098 }
3099 #endif
3100
3101
3102 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3103 {
3104 u32 msg[6];
3105 int ret, size = sizeof(i2o_hrt);
3106
3107 do {
3108 if (pHba->hrt == NULL) {
3109 pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32);
3110 if (pHba->hrt == NULL) {
3111 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3112 return -ENOMEM;
3113 }
3114 }
3115
3116 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3117 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3118 msg[2]= 0;
3119 msg[3]= 0;
3120 msg[4]= (0xD0000000 | size); /* Simple transaction */
3121 msg[5]= virt_to_bus(pHba->hrt); /* Dump it here */
3122
3123 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3124 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3125 return ret;
3126 }
3127
3128 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3129 size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3130 kfree(pHba->hrt);
3131 pHba->hrt = NULL;
3132 }
3133 } while(pHba->hrt == NULL);
3134 return 0;
3135 }
3136
3137 /*
3138 * Query one scalar group value or a whole scalar group.
3139 */
3140 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3141 int group, int field, void *buf, int buflen)
3142 {
3143 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3144 u8 *resblk;
3145
3146 int size;
3147
3148 /* 8 bytes for header */
3149 resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32);
3150 if (resblk == NULL) {
3151 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3152 return -ENOMEM;
3153 }
3154
3155 if (field == -1) /* whole group */
3156 opblk[4] = -1;
3157
3158 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3159 opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen));
3160 if (size == -ETIME) {
3161 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3162 return -ETIME;
3163 } else if (size == -EINTR) {
3164 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3165 return -EINTR;
3166 }
3167
3168 memcpy(buf, resblk+8, buflen); /* cut off header */
3169
3170 kfree(resblk);
3171 if (size < 0)
3172 return size;
3173
3174 return buflen;
3175 }
3176
3177
/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
 *
 * This function can be used for all UtilParamsGet/Set operations.
 * The OperationBlock is given in opblk-buffer,
 * and results are returned in resblk-buffer.
 * Note that the minimum sized resblk is 8 bytes and contains
 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 *
 * Returns the number of bytes used in resblk on success, the negative
 * post_wait status, or -BlockStatus if the IOP reported an error.
 * Both opblk and resblk must be bus-addressable (they are handed to
 * the IOP via virt_to_bus).
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
			void *opblk, int oplen, void *resblk, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk;
	int wait_status;

	/* Nine-word request: opblk described by the 0x54... SGL element,
	 * resblk by the 0xD0... last-element descriptor. */
	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = virt_to_bus(opblk);
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = virt_to_bus(resblk);

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk);
   		return wait_status; 	/* -DetailedStatus */
	}

	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
	}

	 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
}
3220
3221
3222 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3223 {
3224 u32 msg[4];
3225 int ret;
3226
3227 adpt_i2o_status_get(pHba);
3228
3229 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3230
3231 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3232 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3233 return 0;
3234 }
3235
3236 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3237 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3238 msg[2] = 0;
3239 msg[3] = 0;
3240
3241 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3242 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3243 pHba->unit, -ret);
3244 } else {
3245 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3246 }
3247
3248 adpt_i2o_status_get(pHba);
3249 return ret;
3250 }
3251
3252
3253 /*
3254 * Enable IOP. Allows the IOP to resume external operations.
3255 */
3256 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3257 {
3258 u32 msg[4];
3259 int ret;
3260
3261 adpt_i2o_status_get(pHba);
3262 if(!pHba->status_block){
3263 return -ENOMEM;
3264 }
3265 /* Enable only allowed on READY state */
3266 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3267 return 0;
3268
3269 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3270 return -EINVAL;
3271
3272 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3273 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3274 msg[2]= 0;
3275 msg[3]= 0;
3276
3277 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3278 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3279 pHba->name, ret);
3280 } else {
3281 PDEBUG("%s: Enabled.\n", pHba->name);
3282 }
3283
3284 adpt_i2o_status_get(pHba);
3285 return ret;
3286 }
3287
3288
/*
 * Push the global I2O system table (built by adpt_i2o_build_sys_table)
 * to this IOP with SYS_TAB_SET.
 *
 * Returns the adpt_i2o_post_wait() status (0 on success).
 *
 * NOTE(review): this uses virt_to_phys for the table address while the
 * rest of the file uses virt_to_bus - confirm they are equivalent on
 * all supported platforms.
 */
static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	 u32 msg[12];
	 int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
	msg[5] = 0;				   /* Segment 0 */

	/*
	 * Provide three SGL-elements:
	 * System table (SysTab), Private memory space declaration and
	 * Private i/o space declaration
	 */
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = virt_to_phys(sys_tbl);
	msg[8] = 0x54000000 | 0;	/* empty private memory declaration */
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;	/* empty private i/o declaration, end of list */
	msg[11] = 0;

	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}
3325
3326
3327 /*============================================================================
3328 *
3329 *============================================================================
3330 */
3331
3332
3333 #ifdef UARTDELAY
3334
/*
 * Busy-wait for approximately 'millisec' milliseconds, one udelay(1000)
 * per iteration.  Only built with UARTDELAY for debug output pacing.
 *
 * Fix: the storage-class specifier was duplicated ("static static"),
 * which is not valid C.
 */
static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}
3342
3343 #endif
3344
/* SCSI host template describing this driver's entry points and static
 * limits to the midlayer (old-style detect/release model, pulled in by
 * scsi_module.c below).  All adpt_* handlers are defined earlier in
 * this file. */
static struct scsi_host_template driver_template = {
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.proc_info		= adpt_proc_info,
	.detect			= adpt_detect,
	.release		= adpt_release,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	/* Error-recovery escalation chain: abort -> device -> bus -> host */
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,	/* revised per-HBA after status_get */
	.this_id		= 7,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};
3364 #include "scsi_module.c"
3365 MODULE_LICENSE("GPL");
This page took 0.101253 seconds and 6 git commands to generate.