drivers/block/cciss.c
1 /*
2 * Disk Array driver for HP Smart Array controllers.
3 * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
17 * 02111-1307, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
31 #include <linux/fs.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/seq_file.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/compat.h>
41 #include <linux/blktrace_api.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
44
45 #include <linux/dma-mapping.h>
46 #include <linux/blkdev.h>
47 #include <linux/genhd.h>
48 #include <linux/completion.h>
49 #include <scsi/scsi.h>
50 #include <scsi/sg.h>
51 #include <scsi/scsi_ioctl.h>
52 #include <linux/cdrom.h>
53 #include <linux/scatterlist.h>
54
55 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
56 #define DRIVER_NAME "HP CISS Driver (v 3.6.20)"
57 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20)
58
59 /* Embedded module documentation macros - see modules.h */
60 MODULE_AUTHOR("Hewlett-Packard Company");
61 MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
62 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
63 " SA6i P600 P800 P400 P400i E200 E200i E500 P700m"
64 " Smart Array G2 Series SAS/SATA Controllers");
65 MODULE_VERSION("3.6.20");
66 MODULE_LICENSE("GPL");
67
68 #include "cciss_cmd.h"
69 #include "cciss.h"
70 #include <linux/cciss_ioctl.h>
71
72 /* define the PCI info for the cards we can control */
73 static const struct pci_device_id cciss_pci_device_id[] = {
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
75 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
76 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
77 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
78 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
79 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
80 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
81 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
82 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
99 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
100 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
101 {0,}
102 };
103
104 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
105
106 /* board_id = Subsystem Device ID & Vendor ID
107 * product = Marketing Name for the board
108 * access = Address of the struct of function pointers
109 * nr_cmds = Number of commands supported by controller
110 */
111 static struct board_type products[] = {
112 {0x40700E11, "Smart Array 5300", &SA5_access, 512},
113 {0x40800E11, "Smart Array 5i", &SA5B_access, 512},
114 {0x40820E11, "Smart Array 532", &SA5B_access, 512},
115 {0x40830E11, "Smart Array 5312", &SA5B_access, 512},
116 {0x409A0E11, "Smart Array 641", &SA5_access, 512},
117 {0x409B0E11, "Smart Array 642", &SA5_access, 512},
118 {0x409C0E11, "Smart Array 6400", &SA5_access, 512},
119 {0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
120 {0x40910E11, "Smart Array 6i", &SA5_access, 512},
121 {0x3225103C, "Smart Array P600", &SA5_access, 512},
122 {0x3223103C, "Smart Array P800", &SA5_access, 512},
123 {0x3234103C, "Smart Array P400", &SA5_access, 512},
124 {0x3235103C, "Smart Array P400i", &SA5_access, 512},
125 {0x3211103C, "Smart Array E200i", &SA5_access, 120},
126 {0x3212103C, "Smart Array E200", &SA5_access, 120},
127 {0x3213103C, "Smart Array E200i", &SA5_access, 120},
128 {0x3214103C, "Smart Array E200i", &SA5_access, 120},
129 {0x3215103C, "Smart Array E200i", &SA5_access, 120},
130 {0x3237103C, "Smart Array E500", &SA5_access, 512},
131 {0x323D103C, "Smart Array P700m", &SA5_access, 512},
132 {0x3241103C, "Smart Array P212", &SA5_access, 384},
133 {0x3243103C, "Smart Array P410", &SA5_access, 384},
134 {0x3245103C, "Smart Array P410i", &SA5_access, 384},
135 {0x3247103C, "Smart Array P411", &SA5_access, 384},
136 {0x3249103C, "Smart Array P812", &SA5_access, 384},
137 {0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
138 };
139
140 /* How long to wait (in milliseconds) for board to go into simple mode */
141 #define MAX_CONFIG_WAIT 30000
142 #define MAX_IOCTL_CONFIG_WAIT 1000
143
144 /* define how many times we will retry a command because of bus resets */
145 #define MAX_CMD_RETRIES 3
146
147 #define MAX_CTLR 32
148
149 /* Originally the cciss driver only supported 8 major numbers */
150 #define MAX_CTLR_ORIG 8
151
152 static ctlr_info_t *hba[MAX_CTLR];
153
154 static void do_cciss_request(struct request_queue *q);
155 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
156 static int cciss_open(struct inode *inode, struct file *filep);
157 static int cciss_release(struct inode *inode, struct file *filep);
158 static int cciss_ioctl(struct inode *inode, struct file *filep,
159 unsigned int cmd, unsigned long arg);
160 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
161
162 static int cciss_revalidate(struct gendisk *disk);
163 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
164 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
165 int clear_all);
166
167 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
168 sector_t *total_size, unsigned int *block_size);
169 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
170 sector_t *total_size, unsigned int *block_size);
171 static void cciss_geometry_inquiry(int ctlr, int logvol,
172 int withirq, sector_t total_size,
173 unsigned int block_size, InquiryData_struct *inq_buff,
174 drive_info_struct *drv);
175 static void cciss_getgeometry(int cntl_num);
176 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
177 __u32);
178 static void start_io(ctlr_info_t *h);
179 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
180 unsigned int use_unit_num, unsigned int log_unit,
181 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
182 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
183 unsigned int use_unit_num, unsigned int log_unit,
184 __u8 page_code, int cmd_type);
185
186 static void fail_all_cmds(unsigned long ctlr);
187
188 #ifdef CONFIG_PROC_FS
189 static void cciss_procinit(int i);
190 #else
191 static void cciss_procinit(int i)
192 {
193 }
194 #endif /* CONFIG_PROC_FS */
195
196 #ifdef CONFIG_COMPAT
197 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
198 #endif
199
200 static struct block_device_operations cciss_fops = {
201 .owner = THIS_MODULE,
202 .open = cciss_open,
203 .release = cciss_release,
204 .ioctl = cciss_ioctl,
205 .getgeo = cciss_getgeo,
206 #ifdef CONFIG_COMPAT
207 .compat_ioctl = cciss_compat_ioctl,
208 #endif
209 .revalidate_disk = cciss_revalidate,
210 };
211
212 /*
213 * Enqueuing and dequeuing functions for cmdlists.
214 */
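/*
 * Note: the per-controller command queues (such as h->reqQ) are kept as
 * circular doubly-linked lists of CommandList_structs.  *Qptr always
 * points at the head; addQ() links a new command in just before the head
 * (i.e. at the tail), and removeQ() unlinks a command and resets *Qptr
 * to NULL when the last one goes.
 */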
215 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
216 {
217 if (*Qptr == NULL) {
218 *Qptr = c;
219 c->next = c->prev = c;
220 } else {
221 c->prev = (*Qptr)->prev;
222 c->next = (*Qptr);
223 (*Qptr)->prev->next = c;
224 (*Qptr)->prev = c;
225 }
226 }
227
228 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
229 CommandList_struct *c)
230 {
231 if (c && c->next != c) {
232 if (*Qptr == c)
233 *Qptr = c->next;
234 c->prev->next = c->next;
235 c->next->prev = c->prev;
236 } else {
237 *Qptr = NULL;
238 }
239 return c;
240 }
241
242 #include "cciss_scsi.c" /* For SCSI tape support */
243
244 #define RAID_UNKNOWN 6
245
246 #ifdef CONFIG_PROC_FS
247
248 /*
249 * Report information about this controller.
250 */
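/*
 * The /proc/driver/cciss/<devname> file is generated through the seq_file
 * iterator below: position 0 additionally emits the controller header, and
 * each position reports the logical drive with that index (cciss/c<N>d<pos>).
 */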
251 #define ENG_GIG 1000000000
252 #define ENG_GIG_FACTOR (ENG_GIG/512)
253 #define ENGAGE_SCSI "engage scsi"
254 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
255 "UNKNOWN"
256 };
257
258 static struct proc_dir_entry *proc_cciss;
259
260 static void cciss_seq_show_header(struct seq_file *seq)
261 {
262 ctlr_info_t *h = seq->private;
263
264 seq_printf(seq, "%s: HP %s Controller\n"
265 "Board ID: 0x%08lx\n"
266 "Firmware Version: %c%c%c%c\n"
267 "IRQ: %d\n"
268 "Logical drives: %d\n"
269 "Current Q depth: %d\n"
270 "Current # commands on controller: %d\n"
271 "Max Q depth since init: %d\n"
272 "Max # commands on controller since init: %d\n"
273 "Max SG entries since init: %d\n",
274 h->devname,
275 h->product_name,
276 (unsigned long)h->board_id,
277 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
278 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
279 h->num_luns,
280 h->Qdepth, h->commands_outstanding,
281 h->maxQsinceinit, h->max_outstanding, h->maxSG);
282
283 #ifdef CONFIG_CISS_SCSI_TAPE
284 cciss_seq_tape_report(seq, h->ctlr);
285 #endif /* CONFIG_CISS_SCSI_TAPE */
286 }
287
288 static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
289 {
290 ctlr_info_t *h = seq->private;
291 unsigned ctlr = h->ctlr;
292 unsigned long flags;
293
294 /* prevent displaying bogus info during configuration
295 * or deconfiguration of a logical volume
296 */
297 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
298 if (h->busy_configuring) {
299 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
300 return ERR_PTR(-EBUSY);
301 }
302 h->busy_configuring = 1;
303 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
304
305 if (*pos == 0)
306 cciss_seq_show_header(seq);
307
308 return pos;
309 }
310
311 static int cciss_seq_show(struct seq_file *seq, void *v)
312 {
313 sector_t vol_sz, vol_sz_frac;
314 ctlr_info_t *h = seq->private;
315 unsigned ctlr = h->ctlr;
316 loff_t *pos = v;
317 drive_info_struct *drv = &h->drv[*pos];
318
319 if (*pos > h->highest_lun)
320 return 0;
321
322 if (drv->heads == 0)
323 return 0;
324
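	/* Convert nr_blocks (512-byte sectors) to decimal gigabytes:
	 * ENG_GIG_FACTOR is 10^9/512, so the quotient is whole GB and the
	 * remainder, scaled by 100, yields the two fractional digits
	 * printed below as %4u.%02u.
	 */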
325 vol_sz = drv->nr_blocks;
326 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
327 vol_sz_frac *= 100;
328 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
329
330 if (drv->raid_level > 5)
331 drv->raid_level = RAID_UNKNOWN;
332 seq_printf(seq, "cciss/c%dd%d:"
333 "\t%4u.%02uGB\tRAID %s\n",
334 ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
335 raid_label[drv->raid_level]);
336 return 0;
337 }
338
339 static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
340 {
341 ctlr_info_t *h = seq->private;
342
343 if (*pos > h->highest_lun)
344 return NULL;
345 *pos += 1;
346
347 return pos;
348 }
349
350 static void cciss_seq_stop(struct seq_file *seq, void *v)
351 {
352 ctlr_info_t *h = seq->private;
353
354 /* Only reset h->busy_configuring if we succeeded in setting
355 * it during cciss_seq_start. */
356 if (v == ERR_PTR(-EBUSY))
357 return;
358
359 h->busy_configuring = 0;
360 }
361
362 static struct seq_operations cciss_seq_ops = {
363 .start = cciss_seq_start,
364 .show = cciss_seq_show,
365 .next = cciss_seq_next,
366 .stop = cciss_seq_stop,
367 };
368
369 static int cciss_seq_open(struct inode *inode, struct file *file)
370 {
371 int ret = seq_open(file, &cciss_seq_ops);
372 struct seq_file *seq = file->private_data;
373
374 if (!ret)
375 seq->private = PDE(inode)->data;
376
377 return ret;
378 }
379
380 static ssize_t
381 cciss_proc_write(struct file *file, const char __user *buf,
382 size_t length, loff_t *ppos)
383 {
384 int err;
385 char *buffer;
386
387 #ifndef CONFIG_CISS_SCSI_TAPE
388 return -EINVAL;
389 #endif
390
391 if (!buf || length > PAGE_SIZE - 1)
392 return -EINVAL;
393
394 buffer = (char *)__get_free_page(GFP_KERNEL);
395 if (!buffer)
396 return -ENOMEM;
397
398 err = -EFAULT;
399 if (copy_from_user(buffer, buf, length))
400 goto out;
401 buffer[length] = '\0';
402
403 #ifdef CONFIG_CISS_SCSI_TAPE
404 if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
405 struct seq_file *seq = file->private_data;
406 ctlr_info_t *h = seq->private;
407 int rc;
408
409 rc = cciss_engage_scsi(h->ctlr);
410 if (rc != 0)
411 err = -rc;
412 else
413 err = length;
414 } else
415 #endif /* CONFIG_CISS_SCSI_TAPE */
416 err = -EINVAL;
417 /* might be nice to have "disengage" too, but it's not
418 safely possible. (only 1 module use count, lock issues.) */
419
420 out:
421 free_page((unsigned long)buffer);
422 return err;
423 }
424
425 static struct file_operations cciss_proc_fops = {
426 .owner = THIS_MODULE,
427 .open = cciss_seq_open,
428 .read = seq_read,
429 .llseek = seq_lseek,
430 .release = seq_release,
431 .write = cciss_proc_write,
432 };
433
434 static void __devinit cciss_procinit(int i)
435 {
436 struct proc_dir_entry *pde;
437
438 if (proc_cciss == NULL)
439 proc_cciss = proc_mkdir("driver/cciss", NULL);
440 if (!proc_cciss)
441 return;
442 pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
443 S_IROTH, proc_cciss,
444 &cciss_proc_fops, hba[i]);
445 }
446 #endif /* CONFIG_PROC_FS */
447
448 /*
449 * For operations that cannot sleep, a command block is allocated at init,
450 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
451  * which ones are free or in use.  For operations that can wait for kmalloc
452  * to possibly sleep, this routine can be called with get_from_pool set to 0.
453  * cmd_free() MUST then be called with got_from_pool set to 0 to match.
454 */
455 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
456 {
457 CommandList_struct *c;
458 int i;
459 u64bit temp64;
460 dma_addr_t cmd_dma_handle, err_dma_handle;
461
462 if (!get_from_pool) {
463 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
464 sizeof(CommandList_struct), &cmd_dma_handle);
465 if (c == NULL)
466 return NULL;
467 memset(c, 0, sizeof(CommandList_struct));
468
469 c->cmdindex = -1;
470
471 c->err_info = (ErrorInfo_struct *)
472 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
473 &err_dma_handle);
474
475 if (c->err_info == NULL) {
476 pci_free_consistent(h->pdev,
477 sizeof(CommandList_struct), c, cmd_dma_handle);
478 return NULL;
479 }
480 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
481 	} else {		/* get it out of the controller's pool */
482
483 do {
484 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
485 if (i == h->nr_cmds)
486 return NULL;
487 } while (test_and_set_bit
488 (i & (BITS_PER_LONG - 1),
489 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
490 #ifdef CCISS_DEBUG
491 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
492 #endif
493 c = h->cmd_pool + i;
494 memset(c, 0, sizeof(CommandList_struct));
495 cmd_dma_handle = h->cmd_pool_dhandle
496 + i * sizeof(CommandList_struct);
497 c->err_info = h->errinfo_pool + i;
498 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
499 err_dma_handle = h->errinfo_pool_dhandle
500 + i * sizeof(ErrorInfo_struct);
501 h->nr_allocs++;
502
503 c->cmdindex = i;
504 }
505
506 c->busaddr = (__u32) cmd_dma_handle;
507 temp64.val = (__u64) err_dma_handle;
508 c->ErrDesc.Addr.lower = temp64.val32.lower;
509 c->ErrDesc.Addr.upper = temp64.val32.upper;
510 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
511
512 c->ctlr = h->ctlr;
513 return c;
514 }
515
516 /*
517 * Frees a command block that was previously allocated with cmd_alloc().
518 */
519 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
520 {
521 int i;
522 u64bit temp64;
523
524 if (!got_from_pool) {
525 temp64.val32.lower = c->ErrDesc.Addr.lower;
526 temp64.val32.upper = c->ErrDesc.Addr.upper;
527 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
528 c->err_info, (dma_addr_t) temp64.val);
529 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
530 c, (dma_addr_t) c->busaddr);
531 } else {
532 i = c - h->cmd_pool;
533 clear_bit(i & (BITS_PER_LONG - 1),
534 h->cmd_pool_bits + (i / BITS_PER_LONG));
535 h->nr_frees++;
536 }
537 }
538
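/*
 * Illustrative sketch only (kept out of the build): callers must pair the
 * pool flags -- a command obtained with cmd_alloc(h, 1) is returned with
 * cmd_free(h, c, 1), and one obtained with cmd_alloc(h, 0) with
 * cmd_free(h, c, 0), as the ioctl and sendcmd paths below do.  The helper
 * name here is hypothetical.
 */
#if 0
static int cciss_cmd_alloc_example(ctlr_info_t *h)
{
	CommandList_struct *c;

	c = cmd_alloc(h, 0);	/* not from the pool; may allocate */
	if (c == NULL)
		return -ENOMEM;
	/* ... fill in c->Request and submit it ... */
	cmd_free(h, c, 0);	/* must match the cmd_alloc() flag */
	return 0;
}
#endif
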
539 static inline ctlr_info_t *get_host(struct gendisk *disk)
540 {
541 return disk->queue->queuedata;
542 }
543
544 static inline drive_info_struct *get_drv(struct gendisk *disk)
545 {
546 return disk->private_data;
547 }
548
549 /*
550 * Open. Make sure the device is really there.
551 */
552 static int cciss_open(struct inode *inode, struct file *filep)
553 {
554 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
555 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
556
557 #ifdef CCISS_DEBUG
558 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
559 #endif /* CCISS_DEBUG */
560
561 if (host->busy_initializing || drv->busy_configuring)
562 return -EBUSY;
563 /*
564 * Root is allowed to open raw volume zero even if it's not configured
565 * so array config can still work. Root is also allowed to open any
566  * volume that has a LUN ID, so it can issue an IOCTL to reread the
567  * disk information.  I don't think I really like this,
568  * but I'm already using way too many device nodes to claim another one
569 * for "raw controller".
570 */
571 if (drv->heads == 0) {
572 if (iminor(inode) != 0) { /* not node 0? */
573 /* if not node 0 make sure it is a partition = 0 */
574 if (iminor(inode) & 0x0f) {
575 return -ENXIO;
576 /* if it is, make sure we have a LUN ID */
577 } else if (drv->LunID == 0) {
578 return -ENXIO;
579 }
580 }
581 if (!capable(CAP_SYS_ADMIN))
582 return -EPERM;
583 }
584 drv->usage_count++;
585 host->usage_count++;
586 return 0;
587 }
588
589 /*
590 * Close. Sync first.
591 */
592 static int cciss_release(struct inode *inode, struct file *filep)
593 {
594 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
595 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
596
597 #ifdef CCISS_DEBUG
598 printk(KERN_DEBUG "cciss_release %s\n",
599 inode->i_bdev->bd_disk->disk_name);
600 #endif /* CCISS_DEBUG */
601
602 drv->usage_count--;
603 host->usage_count--;
604 return 0;
605 }
606
607 #ifdef CONFIG_COMPAT
608
609 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
610 {
611 int ret;
612 lock_kernel();
613 ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
614 unlock_kernel();
615 return ret;
616 }
617
618 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
619 unsigned long arg);
620 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
621 unsigned long arg);
622
623 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
624 {
625 switch (cmd) {
626 case CCISS_GETPCIINFO:
627 case CCISS_GETINTINFO:
628 case CCISS_SETINTINFO:
629 case CCISS_GETNODENAME:
630 case CCISS_SETNODENAME:
631 case CCISS_GETHEARTBEAT:
632 case CCISS_GETBUSTYPES:
633 case CCISS_GETFIRMVER:
634 case CCISS_GETDRIVVER:
635 case CCISS_REVALIDVOLS:
636 case CCISS_DEREGDISK:
637 case CCISS_REGNEWDISK:
638 case CCISS_REGNEWD:
639 case CCISS_RESCANDISK:
640 case CCISS_GETLUNINFO:
641 return do_ioctl(f, cmd, arg);
642
643 case CCISS_PASSTHRU32:
644 return cciss_ioctl32_passthru(f, cmd, arg);
645 case CCISS_BIG_PASSTHRU32:
646 return cciss_ioctl32_big_passthru(f, cmd, arg);
647
648 default:
649 return -ENOIOCTLCMD;
650 }
651 }
652
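/*
 * The two translation helpers below follow the usual compat pattern:
 * each field of the 32-bit user structure is copied into a native
 * structure staged in user space via compat_alloc_user_space(), the
 * 32-bit buffer pointer is widened with compat_ptr(), the native
 * CCISS_PASSTHRU/CCISS_BIG_PASSTHRU path is invoked through do_ioctl(),
 * and finally error_info is copied back to the 32-bit structure.
 */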
653 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
654 unsigned long arg)
655 {
656 IOCTL32_Command_struct __user *arg32 =
657 (IOCTL32_Command_struct __user *) arg;
658 IOCTL_Command_struct arg64;
659 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
660 int err;
661 u32 cp;
662
663 err = 0;
664 err |=
665 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
666 sizeof(arg64.LUN_info));
667 err |=
668 copy_from_user(&arg64.Request, &arg32->Request,
669 sizeof(arg64.Request));
670 err |=
671 copy_from_user(&arg64.error_info, &arg32->error_info,
672 sizeof(arg64.error_info));
673 err |= get_user(arg64.buf_size, &arg32->buf_size);
674 err |= get_user(cp, &arg32->buf);
675 arg64.buf = compat_ptr(cp);
676 err |= copy_to_user(p, &arg64, sizeof(arg64));
677
678 if (err)
679 return -EFAULT;
680
681 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
682 if (err)
683 return err;
684 err |=
685 copy_in_user(&arg32->error_info, &p->error_info,
686 sizeof(arg32->error_info));
687 if (err)
688 return -EFAULT;
689 return err;
690 }
691
692 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
693 unsigned long arg)
694 {
695 BIG_IOCTL32_Command_struct __user *arg32 =
696 (BIG_IOCTL32_Command_struct __user *) arg;
697 BIG_IOCTL_Command_struct arg64;
698 BIG_IOCTL_Command_struct __user *p =
699 compat_alloc_user_space(sizeof(arg64));
700 int err;
701 u32 cp;
702
703 err = 0;
704 err |=
705 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
706 sizeof(arg64.LUN_info));
707 err |=
708 copy_from_user(&arg64.Request, &arg32->Request,
709 sizeof(arg64.Request));
710 err |=
711 copy_from_user(&arg64.error_info, &arg32->error_info,
712 sizeof(arg64.error_info));
713 err |= get_user(arg64.buf_size, &arg32->buf_size);
714 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
715 err |= get_user(cp, &arg32->buf);
716 arg64.buf = compat_ptr(cp);
717 err |= copy_to_user(p, &arg64, sizeof(arg64));
718
719 if (err)
720 return -EFAULT;
721
722 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
723 if (err)
724 return err;
725 err |=
726 copy_in_user(&arg32->error_info, &p->error_info,
727 sizeof(arg32->error_info));
728 if (err)
729 return -EFAULT;
730 return err;
731 }
732 #endif
733
734 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
735 {
736 drive_info_struct *drv = get_drv(bdev->bd_disk);
737
738 if (!drv->cylinders)
739 return -ENXIO;
740
741 geo->heads = drv->heads;
742 geo->sectors = drv->sectors;
743 geo->cylinders = drv->cylinders;
744 return 0;
745 }
746
747 /*
748 * ioctl
749 */
750 static int cciss_ioctl(struct inode *inode, struct file *filep,
751 unsigned int cmd, unsigned long arg)
752 {
753 struct block_device *bdev = inode->i_bdev;
754 struct gendisk *disk = bdev->bd_disk;
755 ctlr_info_t *host = get_host(disk);
756 drive_info_struct *drv = get_drv(disk);
757 int ctlr = host->ctlr;
758 void __user *argp = (void __user *)arg;
759
760 #ifdef CCISS_DEBUG
761 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
762 #endif /* CCISS_DEBUG */
763
764 switch (cmd) {
765 case CCISS_GETPCIINFO:
766 {
767 cciss_pci_info_struct pciinfo;
768
769 if (!arg)
770 return -EINVAL;
771 pciinfo.domain = pci_domain_nr(host->pdev->bus);
772 pciinfo.bus = host->pdev->bus->number;
773 pciinfo.dev_fn = host->pdev->devfn;
774 pciinfo.board_id = host->board_id;
775 if (copy_to_user
776 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
777 return -EFAULT;
778 return 0;
779 }
780 case CCISS_GETINTINFO:
781 {
782 cciss_coalint_struct intinfo;
783 if (!arg)
784 return -EINVAL;
785 intinfo.delay =
786 readl(&host->cfgtable->HostWrite.CoalIntDelay);
787 intinfo.count =
788 readl(&host->cfgtable->HostWrite.CoalIntCount);
789 if (copy_to_user
790 (argp, &intinfo, sizeof(cciss_coalint_struct)))
791 return -EFAULT;
792 return 0;
793 }
794 case CCISS_SETINTINFO:
795 {
796 cciss_coalint_struct intinfo;
797 unsigned long flags;
798 int i;
799
800 if (!arg)
801 return -EINVAL;
802 if (!capable(CAP_SYS_ADMIN))
803 return -EPERM;
804 if (copy_from_user
805 (&intinfo, argp, sizeof(cciss_coalint_struct)))
806 return -EFAULT;
807 if ((intinfo.delay == 0) && (intinfo.count == 0))
808 {
809 // printk("cciss_ioctl: delay and count cannot be 0\n");
810 return -EINVAL;
811 }
812 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
813 /* Update the field, and then ring the doorbell */
814 writel(intinfo.delay,
815 &(host->cfgtable->HostWrite.CoalIntDelay));
816 writel(intinfo.count,
817 &(host->cfgtable->HostWrite.CoalIntCount));
818 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
819
820 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
821 if (!(readl(host->vaddr + SA5_DOORBELL)
822 & CFGTBL_ChangeReq))
823 break;
824 /* delay and try again */
825 udelay(1000);
826 }
827 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
828 if (i >= MAX_IOCTL_CONFIG_WAIT)
829 return -EAGAIN;
830 return 0;
831 }
832 case CCISS_GETNODENAME:
833 {
834 NodeName_type NodeName;
835 int i;
836
837 if (!arg)
838 return -EINVAL;
839 for (i = 0; i < 16; i++)
840 NodeName[i] =
841 readb(&host->cfgtable->ServerName[i]);
842 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
843 return -EFAULT;
844 return 0;
845 }
846 case CCISS_SETNODENAME:
847 {
848 NodeName_type NodeName;
849 unsigned long flags;
850 int i;
851
852 if (!arg)
853 return -EINVAL;
854 if (!capable(CAP_SYS_ADMIN))
855 return -EPERM;
856
857 if (copy_from_user
858 (NodeName, argp, sizeof(NodeName_type)))
859 return -EFAULT;
860
861 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
862
863 /* Update the field, and then ring the doorbell */
864 for (i = 0; i < 16; i++)
865 writeb(NodeName[i],
866 &host->cfgtable->ServerName[i]);
867
868 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
869
870 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
871 if (!(readl(host->vaddr + SA5_DOORBELL)
872 & CFGTBL_ChangeReq))
873 break;
874 /* delay and try again */
875 udelay(1000);
876 }
877 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
878 if (i >= MAX_IOCTL_CONFIG_WAIT)
879 return -EAGAIN;
880 return 0;
881 }
882
883 case CCISS_GETHEARTBEAT:
884 {
885 Heartbeat_type heartbeat;
886
887 if (!arg)
888 return -EINVAL;
889 heartbeat = readl(&host->cfgtable->HeartBeat);
890 if (copy_to_user
891 (argp, &heartbeat, sizeof(Heartbeat_type)))
892 return -EFAULT;
893 return 0;
894 }
895 case CCISS_GETBUSTYPES:
896 {
897 BusTypes_type BusTypes;
898
899 if (!arg)
900 return -EINVAL;
901 BusTypes = readl(&host->cfgtable->BusTypes);
902 if (copy_to_user
903 (argp, &BusTypes, sizeof(BusTypes_type)))
904 return -EFAULT;
905 return 0;
906 }
907 case CCISS_GETFIRMVER:
908 {
909 FirmwareVer_type firmware;
910
911 if (!arg)
912 return -EINVAL;
913 memcpy(firmware, host->firm_ver, 4);
914
915 if (copy_to_user
916 (argp, firmware, sizeof(FirmwareVer_type)))
917 return -EFAULT;
918 return 0;
919 }
920 case CCISS_GETDRIVVER:
921 {
922 DriverVer_type DriverVer = DRIVER_VERSION;
923
924 if (!arg)
925 return -EINVAL;
926
927 if (copy_to_user
928 (argp, &DriverVer, sizeof(DriverVer_type)))
929 return -EFAULT;
930 return 0;
931 }
932
933 case CCISS_REVALIDVOLS:
934 return rebuild_lun_table(host, NULL);
935
936 case CCISS_GETLUNINFO:{
937 LogvolInfo_struct luninfo;
938
939 luninfo.LunID = drv->LunID;
940 luninfo.num_opens = drv->usage_count;
941 luninfo.num_parts = 0;
942 if (copy_to_user(argp, &luninfo,
943 sizeof(LogvolInfo_struct)))
944 return -EFAULT;
945 return 0;
946 }
947 case CCISS_DEREGDISK:
948 return rebuild_lun_table(host, disk);
949
950 case CCISS_REGNEWD:
951 return rebuild_lun_table(host, NULL);
952
953 case CCISS_PASSTHRU:
954 {
955 IOCTL_Command_struct iocommand;
956 CommandList_struct *c;
957 char *buff = NULL;
958 u64bit temp64;
959 unsigned long flags;
960 DECLARE_COMPLETION_ONSTACK(wait);
961
962 if (!arg)
963 return -EINVAL;
964
965 if (!capable(CAP_SYS_RAWIO))
966 return -EPERM;
967
968 if (copy_from_user
969 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
970 return -EFAULT;
971 if ((iocommand.buf_size < 1) &&
972 (iocommand.Request.Type.Direction != XFER_NONE)) {
973 return -EINVAL;
974 }
975 #if 0 /* 'buf_size' member is 16 bits, and always smaller than the kmalloc limit */
976 /* Check kmalloc limits */
977 if (iocommand.buf_size > 128000)
978 return -EINVAL;
979 #endif
980 if (iocommand.buf_size > 0) {
981 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
982 if (buff == NULL)
983 return -EFAULT;
984 }
985 if (iocommand.Request.Type.Direction == XFER_WRITE) {
986 /* Copy the data into the buffer we created */
987 if (copy_from_user
988 (buff, iocommand.buf, iocommand.buf_size)) {
989 kfree(buff);
990 return -EFAULT;
991 }
992 } else {
993 memset(buff, 0, iocommand.buf_size);
994 }
995 if ((c = cmd_alloc(host, 0)) == NULL) {
996 kfree(buff);
997 return -ENOMEM;
998 }
999 // Fill in the command type
1000 c->cmd_type = CMD_IOCTL_PEND;
1001 // Fill in Command Header
1002 c->Header.ReplyQueue = 0; // unused in simple mode
1003 if (iocommand.buf_size > 0) // buffer to fill
1004 {
1005 c->Header.SGList = 1;
1006 c->Header.SGTotal = 1;
1007 } else // no buffers to fill
1008 {
1009 c->Header.SGList = 0;
1010 c->Header.SGTotal = 0;
1011 }
1012 c->Header.LUN = iocommand.LUN_info;
1013 			c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block for tag
1014
1015 // Fill in Request block
1016 c->Request = iocommand.Request;
1017
1018 // Fill in the scatter gather information
1019 if (iocommand.buf_size > 0) {
1020 temp64.val = pci_map_single(host->pdev, buff,
1021 iocommand.buf_size,
1022 PCI_DMA_BIDIRECTIONAL);
1023 c->SG[0].Addr.lower = temp64.val32.lower;
1024 c->SG[0].Addr.upper = temp64.val32.upper;
1025 c->SG[0].Len = iocommand.buf_size;
1026 c->SG[0].Ext = 0; // we are not chaining
1027 }
1028 c->waiting = &wait;
1029
1030 /* Put the request on the tail of the request queue */
1031 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1032 addQ(&host->reqQ, c);
1033 host->Qdepth++;
1034 start_io(host);
1035 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1036
1037 wait_for_completion(&wait);
1038
1039 /* unlock the buffers from DMA */
1040 temp64.val32.lower = c->SG[0].Addr.lower;
1041 temp64.val32.upper = c->SG[0].Addr.upper;
1042 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
1043 iocommand.buf_size,
1044 PCI_DMA_BIDIRECTIONAL);
1045
1046 /* Copy the error information out */
1047 iocommand.error_info = *(c->err_info);
1048 if (copy_to_user
1049 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
1050 kfree(buff);
1051 cmd_free(host, c, 0);
1052 return -EFAULT;
1053 }
1054
1055 if (iocommand.Request.Type.Direction == XFER_READ) {
1056 /* Copy the data out of the buffer we created */
1057 if (copy_to_user
1058 (iocommand.buf, buff, iocommand.buf_size)) {
1059 kfree(buff);
1060 cmd_free(host, c, 0);
1061 return -EFAULT;
1062 }
1063 }
1064 kfree(buff);
1065 cmd_free(host, c, 0);
1066 return 0;
1067 }
1068 case CCISS_BIG_PASSTHRU:{
1069 BIG_IOCTL_Command_struct *ioc;
1070 CommandList_struct *c;
1071 unsigned char **buff = NULL;
1072 int *buff_size = NULL;
1073 u64bit temp64;
1074 unsigned long flags;
1075 BYTE sg_used = 0;
1076 int status = 0;
1077 int i;
1078 DECLARE_COMPLETION_ONSTACK(wait);
1079 __u32 left;
1080 __u32 sz;
1081 BYTE __user *data_ptr;
1082
1083 if (!arg)
1084 return -EINVAL;
1085 if (!capable(CAP_SYS_RAWIO))
1086 return -EPERM;
1087 ioc = (BIG_IOCTL_Command_struct *)
1088 kmalloc(sizeof(*ioc), GFP_KERNEL);
1089 if (!ioc) {
1090 status = -ENOMEM;
1091 goto cleanup1;
1092 }
1093 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1094 status = -EFAULT;
1095 goto cleanup1;
1096 }
1097 if ((ioc->buf_size < 1) &&
1098 (ioc->Request.Type.Direction != XFER_NONE)) {
1099 status = -EINVAL;
1100 goto cleanup1;
1101 }
1102 /* Check kmalloc limits using all SGs */
1103 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1104 status = -EINVAL;
1105 goto cleanup1;
1106 }
1107 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1108 status = -EINVAL;
1109 goto cleanup1;
1110 }
1111 buff =
1112 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1113 if (!buff) {
1114 status = -ENOMEM;
1115 goto cleanup1;
1116 }
1117 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1118 GFP_KERNEL);
1119 if (!buff_size) {
1120 status = -ENOMEM;
1121 goto cleanup1;
1122 }
1123 left = ioc->buf_size;
1124 data_ptr = ioc->buf;
1125 while (left) {
1126 				sz = (left > ioc->malloc_size)
1127 					? ioc->malloc_size
1128 					: left;
1129 buff_size[sg_used] = sz;
1130 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1131 if (buff[sg_used] == NULL) {
1132 status = -ENOMEM;
1133 goto cleanup1;
1134 }
1135 if (ioc->Request.Type.Direction == XFER_WRITE) {
1136 if (copy_from_user
1137 (buff[sg_used], data_ptr, sz)) {
1138 status = -ENOMEM;
1139 goto cleanup1;
1140 }
1141 } else {
1142 memset(buff[sg_used], 0, sz);
1143 }
1144 left -= sz;
1145 data_ptr += sz;
1146 sg_used++;
1147 }
1148 if ((c = cmd_alloc(host, 0)) == NULL) {
1149 status = -ENOMEM;
1150 goto cleanup1;
1151 }
1152 c->cmd_type = CMD_IOCTL_PEND;
1153 c->Header.ReplyQueue = 0;
1154
1155 if (ioc->buf_size > 0) {
1156 c->Header.SGList = sg_used;
1157 c->Header.SGTotal = sg_used;
1158 } else {
1159 c->Header.SGList = 0;
1160 c->Header.SGTotal = 0;
1161 }
1162 c->Header.LUN = ioc->LUN_info;
1163 c->Header.Tag.lower = c->busaddr;
1164
1165 c->Request = ioc->Request;
1166 if (ioc->buf_size > 0) {
1167 int i;
1168 for (i = 0; i < sg_used; i++) {
1169 temp64.val =
1170 pci_map_single(host->pdev, buff[i],
1171 buff_size[i],
1172 PCI_DMA_BIDIRECTIONAL);
1173 c->SG[i].Addr.lower =
1174 temp64.val32.lower;
1175 c->SG[i].Addr.upper =
1176 temp64.val32.upper;
1177 c->SG[i].Len = buff_size[i];
1178 c->SG[i].Ext = 0; /* we are not chaining */
1179 }
1180 }
1181 c->waiting = &wait;
1182 /* Put the request on the tail of the request queue */
1183 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1184 addQ(&host->reqQ, c);
1185 host->Qdepth++;
1186 start_io(host);
1187 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1188 wait_for_completion(&wait);
1189 /* unlock the buffers from DMA */
1190 for (i = 0; i < sg_used; i++) {
1191 temp64.val32.lower = c->SG[i].Addr.lower;
1192 temp64.val32.upper = c->SG[i].Addr.upper;
1193 pci_unmap_single(host->pdev,
1194 (dma_addr_t) temp64.val, buff_size[i],
1195 PCI_DMA_BIDIRECTIONAL);
1196 }
1197 /* Copy the error information out */
1198 ioc->error_info = *(c->err_info);
1199 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1200 cmd_free(host, c, 0);
1201 status = -EFAULT;
1202 goto cleanup1;
1203 }
1204 if (ioc->Request.Type.Direction == XFER_READ) {
1205 /* Copy the data out of the buffer we created */
1206 BYTE __user *ptr = ioc->buf;
1207 for (i = 0; i < sg_used; i++) {
1208 if (copy_to_user
1209 (ptr, buff[i], buff_size[i])) {
1210 cmd_free(host, c, 0);
1211 status = -EFAULT;
1212 goto cleanup1;
1213 }
1214 ptr += buff_size[i];
1215 }
1216 }
1217 cmd_free(host, c, 0);
1218 status = 0;
1219 cleanup1:
1220 if (buff) {
1221 for (i = 0; i < sg_used; i++)
1222 kfree(buff[i]);
1223 kfree(buff);
1224 }
1225 kfree(buff_size);
1226 kfree(ioc);
1227 return status;
1228 }
1229
1230 /* scsi_cmd_ioctl handles these, below, though some are not */
1231 /* very meaningful for cciss. SG_IO is the main one people want. */
1232
1233 case SG_GET_VERSION_NUM:
1234 case SG_SET_TIMEOUT:
1235 case SG_GET_TIMEOUT:
1236 case SG_GET_RESERVED_SIZE:
1237 case SG_SET_RESERVED_SIZE:
1238 case SG_EMULATED_HOST:
1239 case SG_IO:
1240 case SCSI_IOCTL_SEND_COMMAND:
1241 return scsi_cmd_ioctl(filep, disk->queue, disk, cmd, argp);
1242
1243 /* scsi_cmd_ioctl would normally handle these, below, but */
1244 /* they aren't a good fit for cciss, as CD-ROMs are */
1245 /* not supported, and we don't have any bus/target/lun */
1246 /* which we present to the kernel. */
1247
1248 case CDROM_SEND_PACKET:
1249 case CDROMCLOSETRAY:
1250 case CDROMEJECT:
1251 case SCSI_IOCTL_GET_IDLUN:
1252 case SCSI_IOCTL_GET_BUS_NUMBER:
1253 default:
1254 return -ENOTTY;
1255 }
1256 }
1257
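/*
 * Illustrative sketch only (never built with the driver): roughly how a
 * user-space tool could exercise the CCISS_GETPCIINFO case above.  It
 * assumes the usual /dev/cciss/c0d0 node name and that the userspace
 * copy of <linux/cciss_ioctl.h> is available.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cciss_ioctl.h>

int main(void)
{
	cciss_pci_info_struct pciinfo;
	int fd = open("/dev/cciss/c0d0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, CCISS_GETPCIINFO, &pciinfo) < 0) {
		close(fd);
		return 1;
	}
	printf("bus %d devfn %d board id 0x%08x\n",
	       pciinfo.bus, pciinfo.dev_fn, pciinfo.board_id);
	close(fd);
	return 0;
}
#endif
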
1258 static void cciss_check_queues(ctlr_info_t *h)
1259 {
1260 int start_queue = h->next_to_run;
1261 int i;
1262
1263 /* check to see if we have maxed out the number of commands that can
1264 * be placed on the queue. If so then exit. We do this check here
1265 * in case the interrupt we serviced was from an ioctl and did not
1266 * free any new commands.
1267 */
1268 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
1269 return;
1270
1271 /* We have room on the queue for more commands. Now we need to queue
1272 * them up. We will also keep track of the next queue to run so
1273 * that every queue gets a chance to be started first.
1274 */
1275 for (i = 0; i < h->highest_lun + 1; i++) {
1276 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1277 /* make sure the disk has been added and the drive is real
1278 * because this can be called from the middle of init_one.
1279 */
1280 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1281 continue;
1282 blk_start_queue(h->gendisk[curr_queue]->queue);
1283
1284 /* check to see if we have maxed out the number of commands
1285 * that can be placed on the queue.
1286 */
1287 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
1288 if (curr_queue == start_queue) {
1289 h->next_to_run =
1290 (start_queue + 1) % (h->highest_lun + 1);
1291 break;
1292 } else {
1293 h->next_to_run = curr_queue;
1294 break;
1295 }
1296 } else {
1297 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1298 }
1299 }
1300 }
1301
1302 static void cciss_softirq_done(struct request *rq)
1303 {
1304 CommandList_struct *cmd = rq->completion_data;
1305 ctlr_info_t *h = hba[cmd->ctlr];
1306 unsigned long flags;
1307 u64bit temp64;
1308 int i, ddir;
1309
1310 if (cmd->Request.Type.Direction == XFER_READ)
1311 ddir = PCI_DMA_FROMDEVICE;
1312 else
1313 ddir = PCI_DMA_TODEVICE;
1314
1315 /* command did not need to be retried */
1316 /* unmap the DMA mapping for all the scatter gather elements */
1317 for (i = 0; i < cmd->Header.SGList; i++) {
1318 temp64.val32.lower = cmd->SG[i].Addr.lower;
1319 temp64.val32.upper = cmd->SG[i].Addr.upper;
1320 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1321 }
1322
1323 #ifdef CCISS_DEBUG
1324 printk("Done with %p\n", rq);
1325 #endif /* CCISS_DEBUG */
1326
1327 if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, blk_rq_bytes(rq)))
1328 BUG();
1329
1330 spin_lock_irqsave(&h->lock, flags);
1331 cmd_free(h, cmd, 1);
1332 cciss_check_queues(h);
1333 spin_unlock_irqrestore(&h->lock, flags);
1334 }
1335
1336 /* This function will check the usage_count of the drive to be updated/added.
1337 * If the usage_count is zero then the drive information will be updated and
1338 * the disk will be re-registered with the kernel. If not then it will be
1339 * left alone for the next reboot. The exception to this is disk 0 which
1340 * will always be left registered with the kernel since it is also the
1341 * controller node. Any changes to disk 0 will show up on the next
1342 * reboot.
1343 */
1344 static void cciss_update_drive_info(int ctlr, int drv_index)
1345 {
1346 ctlr_info_t *h = hba[ctlr];
1347 struct gendisk *disk;
1348 InquiryData_struct *inq_buff = NULL;
1349 unsigned int block_size;
1350 sector_t total_size;
1351 unsigned long flags = 0;
1352 int ret = 0;
1353
1354 /* if the disk already exists then deregister it before proceeding */
1355 if (h->drv[drv_index].raid_level != -1) {
1356 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1357 h->drv[drv_index].busy_configuring = 1;
1358 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1359
1360 /* deregister_disk sets h->drv[drv_index].queue = NULL */
1361 /* which keeps the interrupt handler from starting */
1362 /* the queue. */
1363 ret = deregister_disk(h->gendisk[drv_index],
1364 &h->drv[drv_index], 0);
1365 h->drv[drv_index].busy_configuring = 0;
1366 }
1367
1368 /* If the disk is in use return */
1369 if (ret)
1370 return;
1371
1372 /* Get information about the disk and modify the driver structure */
1373 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1374 if (inq_buff == NULL)
1375 goto mem_msg;
1376
1377 /* testing to see if 16-byte CDBs are already being used */
1378 if (h->cciss_read == CCISS_READ_16) {
1379 cciss_read_capacity_16(h->ctlr, drv_index, 1,
1380 &total_size, &block_size);
1381 goto geo_inq;
1382 }
1383
1384 cciss_read_capacity(ctlr, drv_index, 1,
1385 &total_size, &block_size);
1386
1387 /* if read_capacity returns all F's this volume is >2TB in size */
1388 	/* so we switch to 16-byte CDBs for all read/write ops */
1389 if (total_size == 0xFFFFFFFFULL) {
1390 cciss_read_capacity_16(ctlr, drv_index, 1,
1391 &total_size, &block_size);
1392 h->cciss_read = CCISS_READ_16;
1393 h->cciss_write = CCISS_WRITE_16;
1394 } else {
1395 h->cciss_read = CCISS_READ_10;
1396 h->cciss_write = CCISS_WRITE_10;
1397 }
1398 geo_inq:
1399 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1400 inq_buff, &h->drv[drv_index]);
1401
1402 ++h->num_luns;
1403 disk = h->gendisk[drv_index];
1404 set_capacity(disk, h->drv[drv_index].nr_blocks);
1405
1406 /* if it's the controller it's already added */
1407 if (drv_index) {
1408 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1409 sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
1410 disk->major = h->major;
1411 disk->first_minor = drv_index << NWD_SHIFT;
1412 disk->fops = &cciss_fops;
1413 disk->private_data = &h->drv[drv_index];
1414
1415 /* Set up queue information */
1416 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1417
1418 /* This is a hardware imposed limit. */
1419 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1420
1421 /* This is a limit in the driver and could be eliminated. */
1422 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1423
1424 blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
1425
1426 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1427
1428 disk->queue->queuedata = hba[ctlr];
1429
1430 blk_queue_hardsect_size(disk->queue,
1431 hba[ctlr]->drv[drv_index].block_size);
1432
1433 /* Make sure all queue data is written out before */
1434 /* setting h->drv[drv_index].queue, as setting this */
1435 /* allows the interrupt handler to start the queue */
1436 wmb();
1437 h->drv[drv_index].queue = disk->queue;
1438 add_disk(disk);
1439 }
1440
1441 freeret:
1442 kfree(inq_buff);
1443 return;
1444 mem_msg:
1445 printk(KERN_ERR "cciss: out of memory\n");
1446 goto freeret;
1447 }
1448
1449 /* This function will find the first index of the controller's drive array
1450 * that has a -1 for the raid_level and will return that index. This is
1451 * where new drives will be added. If the index to be returned is greater
1452 * than the highest_lun index for the controller then highest_lun is set
1453 * to this new index. If there are no available indexes then -1 is returned.
1454 */
1455 static int cciss_find_free_drive_index(int ctlr)
1456 {
1457 int i;
1458
1459 for (i = 0; i < CISS_MAX_LUN; i++) {
1460 if (hba[ctlr]->drv[i].raid_level == -1) {
1461 if (i > hba[ctlr]->highest_lun)
1462 hba[ctlr]->highest_lun = i;
1463 return i;
1464 }
1465 }
1466 return -1;
1467 }
1468
1469 /* This function will add and remove logical drives from the Logical
1470  * drive array of the controller and maintain persistence of ordering
1471 * so that mount points are preserved until the next reboot. This allows
1472 * for the removal of logical drives in the middle of the drive array
1473 * without a re-ordering of those drives.
1474 * INPUT
1475 * h = The controller to perform the operations on
1476 * del_disk = The disk to remove if specified. If the value given
1477 * is NULL then no disk is removed.
1478 */
1479 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1480 {
1481 int ctlr = h->ctlr;
1482 int num_luns;
1483 ReportLunData_struct *ld_buff = NULL;
1484 drive_info_struct *drv = NULL;
1485 int return_code;
1486 int listlength = 0;
1487 int i;
1488 int drv_found;
1489 int drv_index = 0;
1490 __u32 lunid = 0;
1491 unsigned long flags;
1492
1493 /* Set busy_configuring flag for this operation */
1494 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1495 if (h->busy_configuring) {
1496 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1497 return -EBUSY;
1498 }
1499 h->busy_configuring = 1;
1500
1501 /* if del_disk is NULL then we are being called to add a new disk
1502 * and update the logical drive table. If it is not NULL then
1503 * we will check if the disk is in use or not.
1504 */
1505 if (del_disk != NULL) {
1506 drv = get_drv(del_disk);
1507 drv->busy_configuring = 1;
1508 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1509 return_code = deregister_disk(del_disk, drv, 1);
1510 drv->busy_configuring = 0;
1511 h->busy_configuring = 0;
1512 return return_code;
1513 } else {
1514 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1515 if (!capable(CAP_SYS_RAWIO))
1516 return -EPERM;
1517
1518 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1519 if (ld_buff == NULL)
1520 goto mem_msg;
1521
1522 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1523 sizeof(ReportLunData_struct), 0,
1524 0, 0, TYPE_CMD);
1525
1526 if (return_code == IO_OK) {
1527 listlength =
1528 be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
1529 } else { /* reading number of logical volumes failed */
1530 printk(KERN_WARNING "cciss: report logical volume"
1531 " command failed\n");
1532 listlength = 0;
1533 goto freeret;
1534 }
1535
1536 num_luns = listlength / 8; /* 8 bytes per entry */
1537 if (num_luns > CISS_MAX_LUN) {
1538 num_luns = CISS_MAX_LUN;
1539 printk(KERN_WARNING "cciss: more luns configured"
1540 " on controller than can be handled by"
1541 " this driver.\n");
1542 }
1543
1544 		/* Compare the controller's drive array to the driver's drive array.
1545 * Check for updates in the drive information and any new drives
1546 * on the controller.
1547 */
1548 for (i = 0; i < num_luns; i++) {
1549 int j;
1550
1551 drv_found = 0;
1552
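			/* Assemble the 32-bit LUN ID from the first four bytes
			 * of this REPORT LUNS entry; byte 0 is the least
			 * significant byte.
			 */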
1553 lunid = (0xff &
1554 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1555 lunid |= (0xff &
1556 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1557 lunid |= (0xff &
1558 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1559 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1560
1561 /* Find if the LUN is already in the drive array
1562 * of the controller. If so then update its info
1563 			 * if not in use.  If it does not exist then find
1564 * the first free index and add it.
1565 */
1566 for (j = 0; j <= h->highest_lun; j++) {
1567 if (h->drv[j].LunID == lunid) {
1568 drv_index = j;
1569 drv_found = 1;
1570 }
1571 }
1572
1573 /* check if the drive was found already in the array */
1574 if (!drv_found) {
1575 drv_index = cciss_find_free_drive_index(ctlr);
1576 if (drv_index == -1)
1577 goto freeret;
1578
1579 /*Check if the gendisk needs to be allocated */
1580 if (!h->gendisk[drv_index]){
1581 h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
1582 if (!h->gendisk[drv_index]){
1583 printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
1584 goto mem_msg;
1585 }
1586 }
1587 }
1588 h->drv[drv_index].LunID = lunid;
1589 cciss_update_drive_info(ctlr, drv_index);
1590 } /* end for */
1591 } /* end else */
1592
1593 freeret:
1594 kfree(ld_buff);
1595 h->busy_configuring = 0;
1596 /* We return -1 here to tell the ACU that we have registered/updated
1597 * all of the drives that we can and to keep it from calling us
1598 * additional times.
1599 */
1600 return -1;
1601 mem_msg:
1602 printk(KERN_ERR "cciss: out of memory\n");
1603 goto freeret;
1604 }
1605
1606 /* This function will deregister the disk and its queue from the
1607  * kernel.  It must be called with the controller lock held and the
1608  * drv structure's busy_configuring flag set.  Its parameters are:
1609 *
1610 * disk = This is the disk to be deregistered
1611 * drv = This is the drive_info_struct associated with the disk to be
1612 * deregistered. It contains information about the disk used
1613 * by the driver.
1614 * clear_all = This flag determines whether or not the disk information
1615 * is going to be completely cleared out and the highest_lun
1616 * reset. Sometimes we want to clear out information about
1617 * the disk in preparation for re-adding it. In this case
1618 * the highest_lun should be left unchanged and the LunID
1619 * should not be cleared.
1620 */
1621 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1622 int clear_all)
1623 {
1624 int i;
1625 ctlr_info_t *h = get_host(disk);
1626
1627 if (!capable(CAP_SYS_RAWIO))
1628 return -EPERM;
1629
1630 	/* make sure the logical volume is NOT in use */
1631 if (clear_all || (h->gendisk[0] == disk)) {
1632 if (drv->usage_count > 1)
1633 return -EBUSY;
1634 } else if (drv->usage_count > 0)
1635 return -EBUSY;
1636
1637 /* invalidate the devices and deregister the disk. If it is disk
1638 	 * zero do not deregister it but just zero out its values.  This
1639 * allows us to delete disk zero but keep the controller registered.
1640 */
1641 if (h->gendisk[0] != disk) {
1642 struct request_queue *q = disk->queue;
1643 if (disk->flags & GENHD_FL_UP)
1644 del_gendisk(disk);
1645 if (q) {
1646 blk_cleanup_queue(q);
1647 /* Set drv->queue to NULL so that we do not try
1648 * to call blk_start_queue on this queue in the
1649 * interrupt handler
1650 */
1651 drv->queue = NULL;
1652 }
1653 /* If clear_all is set then we are deleting the logical
1654 * drive, not just refreshing its info. For drives
1655 * other than disk 0 we will call put_disk. We do not
1656 * do this for disk 0 as we need it to be able to
1657 * configure the controller.
1658 */
1659 if (clear_all){
1660 /* This isn't pretty, but we need to find the
1661 			 * disk in our array and NULL out the pointer.
1662 * This is so that we will call alloc_disk if
1663 * this index is used again later.
1664 */
1665 for (i=0; i < CISS_MAX_LUN; i++){
1666 if(h->gendisk[i] == disk){
1667 h->gendisk[i] = NULL;
1668 break;
1669 }
1670 }
1671 put_disk(disk);
1672 }
1673 } else {
1674 set_capacity(disk, 0);
1675 }
1676
1677 --h->num_luns;
1678 /* zero out the disk size info */
1679 drv->nr_blocks = 0;
1680 drv->block_size = 0;
1681 drv->heads = 0;
1682 drv->sectors = 0;
1683 drv->cylinders = 0;
1684 drv->raid_level = -1; /* This can be used as a flag variable to
1685 * indicate that this element of the drive
1686 * array is free.
1687 */
1688
1689 if (clear_all) {
1690 /* check to see if it was the last disk */
1691 if (drv == h->drv + h->highest_lun) {
1692 			/* if so, find the new highest lun */
1693 int i, newhighest = -1;
1694 for (i = 0; i < h->highest_lun; i++) {
1695 /* if the disk has size > 0, it is available */
1696 if (h->drv[i].heads)
1697 newhighest = i;
1698 }
1699 h->highest_lun = newhighest;
1700 }
1701
1702 drv->LunID = 0;
1703 }
1704 return 0;
1705 }
1706
1707 /* use_unit_num: 0 = address the controller, 1 = address logical volume
1708    log_unit, 2 = peripheral device, address is in scsi3addr */
1709 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1710 	size_t size, unsigned int use_unit_num, unsigned int log_unit,
1711 	__u8 page_code, unsigned char *scsi3addr, int cmd_type)
1712 {
1713 ctlr_info_t *h = hba[ctlr];
1714 u64bit buff_dma_handle;
1715 int status = IO_OK;
1716
1717 c->cmd_type = CMD_IOCTL_PEND;
1718 c->Header.ReplyQueue = 0;
1719 if (buff != NULL) {
1720 c->Header.SGList = 1;
1721 c->Header.SGTotal = 1;
1722 } else {
1723 c->Header.SGList = 0;
1724 c->Header.SGTotal = 0;
1725 }
1726 c->Header.Tag.lower = c->busaddr;
1727
1728 c->Request.Type.Type = cmd_type;
1729 if (cmd_type == TYPE_CMD) {
1730 switch (cmd) {
1731 case CISS_INQUIRY:
1732 		/* If the logical unit number is 0 then this is going
1733 		   to the controller, so it's a physical command:
1734 		   mode = 0, target = 0, and we have nothing to write.
1735 		   Otherwise, if use_unit_num == 1:
1736 		   mode = 1 (volume set addressing), target = LUNID.
1737 		   Otherwise, if use_unit_num == 2:
1738 		   mode = 0 (periph dev addr), target = scsi3addr. */
1739 if (use_unit_num == 1) {
1740 c->Header.LUN.LogDev.VolId =
1741 h->drv[log_unit].LunID;
1742 c->Header.LUN.LogDev.Mode = 1;
1743 } else if (use_unit_num == 2) {
1744 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1745 8);
1746 c->Header.LUN.LogDev.Mode = 0;
1747 }
1748 /* are we trying to read a vital product page */
1749 if (page_code != 0) {
1750 c->Request.CDB[1] = 0x01;
1751 c->Request.CDB[2] = page_code;
1752 }
1753 c->Request.CDBLen = 6;
1754 c->Request.Type.Attribute = ATTR_SIMPLE;
1755 c->Request.Type.Direction = XFER_READ;
1756 c->Request.Timeout = 0;
1757 c->Request.CDB[0] = CISS_INQUIRY;
1758 c->Request.CDB[4] = size & 0xFF;
1759 break;
1760 case CISS_REPORT_LOG:
1761 case CISS_REPORT_PHYS:
1762 		/* Talking to the controller, so it's a physical command:
1763 		   mode = 0, target = 0.  Nothing to write.
1764 */
1765 c->Request.CDBLen = 12;
1766 c->Request.Type.Attribute = ATTR_SIMPLE;
1767 c->Request.Type.Direction = XFER_READ;
1768 c->Request.Timeout = 0;
1769 c->Request.CDB[0] = cmd;
1770 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1771 c->Request.CDB[7] = (size >> 16) & 0xFF;
1772 c->Request.CDB[8] = (size >> 8) & 0xFF;
1773 c->Request.CDB[9] = size & 0xFF;
1774 break;
1775
1776 case CCISS_READ_CAPACITY:
1777 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1778 c->Header.LUN.LogDev.Mode = 1;
1779 c->Request.CDBLen = 10;
1780 c->Request.Type.Attribute = ATTR_SIMPLE;
1781 c->Request.Type.Direction = XFER_READ;
1782 c->Request.Timeout = 0;
1783 c->Request.CDB[0] = cmd;
1784 break;
1785 case CCISS_READ_CAPACITY_16:
1786 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1787 c->Header.LUN.LogDev.Mode = 1;
1788 c->Request.CDBLen = 16;
1789 c->Request.Type.Attribute = ATTR_SIMPLE;
1790 c->Request.Type.Direction = XFER_READ;
1791 c->Request.Timeout = 0;
1792 c->Request.CDB[0] = cmd;
1793 c->Request.CDB[1] = 0x10;
1794 c->Request.CDB[10] = (size >> 24) & 0xFF;
1795 c->Request.CDB[11] = (size >> 16) & 0xFF;
1796 c->Request.CDB[12] = (size >> 8) & 0xFF;
1797 c->Request.CDB[13] = size & 0xFF;
1798 c->Request.Timeout = 0;
1799 c->Request.CDB[0] = cmd;
1800 break;
1801 case CCISS_CACHE_FLUSH:
1802 c->Request.CDBLen = 12;
1803 c->Request.Type.Attribute = ATTR_SIMPLE;
1804 c->Request.Type.Direction = XFER_WRITE;
1805 c->Request.Timeout = 0;
1806 c->Request.CDB[0] = BMIC_WRITE;
1807 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1808 break;
1809 default:
1810 printk(KERN_WARNING
1811 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1812 return IO_ERROR;
1813 }
1814 } else if (cmd_type == TYPE_MSG) {
1815 switch (cmd) {
1816 case 0: /* ABORT message */
1817 c->Request.CDBLen = 12;
1818 c->Request.Type.Attribute = ATTR_SIMPLE;
1819 c->Request.Type.Direction = XFER_WRITE;
1820 c->Request.Timeout = 0;
1821 c->Request.CDB[0] = cmd; /* abort */
1822 c->Request.CDB[1] = 0; /* abort a command */
1823 /* buff contains the tag of the command to abort */
1824 memcpy(&c->Request.CDB[4], buff, 8);
1825 break;
1826 case 1: /* RESET message */
1827 c->Request.CDBLen = 12;
1828 c->Request.Type.Attribute = ATTR_SIMPLE;
1829 c->Request.Type.Direction = XFER_WRITE;
1830 c->Request.Timeout = 0;
1831 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1832 c->Request.CDB[0] = cmd; /* reset */
1833 c->Request.CDB[1] = 0x04; /* reset a LUN */
1834 break;
1835 case 3: /* No-Op message */
1836 c->Request.CDBLen = 1;
1837 c->Request.Type.Attribute = ATTR_SIMPLE;
1838 c->Request.Type.Direction = XFER_WRITE;
1839 c->Request.Timeout = 0;
1840 c->Request.CDB[0] = cmd;
1841 break;
1842 default:
1843 printk(KERN_WARNING
1844 "cciss%d: unknown message type %d\n", ctlr, cmd);
1845 return IO_ERROR;
1846 }
1847 } else {
1848 printk(KERN_WARNING
1849 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1850 return IO_ERROR;
1851 }
1852 /* Fill in the scatter gather information */
1853 if (size > 0) {
1854 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1855 buff, size,
1856 PCI_DMA_BIDIRECTIONAL);
1857 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1858 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1859 c->SG[0].Len = size;
1860 c->SG[0].Ext = 0; /* we are not chaining */
1861 }
1862 return status;
1863 }
1864
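/*
 * Send a command to the controller and sleep until it completes.
 * The command is queued on reqQ, start_io() submits it, and the
 * interrupt handler wakes us through the on-stack completion.
 * Unsolicited aborts are retried up to MAX_CMD_RETRIES times.
 */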
1865 static int sendcmd_withirq(__u8 cmd,
1866 int ctlr,
1867 void *buff,
1868 size_t size,
1869 unsigned int use_unit_num,
1870 unsigned int log_unit, __u8 page_code, int cmd_type)
1871 {
1872 ctlr_info_t *h = hba[ctlr];
1873 CommandList_struct *c;
1874 u64bit buff_dma_handle;
1875 unsigned long flags;
1876 int return_status;
1877 DECLARE_COMPLETION_ONSTACK(wait);
1878
1879 if ((c = cmd_alloc(h, 0)) == NULL)
1880 return -ENOMEM;
1881 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1882 log_unit, page_code, NULL, cmd_type);
1883 if (return_status != IO_OK) {
1884 cmd_free(h, c, 0);
1885 return return_status;
1886 }
1887 resend_cmd2:
1888 c->waiting = &wait;
1889
1890 /* Put the request on the tail of the queue and send it */
1891 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1892 addQ(&h->reqQ, c);
1893 h->Qdepth++;
1894 start_io(h);
1895 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1896
1897 wait_for_completion(&wait);
1898
1899 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1900 switch (c->err_info->CommandStatus) {
1901 case CMD_TARGET_STATUS:
1902 printk(KERN_WARNING "cciss: cmd %p has "
1903 "completed with errors\n", c);
1904 if (c->err_info->ScsiStatus) {
1905 printk(KERN_WARNING "cciss: cmd %p "
1906 "has SCSI Status = %x\n",
1907 c, c->err_info->ScsiStatus);
1908 }
1909
1910 break;
1911 case CMD_DATA_UNDERRUN:
1912 case CMD_DATA_OVERRUN:
1913 /* expected for inquiry and report lun commands */
1914 break;
1915 case CMD_INVALID:
1916 printk(KERN_WARNING "cciss: Cmd %p is "
1917 "reported invalid\n", c);
1918 return_status = IO_ERROR;
1919 break;
1920 case CMD_PROTOCOL_ERR:
1921 printk(KERN_WARNING "cciss: cmd %p has "
1922 "protocol error\n", c);
1923 return_status = IO_ERROR;
1924 break;
1925 case CMD_HARDWARE_ERR:
1926 printk(KERN_WARNING "cciss: cmd %p had "
1927 "hardware error\n", c);
1928 return_status = IO_ERROR;
1929 break;
1930 case CMD_CONNECTION_LOST:
1931 printk(KERN_WARNING "cciss: cmd %p had "
1932 "connection lost\n", c);
1933 return_status = IO_ERROR;
1934 break;
1935 case CMD_ABORTED:
1936 printk(KERN_WARNING "cciss: cmd %p was "
1937 "aborted\n", c);
1938 return_status = IO_ERROR;
1939 break;
1940 case CMD_ABORT_FAILED:
1941 printk(KERN_WARNING "cciss: cmd %p reports "
1942 "abort failed\n", c);
1943 return_status = IO_ERROR;
1944 break;
1945 case CMD_UNSOLICITED_ABORT:
1946 printk(KERN_WARNING
1947 "cciss%d: unsolicited abort %p\n", ctlr, c);
1948 if (c->retry_count < MAX_CMD_RETRIES) {
1949 printk(KERN_WARNING
1950 "cciss%d: retrying %p\n", ctlr, c);
1951 c->retry_count++;
1952 /* erase the old error information */
1953 memset(c->err_info, 0,
1954 sizeof(ErrorInfo_struct));
1955 return_status = IO_OK;
1956 INIT_COMPLETION(wait);
1957 goto resend_cmd2;
1958 }
1959 return_status = IO_ERROR;
1960 break;
1961 default:
1962 printk(KERN_WARNING "cciss: cmd %p returned "
1963 "unknown status %x\n", c,
1964 c->err_info->CommandStatus);
1965 return_status = IO_ERROR;
1966 }
1967 }
1968 /* unlock the buffers from DMA */
1969 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1970 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1971 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1972 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1973 cmd_free(h, c, 0);
1974 return return_status;
1975 }
1976
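/*
 * Issue a vendor-specific inquiry (page 0xC1) to obtain a logical
 * volume's heads, sectors per track and RAID level. If the volume
 * does not report a geometry, fall back to 255 heads x 32 sectors
 * and derive the cylinder count from the total size.
 */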
1977 static void cciss_geometry_inquiry(int ctlr, int logvol,
1978 int withirq, sector_t total_size,
1979 unsigned int block_size,
1980 InquiryData_struct *inq_buff,
1981 drive_info_struct *drv)
1982 {
1983 int return_code;
1984 unsigned long t;
1985
1986 memset(inq_buff, 0, sizeof(InquiryData_struct));
1987 if (withirq)
1988 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1989 inq_buff, sizeof(*inq_buff), 1,
1990 logvol, 0xC1, TYPE_CMD);
1991 else
1992 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1993 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1994 TYPE_CMD);
1995 if (return_code == IO_OK) {
1996 if (inq_buff->data_byte[8] == 0xFF) {
1997 printk(KERN_WARNING
1998 "cciss: reading geometry failed, volume "
1999 "does not support reading geometry\n");
2000 drv->heads = 255;
2001 drv->sectors = 32; // Sectors per track
2002 drv->cylinders = total_size + 1;
2003 drv->raid_level = RAID_UNKNOWN;
2004 } else {
2005 drv->heads = inq_buff->data_byte[6];
2006 drv->sectors = inq_buff->data_byte[7];
2007 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
2008 drv->cylinders += inq_buff->data_byte[5];
2009 drv->raid_level = inq_buff->data_byte[8];
2010 }
2011 drv->block_size = block_size;
2012 drv->nr_blocks = total_size + 1;
2013 t = drv->heads * drv->sectors;
2014 if (t > 1) {
2015 sector_t real_size = total_size + 1;
2016 unsigned long rem = sector_div(real_size, t);
2017 if (rem)
2018 real_size++;
2019 drv->cylinders = real_size;
2020 }
2021 } else { /* Get geometry failed */
2022 printk(KERN_WARNING "cciss: reading geometry failed\n");
2023 }
2024 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
2025 drv->heads, drv->sectors, drv->cylinders);
2026 }
2027
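/* Issue a READ CAPACITY (10) to get the volume's block count and block
 * size; on failure report zero blocks and the default BLOCK_SIZE. */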
2028 static void
2029 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2030 unsigned int *block_size)
2031 {
2032 ReadCapdata_struct *buf;
2033 int return_code;
2034
2035 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
2036 if (!buf) {
2037 printk(KERN_WARNING "cciss: out of memory\n");
2038 return;
2039 }
2040
2041 if (withirq)
2042 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
2043 ctlr, buf, sizeof(ReadCapdata_struct),
2044 1, logvol, 0, TYPE_CMD);
2045 else
2046 return_code = sendcmd(CCISS_READ_CAPACITY,
2047 ctlr, buf, sizeof(ReadCapdata_struct),
2048 1, logvol, 0, NULL, TYPE_CMD);
2049 if (return_code == IO_OK) {
2050 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
2051 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2052 } else { /* read capacity command failed */
2053 printk(KERN_WARNING "cciss: read capacity failed\n");
2054 *total_size = 0;
2055 *block_size = BLOCK_SIZE;
2056 }
2057 if (*total_size != 0)
2058 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2059 (unsigned long long)*total_size+1, *block_size);
2060 kfree(buf);
2061 }
2062
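/* 16-byte READ CAPACITY variant, needed for logical volumes over 2TB. */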
2063 static void
2064 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2065 {
2066 ReadCapdata_struct_16 *buf;
2067 int return_code;
2068
2069 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2070 if (!buf) {
2071 printk(KERN_WARNING "cciss: out of memory\n");
2072 return;
2073 }
2074
2075 if (withirq) {
2076 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2077 ctlr, buf, sizeof(ReadCapdata_struct_16),
2078 1, logvol, 0, TYPE_CMD);
2079 }
2080 else {
2081 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2082 ctlr, buf, sizeof(ReadCapdata_struct_16),
2083 1, logvol, 0, NULL, TYPE_CMD);
2084 }
2085 if (return_code == IO_OK) {
2086 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
2087 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2088 } else { /* read capacity command failed */
2089 printk(KERN_WARNING "cciss: read capacity failed\n");
2090 *total_size = 0;
2091 *block_size = BLOCK_SIZE;
2092 }
2093 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2094 (unsigned long long)*total_size+1, *block_size);
2095 kfree(buf);
2096 }
2097
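/*
 * Re-read the capacity and geometry of a logical drive and update its
 * gendisk and request queue. Returns 0 on success, 1 if the drive's
 * LunID is no longer known to the controller or memory runs out.
 */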
2098 static int cciss_revalidate(struct gendisk *disk)
2099 {
2100 ctlr_info_t *h = get_host(disk);
2101 drive_info_struct *drv = get_drv(disk);
2102 int logvol;
2103 int FOUND = 0;
2104 unsigned int block_size;
2105 sector_t total_size;
2106 InquiryData_struct *inq_buff = NULL;
2107
2108 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2109 if (h->drv[logvol].LunID == drv->LunID) {
2110 FOUND = 1;
2111 break;
2112 }
2113 }
2114
2115 if (!FOUND)
2116 return 1;
2117
2118 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2119 if (inq_buff == NULL) {
2120 printk(KERN_WARNING "cciss: out of memory\n");
2121 return 1;
2122 }
2123 if (h->cciss_read == CCISS_READ_10) {
2124 cciss_read_capacity(h->ctlr, logvol, 1,
2125 &total_size, &block_size);
2126 } else {
2127 cciss_read_capacity_16(h->ctlr, logvol, 1,
2128 &total_size, &block_size);
2129 }
2130 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2131 inq_buff, drv);
2132
2133 blk_queue_hardsect_size(drv->queue, drv->block_size);
2134 set_capacity(disk, drv->nr_blocks);
2135
2136 kfree(inq_buff);
2137 return 0;
2138 }
2139
2140 /*
2141 * Wait polling for a command to complete.
2142 * The memory mapped FIFO is polled for the completion.
2143 * Used only at init time, interrupts from the HBA are disabled.
2144 */
2145 static unsigned long pollcomplete(int ctlr)
2146 {
2147 unsigned long done;
2148 int i;
2149
2150 /* Wait (up to 20 seconds) for a command to complete */
2151
2152 for (i = 20 * HZ; i > 0; i--) {
2153 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2154 if (done == FIFO_EMPTY)
2155 schedule_timeout_uninterruptible(1);
2156 else
2157 return done;
2158 }
2159 /* Invalid address to tell caller we ran out of time */
2160 return 1;
2161 }
2162
2163 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2164 {
2165 /* We get in here if sendcmd() is polling for completions
2166 and gets some command back that it wasn't expecting --
2167 something other than that which it just sent down.
2168 Ordinarily, that shouldn't happen, but it can happen when
2169 the scsi tape stuff gets into error handling mode, and
2170 starts using sendcmd() to try to abort commands and
2171 reset tape drives. In that case, sendcmd may pick up
2172 completions of commands that were sent to logical drives
2173 through the block i/o system, or cciss ioctls completing, etc.
2174 In that case, we need to save those completions for later
2175 processing by the interrupt handler.
2176 */
2177
2178 #ifdef CONFIG_CISS_SCSI_TAPE
2179 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2180
2181 /* If it's not the scsi tape stuff doing error handling, (abort */
2182 /* or reset) then we don't expect anything weird. */
2183 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2184 #endif
2185 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2186 "Invalid command list address returned! (%lx)\n",
2187 ctlr, complete);
2188 /* not much we can do. */
2189 #ifdef CONFIG_CISS_SCSI_TAPE
2190 return 1;
2191 }
2192
2193 /* We've sent down an abort or reset, but something else
2194 has completed */
2195 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2196 /* Uh oh. No room to save it for later... */
2197 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2198 "reject list overflow, command lost!\n", ctlr);
2199 return 1;
2200 }
2201 /* Save it for later */
2202 srl->complete[srl->ncompletions] = complete;
2203 srl->ncompletions++;
2204 #endif
2205 return 0;
2206 }
2207
2208 /*
2209 * Send a command to the controller, and wait for it to complete.
2210 * Only used at init time.
2211 */
2212 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2213 1: address logical volume log_unit,
2214 2: periph device address is scsi3addr */
2215 unsigned int log_unit,
2216 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2217 {
2218 CommandList_struct *c;
2219 int i;
2220 unsigned long complete;
2221 ctlr_info_t *info_p = hba[ctlr];
2222 u64bit buff_dma_handle;
2223 int status, done = 0;
2224
2225 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2226 printk(KERN_WARNING "cciss: unable to get memory");
2227 return IO_ERROR;
2228 }
2229 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2230 log_unit, page_code, scsi3addr, cmd_type);
2231 if (status != IO_OK) {
2232 cmd_free(info_p, c, 1);
2233 return status;
2234 }
2235 resend_cmd1:
2236 /*
2237 * Disable interrupt
2238 */
2239 #ifdef CCISS_DEBUG
2240 printk(KERN_DEBUG "cciss: turning intr off\n");
2241 #endif /* CCISS_DEBUG */
2242 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2243
2244 /* Make sure there is room in the command FIFO */
2245 /* Actually it should be completely empty at this time */
2246 /* unless we are in here doing error handling for the scsi */
2247 /* tape side of the driver. */
2248 for (i = 200000; i > 0; i--) {
2249 /* if fifo isn't full go */
2250 if (!(info_p->access.fifo_full(info_p))) {
2251
2252 break;
2253 }
2254 udelay(10);
2255 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2256 " waiting!\n", ctlr);
2257 }
2258 /*
2259 * Send the cmd
2260 */
2261 info_p->access.submit_command(info_p, c);
2262 done = 0;
2263 do {
2264 complete = pollcomplete(ctlr);
2265
2266 #ifdef CCISS_DEBUG
2267 printk(KERN_DEBUG "cciss: command completed\n");
2268 #endif /* CCISS_DEBUG */
2269
2270 if (complete == 1) {
2271 printk(KERN_WARNING
2272 "cciss cciss%d: SendCmd timed out, "
2273 "no command list address returned!\n", ctlr);
2274 status = IO_ERROR;
2275 done = 1;
2276 break;
2277 }
2278
2279 /* This will need to change for direct lookup completions */
2280 if ((complete & CISS_ERROR_BIT)
2281 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2282 /* if data overrun or underrun on Report command,
2283 ignore it
2284 */
2285 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2286 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2287 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2288 ((c->err_info->CommandStatus ==
2289 CMD_DATA_OVERRUN) ||
2290 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2291 )) {
2292 complete = c->busaddr;
2293 } else {
2294 if (c->err_info->CommandStatus ==
2295 CMD_UNSOLICITED_ABORT) {
2296 printk(KERN_WARNING "cciss%d: "
2297 "unsolicited abort %p\n",
2298 ctlr, c);
2299 if (c->retry_count < MAX_CMD_RETRIES) {
2300 printk(KERN_WARNING
2301 "cciss%d: retrying %p\n",
2302 ctlr, c);
2303 c->retry_count++;
2304 /* erase the old error */
2305 /* information */
2306 memset(c->err_info, 0,
2307 sizeof
2308 (ErrorInfo_struct));
2309 goto resend_cmd1;
2310 } else {
2311 printk(KERN_WARNING
2312 "cciss%d: retried %p too "
2313 "many times\n", ctlr, c);
2314 status = IO_ERROR;
2315 goto cleanup1;
2316 }
2317 } else if (c->err_info->CommandStatus ==
2318 CMD_UNABORTABLE) {
2319 printk(KERN_WARNING
2320 "cciss%d: command could not be aborted.\n",
2321 ctlr);
2322 status = IO_ERROR;
2323 goto cleanup1;
2324 }
2325 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2326 " Error %x \n", ctlr,
2327 c->err_info->CommandStatus);
2328 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2329 " offensive info\n"
2330 " size %x\n num %x value %x\n",
2331 ctlr,
2332 c->err_info->MoreErrInfo.Invalid_Cmd.
2333 offense_size,
2334 c->err_info->MoreErrInfo.Invalid_Cmd.
2335 offense_num,
2336 c->err_info->MoreErrInfo.Invalid_Cmd.
2337 offense_value);
2338 status = IO_ERROR;
2339 goto cleanup1;
2340 }
2341 }
2342 /* This will need changing for direct lookup completions */
2343 if (complete != c->busaddr) {
2344 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2345 BUG(); /* we are pretty much hosed if we get here. */
2346 }
2347 continue;
2348 } else
2349 done = 1;
2350 } while (!done);
2351
2352 cleanup1:
2353 /* unlock the data buffer from DMA */
2354 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2355 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2356 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2357 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2358 #ifdef CONFIG_CISS_SCSI_TAPE
2359 /* if we saved some commands for later, process them now. */
2360 if (info_p->scsi_rejects.ncompletions > 0)
2361 do_cciss_intr(0, info_p);
2362 #endif
2363 cmd_free(info_p, c, 1);
2364 return status;
2365 }
2366
2367 /*
2368 * Map (physical) PCI mem into (virtual) kernel space
2369 */
2370 static void __iomem *remap_pci_mem(ulong base, ulong size)
2371 {
2372 ulong page_base = ((ulong) base) & PAGE_MASK;
2373 ulong page_offs = ((ulong) base) - page_base;
2374 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2375
2376 return page_remapped ? (page_remapped + page_offs) : NULL;
2377 }
2378
2379 /*
2380 * Takes jobs off the request Q and sends them to the hardware, then
2381 * puts them on the completion Q to wait for completion.
2382 */
2383 static void start_io(ctlr_info_t *h)
2384 {
2385 CommandList_struct *c;
2386
2387 while ((c = h->reqQ) != NULL) {
2388 /* can't do anything if fifo is full */
2389 if ((h->access.fifo_full(h))) {
2390 printk(KERN_WARNING "cciss: fifo full\n");
2391 break;
2392 }
2393
2394 /* Get the first entry from the Request Q */
2395 removeQ(&(h->reqQ), c);
2396 h->Qdepth--;
2397
2398 /* Tell the controller to execute the command */
2399 h->access.submit_command(h, c);
2400
2401 /* Put job onto the completed Q */
2402 addQ(&(h->cmpQ), c);
2403 }
2404 }
2405
2406 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2407 /* Zeros out the error record and then resends the command back */
2408 /* to the controller */
2409 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2410 {
2411 /* erase the old error information */
2412 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2413
2414 /* add it to software queue and then send it to the controller */
2415 addQ(&(h->reqQ), c);
2416 h->Qdepth++;
2417 if (h->Qdepth > h->maxQsinceinit)
2418 h->maxQsinceinit = h->Qdepth;
2419
2420 start_io(h);
2421 }
2422
2423 static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
2424 unsigned int msg_byte, unsigned int host_byte,
2425 unsigned int driver_byte)
2426 {
2427 /* inverse of macros in scsi.h */
2428 return (scsi_status_byte & 0xff) |
2429 ((msg_byte & 0xff) << 8) |
2430 ((host_byte & 0xff) << 16) |
2431 ((driver_byte & 0xff) << 24);
2432 }
2433
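/*
 * Translate a CMD_TARGET_STATUS completion into the block layer's packed
 * status format; for SG_IO-style (blk_pc) requests the sense data is
 * copied back into the request as well.
 */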
2434 static inline int evaluate_target_status(CommandList_struct *cmd)
2435 {
2436 unsigned char sense_key;
2437 unsigned char status_byte, msg_byte, host_byte, driver_byte;
2438 int error_value;
2439
2440 /* If we get in here, it means we got "target status", that is, scsi status */
2441 status_byte = cmd->err_info->ScsiStatus;
2442 driver_byte = DRIVER_OK;
2443 msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
2444
2445 if (blk_pc_request(cmd->rq))
2446 host_byte = DID_PASSTHROUGH;
2447 else
2448 host_byte = DID_OK;
2449
2450 error_value = make_status_bytes(status_byte, msg_byte,
2451 host_byte, driver_byte);
2452
2453 if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
2454 if (!blk_pc_request(cmd->rq))
2455 printk(KERN_WARNING "cciss: cmd %p "
2456 "has SCSI Status 0x%x\n",
2457 cmd, cmd->err_info->ScsiStatus);
2458 return error_value;
2459 }
2460
2461 /* check the sense key */
2462 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2463 /* no status or recovered error */
2464 if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
2465 error_value = 0;
2466
2467 if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
2468 if (error_value != 0)
2469 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
2470 " sense key = 0x%x\n", cmd, sense_key);
2471 return error_value;
2472 }
2473
2474 /* SG_IO or similar, copy sense data back */
2475 if (cmd->rq->sense) {
2476 if (cmd->rq->sense_len > cmd->err_info->SenseLen)
2477 cmd->rq->sense_len = cmd->err_info->SenseLen;
2478 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
2479 cmd->rq->sense_len);
2480 } else
2481 cmd->rq->sense_len = 0;
2482
2483 return error_value;
2484 }
2485
2486 /* checks the status of the job and calls complete buffers to mark all
2487 * buffers for the completed job. Note that this function does not need
2488 * to hold the hba/queue lock.
2489 */
2490 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2491 int timeout)
2492 {
2493 int retry_cmd = 0;
2494 struct request *rq = cmd->rq;
2495
2496 rq->errors = 0;
2497
2498 if (timeout)
2499 rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);
2500
2501 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
2502 goto after_error_processing;
2503
2504 switch (cmd->err_info->CommandStatus) {
2505 case CMD_TARGET_STATUS:
2506 rq->errors = evaluate_target_status(cmd);
2507 break;
2508 case CMD_DATA_UNDERRUN:
2509 if (blk_fs_request(cmd->rq)) {
2510 printk(KERN_WARNING "cciss: cmd %p has"
2511 " completed with data underrun "
2512 "reported\n", cmd);
2513 cmd->rq->data_len = cmd->err_info->ResidualCnt;
2514 }
2515 break;
2516 case CMD_DATA_OVERRUN:
2517 if (blk_fs_request(cmd->rq))
2518 printk(KERN_WARNING "cciss: cmd %p has"
2519 " completed with data overrun "
2520 "reported\n", cmd);
2521 break;
2522 case CMD_INVALID:
2523 printk(KERN_WARNING "cciss: cmd %p is "
2524 "reported invalid\n", cmd);
2525 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2526 cmd->err_info->CommandStatus, DRIVER_OK,
2527 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2528 break;
2529 case CMD_PROTOCOL_ERR:
2530 printk(KERN_WARNING "cciss: cmd %p has "
2531 "protocol error\n", cmd);
2532 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2533 cmd->err_info->CommandStatus, DRIVER_OK,
2534 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2535 break;
2536 case CMD_HARDWARE_ERR:
2537 printk(KERN_WARNING "cciss: cmd %p had "
2538 "hardware error\n", cmd);
2539 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2540 cmd->err_info->CommandStatus, DRIVER_OK,
2541 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2542 break;
2543 case CMD_CONNECTION_LOST:
2544 printk(KERN_WARNING "cciss: cmd %p had "
2545 "connection lost\n", cmd);
2546 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2547 cmd->err_info->CommandStatus, DRIVER_OK,
2548 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2549 break;
2550 case CMD_ABORTED:
2551 printk(KERN_WARNING "cciss: cmd %p was "
2552 "aborted\n", cmd);
2553 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2554 cmd->err_info->CommandStatus, DRIVER_OK,
2555 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2556 break;
2557 case CMD_ABORT_FAILED:
2558 printk(KERN_WARNING "cciss: cmd %p reports "
2559 "abort failed\n", cmd);
2560 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2561 cmd->err_info->CommandStatus, DRIVER_OK,
2562 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2563 break;
2564 case CMD_UNSOLICITED_ABORT:
2565 printk(KERN_WARNING "cciss%d: unsolicited "
2566 "abort %p\n", h->ctlr, cmd);
2567 if (cmd->retry_count < MAX_CMD_RETRIES) {
2568 retry_cmd = 1;
2569 printk(KERN_WARNING
2570 "cciss%d: retrying %p\n", h->ctlr, cmd);
2571 cmd->retry_count++;
2572 } else
2573 printk(KERN_WARNING
2574 "cciss%d: %p retried too "
2575 "many times\n", h->ctlr, cmd);
2576 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2577 cmd->err_info->CommandStatus, DRIVER_OK,
2578 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2579 break;
2580 case CMD_TIMEOUT:
2581 printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
2582 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2583 cmd->err_info->CommandStatus, DRIVER_OK,
2584 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2585 break;
2586 default:
2587 printk(KERN_WARNING "cciss: cmd %p returned "
2588 "unknown status %x\n", cmd,
2589 cmd->err_info->CommandStatus);
2590 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2591 cmd->err_info->CommandStatus, DRIVER_OK,
2592 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2593 }
2594
2595 after_error_processing:
2596
2597 /* We need to return this command */
2598 if (retry_cmd) {
2599 resend_cciss_cmd(h, cmd);
2600 return;
2601 }
2602 cmd->rq->completion_data = cmd;
2603 blk_complete_request(cmd->rq);
2604 }
2605
2606 /*
2607 * Get a request and submit it to the controller.
2608 */
2609 static void do_cciss_request(struct request_queue *q)
2610 {
2611 ctlr_info_t *h = q->queuedata;
2612 CommandList_struct *c;
2613 sector_t start_blk;
2614 int seg;
2615 struct request *creq;
2616 u64bit temp64;
2617 struct scatterlist tmp_sg[MAXSGENTRIES];
2618 drive_info_struct *drv;
2619 int i, dir;
2620
2621 /* We call start_io here in case there is a command waiting on the
2622 * queue that has not been sent.
2623 */
2624 if (blk_queue_plugged(q))
2625 goto startio;
2626
2627 queue:
2628 creq = elv_next_request(q);
2629 if (!creq)
2630 goto startio;
2631
2632 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2633
2634 if ((c = cmd_alloc(h, 1)) == NULL)
2635 goto full;
2636
2637 blkdev_dequeue_request(creq);
2638
2639 spin_unlock_irq(q->queue_lock);
2640
2641 c->cmd_type = CMD_RWREQ;
2642 c->rq = creq;
2643
2644 /* fill in the request */
2645 drv = creq->rq_disk->private_data;
2646 c->Header.ReplyQueue = 0; // unused in simple mode
2647 /* got command from pool, so use the command block index instead */
2648 /* for direct lookups. */
2649 /* The first 2 bits are reserved for controller error reporting. */
2650 c->Header.Tag.lower = (c->cmdindex << 3);
2651 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2652 c->Header.LUN.LogDev.VolId = drv->LunID;
2653 c->Header.LUN.LogDev.Mode = 1;
2654 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2655 c->Request.Type.Type = TYPE_CMD; // It is a command.
2656 c->Request.Type.Attribute = ATTR_SIMPLE;
2657 c->Request.Type.Direction =
2658 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2659 c->Request.Timeout = 0; // Don't time out
2660 c->Request.CDB[0] =
2661 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2662 start_blk = creq->sector;
2663 #ifdef CCISS_DEBUG
2664 printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n", (int)creq->sector,
2665 (int)creq->nr_sectors);
2666 #endif /* CCISS_DEBUG */
2667
2668 sg_init_table(tmp_sg, MAXSGENTRIES);
2669 seg = blk_rq_map_sg(q, creq, tmp_sg);
2670
2671 /* get the DMA records for the setup */
2672 if (c->Request.Type.Direction == XFER_READ)
2673 dir = PCI_DMA_FROMDEVICE;
2674 else
2675 dir = PCI_DMA_TODEVICE;
2676
2677 for (i = 0; i < seg; i++) {
2678 c->SG[i].Len = tmp_sg[i].length;
2679 temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
2680 tmp_sg[i].offset,
2681 tmp_sg[i].length, dir);
2682 c->SG[i].Addr.lower = temp64.val32.lower;
2683 c->SG[i].Addr.upper = temp64.val32.upper;
2684 c->SG[i].Ext = 0; // we are not chaining
2685 }
2686 /* track how many SG entries we are using */
2687 if (seg > h->maxSG)
2688 h->maxSG = seg;
2689
2690 #ifdef CCISS_DEBUG
2691 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2692 creq->nr_sectors, seg);
2693 #endif /* CCISS_DEBUG */
2694
2695 c->Header.SGList = c->Header.SGTotal = seg;
2696 if (likely(blk_fs_request(creq))) {
2697 if(h->cciss_read == CCISS_READ_10) {
2698 c->Request.CDB[1] = 0;
2699 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2700 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2701 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2702 c->Request.CDB[5] = start_blk & 0xff;
2703 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2704 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2705 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2706 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2707 } else {
2708 u32 upper32 = upper_32_bits(start_blk);
2709
2710 c->Request.CDBLen = 16;
2711 c->Request.CDB[1]= 0;
2712 c->Request.CDB[2]= (upper32 >> 24) & 0xff; //MSB
2713 c->Request.CDB[3]= (upper32 >> 16) & 0xff;
2714 c->Request.CDB[4]= (upper32 >> 8) & 0xff;
2715 c->Request.CDB[5]= upper32 & 0xff;
2716 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2717 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2718 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2719 c->Request.CDB[9]= start_blk & 0xff;
2720 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2721 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2722 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2723 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2724 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2725 }
2726 } else if (blk_pc_request(creq)) {
2727 c->Request.CDBLen = creq->cmd_len;
2728 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
2729 } else {
2730 printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
2731 BUG();
2732 }
2733
2734 spin_lock_irq(q->queue_lock);
2735
2736 addQ(&(h->reqQ), c);
2737 h->Qdepth++;
2738 if (h->Qdepth > h->maxQsinceinit)
2739 h->maxQsinceinit = h->Qdepth;
2740
2741 goto queue;
2742 full:
2743 blk_stop_queue(q);
2744 startio:
2745 /* We will already have the driver lock here, so there is no need
2746 * to lock it again.
2747 */
2748 start_io(h);
2749 }
2750
2751 static inline unsigned long get_next_completion(ctlr_info_t *h)
2752 {
2753 #ifdef CONFIG_CISS_SCSI_TAPE
2754 /* Any rejects from sendcmd() lying around? Process them first */
2755 if (h->scsi_rejects.ncompletions == 0)
2756 return h->access.command_completed(h);
2757 else {
2758 struct sendcmd_reject_list *srl;
2759 int n;
2760 srl = &h->scsi_rejects;
2761 n = --srl->ncompletions;
2762 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2763 printk("p");
2764 return srl->complete[n];
2765 }
2766 #else
2767 return h->access.command_completed(h);
2768 #endif
2769 }
2770
2771 static inline int interrupt_pending(ctlr_info_t *h)
2772 {
2773 #ifdef CONFIG_CISS_SCSI_TAPE
2774 return (h->access.intr_pending(h)
2775 || (h->scsi_rejects.ncompletions > 0));
2776 #else
2777 return h->access.intr_pending(h);
2778 #endif
2779 }
2780
2781 static inline long interrupt_not_for_us(ctlr_info_t *h)
2782 {
2783 #ifdef CONFIG_CISS_SCSI_TAPE
2784 return (((h->access.intr_pending(h) == 0) ||
2785 (h->interrupts_enabled == 0))
2786 && (h->scsi_rejects.ncompletions == 0));
2787 #else
2788 return (((h->access.intr_pending(h) == 0) ||
2789 (h->interrupts_enabled == 0)));
2790 #endif
2791 }
2792
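/*
 * Interrupt handler: drain the completion FIFO, match each completed tag
 * to its command (by pool index for direct-lookup tags, otherwise by
 * walking the completion queue) and finish it according to its type.
 */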
2793 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2794 {
2795 ctlr_info_t *h = dev_id;
2796 CommandList_struct *c;
2797 unsigned long flags;
2798 __u32 a, a1, a2;
2799
2800 if (interrupt_not_for_us(h))
2801 return IRQ_NONE;
2802 /*
2803 * If there are completed commands in the completion queue,
2804 * we had better do something about it.
2805 */
2806 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2807 while (interrupt_pending(h)) {
2808 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2809 a1 = a;
2810 if ((a & 0x04)) {
2811 a2 = (a >> 3);
2812 if (a2 >= h->nr_cmds) {
2813 printk(KERN_WARNING
2814 "cciss: controller cciss%d failed, stopping.\n",
2815 h->ctlr);
2816 fail_all_cmds(h->ctlr);
2817 return IRQ_HANDLED;
2818 }
2819
2820 c = h->cmd_pool + a2;
2821 a = c->busaddr;
2822
2823 } else {
2824 a &= ~3;
2825 if ((c = h->cmpQ) == NULL) {
2826 printk(KERN_WARNING
2827 "cciss: Completion of %08x ignored\n",
2828 a1);
2829 continue;
2830 }
2831 while (c->busaddr != a) {
2832 c = c->next;
2833 if (c == h->cmpQ)
2834 break;
2835 }
2836 }
2837 /*
2838 * If we've found the command, take it off the
2839 * completion Q and free it
2840 */
2841 if (c->busaddr == a) {
2842 removeQ(&h->cmpQ, c);
2843 if (c->cmd_type == CMD_RWREQ) {
2844 complete_command(h, c, 0);
2845 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2846 complete(c->waiting);
2847 }
2848 # ifdef CONFIG_CISS_SCSI_TAPE
2849 else if (c->cmd_type == CMD_SCSI)
2850 complete_scsi_command(c, 0, a1);
2851 # endif
2852 continue;
2853 }
2854 }
2855 }
2856
2857 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2858 return IRQ_HANDLED;
2859 }
2860
2861 /*
2862 * We cannot read the structure directly; for portability we must use
2863 * the io functions.
2864 * This is for debug only.
2865 */
2866 #ifdef CCISS_DEBUG
2867 static void print_cfg_table(CfgTable_struct *tb)
2868 {
2869 int i;
2870 char temp_name[17];
2871
2872 printk("Controller Configuration information\n");
2873 printk("------------------------------------\n");
2874 for (i = 0; i < 4; i++)
2875 temp_name[i] = readb(&(tb->Signature[i]));
2876 temp_name[4] = '\0';
2877 printk(" Signature = %s\n", temp_name);
2878 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2879 printk(" Transport methods supported = 0x%x\n",
2880 readl(&(tb->TransportSupport)));
2881 printk(" Transport methods active = 0x%x\n",
2882 readl(&(tb->TransportActive)));
2883 printk(" Requested transport Method = 0x%x\n",
2884 readl(&(tb->HostWrite.TransportRequest)));
2885 printk(" Coalesce Interrupt Delay = 0x%x\n",
2886 readl(&(tb->HostWrite.CoalIntDelay)));
2887 printk(" Coalesce Interrupt Count = 0x%x\n",
2888 readl(&(tb->HostWrite.CoalIntCount)));
2889 printk(" Max outstanding commands = %d\n",
2890 readl(&(tb->CmdsOutMax)));
2891 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2892 for (i = 0; i < 16; i++)
2893 temp_name[i] = readb(&(tb->ServerName[i]));
2894 temp_name[16] = '\0';
2895 printk(" Server Name = %s\n", temp_name);
2896 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2897 }
2898 #endif /* CCISS_DEBUG */
2899
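/*
 * Translate a config-space BAR register offset into the matching PCI
 * resource index, accounting for I/O, 32-bit and 64-bit memory BARs.
 * Returns -1 if the offset cannot be matched.
 */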
2900 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2901 {
2902 int i, offset, mem_type, bar_type;
2903 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2904 return 0;
2905 offset = 0;
2906 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2907 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2908 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2909 offset += 4;
2910 else {
2911 mem_type = pci_resource_flags(pdev, i) &
2912 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2913 switch (mem_type) {
2914 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2915 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2916 offset += 4; /* 32 bit */
2917 break;
2918 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2919 offset += 8;
2920 break;
2921 default: /* reserved in PCI 2.2 */
2922 printk(KERN_WARNING
2923 "Base address is invalid\n");
2924 return -1;
2925 break;
2926 }
2927 }
2928 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2929 return i + 1;
2930 }
2931 return -1;
2932 }
2933
2934 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2935 * controllers that are capable. If not, we use IO-APIC mode.
2936 */
2937
2938 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2939 struct pci_dev *pdev, __u32 board_id)
2940 {
2941 #ifdef CONFIG_PCI_MSI
2942 int err;
2943 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2944 {0, 2}, {0, 3}
2945 };
2946
2947 /* Some boards advertise MSI but don't really support it */
2948 if ((board_id == 0x40700E11) ||
2949 (board_id == 0x40800E11) ||
2950 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2951 goto default_int_mode;
2952
2953 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2954 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2955 if (!err) {
2956 c->intr[0] = cciss_msix_entries[0].vector;
2957 c->intr[1] = cciss_msix_entries[1].vector;
2958 c->intr[2] = cciss_msix_entries[2].vector;
2959 c->intr[3] = cciss_msix_entries[3].vector;
2960 c->msix_vector = 1;
2961 return;
2962 }
2963 if (err > 0) {
2964 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2965 "available\n", err);
2966 goto default_int_mode;
2967 } else {
2968 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2969 err);
2970 goto default_int_mode;
2971 }
2972 }
2973 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2974 if (!pci_enable_msi(pdev)) {
2975 c->msi_vector = 1;
2976 } else {
2977 printk(KERN_WARNING "cciss: MSI init failed\n");
2978 }
2979 }
2980 default_int_mode:
2981 #endif /* CONFIG_PCI_MSI */
2982 /* if we get here we're going to use the default interrupt mode */
2983 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2984 return;
2985 }
2986
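/*
 * Per-controller PCI setup: enable the device, map the registers and the
 * config table, choose an interrupt mode, wait for the firmware to become
 * ready, and put the board into simple transport mode.
 */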
2987 static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2988 {
2989 ushort subsystem_vendor_id, subsystem_device_id, command;
2990 __u32 board_id, scratchpad = 0;
2991 __u64 cfg_offset;
2992 __u32 cfg_base_addr;
2993 __u64 cfg_base_addr_index;
2994 int i, err;
2995
2996 /* check to see if controller has been disabled */
2997 /* BEFORE trying to enable it */
2998 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2999 if (!(command & 0x02)) {
3000 printk(KERN_WARNING
3001 "cciss: controller appears to be disabled\n");
3002 return -ENODEV;
3003 }
3004
3005 err = pci_enable_device(pdev);
3006 if (err) {
3007 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
3008 return err;
3009 }
3010
3011 err = pci_request_regions(pdev, "cciss");
3012 if (err) {
3013 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
3014 "aborting\n");
3015 return err;
3016 }
3017
3018 subsystem_vendor_id = pdev->subsystem_vendor;
3019 subsystem_device_id = pdev->subsystem_device;
3020 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
3021 subsystem_vendor_id);
3022
3023 #ifdef CCISS_DEBUG
3024 printk("command = %x\n", command);
3025 printk("irq = %x\n", pdev->irq);
3026 printk("board_id = %x\n", board_id);
3027 #endif /* CCISS_DEBUG */
3028
3029 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
3030 * else we use the IO-APIC interrupt assigned to us by system ROM.
3031 */
3032 cciss_interrupt_mode(c, pdev, board_id);
3033
3034 /*
3035 * Memory base addr is the first addr; the second points to the config
3036 * table
3037 */
3038
3039 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
3040 #ifdef CCISS_DEBUG
3041 printk("address 0 = %x\n", c->paddr);
3042 #endif /* CCISS_DEBUG */
3043 c->vaddr = remap_pci_mem(c->paddr, 0x250);
3044
3045 /* Wait for the board to become ready. (PCI hotplug needs this.)
3046 * We poll for up to 120 secs, once per 100ms. */
3047 for (i = 0; i < 1200; i++) {
3048 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
3049 if (scratchpad == CCISS_FIRMWARE_READY)
3050 break;
3051 set_current_state(TASK_INTERRUPTIBLE);
3052 schedule_timeout(HZ / 10); /* wait 100ms */
3053 }
3054 if (scratchpad != CCISS_FIRMWARE_READY) {
3055 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
3056 err = -ENODEV;
3057 goto err_out_free_res;
3058 }
3059
3060 /* get the address index number */
3061 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
3062 cfg_base_addr &= (__u32) 0x0000ffff;
3063 #ifdef CCISS_DEBUG
3064 printk("cfg base address = %x\n", cfg_base_addr);
3065 #endif /* CCISS_DEBUG */
3066 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3067 #ifdef CCISS_DEBUG
3068 printk("cfg base address index = %x\n", cfg_base_addr_index);
3069 #endif /* CCISS_DEBUG */
3070 if (cfg_base_addr_index == -1) {
3071 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
3072 err = -ENODEV;
3073 goto err_out_free_res;
3074 }
3075
3076 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
3077 #ifdef CCISS_DEBUG
3078 printk("cfg offset = %x\n", cfg_offset);
3079 #endif /* CCISS_DEBUG */
3080 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3081 cfg_base_addr_index) +
3082 cfg_offset, sizeof(CfgTable_struct));
3083 c->board_id = board_id;
3084
3085 #ifdef CCISS_DEBUG
3086 print_cfg_table(c->cfgtable);
3087 #endif /* CCISS_DEBUG */
3088
3089 for (i = 0; i < ARRAY_SIZE(products); i++) {
3090 if (board_id == products[i].board_id) {
3091 c->product_name = products[i].product_name;
3092 c->access = *(products[i].access);
3093 c->nr_cmds = products[i].nr_cmds;
3094 break;
3095 }
3096 }
3097 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
3098 (readb(&c->cfgtable->Signature[1]) != 'I') ||
3099 (readb(&c->cfgtable->Signature[2]) != 'S') ||
3100 (readb(&c->cfgtable->Signature[3]) != 'S')) {
3101 printk("Does not appear to be a valid CISS config table\n");
3102 err = -ENODEV;
3103 goto err_out_free_res;
3104 }
3105 /* We didn't find the controller in our list. We know the
3106 * signature is valid. If it's an HP device let's try to
3107 * bind to the device and fire it up. Otherwise we bail.
3108 */
3109 if (i == ARRAY_SIZE(products)) {
3110 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
3111 c->product_name = products[i-1].product_name;
3112 c->access = *(products[i-1].access);
3113 c->nr_cmds = products[i-1].nr_cmds;
3114 printk(KERN_WARNING "cciss: This is an unknown "
3115 "Smart Array controller.\n"
3116 "cciss: Please update to the latest driver "
3117 "available from www.hp.com.\n");
3118 } else {
3119 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3120 " to access the Smart Array controller %08lx\n"
3121 , (unsigned long)board_id);
3122 err = -ENODEV;
3123 goto err_out_free_res;
3124 }
3125 }
3126 #ifdef CONFIG_X86
3127 {
3128 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3129 __u32 prefetch;
3130 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3131 prefetch |= 0x100;
3132 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3133 }
3134 #endif
3135
3136 /* Disabling DMA prefetch and refetch for the P600.
3137 * An ASIC bug may result in accesses to invalid memory addresses.
3138 * We've disabled prefetch for some time now. Testing with XEN
3139 * kernels revealed a bug in the refetch if dom0 resides on a P600.
3140 */
3141 if(board_id == 0x3225103C) {
3142 __u32 dma_prefetch;
3143 __u32 dma_refetch;
3144 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3145 dma_prefetch |= 0x8000;
3146 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3147 pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
3148 dma_refetch |= 0x1;
3149 pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
3150 }
3151
3152 #ifdef CCISS_DEBUG
3153 printk("Trying to put board into Simple mode\n");
3154 #endif /* CCISS_DEBUG */
3155 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3156 /* Update the field, and then ring the doorbell */
3157 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3158 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
3159
3160 /* under certain very rare conditions, this can take a while.
3161 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3162 * as we enter this code.) */
3163 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3164 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3165 break;
3166 /* delay and try again */
3167 set_current_state(TASK_INTERRUPTIBLE);
3168 schedule_timeout(10);
3169 }
3170
3171 #ifdef CCISS_DEBUG
3172 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3173 readl(c->vaddr + SA5_DOORBELL));
3174 #endif /* CCISS_DEBUG */
3175 #ifdef CCISS_DEBUG
3176 print_cfg_table(c->cfgtable);
3177 #endif /* CCISS_DEBUG */
3178
3179 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3180 printk(KERN_WARNING "cciss: unable to get board into"
3181 " simple mode\n");
3182 err = -ENODEV;
3183 goto err_out_free_res;
3184 }
3185 return 0;
3186
3187 err_out_free_res:
3188 /*
3189 * Deliberately omit pci_disable_device(): it does something nasty to
3190 * Smart Array controllers that pci_enable_device does not undo
3191 */
3192 pci_release_regions(pdev);
3193 return err;
3194 }
3195
3196 /*
3197 * Gets information about the local volumes attached to the controller.
3198 */
3199 static void cciss_getgeometry(int cntl_num)
3200 {
3201 ReportLunData_struct *ld_buff;
3202 InquiryData_struct *inq_buff;
3203 int return_code;
3204 int i;
3205 int listlength = 0;
3206 __u32 lunid = 0;
3207 unsigned block_size;
3208 sector_t total_size;
3209
3210 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3211 if (ld_buff == NULL) {
3212 printk(KERN_ERR "cciss: out of memory\n");
3213 return;
3214 }
3215 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3216 if (inq_buff == NULL) {
3217 printk(KERN_ERR "cciss: out of memory\n");
3218 kfree(ld_buff);
3219 return;
3220 }
3221 /* Get the firmware version */
3222 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3223 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3224 TYPE_CMD);
3225 if (return_code == IO_OK) {
3226 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3227 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3228 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3229 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3230 } else { /* send command failed */
3231
3232 printk(KERN_WARNING "cciss: unable to determine firmware"
3233 " version of controller\n");
3234 }
3235 /* Get the number of logical volumes */
3236 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3237 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3238 TYPE_CMD);
3239
3240 if (return_code == IO_OK) {
3241 #ifdef CCISS_DEBUG
3242 printk("LUN Data\n--------------------------\n");
3243 #endif /* CCISS_DEBUG */
3244
3245 listlength |=
3246 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3247 listlength |=
3248 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3249 listlength |=
3250 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3251 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3252 } else { /* reading number of logical volumes failed */
3253
3254 printk(KERN_WARNING "cciss: report logical volume"
3255 " command failed\n");
3256 listlength = 0;
3257 }
3258 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3259 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3260 printk(KERN_ERR
3261 "cciss: only %d logical volumes are supported\n",
3262 CISS_MAX_LUN);
3263 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3264 }
3265 #ifdef CCISS_DEBUG
3266 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3267 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3268 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3269 hba[cntl_num]->num_luns);
3270 #endif /* CCISS_DEBUG */
3271
3272 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3273 for (i = 0; i < CISS_MAX_LUN; i++) {
3274 if (i < hba[cntl_num]->num_luns) {
3275 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3276 << 24;
3277 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3278 << 16;
3279 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3280 << 8;
3281 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3282
3283 hba[cntl_num]->drv[i].LunID = lunid;
3284
3285 #ifdef CCISS_DEBUG
3286 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3287 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3288 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3289 hba[cntl_num]->drv[i].LunID);
3290 #endif /* CCISS_DEBUG */
3291
3292 /* testing to see if 16-byte CDBs are already being used */
3293 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3294 cciss_read_capacity_16(cntl_num, i, 0,
3295 &total_size, &block_size);
3296 goto geo_inq;
3297 }
3298 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3299
3300 /* If read_capacity returns all F's, the logical drive is >2TB */
3301 /* so we switch to 16-byte CDBs for all read/write ops */
3302 if(total_size == 0xFFFFFFFFULL) {
3303 cciss_read_capacity_16(cntl_num, i, 0,
3304 &total_size, &block_size);
3305 hba[cntl_num]->cciss_read = CCISS_READ_16;
3306 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3307 } else {
3308 hba[cntl_num]->cciss_read = CCISS_READ_10;
3309 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3310 }
3311 geo_inq:
3312 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3313 block_size, inq_buff,
3314 &hba[cntl_num]->drv[i]);
3315 } else {
3316 /* initialize raid_level to indicate a free space */
3317 hba[cntl_num]->drv[i].raid_level = -1;
3318 }
3319 }
3320 kfree(ld_buff);
3321 kfree(inq_buff);
3322 }
3323
3324 /* Function to find the first free pointer into our hba[] array */
3325 /* Returns -1 if no free entries are left. */
3326 static int alloc_cciss_hba(void)
3327 {
3328 int i;
3329
3330 for (i = 0; i < MAX_CTLR; i++) {
3331 if (!hba[i]) {
3332 ctlr_info_t *p;
3333
3334 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3335 if (!p)
3336 goto Enomem;
3337 p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
3338 if (!p->gendisk[0]) {
3339 kfree(p);
3340 goto Enomem;
3341 }
3342 hba[i] = p;
3343 return i;
3344 }
3345 }
3346 printk(KERN_WARNING "cciss: This driver supports a maximum"
3347 " of %d controllers.\n", MAX_CTLR);
3348 return -1;
3349 Enomem:
3350 printk(KERN_ERR "cciss: out of memory.\n");
3351 return -1;
3352 }
3353
3354 static void free_hba(int i)
3355 {
3356 ctlr_info_t *p = hba[i];
3357 int n;
3358
3359 hba[i] = NULL;
3360 for (n = 0; n < CISS_MAX_LUN; n++)
3361 put_disk(p->gendisk[n]);
3362 kfree(p);
3363 }
3364
3365 /*
3366 * This is it. Find all the controllers and register them. I really hate
3367 * stealing all these major device numbers.
3368 * returns the number of block devices registered.
3369 */
3370 static int __devinit cciss_init_one(struct pci_dev *pdev,
3371 const struct pci_device_id *ent)
3372 {
3373 int i;
3374 int j = 0;
3375 int rc;
3376 int dac;
3377
3378 i = alloc_cciss_hba();
3379 if (i < 0)
3380 return -1;
3381
3382 hba[i]->busy_initializing = 1;
3383
3384 if (cciss_pci_init(hba[i], pdev) != 0)
3385 goto clean1;
3386
3387 sprintf(hba[i]->devname, "cciss%d", i);
3388 hba[i]->ctlr = i;
3389 hba[i]->pdev = pdev;
3390
3391 /* configure PCI DMA stuff */
3392 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3393 dac = 1;
3394 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3395 dac = 0;
3396 else {
3397 printk(KERN_ERR "cciss: no suitable DMA available\n");
3398 goto clean1;
3399 }
3400
3401 /*
3402 * register with the major number, or get a dynamic major number
3403 * by passing 0 as argument. This is done for greater than
3404 * 8 controller support.
3405 */
3406 if (i < MAX_CTLR_ORIG)
3407 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3408 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3409 if (rc == -EBUSY || rc == -EINVAL) {
3410 printk(KERN_ERR
3411 "cciss: Unable to get major number %d for %s "
3412 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3413 goto clean1;
3414 } else {
3415 if (i >= MAX_CTLR_ORIG)
3416 hba[i]->major = rc;
3417 }
3418
3419 /* make sure the board interrupts are off */
3420 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3421 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3422 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3423 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3424 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3425 goto clean2;
3426 }
3427
3428 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3429 hba[i]->devname, pdev->device, pci_name(pdev),
3430 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3431
3432 hba[i]->cmd_pool_bits =
3433 kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
3434 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3435 hba[i]->cmd_pool = (CommandList_struct *)
3436 pci_alloc_consistent(hba[i]->pdev,
3437 hba[i]->nr_cmds * sizeof(CommandList_struct),
3438 &(hba[i]->cmd_pool_dhandle));
3439 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3440 pci_alloc_consistent(hba[i]->pdev,
3441 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3442 &(hba[i]->errinfo_pool_dhandle));
3443 if ((hba[i]->cmd_pool_bits == NULL)
3444 || (hba[i]->cmd_pool == NULL)
3445 || (hba[i]->errinfo_pool == NULL)) {
3446 printk(KERN_ERR "cciss: out of memory");
3447 goto clean4;
3448 }
3449 #ifdef CONFIG_CISS_SCSI_TAPE
3450 hba[i]->scsi_rejects.complete =
3451 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3452 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3453 if (hba[i]->scsi_rejects.complete == NULL) {
3454 printk(KERN_ERR "cciss: out of memory");
3455 goto clean4;
3456 }
3457 #endif
3458 spin_lock_init(&hba[i]->lock);
3459
3460 /* Initialize the pdev driver private data.
3461 have it point to hba[i]. */
3462 pci_set_drvdata(pdev, hba[i]);
3463 /* command and error info recs zeroed out before
3464 they are used */
3465 memset(hba[i]->cmd_pool_bits, 0,
3466 ((hba[i]->nr_cmds + BITS_PER_LONG -
3467 1) / BITS_PER_LONG) * sizeof(unsigned long));
3468
3469 #ifdef CCISS_DEBUG
3470 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3471 #endif /* CCISS_DEBUG */
3472
3473 cciss_getgeometry(i);
3474
3475 cciss_scsi_setup(i);
3476
3477 /* Turn the interrupts on so we can service requests */
3478 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3479
3480 cciss_procinit(i);
3481
3482 hba[i]->cciss_max_sectors = 2048;
3483
3484 hba[i]->busy_initializing = 0;
3485
3486 do {
3487 drive_info_struct *drv = &(hba[i]->drv[j]);
3488 struct gendisk *disk = hba[i]->gendisk[j];
3489 struct request_queue *q;
3490
3491 /* Check if the disk was allocated already */
3492 if (!disk){
3493 hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
3494 disk = hba[i]->gendisk[j];
3495 }
3496
3497 /* Check that the disk was able to be allocated */
3498 if (!disk) {
3499 printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
3500 goto clean4;
3501 }
3502
3503 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3504 if (!q) {
3505 printk(KERN_ERR
3506 "cciss: unable to allocate queue for disk %d\n",
3507 j);
3508 goto clean4;
3509 }
3510 drv->queue = q;
3511
3512 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3513
3514 /* This is a hardware imposed limit. */
3515 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3516
3517 /* This is a limit in the driver and could be eliminated. */
3518 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3519
3520 blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);
3521
3522 blk_queue_softirq_done(q, cciss_softirq_done);
3523
3524 q->queuedata = hba[i];
3525 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3526 disk->major = hba[i]->major;
3527 disk->first_minor = j << NWD_SHIFT;
3528 disk->fops = &cciss_fops;
3529 disk->queue = q;
3530 disk->private_data = drv;
3531 disk->driverfs_dev = &pdev->dev;
3532 /* we must register the controller even if no disks exist */
3533 /* this is for the online array utilities */
3534 if (!drv->heads && j)
3535 continue;
3536 blk_queue_hardsect_size(q, drv->block_size);
3537 set_capacity(disk, drv->nr_blocks);
3538 j++;
3539 } while (j <= hba[i]->highest_lun);
3540
3541 /* Make sure all queue data is written out before */
3542 /* interrupt handler, triggered by add_disk, */
3543 /* is allowed to start them. */
3544 wmb();
3545
3546 for (j = 0; j <= hba[i]->highest_lun; j++)
3547 add_disk(hba[i]->gendisk[j]);
3548
3549 return 1;
3550
3551 clean4:
3552 #ifdef CONFIG_CISS_SCSI_TAPE
3553 kfree(hba[i]->scsi_rejects.complete);
3554 #endif
3555 kfree(hba[i]->cmd_pool_bits);
3556 if (hba[i]->cmd_pool)
3557 pci_free_consistent(hba[i]->pdev,
3558 hba[i]->nr_cmds * sizeof(CommandList_struct),
3559 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3560 if (hba[i]->errinfo_pool)
3561 pci_free_consistent(hba[i]->pdev,
3562 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3563 hba[i]->errinfo_pool,
3564 hba[i]->errinfo_pool_dhandle);
3565 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3566 clean2:
3567 unregister_blkdev(hba[i]->major, hba[i]->devname);
3568 clean1:
3569 hba[i]->busy_initializing = 0;
3570 /* cleanup any queues that may have been initialized */
3571 for (j=0; j <= hba[i]->highest_lun; j++){
3572 drive_info_struct *drv = &(hba[i]->drv[j]);
3573 if (drv->queue)
3574 blk_cleanup_queue(drv->queue);
3575 }
3576 /*
3577 * Deliberately omit pci_disable_device(): it does something nasty to
3578 * Smart Array controllers that pci_enable_device does not undo
3579 */
3580 pci_release_regions(pdev);
3581 pci_set_drvdata(pdev, NULL);
3582 free_hba(i);
3583 return -1;
3584 }
3585
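/*
 * Shutdown hook: flush the controller's battery-backed write cache to
 * disk (interrupts are off, so sendcmd polls for completion) and then
 * release the IRQ.
 */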
3586 static void cciss_shutdown(struct pci_dev *pdev)
3587 {
3588 ctlr_info_t *tmp_ptr;
3589 int i;
3590 char flush_buf[4];
3591 int return_code;
3592
3593 tmp_ptr = pci_get_drvdata(pdev);
3594 if (tmp_ptr == NULL)
3595 return;
3596 i = tmp_ptr->ctlr;
3597 if (hba[i] == NULL)
3598 return;
3599
3600 /* Turn board interrupts off and send the flush cache command */
3601 /* sendcmd will turn off interrupts and send the flush cache command
3602 * to write all data in the battery-backed cache to disk */
3603 memset(flush_buf, 0, 4);
3604 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3605 TYPE_CMD);
3606 if (return_code == IO_OK) {
3607 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3608 } else {
3609 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
3610 }
3611 free_irq(hba[i]->intr[2], hba[i]);
3612 }
3613
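/*
 * Tear down one controller: remove its proc entry, block devices and
 * queues, unhook the SCSI tape support, flush the cache, and free the
 * IRQ, DMA pools and hba[] slot.
 */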
3614 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3615 {
3616 ctlr_info_t *tmp_ptr;
3617 int i, j;
3618
3619 if (pci_get_drvdata(pdev) == NULL) {
3620 printk(KERN_ERR "cciss: Unable to remove device\n");
3621 return;
3622 }
3623 tmp_ptr = pci_get_drvdata(pdev);
3624 i = tmp_ptr->ctlr;
3625 if (hba[i] == NULL) {
3626 printk(KERN_ERR "cciss: device appears to "
3627 "already be removed\n");
3628 return;
3629 }
3630
3631 remove_proc_entry(hba[i]->devname, proc_cciss);
3632 unregister_blkdev(hba[i]->major, hba[i]->devname);
3633
3634 /* remove it from the disk list */
3635 for (j = 0; j < CISS_MAX_LUN; j++) {
3636 struct gendisk *disk = hba[i]->gendisk[j];
3637 if (disk) {
3638 struct request_queue *q = disk->queue;
3639
3640 if (disk->flags & GENHD_FL_UP)
3641 del_gendisk(disk);
3642 if (q)
3643 blk_cleanup_queue(q);
3644 }
3645 }
3646
3647 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3648
3649 cciss_shutdown(pdev);
3650
3651 #ifdef CONFIG_PCI_MSI
3652 if (hba[i]->msix_vector)
3653 pci_disable_msix(hba[i]->pdev);
3654 else if (hba[i]->msi_vector)
3655 pci_disable_msi(hba[i]->pdev);
3656 #endif /* CONFIG_PCI_MSI */
3657
3658 iounmap(hba[i]->vaddr);
3659
3660 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3661 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3662 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3663 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3664 kfree(hba[i]->cmd_pool_bits);
3665 #ifdef CONFIG_CISS_SCSI_TAPE
3666 kfree(hba[i]->scsi_rejects.complete);
3667 #endif
3668 /*
3669 * Deliberately omit pci_disable_device(): it does something nasty to
3670 * Smart Array controllers that pci_enable_device does not undo
3671 */
3672 pci_release_regions(pdev);
3673 pci_set_drvdata(pdev, NULL);
3674 free_hba(i);
3675 }
3676
3677 static struct pci_driver cciss_pci_driver = {
3678 .name = "cciss",
3679 .probe = cciss_init_one,
3680 .remove = __devexit_p(cciss_remove_one),
3681 .id_table = cciss_pci_device_id, /* id_table */
3682 .shutdown = cciss_shutdown,
3683 };
3684
3685 /*
3686 * This is it. Register the PCI driver information for the cards we control;
3687 * the OS will call our registered routines when it finds one of our cards.
3688 */
3689 static int __init cciss_init(void)
3690 {
3691 printk(KERN_INFO DRIVER_NAME "\n");
3692
3693 /* Register for our PCI devices */
3694 return pci_register_driver(&cciss_pci_driver);
3695 }
3696
3697 static void __exit cciss_cleanup(void)
3698 {
3699 int i;
3700
3701 pci_unregister_driver(&cciss_pci_driver);
3702 /* double check that all controller entries have been removed */
3703 for (i = 0; i < MAX_CTLR; i++) {
3704 if (hba[i] != NULL) {
3705 printk(KERN_WARNING "cciss: had to remove"
3706 " controller %d\n", i);
3707 cciss_remove_one(hba[i]->pdev);
3708 }
3709 }
3710 remove_proc_entry("driver/cciss", NULL);
3711 }
3712
3713 static void fail_all_cmds(unsigned long ctlr)
3714 {
3715 /* If we get here, the board is apparently dead. */
3716 ctlr_info_t *h = hba[ctlr];
3717 CommandList_struct *c;
3718 unsigned long flags;
3719
3720 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3721 h->alive = 0; /* the controller apparently died... */
3722
3723 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3724
3725 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3726
3727 /* move everything off the request queue onto the completed queue */
3728 while ((c = h->reqQ) != NULL) {
3729 removeQ(&(h->reqQ), c);
3730 h->Qdepth--;
3731 addQ(&(h->cmpQ), c);
3732 }
3733
3734 /* Now, fail everything on the completed queue with a HW error */
3735 while ((c = h->cmpQ) != NULL) {
3736 removeQ(&h->cmpQ, c);
3737 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3738 if (c->cmd_type == CMD_RWREQ) {
3739 complete_command(h, c, 0);
3740 } else if (c->cmd_type == CMD_IOCTL_PEND)
3741 complete(c->waiting);
3742 #ifdef CONFIG_CISS_SCSI_TAPE
3743 else if (c->cmd_type == CMD_SCSI)
3744 complete_scsi_command(c, 0, 0);
3745 #endif
3746 }
3747 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3748 return;
3749 }
3750
3751 module_init(cciss_init);
3752 module_exit(cciss_cleanup);