[PATCH] drivers/block: Replace pci_module_init() with pci_register_driver()
drivers/block/cciss.c
1 /*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22
23 #include <linux/config.h> /* CONFIG_PROC_FS */
24 #include <linux/module.h>
25 #include <linux/interrupt.h>
26 #include <linux/types.h>
27 #include <linux/pci.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/major.h>
32 #include <linux/fs.h>
33 #include <linux/bio.h>
34 #include <linux/blkpg.h>
35 #include <linux/timer.h>
36 #include <linux/proc_fs.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/compat.h>
41 #include <asm/uaccess.h>
42 #include <asm/io.h>
43
44 #include <linux/dma-mapping.h>
45 #include <linux/blkdev.h>
46 #include <linux/genhd.h>
47 #include <linux/completion.h>
48
49 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
50 #define DRIVER_NAME "HP CISS Driver (v 2.6.10)"
51 #define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,10)
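/* For example, CCISS_DRIVER_VERSION(2,6,10) evaluates to 0x02060A
 * (major in bits 23-16, minor in bits 15-8, submin in bits 7-0). */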
52
53 /* Embedded module documentation macros - see modules.h */
54 MODULE_AUTHOR("Hewlett-Packard Company");
55 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.10");
56 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
57 " SA6i P600 P800 P400 P400i E200 E200i");
58 MODULE_LICENSE("GPL");
59
60 #include "cciss_cmd.h"
61 #include "cciss.h"
62 #include <linux/cciss_ioctl.h>
63
64 /* define the PCI info for the cards we can control */
65 static const struct pci_device_id cciss_pci_device_id[] = {
66 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
67 0x0E11, 0x4070, 0, 0, 0},
68 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
69 0x0E11, 0x4080, 0, 0, 0},
70 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
71 0x0E11, 0x4082, 0, 0, 0},
72 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
73 0x0E11, 0x4083, 0, 0, 0},
74 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
75 0x0E11, 0x409A, 0, 0, 0},
76 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
77 0x0E11, 0x409B, 0, 0, 0},
78 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
79 0x0E11, 0x409C, 0, 0, 0},
80 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
81 0x0E11, 0x409D, 0, 0, 0},
82 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
83 0x0E11, 0x4091, 0, 0, 0},
84 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
85 0x103C, 0x3225, 0, 0, 0},
86 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
87 0x103c, 0x3223, 0, 0, 0},
88 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
89 0x103c, 0x3234, 0, 0, 0},
90 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
91 0x103c, 0x3235, 0, 0, 0},
92 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
93 0x103c, 0x3211, 0, 0, 0},
94 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
95 0x103c, 0x3212, 0, 0, 0},
96 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
97 0x103c, 0x3213, 0, 0, 0},
98 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
99 0x103c, 0x3214, 0, 0, 0},
100 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
101 0x103c, 0x3215, 0, 0, 0},
102 {0,}
103 };
104 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
105
106 #define NR_PRODUCTS ARRAY_SIZE(products)
107
108 /* board_id = Subsystem Device ID & Vendor ID
109 * product = Marketing Name for the board
110 * access = Address of the struct of function pointers
111 */
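/* For example, subsystem vendor 0x0E11 with subsystem device 0x4070
 * (the first entry in cciss_pci_device_id above) gives board_id 0x40700E11. */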
112 static struct board_type products[] = {
113 { 0x40700E11, "Smart Array 5300", &SA5_access },
114 { 0x40800E11, "Smart Array 5i", &SA5B_access},
115 { 0x40820E11, "Smart Array 532", &SA5B_access},
116 { 0x40830E11, "Smart Array 5312", &SA5B_access},
117 { 0x409A0E11, "Smart Array 641", &SA5_access},
118 { 0x409B0E11, "Smart Array 642", &SA5_access},
119 { 0x409C0E11, "Smart Array 6400", &SA5_access},
120 { 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
121 { 0x40910E11, "Smart Array 6i", &SA5_access},
122 { 0x3225103C, "Smart Array P600", &SA5_access},
123 { 0x3223103C, "Smart Array P800", &SA5_access},
124 { 0x3234103C, "Smart Array P400", &SA5_access},
125 { 0x3235103C, "Smart Array P400i", &SA5_access},
126 { 0x3211103C, "Smart Array E200i", &SA5_access},
127 { 0x3212103C, "Smart Array E200", &SA5_access},
128 { 0x3213103C, "Smart Array E200i", &SA5_access},
129 { 0x3214103C, "Smart Array E200i", &SA5_access},
130 { 0x3215103C, "Smart Array E200i", &SA5_access},
131 };
132
133 /* How long to wait (in milliseconds) for the board to go into simple mode */
134 #define MAX_CONFIG_WAIT 30000
135 #define MAX_IOCTL_CONFIG_WAIT 1000
136
137 /* define how many times we will retry a command after a bus reset */
138 #define MAX_CMD_RETRIES 3
139
140 #define READ_AHEAD 1024
141 #define NR_CMDS 384 /* #commands that can be outstanding */
142 #define MAX_CTLR 32
143
144 /* Originally the cciss driver supported only 8 major numbers */
145 #define MAX_CTLR_ORIG 8
146
147
148 static ctlr_info_t *hba[MAX_CTLR];
149
150 static void do_cciss_request(request_queue_t *q);
151 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs);
152 static int cciss_open(struct inode *inode, struct file *filep);
153 static int cciss_release(struct inode *inode, struct file *filep);
154 static int cciss_ioctl(struct inode *inode, struct file *filep,
155 unsigned int cmd, unsigned long arg);
156 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
157
158 static int revalidate_allvol(ctlr_info_t *host);
159 static int cciss_revalidate(struct gendisk *disk);
160 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
161 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, int clear_all);
162
163 static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
164 int withirq, unsigned int *total_size, unsigned int *block_size);
165 static void cciss_geometry_inquiry(int ctlr, int logvol,
166 int withirq, unsigned int total_size,
167 unsigned int block_size, InquiryData_struct *inq_buff,
168 drive_info_struct *drv);
169 static void cciss_getgeometry(int cntl_num);
170 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *, __u32);
171 static void start_io( ctlr_info_t *h);
172 static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size,
173 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
174 unsigned char *scsi3addr, int cmd_type);
175 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
176 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
177 int cmd_type);
178
179 static void fail_all_cmds(unsigned long ctlr);
180
181 #ifdef CONFIG_PROC_FS
182 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
183 int length, int *eof, void *data);
184 static void cciss_procinit(int i);
185 #else
186 static void cciss_procinit(int i) {}
187 #endif /* CONFIG_PROC_FS */
188
189 #ifdef CONFIG_COMPAT
190 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
191 #endif
192
193 static struct block_device_operations cciss_fops = {
194 .owner = THIS_MODULE,
195 .open = cciss_open,
196 .release = cciss_release,
197 .ioctl = cciss_ioctl,
198 .getgeo = cciss_getgeo,
199 #ifdef CONFIG_COMPAT
200 .compat_ioctl = cciss_compat_ioctl,
201 #endif
202 .revalidate_disk= cciss_revalidate,
203 };
204
205 /*
206 * Enqueuing and dequeuing functions for cmdlists.
207 */
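/* The command queues (e.g. host->reqQ) are kept as circular doubly-linked
 * lists: *Qptr points at the head, addQ() links a new command in just before
 * it (i.e. at the tail), and removeQ() unlinks an arbitrary element. */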
208 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
209 {
210 if (*Qptr == NULL) {
211 *Qptr = c;
212 c->next = c->prev = c;
213 } else {
214 c->prev = (*Qptr)->prev;
215 c->next = (*Qptr);
216 (*Qptr)->prev->next = c;
217 (*Qptr)->prev = c;
218 }
219 }
220
221 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
222 CommandList_struct *c)
223 {
224 if (c && c->next != c) {
225 if (*Qptr == c) *Qptr = c->next;
226 c->prev->next = c->next;
227 c->next->prev = c->prev;
228 } else {
229 *Qptr = NULL;
230 }
231 return c;
232 }
233
234 #include "cciss_scsi.c" /* For SCSI tape support */
235
236 #ifdef CONFIG_PROC_FS
237
238 /*
239 * Report information about this controller.
240 */
241 #define ENG_GIG 1000000000
242 #define ENG_GIG_FACTOR (ENG_GIG/512)
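/* ENG_GIG_FACTOR is the number of 512-byte sectors per decimal gigabyte
 * (1000000000 / 512 = 1953125); it is used below to report volume sizes. */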
243 #define RAID_UNKNOWN 6
244 static const char *raid_label[] = {"0","4","1(1+0)","5","5+1","ADG",
245 "UNKNOWN"};
246
247 static struct proc_dir_entry *proc_cciss;
248
249 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
250 int length, int *eof, void *data)
251 {
252 off_t pos = 0;
253 off_t len = 0;
254 int size, i, ctlr;
255 ctlr_info_t *h = (ctlr_info_t*)data;
256 drive_info_struct *drv;
257 unsigned long flags;
258 sector_t vol_sz, vol_sz_frac;
259
260 ctlr = h->ctlr;
261
262 /* prevent displaying bogus info during configuration
263 * or deconfiguration of a logical volume
264 */
265 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
266 if (h->busy_configuring) {
267 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
268 return -EBUSY;
269 }
270 h->busy_configuring = 1;
271 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
272
273 size = sprintf(buffer, "%s: HP %s Controller\n"
274 "Board ID: 0x%08lx\n"
275 "Firmware Version: %c%c%c%c\n"
276 "IRQ: %d\n"
277 "Logical drives: %d\n"
278 "Current Q depth: %d\n"
279 "Current # commands on controller: %d\n"
280 "Max Q depth since init: %d\n"
281 "Max # commands on controller since init: %d\n"
282 "Max SG entries since init: %d\n\n",
283 h->devname,
284 h->product_name,
285 (unsigned long)h->board_id,
286 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
287 (unsigned int)h->intr[SIMPLE_MODE_INT],
288 h->num_luns,
289 h->Qdepth, h->commands_outstanding,
290 h->maxQsinceinit, h->max_outstanding, h->maxSG);
291
292 pos += size; len += size;
293 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
294 for(i=0; i<=h->highest_lun; i++) {
295
296 drv = &h->drv[i];
297 if (drv->heads == 0)
298 continue;
299
300 vol_sz = drv->nr_blocks;
301 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
302 vol_sz_frac *= 100;
303 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
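/* vol_sz now holds whole gigabytes; vol_sz_frac holds the remainder scaled
 * to hundredths, so the size prints with two decimal places below. */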
304
305 if (drv->raid_level > 5)
306 drv->raid_level = RAID_UNKNOWN;
307 size = sprintf(buffer+len, "cciss/c%dd%d:"
308 "\t%4u.%02uGB\tRAID %s\n",
309 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
310 raid_label[drv->raid_level]);
311 pos += size; len += size;
312 }
313
314 *eof = 1;
315 *start = buffer+offset;
316 len -= offset;
317 if (len>length)
318 len = length;
319 h->busy_configuring = 0;
320 return len;
321 }
322
323 static int
324 cciss_proc_write(struct file *file, const char __user *buffer,
325 unsigned long count, void *data)
326 {
327 unsigned char cmd[80];
328 int len;
329 #ifdef CONFIG_CISS_SCSI_TAPE
330 ctlr_info_t *h = (ctlr_info_t *) data;
331 int rc;
332 #endif
333
334 if (count > sizeof(cmd)-1) return -EINVAL;
335 if (copy_from_user(cmd, buffer, count)) return -EFAULT;
336 cmd[count] = '\0';
337 len = strlen(cmd); // above 3 lines ensure safety
338 if (len && cmd[len-1] == '\n')
339 cmd[--len] = '\0';
340 # ifdef CONFIG_CISS_SCSI_TAPE
341 if (strcmp("engage scsi", cmd)==0) {
342 rc = cciss_engage_scsi(h->ctlr);
343 if (rc != 0) return -rc;
344 return count;
345 }
346 /* might be nice to have "disengage" too, but it's not
347 safely possible. (only 1 module use count, lock issues.) */
348 # endif
349 return -EINVAL;
350 }
351
352 /*
353 * Get us a file in /proc/cciss that says something about each controller.
354 * Create /proc/cciss if it doesn't exist yet.
355 */
356 static void __devinit cciss_procinit(int i)
357 {
358 struct proc_dir_entry *pde;
359
360 if (proc_cciss == NULL) {
361 proc_cciss = proc_mkdir("cciss", proc_root_driver);
362 if (!proc_cciss)
363 return;
364 }
365
366 pde = create_proc_read_entry(hba[i]->devname,
367 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
368 proc_cciss, cciss_proc_get_info, hba[i]);
369 pde->write_proc = cciss_proc_write;
370 }
371 #endif /* CONFIG_PROC_FS */
372
373 /*
374 * For operations that cannot sleep, a command block is allocated at init,
375 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
376 * which ones are free or in use. For operations that can wait for kmalloc
377 * to possibly sleep, this routine can be called with get_from_pool set to 0.
378 * cmd_free() MUST then be called with got_from_pool set to 0 as well.
379 */
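/* Sketch of the intended pairing (illustrative only, not taken from this file):
 *
 *	c = cmd_alloc(h, 1);		// take a command from the pre-allocated pool
 *	if (c) {
 *		... fill in and issue the command ...
 *		cmd_free(h, c, 1);	// pool flag must match the one used at allocation
 *	}
 */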
380 static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
381 {
382 CommandList_struct *c;
383 int i;
384 u64bit temp64;
385 dma_addr_t cmd_dma_handle, err_dma_handle;
386
387 if (!get_from_pool)
388 {
389 c = (CommandList_struct *) pci_alloc_consistent(
390 h->pdev, sizeof(CommandList_struct), &cmd_dma_handle);
391 if(c==NULL)
392 return NULL;
393 memset(c, 0, sizeof(CommandList_struct));
394
395 c->cmdindex = -1;
396
397 c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
398 h->pdev, sizeof(ErrorInfo_struct),
399 &err_dma_handle);
400
401 if (c->err_info == NULL)
402 {
403 pci_free_consistent(h->pdev,
404 sizeof(CommandList_struct), c, cmd_dma_handle);
405 return NULL;
406 }
407 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
408 } else /* get it out of the controller's pool */
409 {
410 do {
411 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
412 if (i == NR_CMDS)
413 return NULL;
414 } while(test_and_set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
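/* Loop again if another context claimed bit i between the
 * find_first_zero_bit() and the test_and_set_bit() above. */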
415 #ifdef CCISS_DEBUG
416 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
417 #endif
418 c = h->cmd_pool + i;
419 memset(c, 0, sizeof(CommandList_struct));
420 cmd_dma_handle = h->cmd_pool_dhandle
421 + i*sizeof(CommandList_struct);
422 c->err_info = h->errinfo_pool + i;
423 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
424 err_dma_handle = h->errinfo_pool_dhandle
425 + i*sizeof(ErrorInfo_struct);
426 h->nr_allocs++;
427
428 c->cmdindex = i;
429 }
430
431 c->busaddr = (__u32) cmd_dma_handle;
432 temp64.val = (__u64) err_dma_handle;
433 c->ErrDesc.Addr.lower = temp64.val32.lower;
434 c->ErrDesc.Addr.upper = temp64.val32.upper;
435 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
436
437 c->ctlr = h->ctlr;
438 return c;
439
440
441 }
442
443 /*
444 * Frees a command block that was previously allocated with cmd_alloc().
445 */
446 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
447 {
448 int i;
449 u64bit temp64;
450
451 if( !got_from_pool)
452 {
453 temp64.val32.lower = c->ErrDesc.Addr.lower;
454 temp64.val32.upper = c->ErrDesc.Addr.upper;
455 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
456 c->err_info, (dma_addr_t) temp64.val);
457 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
458 c, (dma_addr_t) c->busaddr);
459 } else
460 {
461 i = c - h->cmd_pool;
462 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
463 h->nr_frees++;
464 }
465 }
466
467 static inline ctlr_info_t *get_host(struct gendisk *disk)
468 {
469 return disk->queue->queuedata;
470 }
471
472 static inline drive_info_struct *get_drv(struct gendisk *disk)
473 {
474 return disk->private_data;
475 }
476
477 /*
478 * Open. Make sure the device is really there.
479 */
480 static int cciss_open(struct inode *inode, struct file *filep)
481 {
482 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
483 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
484
485 #ifdef CCISS_DEBUG
486 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
487 #endif /* CCISS_DEBUG */
488
489 if (host->busy_initializing || drv->busy_configuring)
490 return -EBUSY;
491 /*
492 * Root is allowed to open raw volume zero even if it's not configured
493 * so array config can still work. Root is also allowed to open any
494 * volume that has a LUN ID, so it can issue IOCTL to reread the
495 * disk information. I don't think I really like this
496 * but I'm already using way too many device nodes to claim another one
497 * for "raw controller".
498 */
499 if (drv->nr_blocks == 0) {
500 if (iminor(inode) != 0) { /* not node 0? */
501 /* if not node 0, make sure it is partition 0 */
502 if (iminor(inode) & 0x0f) {
503 return -ENXIO;
504 /* if it is, make sure we have a LUN ID */
505 } else if (drv->LunID == 0) {
506 return -ENXIO;
507 }
508 }
509 if (!capable(CAP_SYS_ADMIN))
510 return -EPERM;
511 }
512 drv->usage_count++;
513 host->usage_count++;
514 return 0;
515 }
516 /*
517 * Close. Sync first.
518 */
519 static int cciss_release(struct inode *inode, struct file *filep)
520 {
521 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
522 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
523
524 #ifdef CCISS_DEBUG
525 printk(KERN_DEBUG "cciss_release %s\n", inode->i_bdev->bd_disk->disk_name);
526 #endif /* CCISS_DEBUG */
527
528 drv->usage_count--;
529 host->usage_count--;
530 return 0;
531 }
532
533 #ifdef CONFIG_COMPAT
534
535 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
536 {
537 int ret;
538 lock_kernel();
539 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
540 unlock_kernel();
541 return ret;
542 }
543
544 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg);
545 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd, unsigned long arg);
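/* The 32-bit passthru handlers below repack the compat ioctl structs into the
 * native layout in a userspace scratch area obtained with
 * compat_alloc_user_space(), then forward the request through do_ioctl(). */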
546
547 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
548 {
549 switch (cmd) {
550 case CCISS_GETPCIINFO:
551 case CCISS_GETINTINFO:
552 case CCISS_SETINTINFO:
553 case CCISS_GETNODENAME:
554 case CCISS_SETNODENAME:
555 case CCISS_GETHEARTBEAT:
556 case CCISS_GETBUSTYPES:
557 case CCISS_GETFIRMVER:
558 case CCISS_GETDRIVVER:
559 case CCISS_REVALIDVOLS:
560 case CCISS_DEREGDISK:
561 case CCISS_REGNEWDISK:
562 case CCISS_REGNEWD:
563 case CCISS_RESCANDISK:
564 case CCISS_GETLUNINFO:
565 return do_ioctl(f, cmd, arg);
566
567 case CCISS_PASSTHRU32:
568 return cciss_ioctl32_passthru(f, cmd, arg);
569 case CCISS_BIG_PASSTHRU32:
570 return cciss_ioctl32_big_passthru(f, cmd, arg);
571
572 default:
573 return -ENOIOCTLCMD;
574 }
575 }
576
577 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg)
578 {
579 IOCTL32_Command_struct __user *arg32 =
580 (IOCTL32_Command_struct __user *) arg;
581 IOCTL_Command_struct arg64;
582 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
583 int err;
584 u32 cp;
585
586 err = 0;
587 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
588 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
589 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
590 err |= get_user(arg64.buf_size, &arg32->buf_size);
591 err |= get_user(cp, &arg32->buf);
592 arg64.buf = compat_ptr(cp);
593 err |= copy_to_user(p, &arg64, sizeof(arg64));
594
595 if (err)
596 return -EFAULT;
597
598 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long) p);
599 if (err)
600 return err;
601 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
602 if (err)
603 return -EFAULT;
604 return err;
605 }
606
607 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, unsigned long arg)
608 {
609 BIG_IOCTL32_Command_struct __user *arg32 =
610 (BIG_IOCTL32_Command_struct __user *) arg;
611 BIG_IOCTL_Command_struct arg64;
612 BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
613 int err;
614 u32 cp;
615
616 err = 0;
617 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
618 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
619 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
620 err |= get_user(arg64.buf_size, &arg32->buf_size);
621 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
622 err |= get_user(cp, &arg32->buf);
623 arg64.buf = compat_ptr(cp);
624 err |= copy_to_user(p, &arg64, sizeof(arg64));
625
626 if (err)
627 return -EFAULT;
628
629 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long) p);
630 if (err)
631 return err;
632 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
633 if (err)
634 return -EFAULT;
635 return err;
636 }
637 #endif
638
639 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
640 {
641 drive_info_struct *drv = get_drv(bdev->bd_disk);
642
643 if (!drv->cylinders)
644 return -ENXIO;
645
646 geo->heads = drv->heads;
647 geo->sectors = drv->sectors;
648 geo->cylinders = drv->cylinders;
649 return 0;
650 }
651
652 /*
653 * ioctl
654 */
655 static int cciss_ioctl(struct inode *inode, struct file *filep,
656 unsigned int cmd, unsigned long arg)
657 {
658 struct block_device *bdev = inode->i_bdev;
659 struct gendisk *disk = bdev->bd_disk;
660 ctlr_info_t *host = get_host(disk);
661 drive_info_struct *drv = get_drv(disk);
662 int ctlr = host->ctlr;
663 void __user *argp = (void __user *)arg;
664
665 #ifdef CCISS_DEBUG
666 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
667 #endif /* CCISS_DEBUG */
668
669 switch(cmd) {
670 case CCISS_GETPCIINFO:
671 {
672 cciss_pci_info_struct pciinfo;
673
674 if (!arg) return -EINVAL;
675 pciinfo.domain = pci_domain_nr(host->pdev->bus);
676 pciinfo.bus = host->pdev->bus->number;
677 pciinfo.dev_fn = host->pdev->devfn;
678 pciinfo.board_id = host->board_id;
679 if (copy_to_user(argp, &pciinfo, sizeof( cciss_pci_info_struct )))
680 return -EFAULT;
681 return(0);
682 }
683 case CCISS_GETINTINFO:
684 {
685 cciss_coalint_struct intinfo;
686 if (!arg) return -EINVAL;
687 intinfo.delay = readl(&host->cfgtable->HostWrite.CoalIntDelay);
688 intinfo.count = readl(&host->cfgtable->HostWrite.CoalIntCount);
689 if (copy_to_user(argp, &intinfo, sizeof( cciss_coalint_struct )))
690 return -EFAULT;
691 return(0);
692 }
693 case CCISS_SETINTINFO:
694 {
695 cciss_coalint_struct intinfo;
696 unsigned long flags;
697 int i;
698
699 if (!arg) return -EINVAL;
700 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
701 if (copy_from_user(&intinfo, argp, sizeof( cciss_coalint_struct)))
702 return -EFAULT;
703 if ( (intinfo.delay == 0 ) && (intinfo.count == 0))
704
705 {
706 // printk("cciss_ioctl: delay and count cannot be 0\n");
707 return( -EINVAL);
708 }
709 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
710 /* Update the field, and then ring the doorbell */
711 writel( intinfo.delay,
712 &(host->cfgtable->HostWrite.CoalIntDelay));
713 writel( intinfo.count,
714 &(host->cfgtable->HostWrite.CoalIntCount));
715 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
716
717 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
718 if (!(readl(host->vaddr + SA5_DOORBELL)
719 & CFGTBL_ChangeReq))
720 break;
721 /* delay and try again */
722 udelay(1000);
723 }
724 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
725 if (i >= MAX_IOCTL_CONFIG_WAIT)
726 return -EAGAIN;
727 return(0);
728 }
729 case CCISS_GETNODENAME:
730 {
731 NodeName_type NodeName;
732 int i;
733
734 if (!arg) return -EINVAL;
735 for(i=0;i<16;i++)
736 NodeName[i] = readb(&host->cfgtable->ServerName[i]);
737 if (copy_to_user(argp, NodeName, sizeof( NodeName_type)))
738 return -EFAULT;
739 return(0);
740 }
741 case CCISS_SETNODENAME:
742 {
743 NodeName_type NodeName;
744 unsigned long flags;
745 int i;
746
747 if (!arg) return -EINVAL;
748 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
749
750 if (copy_from_user(NodeName, argp, sizeof( NodeName_type)))
751 return -EFAULT;
752
753 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
754
755 /* Update the field, and then ring the doorbell */
756 for(i=0;i<16;i++)
757 writeb( NodeName[i], &host->cfgtable->ServerName[i]);
758
759 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
760
761 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
762 if (!(readl(host->vaddr + SA5_DOORBELL)
763 & CFGTBL_ChangeReq))
764 break;
765 /* delay and try again */
766 udelay(1000);
767 }
768 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
769 if (i >= MAX_IOCTL_CONFIG_WAIT)
770 return -EAGAIN;
771 return(0);
772 }
773
774 case CCISS_GETHEARTBEAT:
775 {
776 Heartbeat_type heartbeat;
777
778 if (!arg) return -EINVAL;
779 heartbeat = readl(&host->cfgtable->HeartBeat);
780 if (copy_to_user(argp, &heartbeat, sizeof( Heartbeat_type)))
781 return -EFAULT;
782 return(0);
783 }
784 case CCISS_GETBUSTYPES:
785 {
786 BusTypes_type BusTypes;
787
788 if (!arg) return -EINVAL;
789 BusTypes = readl(&host->cfgtable->BusTypes);
790 if (copy_to_user(argp, &BusTypes, sizeof( BusTypes_type) ))
791 return -EFAULT;
792 return(0);
793 }
794 case CCISS_GETFIRMVER:
795 {
796 FirmwareVer_type firmware;
797
798 if (!arg) return -EINVAL;
799 memcpy(firmware, host->firm_ver, 4);
800
801 if (copy_to_user(argp, firmware, sizeof( FirmwareVer_type)))
802 return -EFAULT;
803 return(0);
804 }
805 case CCISS_GETDRIVVER:
806 {
807 DriverVer_type DriverVer = DRIVER_VERSION;
808
809 if (!arg) return -EINVAL;
810
811 if (copy_to_user(argp, &DriverVer, sizeof( DriverVer_type) ))
812 return -EFAULT;
813 return(0);
814 }
815
816 case CCISS_REVALIDVOLS:
817 if (bdev != bdev->bd_contains || drv != host->drv)
818 return -ENXIO;
819 return revalidate_allvol(host);
820
821 case CCISS_GETLUNINFO: {
822 LogvolInfo_struct luninfo;
823
824 luninfo.LunID = drv->LunID;
825 luninfo.num_opens = drv->usage_count;
826 luninfo.num_parts = 0;
827 if (copy_to_user(argp, &luninfo,
828 sizeof(LogvolInfo_struct)))
829 return -EFAULT;
830 return(0);
831 }
832 case CCISS_DEREGDISK:
833 return rebuild_lun_table(host, disk);
834
835 case CCISS_REGNEWD:
836 return rebuild_lun_table(host, NULL);
837
838 case CCISS_PASSTHRU:
839 {
840 IOCTL_Command_struct iocommand;
841 CommandList_struct *c;
842 char *buff = NULL;
843 u64bit temp64;
844 unsigned long flags;
845 DECLARE_COMPLETION(wait);
846
847 if (!arg) return -EINVAL;
848
849 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
850
851 if (copy_from_user(&iocommand, argp, sizeof( IOCTL_Command_struct) ))
852 return -EFAULT;
853 if((iocommand.buf_size < 1) &&
854 (iocommand.Request.Type.Direction != XFER_NONE))
855 {
856 return -EINVAL;
857 }
858 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
859 /* Check kmalloc limits */
860 if(iocommand.buf_size > 128000)
861 return -EINVAL;
862 #endif
863 if(iocommand.buf_size > 0)
864 {
865 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
866 if( buff == NULL)
867 return -EFAULT;
868 }
869 if (iocommand.Request.Type.Direction == XFER_WRITE)
870 {
871 /* Copy the data into the buffer we created */
872 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
873 {
874 kfree(buff);
875 return -EFAULT;
876 }
877 } else {
878 memset(buff, 0, iocommand.buf_size);
879 }
880 if ((c = cmd_alloc(host , 0)) == NULL)
881 {
882 kfree(buff);
883 return -ENOMEM;
884 }
885 // Fill in the command type
886 c->cmd_type = CMD_IOCTL_PEND;
887 // Fill in Command Header
888 c->Header.ReplyQueue = 0; // unused in simple mode
889 if( iocommand.buf_size > 0) // buffer to fill
890 {
891 c->Header.SGList = 1;
892 c->Header.SGTotal= 1;
893 } else // no buffers to fill
894 {
895 c->Header.SGList = 0;
896 c->Header.SGTotal= 0;
897 }
898 c->Header.LUN = iocommand.LUN_info;
899 c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block for the tag
900
901 // Fill in Request block
902 c->Request = iocommand.Request;
903
904 // Fill in the scatter gather information
905 if (iocommand.buf_size > 0 )
906 {
907 temp64.val = pci_map_single( host->pdev, buff,
908 iocommand.buf_size,
909 PCI_DMA_BIDIRECTIONAL);
910 c->SG[0].Addr.lower = temp64.val32.lower;
911 c->SG[0].Addr.upper = temp64.val32.upper;
912 c->SG[0].Len = iocommand.buf_size;
913 c->SG[0].Ext = 0; // we are not chaining
914 }
915 c->waiting = &wait;
916
917 /* Put the request on the tail of the request queue */
918 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
919 addQ(&host->reqQ, c);
920 host->Qdepth++;
921 start_io(host);
922 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
923
924 wait_for_completion(&wait);
925
926 /* unlock the buffers from DMA */
927 temp64.val32.lower = c->SG[0].Addr.lower;
928 temp64.val32.upper = c->SG[0].Addr.upper;
929 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
930 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
931
932 /* Copy the error information out */
933 iocommand.error_info = *(c->err_info);
934 if ( copy_to_user(argp, &iocommand, sizeof( IOCTL_Command_struct) ) )
935 {
936 kfree(buff);
937 cmd_free(host, c, 0);
938 return( -EFAULT);
939 }
940
941 if (iocommand.Request.Type.Direction == XFER_READ)
942 {
943 /* Copy the data out of the buffer we created */
944 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size))
945 {
946 kfree(buff);
947 cmd_free(host, c, 0);
948 return -EFAULT;
949 }
950 }
951 kfree(buff);
952 cmd_free(host, c, 0);
953 return(0);
954 }
955 case CCISS_BIG_PASSTHRU: {
956 BIG_IOCTL_Command_struct *ioc;
957 CommandList_struct *c;
958 unsigned char **buff = NULL;
959 int *buff_size = NULL;
960 u64bit temp64;
961 unsigned long flags;
962 BYTE sg_used = 0;
963 int status = 0;
964 int i;
965 DECLARE_COMPLETION(wait);
966 __u32 left;
967 __u32 sz;
968 BYTE __user *data_ptr;
969
970 if (!arg)
971 return -EINVAL;
972 if (!capable(CAP_SYS_RAWIO))
973 return -EPERM;
974 ioc = (BIG_IOCTL_Command_struct *)
975 kmalloc(sizeof(*ioc), GFP_KERNEL);
976 if (!ioc) {
977 status = -ENOMEM;
978 goto cleanup1;
979 }
980 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
981 status = -EFAULT;
982 goto cleanup1;
983 }
984 if ((ioc->buf_size < 1) &&
985 (ioc->Request.Type.Direction != XFER_NONE)) {
986 status = -EINVAL;
987 goto cleanup1;
988 }
989 /* Check kmalloc limits using all SGs */
990 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
991 status = -EINVAL;
992 goto cleanup1;
993 }
994 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
995 status = -EINVAL;
996 goto cleanup1;
997 }
998 buff = (unsigned char **) kmalloc(MAXSGENTRIES *
999 sizeof(char *), GFP_KERNEL);
1000 if (!buff) {
1001 status = -ENOMEM;
1002 goto cleanup1;
1003 }
1004 memset(buff, 0, MAXSGENTRIES * sizeof(char *));
1005 buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int),
1006 GFP_KERNEL);
1007 if (!buff_size) {
1008 status = -ENOMEM;
1009 goto cleanup1;
1010 }
1011 left = ioc->buf_size;
1012 data_ptr = ioc->buf;
1013 while (left) {
1014 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
1015 buff_size[sg_used] = sz;
1016 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1017 if (buff[sg_used] == NULL) {
1018 status = -ENOMEM;
1019 goto cleanup1;
1020 }
1021 if (ioc->Request.Type.Direction == XFER_WRITE) {
1022 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
1023 status = -ENOMEM;
1024 goto cleanup1;
1025 }
1026 } else {
1027 memset(buff[sg_used], 0, sz);
1028 }
1029 left -= sz;
1030 data_ptr += sz;
1031 sg_used++;
1032 }
1033 if ((c = cmd_alloc(host , 0)) == NULL) {
1034 status = -ENOMEM;
1035 goto cleanup1;
1036 }
1037 c->cmd_type = CMD_IOCTL_PEND;
1038 c->Header.ReplyQueue = 0;
1039
1040 if( ioc->buf_size > 0) {
1041 c->Header.SGList = sg_used;
1042 c->Header.SGTotal= sg_used;
1043 } else {
1044 c->Header.SGList = 0;
1045 c->Header.SGTotal= 0;
1046 }
1047 c->Header.LUN = ioc->LUN_info;
1048 c->Header.Tag.lower = c->busaddr;
1049
1050 c->Request = ioc->Request;
1051 if (ioc->buf_size > 0 ) {
1052 int i;
1053 for(i=0; i<sg_used; i++) {
1054 temp64.val = pci_map_single( host->pdev, buff[i],
1055 buff_size[i],
1056 PCI_DMA_BIDIRECTIONAL);
1057 c->SG[i].Addr.lower = temp64.val32.lower;
1058 c->SG[i].Addr.upper = temp64.val32.upper;
1059 c->SG[i].Len = buff_size[i];
1060 c->SG[i].Ext = 0; /* we are not chaining */
1061 }
1062 }
1063 c->waiting = &wait;
1064 /* Put the request on the tail of the request queue */
1065 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1066 addQ(&host->reqQ, c);
1067 host->Qdepth++;
1068 start_io(host);
1069 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1070 wait_for_completion(&wait);
1071 /* unlock the buffers from DMA */
1072 for(i=0; i<sg_used; i++) {
1073 temp64.val32.lower = c->SG[i].Addr.lower;
1074 temp64.val32.upper = c->SG[i].Addr.upper;
1075 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
1076 buff_size[i], PCI_DMA_BIDIRECTIONAL);
1077 }
1078 /* Copy the error information out */
1079 ioc->error_info = *(c->err_info);
1080 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1081 cmd_free(host, c, 0);
1082 status = -EFAULT;
1083 goto cleanup1;
1084 }
1085 if (ioc->Request.Type.Direction == XFER_READ) {
1086 /* Copy the data out of the buffer we created */
1087 BYTE __user *ptr = ioc->buf;
1088 for(i=0; i< sg_used; i++) {
1089 if (copy_to_user(ptr, buff[i], buff_size[i])) {
1090 cmd_free(host, c, 0);
1091 status = -EFAULT;
1092 goto cleanup1;
1093 }
1094 ptr += buff_size[i];
1095 }
1096 }
1097 cmd_free(host, c, 0);
1098 status = 0;
1099 cleanup1:
1100 if (buff) {
1101 for(i=0; i<sg_used; i++)
1102 kfree(buff[i]);
1103 kfree(buff);
1104 }
1105 kfree(buff_size);
1106 kfree(ioc);
1107 return(status);
1108 }
1109 default:
1110 return -ENOTTY;
1111 }
1112
1113 }
1114
1115 /*
1116 * revalidate_allvol is for online array config utilities. After a
1117 * utility reconfigures the drives in the array, it can use this function
1118 * (through an ioctl) to make the driver zap any previous disk structs for
1119 * that controller and get new ones.
1120 *
1121 * Right now I'm using the getgeometry() function to do this, but this
1122 * function should probably be finer grained and allow you to revalidate one
1123 * particular logical volume (instead of all of them on a particular
1124 * controller).
1125 */
1126 static int revalidate_allvol(ctlr_info_t *host)
1127 {
1128 int ctlr = host->ctlr, i;
1129 unsigned long flags;
1130
1131 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1132 if (host->usage_count > 1) {
1133 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1134 printk(KERN_WARNING "cciss: Device busy for volume"
1135 " revalidation (usage=%d)\n", host->usage_count);
1136 return -EBUSY;
1137 }
1138 host->usage_count++;
1139 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1140
1141 for(i=0; i< NWD; i++) {
1142 struct gendisk *disk = host->gendisk[i];
1143 if (disk) {
1144 request_queue_t *q = disk->queue;
1145
1146 if (disk->flags & GENHD_FL_UP)
1147 del_gendisk(disk);
1148 if (q)
1149 blk_cleanup_queue(q);
1150 }
1151 }
1152
1153 /*
1154 * Set the partition and block size structures for all volumes
1155 * on this controller to zero. We will reread all of this data
1156 */
1157 memset(host->drv, 0, sizeof(drive_info_struct)
1158 * CISS_MAX_LUN);
1159 /*
1160 * Tell the array controller not to give us any interrupts while
1161 * we check the new geometry. Then turn interrupts back on when
1162 * we're done.
1163 */
1164 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1165 cciss_getgeometry(ctlr);
1166 host->access.set_intr_mask(host, CCISS_INTR_ON);
1167
1168 /* Loop through each real device */
1169 for (i = 0; i < NWD; i++) {
1170 struct gendisk *disk = host->gendisk[i];
1171 drive_info_struct *drv = &(host->drv[i]);
1172 /* we must register the controller even if no disks exist */
1173 /* this is for the online array utilities */
1174 if (!drv->heads && i)
1175 continue;
1176 blk_queue_hardsect_size(drv->queue, drv->block_size);
1177 set_capacity(disk, drv->nr_blocks);
1178 add_disk(disk);
1179 }
1180 host->usage_count--;
1181 return 0;
1182 }
1183
1184 /* This function will check the usage_count of the drive to be updated/added.
1185 * If the usage_count is zero then the drive information will be updated and
1186 * the disk will be re-registered with the kernel. If not then it will be
1187 * left alone for the next reboot. The exception to this is disk 0 which
1188 * will always be left registered with the kernel since it is also the
1189 * controller node. Any changes to disk 0 will show up on the next
1190 * reboot.
1191 */
1192 static void cciss_update_drive_info(int ctlr, int drv_index)
1193 {
1194 ctlr_info_t *h = hba[ctlr];
1195 struct gendisk *disk;
1196 ReadCapdata_struct *size_buff = NULL;
1197 InquiryData_struct *inq_buff = NULL;
1198 unsigned int block_size;
1199 unsigned int total_size;
1200 unsigned long flags = 0;
1201 int ret = 0;
1202
1203 /* if the disk already exists then deregister it before proceeding*/
1204 if (h->drv[drv_index].raid_level != -1){
1205 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1206 h->drv[drv_index].busy_configuring = 1;
1207 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1208 ret = deregister_disk(h->gendisk[drv_index],
1209 &h->drv[drv_index], 0);
1210 h->drv[drv_index].busy_configuring = 0;
1211 }
1212
1213 /* If the disk is in use return */
1214 if (ret)
1215 return;
1216
1217
1218 /* Get information about the disk and modify the driver structure */
1219 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1220 if (size_buff == NULL)
1221 goto mem_msg;
1222 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1223 if (inq_buff == NULL)
1224 goto mem_msg;
1225
1226 cciss_read_capacity(ctlr, drv_index, size_buff, 1,
1227 &total_size, &block_size);
1228 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1229 inq_buff, &h->drv[drv_index]);
1230
1231 ++h->num_luns;
1232 disk = h->gendisk[drv_index];
1233 set_capacity(disk, h->drv[drv_index].nr_blocks);
1234
1235
1236 /* if it's the controller it's already added */
1237 if (drv_index){
1238 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1239
1240 /* Set up queue information */
1241 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1242 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1243
1244 /* This is a hardware imposed limit. */
1245 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1246
1247 /* This is a limit in the driver and could be eliminated. */
1248 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1249
1250 blk_queue_max_sectors(disk->queue, 512);
1251
1252 disk->queue->queuedata = hba[ctlr];
1253
1254 blk_queue_hardsect_size(disk->queue,
1255 hba[ctlr]->drv[drv_index].block_size);
1256
1257 h->drv[drv_index].queue = disk->queue;
1258 add_disk(disk);
1259 }
1260
1261 freeret:
1262 kfree(size_buff);
1263 kfree(inq_buff);
1264 return;
1265 mem_msg:
1266 printk(KERN_ERR "cciss: out of memory\n");
1267 goto freeret;
1268 }
1269
1270 /* This function will find the first index of the controller's drive array
1271 * that has a -1 for the raid_level and will return that index. This is
1272 * where new drives will be added. If the index to be returned is greater
1273 * than the highest_lun index for the controller then highest_lun is set
1274 * to this new index. If there are no available indexes then -1 is returned.
1275 */
1276 static int cciss_find_free_drive_index(int ctlr)
1277 {
1278 int i;
1279
1280 for (i=0; i < CISS_MAX_LUN; i++){
1281 if (hba[ctlr]->drv[i].raid_level == -1){
1282 if (i > hba[ctlr]->highest_lun)
1283 hba[ctlr]->highest_lun = i;
1284 return i;
1285 }
1286 }
1287 return -1;
1288 }
1289
1290 /* This function will add and remove logical drives from the Logical
1291 * drive array of the controller and maintain persistence of ordering
1292 * so that mount points are preserved until the next reboot. This allows
1293 * for the removal of logical drives in the middle of the drive array
1294 * without a re-ordering of those drives.
1295 * INPUT
1296 * h = The controller to perform the operations on
1297 * del_disk = The disk to remove if specified. If the value given
1298 * is NULL then no disk is removed.
1299 */
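/* OUTPUT
 * Returns an error code on early failure, the deregister_disk() status when
 * del_disk is given, and otherwise -1 once the table has been rebuilt (see
 * the note at the freeret label below). */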
1300 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1301 {
1302 int ctlr = h->ctlr;
1303 int num_luns;
1304 ReportLunData_struct *ld_buff = NULL;
1305 drive_info_struct *drv = NULL;
1306 int return_code;
1307 int listlength = 0;
1308 int i;
1309 int drv_found;
1310 int drv_index = 0;
1311 __u32 lunid = 0;
1312 unsigned long flags;
1313
1314 /* Set busy_configuring flag for this operation */
1315 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1316 if (h->num_luns >= CISS_MAX_LUN){
1317 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1318 return -EINVAL;
1319 }
1320
1321 if (h->busy_configuring){
1322 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1323 return -EBUSY;
1324 }
1325 h->busy_configuring = 1;
1326
1327 /* if del_disk is NULL then we are being called to add a new disk
1328 * and update the logical drive table. If it is not NULL then
1329 * we will check if the disk is in use or not.
1330 */
1331 if (del_disk != NULL){
1332 drv = get_drv(del_disk);
1333 drv->busy_configuring = 1;
1334 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1335 return_code = deregister_disk(del_disk, drv, 1);
1336 drv->busy_configuring = 0;
1337 h->busy_configuring = 0;
1338 return return_code;
1339 } else {
1340 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1341 if (!capable(CAP_SYS_RAWIO))
1342 return -EPERM;
1343
1344 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1345 if (ld_buff == NULL)
1346 goto mem_msg;
1347
1348 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1349 sizeof(ReportLunData_struct), 0, 0, 0,
1350 TYPE_CMD);
1351
1352 if (return_code == IO_OK){
1353 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
1354 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
1355 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
1356 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
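/* (LUNListLength is returned big-endian by the controller, hence the
 * byte-by-byte assembly above.) */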
1357 } else{ /* reading number of logical volumes failed */
1358 printk(KERN_WARNING "cciss: report logical volume"
1359 " command failed\n");
1360 listlength = 0;
1361 goto freeret;
1362 }
1363
1364 num_luns = listlength / 8; /* 8 bytes per entry */
1365 if (num_luns > CISS_MAX_LUN){
1366 num_luns = CISS_MAX_LUN;
1367 printk(KERN_WARNING "cciss: more luns configured"
1368 " on controller than can be handled by"
1369 " this driver.\n");
1370 }
1371
1372 /* Compare the controller's drive array to the driver's drive array.
1373 * Check for updates in the drive information and any new drives
1374 * on the controller.
1375 */
1376 for (i=0; i < num_luns; i++){
1377 int j;
1378
1379 drv_found = 0;
1380
1381 lunid = (0xff &
1382 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1383 lunid |= (0xff &
1384 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1385 lunid |= (0xff &
1386 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1387 lunid |= 0xff &
1388 (unsigned int)(ld_buff->LUN[i][0]);
1389
1390 /* Find if the LUN is already in the drive array
1391 * of the controller. If so then update its info
1392 * if it is not in use. If it does not exist then find
1393 * the first free index and add it.
1394 */
1395 for (j=0; j <= h->highest_lun; j++){
1396 if (h->drv[j].LunID == lunid){
1397 drv_index = j;
1398 drv_found = 1;
1399 }
1400 }
1401
1402 /* check if the drive was found already in the array */
1403 if (!drv_found){
1404 drv_index = cciss_find_free_drive_index(ctlr);
1405 if (drv_index == -1)
1406 goto freeret;
1407
1408 }
1409 h->drv[drv_index].LunID = lunid;
1410 cciss_update_drive_info(ctlr, drv_index);
1411 } /* end for */
1412 } /* end else */
1413
1414 freeret:
1415 kfree(ld_buff);
1416 h->busy_configuring = 0;
1417 /* We return -1 here to tell the ACU that we have registered/updated
1418 * all of the drives that we can and to keep it from calling us
1419 * additional times.
1420 */
1421 return -1;
1422 mem_msg:
1423 printk(KERN_ERR "cciss: out of memory\n");
1424 goto freeret;
1425 }
1426
1427 /* This function will deregister the disk and its queue from the
1428 * kernel. It must be called with the controller lock held and the
1429 * drv structure's busy_configuring flag set. Its parameters are:
1430 *
1431 * disk = This is the disk to be deregistered
1432 * drv = This is the drive_info_struct associated with the disk to be
1433 * deregistered. It contains information about the disk used
1434 * by the driver.
1435 * clear_all = This flag determines whether or not the disk information
1436 * is going to be completely cleared out and the highest_lun
1437 * reset. Sometimes we want to clear out information about
1438 * the disk in preparation for re-adding it. In this case
1439 * the highest_lun should be left unchanged and the LunID
1440 * should not be cleared.
1441 */
1442 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1443 int clear_all)
1444 {
1445 ctlr_info_t *h = get_host(disk);
1446
1447 if (!capable(CAP_SYS_RAWIO))
1448 return -EPERM;
1449
1450 /* make sure the logical volume is NOT in use */
1451 if(clear_all || (h->gendisk[0] == disk)) {
1452 if (drv->usage_count > 1)
1453 return -EBUSY;
1454 }
1455 else
1456 if( drv->usage_count > 0 )
1457 return -EBUSY;
1458
1459 /* invalidate the devices and deregister the disk. If it is disk
1460 * zero, do not deregister it but just zero out its values. This
1461 * allows us to delete disk zero but keep the controller registered.
1462 */
1463 if (h->gendisk[0] != disk){
1464 if (disk) {
1465 request_queue_t *q = disk->queue;
1466 if (disk->flags & GENHD_FL_UP)
1467 del_gendisk(disk);
1468 if (q) {
1469 blk_cleanup_queue(q);
1470 drv->queue = NULL;
1471 }
1472 }
1473 }
1474
1475 --h->num_luns;
1476 /* zero out the disk size info */
1477 drv->nr_blocks = 0;
1478 drv->block_size = 0;
1479 drv->heads = 0;
1480 drv->sectors = 0;
1481 drv->cylinders = 0;
1482 drv->raid_level = -1; /* This can be used as a flag variable to
1483 * indicate that this element of the drive
1484 * array is free.
1485 */
1486
1487 if (clear_all){
1488 /* check to see if it was the last disk */
1489 if (drv == h->drv + h->highest_lun) {
1490 /* if so, find the new highest lun */
1491 int i, newhighest =-1;
1492 for(i=0; i<h->highest_lun; i++) {
1493 /* if the disk has size > 0, it is available */
1494 if (h->drv[i].heads)
1495 newhighest = i;
1496 }
1497 h->highest_lun = newhighest;
1498 }
1499
1500 drv->LunID = 0;
1501 }
1502 return(0);
1503 }
1504
1505 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1506 size_t size,
1507 unsigned int use_unit_num, /* 0: address the controller,
1508 1: address logical volume log_unit,
1509 2: periph device address is scsi3addr */
1510 unsigned int log_unit, __u8 page_code, unsigned char *scsi3addr,
1511 int cmd_type)
1512 {
1513 ctlr_info_t *h= hba[ctlr];
1514 u64bit buff_dma_handle;
1515 int status = IO_OK;
1516
1517 c->cmd_type = CMD_IOCTL_PEND;
1518 c->Header.ReplyQueue = 0;
1519 if( buff != NULL) {
1520 c->Header.SGList = 1;
1521 c->Header.SGTotal= 1;
1522 } else {
1523 c->Header.SGList = 0;
1524 c->Header.SGTotal= 0;
1525 }
1526 c->Header.Tag.lower = c->busaddr;
1527
1528 c->Request.Type.Type = cmd_type;
1529 if (cmd_type == TYPE_CMD) {
1530 switch(cmd) {
1531 case CISS_INQUIRY:
1532 /* If the logical unit number is 0 then this is going
1533 to the controller, so it's a physical command:
1534 mode = 0, target = 0, and we have nothing to write.
1535 Otherwise, if use_unit_num == 1,
1536 mode = 1 (volume set addressing), target = LunID;
1537 otherwise, if use_unit_num == 2,
1538 mode = 0 (periph dev addr), target = scsi3addr */
1539 if (use_unit_num == 1) {
1540 c->Header.LUN.LogDev.VolId=
1541 h->drv[log_unit].LunID;
1542 c->Header.LUN.LogDev.Mode = 1;
1543 } else if (use_unit_num == 2) {
1544 memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8);
1545 c->Header.LUN.LogDev.Mode = 0;
1546 }
1547 /* are we trying to read a vital product page */
1548 if(page_code != 0) {
1549 c->Request.CDB[1] = 0x01;
1550 c->Request.CDB[2] = page_code;
1551 }
1552 c->Request.CDBLen = 6;
1553 c->Request.Type.Attribute = ATTR_SIMPLE;
1554 c->Request.Type.Direction = XFER_READ;
1555 c->Request.Timeout = 0;
1556 c->Request.CDB[0] = CISS_INQUIRY;
1557 c->Request.CDB[4] = size & 0xFF;
1558 break;
1559 case CISS_REPORT_LOG:
1560 case CISS_REPORT_PHYS:
1561 /* Talking to the controller, so it's a physical command:
1562 mode = 0, target = 0. Nothing to write.
1563 */
1564 c->Request.CDBLen = 12;
1565 c->Request.Type.Attribute = ATTR_SIMPLE;
1566 c->Request.Type.Direction = XFER_READ;
1567 c->Request.Timeout = 0;
1568 c->Request.CDB[0] = cmd;
1569 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1570 c->Request.CDB[7] = (size >> 16) & 0xFF;
1571 c->Request.CDB[8] = (size >> 8) & 0xFF;
1572 c->Request.CDB[9] = size & 0xFF;
1573 break;
1574
1575 case CCISS_READ_CAPACITY:
1576 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1577 c->Header.LUN.LogDev.Mode = 1;
1578 c->Request.CDBLen = 10;
1579 c->Request.Type.Attribute = ATTR_SIMPLE;
1580 c->Request.Type.Direction = XFER_READ;
1581 c->Request.Timeout = 0;
1582 c->Request.CDB[0] = cmd;
1583 break;
1584 case CCISS_CACHE_FLUSH:
1585 c->Request.CDBLen = 12;
1586 c->Request.Type.Attribute = ATTR_SIMPLE;
1587 c->Request.Type.Direction = XFER_WRITE;
1588 c->Request.Timeout = 0;
1589 c->Request.CDB[0] = BMIC_WRITE;
1590 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1591 break;
1592 default:
1593 printk(KERN_WARNING
1594 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1595 return(IO_ERROR);
1596 }
1597 } else if (cmd_type == TYPE_MSG) {
1598 switch (cmd) {
1599 case 0: /* ABORT message */
1600 c->Request.CDBLen = 12;
1601 c->Request.Type.Attribute = ATTR_SIMPLE;
1602 c->Request.Type.Direction = XFER_WRITE;
1603 c->Request.Timeout = 0;
1604 c->Request.CDB[0] = cmd; /* abort */
1605 c->Request.CDB[1] = 0; /* abort a command */
1606 /* buff contains the tag of the command to abort */
1607 memcpy(&c->Request.CDB[4], buff, 8);
1608 break;
1609 case 1: /* RESET message */
1610 c->Request.CDBLen = 12;
1611 c->Request.Type.Attribute = ATTR_SIMPLE;
1612 c->Request.Type.Direction = XFER_WRITE;
1613 c->Request.Timeout = 0;
1614 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1615 c->Request.CDB[0] = cmd; /* reset */
1616 c->Request.CDB[1] = 0x04; /* reset a LUN */
			break;
1617 case 3: /* No-Op message */
1618 c->Request.CDBLen = 1;
1619 c->Request.Type.Attribute = ATTR_SIMPLE;
1620 c->Request.Type.Direction = XFER_WRITE;
1621 c->Request.Timeout = 0;
1622 c->Request.CDB[0] = cmd;
1623 break;
1624 default:
1625 printk(KERN_WARNING
1626 "cciss%d: unknown message type %d\n",
1627 ctlr, cmd);
1628 return IO_ERROR;
1629 }
1630 } else {
1631 printk(KERN_WARNING
1632 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1633 return IO_ERROR;
1634 }
1635 /* Fill in the scatter gather information */
1636 if (size > 0) {
1637 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1638 buff, size, PCI_DMA_BIDIRECTIONAL);
1639 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1640 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1641 c->SG[0].Len = size;
1642 c->SG[0].Ext = 0; /* we are not chaining */
1643 }
1644 return status;
1645 }
1646 static int sendcmd_withirq(__u8 cmd,
1647 int ctlr,
1648 void *buff,
1649 size_t size,
1650 unsigned int use_unit_num,
1651 unsigned int log_unit,
1652 __u8 page_code,
1653 int cmd_type)
1654 {
1655 ctlr_info_t *h = hba[ctlr];
1656 CommandList_struct *c;
1657 u64bit buff_dma_handle;
1658 unsigned long flags;
1659 int return_status;
1660 DECLARE_COMPLETION(wait);
1661
1662 if ((c = cmd_alloc(h , 0)) == NULL)
1663 return -ENOMEM;
1664 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1665 log_unit, page_code, NULL, cmd_type);
1666 if (return_status != IO_OK) {
1667 cmd_free(h, c, 0);
1668 return return_status;
1669 }
1670 resend_cmd2:
1671 c->waiting = &wait;
1672
1673 /* Put the request on the tail of the queue and send it */
1674 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1675 addQ(&h->reqQ, c);
1676 h->Qdepth++;
1677 start_io(h);
1678 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1679
1680 wait_for_completion(&wait);
1681
1682 if(c->err_info->CommandStatus != 0)
1683 { /* an error has occurred */
1684 switch(c->err_info->CommandStatus)
1685 {
1686 case CMD_TARGET_STATUS:
1687 printk(KERN_WARNING "cciss: cmd %p has "
1688 " completed with errors\n", c);
1689 if( c->err_info->ScsiStatus)
1690 {
1691 printk(KERN_WARNING "cciss: cmd %p "
1692 "has SCSI Status = %x\n",
1693 c,
1694 c->err_info->ScsiStatus);
1695 }
1696
1697 break;
1698 case CMD_DATA_UNDERRUN:
1699 case CMD_DATA_OVERRUN:
1700 /* expected for inquiry and report lun commands */
1701 break;
1702 case CMD_INVALID:
1703 printk(KERN_WARNING "cciss: Cmd %p is "
1704 "reported invalid\n", c);
1705 return_status = IO_ERROR;
1706 break;
1707 case CMD_PROTOCOL_ERR:
1708 printk(KERN_WARNING "cciss: cmd %p has "
1709 "protocol error \n", c);
1710 return_status = IO_ERROR;
1711 break;
1712 case CMD_HARDWARE_ERR:
1713 printk(KERN_WARNING "cciss: cmd %p had "
1714 " hardware error\n", c);
1715 return_status = IO_ERROR;
1716 break;
1717 case CMD_CONNECTION_LOST:
1718 printk(KERN_WARNING "cciss: cmd %p had "
1719 "connection lost\n", c);
1720 return_status = IO_ERROR;
1721 break;
1722 case CMD_ABORTED:
1723 printk(KERN_WARNING "cciss: cmd %p was "
1724 "aborted\n", c);
1725 return_status = IO_ERROR;
1726 break;
1727 case CMD_ABORT_FAILED:
1728 printk(KERN_WARNING "cciss: cmd %p reports "
1729 "abort failed\n", c);
1730 return_status = IO_ERROR;
1731 break;
1732 case CMD_UNSOLICITED_ABORT:
1733 printk(KERN_WARNING
1734 "cciss%d: unsolicited abort %p\n",
1735 ctlr, c);
1736 if (c->retry_count < MAX_CMD_RETRIES) {
1737 printk(KERN_WARNING
1738 "cciss%d: retrying %p\n",
1739 ctlr, c);
1740 c->retry_count++;
1741 /* erase the old error information */
1742 memset(c->err_info, 0,
1743 sizeof(ErrorInfo_struct));
1744 return_status = IO_OK;
1745 INIT_COMPLETION(wait);
1746 goto resend_cmd2;
1747 }
1748 return_status = IO_ERROR;
1749 break;
1750 default:
1751 printk(KERN_WARNING "cciss: cmd %p returned "
1752 "unknown status %x\n", c,
1753 c->err_info->CommandStatus);
1754 return_status = IO_ERROR;
1755 }
1756 }
1757 /* unlock the buffers from DMA */
1758 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1759 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1760 pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
1761 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1762 cmd_free(h, c, 0);
1763 return(return_status);
1764
1765 }
1766 static void cciss_geometry_inquiry(int ctlr, int logvol,
1767 int withirq, unsigned int total_size,
1768 unsigned int block_size, InquiryData_struct *inq_buff,
1769 drive_info_struct *drv)
1770 {
1771 int return_code;
1772 memset(inq_buff, 0, sizeof(InquiryData_struct));
1773 if (withirq)
1774 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1775 inq_buff, sizeof(*inq_buff), 1, logvol ,0xC1, TYPE_CMD);
1776 else
1777 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1778 sizeof(*inq_buff), 1, logvol ,0xC1, NULL, TYPE_CMD);
1779 if (return_code == IO_OK) {
1780 if(inq_buff->data_byte[8] == 0xFF) {
1781 printk(KERN_WARNING
1782 "cciss: reading geometry failed, volume "
1783 "does not support reading geometry\n");
1784 drv->block_size = block_size;
1785 drv->nr_blocks = total_size;
1786 drv->heads = 255;
1787 drv->sectors = 32; // Sectors per track
1788 drv->cylinders = total_size / 255 / 32;
1789 } else {
1790 unsigned int t;
1791
1792 drv->block_size = block_size;
1793 drv->nr_blocks = total_size;
1794 drv->heads = inq_buff->data_byte[6];
1795 drv->sectors = inq_buff->data_byte[7];
1796 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1797 drv->cylinders += inq_buff->data_byte[5];
1798 drv->raid_level = inq_buff->data_byte[8];
1799 t = drv->heads * drv->sectors;
1800 if (t > 1) {
1801 drv->cylinders = total_size/t;
1802 }
1803 }
1804 } else { /* Get geometry failed */
1805 printk(KERN_WARNING "cciss: reading geometry failed\n");
1806 }
1807 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n",
1808 drv->heads, drv->sectors, drv->cylinders);
1809 }
1810 static void
1811 cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
1812 int withirq, unsigned int *total_size, unsigned int *block_size)
1813 {
1814 int return_code;
1815 memset(buf, 0, sizeof(*buf));
1816 if (withirq)
1817 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1818 ctlr, buf, sizeof(*buf), 1, logvol, 0, TYPE_CMD);
1819 else
1820 return_code = sendcmd(CCISS_READ_CAPACITY,
1821 ctlr, buf, sizeof(*buf), 1, logvol, 0, NULL, TYPE_CMD);
1822 if (return_code == IO_OK) {
1823 *total_size = be32_to_cpu(*((__be32 *) &buf->total_size[0]))+1;
1824 *block_size = be32_to_cpu(*((__be32 *) &buf->block_size[0]));
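/* (Read capacity returns the address of the last block, hence the +1
 * above to get the total number of blocks.) */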
1825 } else { /* read capacity command failed */
1826 printk(KERN_WARNING "cciss: read capacity failed\n");
1827 *total_size = 0;
1828 *block_size = BLOCK_SIZE;
1829 }
1830 printk(KERN_INFO " blocks= %u block_size= %d\n",
1831 *total_size, *block_size);
1832 return;
1833 }
1834
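/*
 * Re-read capacity and geometry for the logical volume behind a gendisk
 * and push the results back into the block layer (hardsect size and
 * capacity).  Returns 0 on success, 1 if the volume is no longer present
 * or memory could not be allocated.
 */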
1835 static int cciss_revalidate(struct gendisk *disk)
1836 {
1837 ctlr_info_t *h = get_host(disk);
1838 drive_info_struct *drv = get_drv(disk);
1839 int logvol;
1840 int FOUND=0;
1841 unsigned int block_size;
1842 unsigned int total_size;
1843 ReadCapdata_struct *size_buff = NULL;
1844 InquiryData_struct *inq_buff = NULL;
1845
1846 for(logvol=0; logvol < CISS_MAX_LUN; logvol++)
1847 {
1848 if(h->drv[logvol].LunID == drv->LunID) {
1849 FOUND=1;
1850 break;
1851 }
1852 }
1853
1854 if (!FOUND) return 1;
1855
1856 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1857 if (size_buff == NULL)
1858 {
1859 printk(KERN_WARNING "cciss: out of memory\n");
1860 return 1;
1861 }
1862 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1863 if (inq_buff == NULL)
1864 {
1865 printk(KERN_WARNING "cciss: out of memory\n");
1866 kfree(size_buff);
1867 return 1;
1868 }
1869
1870 cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size);
1871 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv);
1872
1873 blk_queue_hardsect_size(drv->queue, drv->block_size);
1874 set_capacity(disk, drv->nr_blocks);
1875
1876 kfree(size_buff);
1877 kfree(inq_buff);
1878 return 0;
1879 }
1880
1881 /*
1882 * Poll for a command to complete.
1883 * The memory-mapped FIFO is polled for the completion.
1884 * Used only at init time, interrupts from the HBA are disabled.
1885 */
1886 static unsigned long pollcomplete(int ctlr)
1887 {
1888 unsigned long done;
1889 int i;
1890
1891 /* Wait (up to 20 seconds) for a command to complete */
1892
1893 for (i = 20 * HZ; i > 0; i--) {
1894 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1895 if (done == FIFO_EMPTY)
1896 schedule_timeout_uninterruptible(1);
1897 else
1898 return (done);
1899 }
1900 /* Invalid address to tell caller we ran out of time */
1901 return 1;
1902 }
1903
1904 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
1905 {
1906 /* We get in here if sendcmd() is polling for completions
1907 and gets some command back that it wasn't expecting --
1908 something other than that which it just sent down.
1909 Ordinarily, that shouldn't happen, but it can happen when
1910 the scsi tape stuff gets into error handling mode, and
1911 starts using sendcmd() to try to abort commands and
1912 reset tape drives. In that case, sendcmd may pick up
1913 completions of commands that were sent to logical drives
1914 through the block i/o system, or cciss ioctls completing, etc.
1915 In that case, we need to save those completions for later
1916 processing by the interrupt handler.
1917 */
1918
1919 #ifdef CONFIG_CISS_SCSI_TAPE
1920 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
1921
1922 /* If it's not the scsi tape stuff doing error handling, (abort */
1923 /* or reset) then we don't expect anything weird. */
1924 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
1925 #endif
1926 printk( KERN_WARNING "cciss cciss%d: SendCmd "
1927 "Invalid command list address returned! (%lx)\n",
1928 ctlr, complete);
1929 /* not much we can do. */
1930 #ifdef CONFIG_CISS_SCSI_TAPE
1931 return 1;
1932 }
1933
1934 /* We've sent down an abort or reset, but something else
1935 has completed */
1936 if (srl->ncompletions >= (NR_CMDS + 2)) {
1937 /* Uh oh. No room to save it for later... */
1938 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
1939 "reject list overflow, command lost!\n", ctlr);
1940 return 1;
1941 }
1942 /* Save it for later */
1943 srl->complete[srl->ncompletions] = complete;
1944 srl->ncompletions++;
1945 #endif
1946 return 0;
1947 }
1948
1949 /*
1950 * Send a command to the controller, and wait for it to complete.
1951 * Only used at init time.
1952 */
1953 static int sendcmd(
1954 __u8 cmd,
1955 int ctlr,
1956 void *buff,
1957 size_t size,
1958 unsigned int use_unit_num, /* 0: address the controller,
1959 1: address logical volume log_unit,
1960 2: periph device address is scsi3addr */
1961 unsigned int log_unit,
1962 __u8 page_code,
1963 unsigned char *scsi3addr,
1964 int cmd_type)
1965 {
1966 CommandList_struct *c;
1967 int i;
1968 unsigned long complete;
1969 ctlr_info_t *info_p= hba[ctlr];
1970 u64bit buff_dma_handle;
1971 int status, done = 0;
1972
1973 if ((c = cmd_alloc(info_p, 1)) == NULL) {
1974 printk(KERN_WARNING "cciss: unable to get memory");
1975 return(IO_ERROR);
1976 }
1977 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1978 log_unit, page_code, scsi3addr, cmd_type);
1979 if (status != IO_OK) {
1980 cmd_free(info_p, c, 1);
1981 return status;
1982 }
1983 resend_cmd1:
1984 /*
1985 * Disable interrupt
1986 */
1987 #ifdef CCISS_DEBUG
1988 printk(KERN_DEBUG "cciss: turning intr off\n");
1989 #endif /* CCISS_DEBUG */
1990 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
1991
1992 /* Make sure there is room in the command FIFO */
1993 /* Actually it should be completely empty at this time */
1994 /* unless we are in here doing error handling for the scsi */
1995 /* tape side of the driver. */
1996 for (i = 200000; i > 0; i--)
1997 {
1998 /* if fifo isn't full go */
1999 if (!(info_p->access.fifo_full(info_p)))
2000 {
2001
2002 break;
2003 }
2004 udelay(10);
2005 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2006 " waiting!\n", ctlr);
2007 }
2008 /*
2009 * Send the cmd
2010 */
2011 info_p->access.submit_command(info_p, c);
2012 done = 0;
2013 do {
2014 complete = pollcomplete(ctlr);
2015
2016 #ifdef CCISS_DEBUG
2017 printk(KERN_DEBUG "cciss: command completed\n");
2018 #endif /* CCISS_DEBUG */
2019
2020 if (complete == 1) {
2021 printk( KERN_WARNING
2022 "cciss cciss%d: SendCmd Timeout out, "
2023 "No command list address returned!\n",
2024 ctlr);
2025 status = IO_ERROR;
2026 done = 1;
2027 break;
2028 }
2029
2030 /* This will need to change for direct lookup completions */
2031 if ( (complete & CISS_ERROR_BIT)
2032 && (complete & ~CISS_ERROR_BIT) == c->busaddr)
2033 {
2034 /* if data overrun or underrun on a Report command,
2035 ignore it
2036 */
2037 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2038 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2039 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2040 ((c->err_info->CommandStatus ==
2041 CMD_DATA_OVERRUN) ||
2042 (c->err_info->CommandStatus ==
2043 CMD_DATA_UNDERRUN)
2044 ))
2045 {
2046 complete = c->busaddr;
2047 } else {
2048 if (c->err_info->CommandStatus ==
2049 CMD_UNSOLICITED_ABORT) {
2050 printk(KERN_WARNING "cciss%d: "
2051 "unsolicited abort %p\n",
2052 ctlr, c);
2053 if (c->retry_count < MAX_CMD_RETRIES) {
2054 printk(KERN_WARNING
2055 "cciss%d: retrying %p\n",
2056 ctlr, c);
2057 c->retry_count++;
2058 /* erase the old error */
2059 /* information */
2060 memset(c->err_info, 0,
2061 sizeof(ErrorInfo_struct));
2062 goto resend_cmd1;
2063 } else {
2064 printk(KERN_WARNING
2065 "cciss%d: retried %p too "
2066 "many times\n", ctlr, c);
2067 status = IO_ERROR;
2068 goto cleanup1;
2069 }
2070 } else if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
2071 printk(KERN_WARNING "cciss%d: command could not be aborted.\n", ctlr);
2072 status = IO_ERROR;
2073 goto cleanup1;
2074 }
2075 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2076 " Error %x \n", ctlr,
2077 c->err_info->CommandStatus);
2078 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2079 " offensive info\n"
2080 " size %x\n num %x value %x\n", ctlr,
2081 c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
2082 c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
2083 c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
2084 status = IO_ERROR;
2085 goto cleanup1;
2086 }
2087 }
2088 /* This will need changing for direct lookup completions */
2089 if (complete != c->busaddr) {
2090 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2091 BUG(); /* we are pretty much hosed if we get here. */
2092 }
2093 continue;
2094 } else
2095 done = 1;
2096 } while (!done);
2097
2098 cleanup1:
2099 /* unlock the data buffer from DMA */
2100 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2101 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2102 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2103 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2104 #ifdef CONFIG_CISS_SCSI_TAPE
2105 /* if we saved some commands for later, process them now. */
2106 if (info_p->scsi_rejects.ncompletions > 0)
2107 do_cciss_intr(0, info_p, NULL);
2108 #endif
2109 cmd_free(info_p, c, 1);
2110 return (status);
2111 }
2112 /*
2113 * Map (physical) PCI mem into (virtual) kernel space
2114 */
2115 static void __iomem *remap_pci_mem(ulong base, ulong size)
2116 {
2117 ulong page_base = ((ulong) base) & PAGE_MASK;
2118 ulong page_offs = ((ulong) base) - page_base;
2119 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
2120
2121 return page_remapped ? (page_remapped + page_offs) : NULL;
2122 }
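/*
 * Illustrative example of the arithmetic above (values are hypothetical):
 * with base = 0xf7e00004 and a 4K PAGE_SIZE, page_base = 0xf7e00000 and
 * page_offs = 4, so ioremap() covers size + 4 bytes starting at the page
 * boundary and the caller gets back page_remapped + 4, i.e. a virtual
 * address corresponding exactly to the requested physical base.
 */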
2123
2124 /*
2125 * Takes jobs off the request Q and sends them to the hardware, then
2126 * puts them on the completion Q to wait for completion.
2127 */
2128 static void start_io( ctlr_info_t *h)
2129 {
2130 CommandList_struct *c;
2131
2132 while(( c = h->reqQ) != NULL )
2133 {
2134 /* can't do anything if fifo is full */
2135 if ((h->access.fifo_full(h))) {
2136 printk(KERN_WARNING "cciss: fifo full\n");
2137 break;
2138 }
2139
2140 /* Get the first entry from the Request Q */
2141 removeQ(&(h->reqQ), c);
2142 h->Qdepth--;
2143
2144 /* Tell the controller to execute the command */
2145 h->access.submit_command(h, c);
2146
2147 /* Put job onto the completed Q */
2148 addQ (&(h->cmpQ), c);
2149 }
2150 }
2151
2152 static inline void complete_buffers(struct bio *bio, int status)
2153 {
2154 while (bio) {
2155 struct bio *xbh = bio->bi_next;
2156 int nr_sectors = bio_sectors(bio);
2157
2158 bio->bi_next = NULL;
2159 blk_finished_io(nr_sectors);
2160 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
2161 bio = xbh;
2162 }
2163
2164 }
2165 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2166 /* Zeros out the error record and then resends the command back */
2167 /* to the controller */
2168 static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
2169 {
2170 /* erase the old error information */
2171 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2172
2173 /* add it to software queue and then send it to the controller */
2174 addQ(&(h->reqQ),c);
2175 h->Qdepth++;
2176 if(h->Qdepth > h->maxQsinceinit)
2177 h->maxQsinceinit = h->Qdepth;
2178
2179 start_io(h);
2180 }
2181
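/*
 * Softirq half of request completion: the interrupt handler defers the
 * final work by calling blk_complete_request() (see complete_command()
 * below), and the block layer then invokes this routine in softirq
 * context to unmap the scatter-gather DMA mappings, complete the bios
 * and free the command.
 */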
2182 static void cciss_softirq_done(struct request *rq)
2183 {
2184 CommandList_struct *cmd = rq->completion_data;
2185 ctlr_info_t *h = hba[cmd->ctlr];
2186 u64bit temp64;
2187 int i, ddir;
2188
2189 if (cmd->Request.Type.Direction == XFER_READ)
2190 ddir = PCI_DMA_FROMDEVICE;
2191 else
2192 ddir = PCI_DMA_TODEVICE;
2193
2194 /* command did not need to be retried */
2195 /* unmap the DMA mapping for all the scatter gather elements */
2196 for(i=0; i<cmd->Header.SGList; i++) {
2197 temp64.val32.lower = cmd->SG[i].Addr.lower;
2198 temp64.val32.upper = cmd->SG[i].Addr.upper;
2199 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
2200 }
2201
2202 complete_buffers(rq->bio, rq->errors);
2203
2204 #ifdef CCISS_DEBUG
2205 printk("Done with %p\n", rq);
2206 #endif /* CCISS_DEBUG */
2207
2208 spin_lock_irq(&h->lock);
2209 end_that_request_last(rq, rq->errors);
2210 cmd_free(h, cmd,1);
2211 spin_unlock_irq(&h->lock);
2212 }
2213
2214 /* checks the status of the job and calls complete buffers to mark all
2215 * buffers for the completed job. Note that this function does not need
2216 * to hold the hba/queue lock.
2217 */
2218 static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
2219 int timeout)
2220 {
2221 int status = 1;
2222 int retry_cmd = 0;
2223
2224 if (timeout)
2225 status = 0;
2226
2227 if(cmd->err_info->CommandStatus != 0)
2228 { /* an error has occurred */
2229 switch(cmd->err_info->CommandStatus)
2230 {
2231 unsigned char sense_key;
2232 case CMD_TARGET_STATUS:
2233 status = 0;
2234
2235 if( cmd->err_info->ScsiStatus == 0x02)
2236 {
2237 printk(KERN_WARNING "cciss: cmd %p "
2238 "has CHECK CONDITION "
2239 " byte 2 = 0x%x\n", cmd,
2240 cmd->err_info->SenseInfo[2]
2241 );
2242 /* check the sense key */
2243 sense_key = 0xf &
2244 cmd->err_info->SenseInfo[2];
2245 /* no status or recovered error */
2246 if((sense_key == 0x0) ||
2247 (sense_key == 0x1))
2248 {
2249 status = 1;
2250 }
2251 } else
2252 {
2253 printk(KERN_WARNING "cciss: cmd %p "
2254 "has SCSI Status 0x%x\n",
2255 cmd, cmd->err_info->ScsiStatus);
2256 }
2257 break;
2258 case CMD_DATA_UNDERRUN:
2259 printk(KERN_WARNING "cciss: cmd %p has"
2260 " completed with data underrun "
2261 "reported\n", cmd);
2262 break;
2263 case CMD_DATA_OVERRUN:
2264 printk(KERN_WARNING "cciss: cmd %p has"
2265 " completed with data overrun "
2266 "reported\n", cmd);
2267 break;
2268 case CMD_INVALID:
2269 printk(KERN_WARNING "cciss: cmd %p is "
2270 "reported invalid\n", cmd);
2271 status = 0;
2272 break;
2273 case CMD_PROTOCOL_ERR:
2274 printk(KERN_WARNING "cciss: cmd %p has "
2275 "protocol error \n", cmd);
2276 status = 0;
2277 break;
2278 case CMD_HARDWARE_ERR:
2279 printk(KERN_WARNING "cciss: cmd %p had "
2280 " hardware error\n", cmd);
2281 status = 0;
2282 break;
2283 case CMD_CONNECTION_LOST:
2284 printk(KERN_WARNING "cciss: cmd %p had "
2285 "connection lost\n", cmd);
2286 status=0;
2287 break;
2288 case CMD_ABORTED:
2289 printk(KERN_WARNING "cciss: cmd %p was "
2290 "aborted\n", cmd);
2291 status=0;
2292 break;
2293 case CMD_ABORT_FAILED:
2294 printk(KERN_WARNING "cciss: cmd %p reports "
2295 "abort failed\n", cmd);
2296 status=0;
2297 break;
2298 case CMD_UNSOLICITED_ABORT:
2299 printk(KERN_WARNING "cciss%d: unsolicited "
2300 "abort %p\n", h->ctlr, cmd);
2301 if (cmd->retry_count < MAX_CMD_RETRIES) {
2302 retry_cmd=1;
2303 printk(KERN_WARNING
2304 "cciss%d: retrying %p\n",
2305 h->ctlr, cmd);
2306 cmd->retry_count++;
2307 } else
2308 printk(KERN_WARNING
2309 "cciss%d: %p retried too "
2310 "many times\n", h->ctlr, cmd);
2311 status=0;
2312 break;
2313 case CMD_TIMEOUT:
2314 printk(KERN_WARNING "cciss: cmd %p timedout\n",
2315 cmd);
2316 status=0;
2317 break;
2318 default:
2319 printk(KERN_WARNING "cciss: cmd %p returned "
2320 "unknown status %x\n", cmd,
2321 cmd->err_info->CommandStatus);
2322 status=0;
2323 }
2324 }
2325 /* We need to return this command */
2326 if(retry_cmd) {
2327 resend_cciss_cmd(h,cmd);
2328 return;
2329 }
2330
2331 cmd->rq->completion_data = cmd;
2332 cmd->rq->errors = status;
2333 blk_complete_request(cmd->rq);
2334 }
2335
2336 /*
2337 * Get a request and submit it to the controller.
2338 */
2339 static void do_cciss_request(request_queue_t *q)
2340 {
2341 ctlr_info_t *h= q->queuedata;
2342 CommandList_struct *c;
2343 int start_blk, seg;
2344 struct request *creq;
2345 u64bit temp64;
2346 struct scatterlist tmp_sg[MAXSGENTRIES];
2347 drive_info_struct *drv;
2348 int i, dir;
2349
2350 /* We call start_io here in case there is a command waiting on the
2351 * queue that has not been sent.
2352 */
2353 if (blk_queue_plugged(q))
2354 goto startio;
2355
2356 queue:
2357 creq = elv_next_request(q);
2358 if (!creq)
2359 goto startio;
2360
2361 if (creq->nr_phys_segments > MAXSGENTRIES)
2362 BUG();
2363
2364 if (( c = cmd_alloc(h, 1)) == NULL)
2365 goto full;
2366
2367 blkdev_dequeue_request(creq);
2368
2369 spin_unlock_irq(q->queue_lock);
2370
2371 c->cmd_type = CMD_RWREQ;
2372 c->rq = creq;
2373
2374 /* fill in the request */
2375 drv = creq->rq_disk->private_data;
2376 c->Header.ReplyQueue = 0; // unused in simple mode
2377 /* got command from pool, so use the command block index instead */
2378 /* for direct lookups. */
2379 /* The first 2 bits are reserved for controller error reporting. */
2380 c->Header.Tag.lower = (c->cmdindex << 3);
2381 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
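/*
 * Worked example (hypothetical index): for cmdindex 9 the tag becomes
 * (9 << 3) | 0x04 = 0x4c.  On completion the interrupt handler tests
 * bit 0x04 to recognise a direct-lookup tag and recovers the pool index
 * with (tag >> 3); see do_cciss_intr().
 */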
2382 c->Header.LUN.LogDev.VolId= drv->LunID;
2383 c->Header.LUN.LogDev.Mode = 1;
2384 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2385 c->Request.Type.Type = TYPE_CMD; // It is a command.
2386 c->Request.Type.Attribute = ATTR_SIMPLE;
2387 c->Request.Type.Direction =
2388 (rq_data_dir(creq) == READ) ? XFER_READ: XFER_WRITE;
2389 c->Request.Timeout = 0; // Don't time out
2390 c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
2391 start_blk = creq->sector;
2392 #ifdef CCISS_DEBUG
2393 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",(int) creq->sector,
2394 (int) creq->nr_sectors);
2395 #endif /* CCISS_DEBUG */
2396
2397 seg = blk_rq_map_sg(q, creq, tmp_sg);
2398
2399 /* get the DMA records for the setup */
2400 if (c->Request.Type.Direction == XFER_READ)
2401 dir = PCI_DMA_FROMDEVICE;
2402 else
2403 dir = PCI_DMA_TODEVICE;
2404
2405 for (i=0; i<seg; i++)
2406 {
2407 c->SG[i].Len = tmp_sg[i].length;
2408 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2409 tmp_sg[i].offset, tmp_sg[i].length,
2410 dir);
2411 c->SG[i].Addr.lower = temp64.val32.lower;
2412 c->SG[i].Addr.upper = temp64.val32.upper;
2413 c->SG[i].Ext = 0; // we are not chaining
2414 }
2415 /* track how many SG entries we are using */
2416 if( seg > h->maxSG)
2417 h->maxSG = seg;
2418
2419 #ifdef CCISS_DEBUG
2420 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", creq->nr_sectors, seg);
2421 #endif /* CCISS_DEBUG */
2422
2423 c->Header.SGList = c->Header.SGTotal = seg;
2424 c->Request.CDB[1]= 0;
2425 c->Request.CDB[2]= (start_blk >> 24) & 0xff; //MSB
2426 c->Request.CDB[3]= (start_blk >> 16) & 0xff;
2427 c->Request.CDB[4]= (start_blk >> 8) & 0xff;
2428 c->Request.CDB[5]= start_blk & 0xff;
2429 c->Request.CDB[6]= 0; // (sect >> 24) & 0xff; MSB
2430 c->Request.CDB[7]= (creq->nr_sectors >> 8) & 0xff;
2431 c->Request.CDB[8]= creq->nr_sectors & 0xff;
2432 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
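/*
 * The CDB built above follows the 10-byte READ/WRITE layout: bytes 2-5
 * hold the starting LBA (big-endian) and bytes 7-8 the transfer length
 * in blocks, which is why start_blk and nr_sectors are split out byte
 * by byte.
 */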
2433
2434 spin_lock_irq(q->queue_lock);
2435
2436 addQ(&(h->reqQ),c);
2437 h->Qdepth++;
2438 if(h->Qdepth > h->maxQsinceinit)
2439 h->maxQsinceinit = h->Qdepth;
2440
2441 goto queue;
2442 full:
2443 blk_stop_queue(q);
2444 startio:
2445 /* We will already have the driver lock here, so we do not
2446 * need to take it.
2447 */
2448 start_io(h);
2449 }
2450
2451 static inline unsigned long get_next_completion(ctlr_info_t *h)
2452 {
2453 #ifdef CONFIG_CISS_SCSI_TAPE
2454 /* Any rejects from sendcmd() lying around? Process them first */
2455 if (h->scsi_rejects.ncompletions == 0)
2456 return h->access.command_completed(h);
2457 else {
2458 struct sendcmd_reject_list *srl;
2459 int n;
2460 srl = &h->scsi_rejects;
2461 n = --srl->ncompletions;
2462 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2463 printk("p");
2464 return srl->complete[n];
2465 }
2466 #else
2467 return h->access.command_completed(h);
2468 #endif
2469 }
2470
2471 static inline int interrupt_pending(ctlr_info_t *h)
2472 {
2473 #ifdef CONFIG_CISS_SCSI_TAPE
2474 return ( h->access.intr_pending(h)
2475 || (h->scsi_rejects.ncompletions > 0));
2476 #else
2477 return h->access.intr_pending(h);
2478 #endif
2479 }
2480
2481 static inline long interrupt_not_for_us(ctlr_info_t *h)
2482 {
2483 #ifdef CONFIG_CISS_SCSI_TAPE
2484 return (((h->access.intr_pending(h) == 0) ||
2485 (h->interrupts_enabled == 0))
2486 && (h->scsi_rejects.ncompletions == 0));
2487 #else
2488 return (((h->access.intr_pending(h) == 0) ||
2489 (h->interrupts_enabled == 0)));
2490 #endif
2491 }
2492
2493 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2494 {
2495 ctlr_info_t *h = dev_id;
2496 CommandList_struct *c;
2497 unsigned long flags;
2498 __u32 a, a1, a2;
2499 int j;
2500 int start_queue = h->next_to_run;
2501
2502 if (interrupt_not_for_us(h))
2503 return IRQ_NONE;
2504 /*
2505 * If there are completed commands in the completion queue,
2506 * we had better do something about it.
2507 */
2508 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2509 while (interrupt_pending(h)) {
2510 while((a = get_next_completion(h)) != FIFO_EMPTY) {
2511 a1 = a;
2512 if ((a & 0x04)) {
2513 a2 = (a >> 3);
2514 if (a2 >= NR_CMDS) {
2515 printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr);
2516 fail_all_cmds(h->ctlr);
2517 return IRQ_HANDLED;
2518 }
2519
2520 c = h->cmd_pool + a2;
2521 a = c->busaddr;
2522
2523 } else {
2524 a &= ~3;
2525 if ((c = h->cmpQ) == NULL) {
2526 printk(KERN_WARNING "cciss: Completion of %08x ignored\n", a1);
2527 continue;
2528 }
2529 while(c->busaddr != a) {
2530 c = c->next;
2531 if (c == h->cmpQ)
2532 break;
2533 }
2534 }
2535 /*
2536 * If we've found the command, take it off the
2537 * completion Q and free it
2538 */
2539 if (c->busaddr == a) {
2540 removeQ(&h->cmpQ, c);
2541 if (c->cmd_type == CMD_RWREQ) {
2542 complete_command(h, c, 0);
2543 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2544 complete(c->waiting);
2545 }
2546 # ifdef CONFIG_CISS_SCSI_TAPE
2547 else if (c->cmd_type == CMD_SCSI)
2548 complete_scsi_command(c, 0, a1);
2549 # endif
2550 continue;
2551 }
2552 }
2553 }
2554
2555 /* check to see if we have maxed out the number of commands that can
2556 * be placed on the queue. If so then exit. We do this check here
2557 * in case the interrupt we serviced was from an ioctl and did not
2558 * free any new commands.
2559 */
2560 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2561 goto cleanup;
2562
2563 /* We have room on the queue for more commands. Now we need to queue
2564 * them up. We will also keep track of the next queue to run so
2565 * that every queue gets a chance to be started first.
2566 */
2567 for (j=0; j < h->highest_lun + 1; j++){
2568 int curr_queue = (start_queue + j) % (h->highest_lun + 1);
2569 /* make sure the disk has been added and the drive is real
2570 * because this can be called from the middle of init_one.
2571 */
2572 if(!(h->drv[curr_queue].queue) ||
2573 !(h->drv[curr_queue].heads))
2574 continue;
2575 blk_start_queue(h->gendisk[curr_queue]->queue);
2576
2577 /* check to see if we have maxed out the number of commands
2578 * that can be placed on the queue.
2579 */
2580 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2581 {
2582 if (curr_queue == start_queue){
2583 h->next_to_run = (start_queue + 1) % (h->highest_lun + 1);
2584 goto cleanup;
2585 } else {
2586 h->next_to_run = curr_queue;
2587 goto cleanup;
2588 }
2589 } else {
2590 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
2591 }
2592 }
2593
2594 cleanup:
2595 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2596 return IRQ_HANDLED;
2597 }
2598 /*
2599 * We cannot read the structure directly; for portability we must use
2600 * the io functions.
2601 * This is for debug only.
2602 */
2603 #ifdef CCISS_DEBUG
2604 static void print_cfg_table( CfgTable_struct *tb)
2605 {
2606 int i;
2607 char temp_name[17];
2608
2609 printk("Controller Configuration information\n");
2610 printk("------------------------------------\n");
2611 for(i=0;i<4;i++)
2612 temp_name[i] = readb(&(tb->Signature[i]));
2613 temp_name[4]='\0';
2614 printk(" Signature = %s\n", temp_name);
2615 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2616 printk(" Transport methods supported = 0x%x\n",
2617 readl(&(tb-> TransportSupport)));
2618 printk(" Transport methods active = 0x%x\n",
2619 readl(&(tb->TransportActive)));
2620 printk(" Requested transport Method = 0x%x\n",
2621 readl(&(tb->HostWrite.TransportRequest)));
2622 printk(" Coalese Interrupt Delay = 0x%x\n",
2623 readl(&(tb->HostWrite.CoalIntDelay)));
2624 printk(" Coalese Interrupt Count = 0x%x\n",
2625 readl(&(tb->HostWrite.CoalIntCount)));
2626 printk(" Max outstanding commands = 0x%d\n",
2627 readl(&(tb->CmdsOutMax)));
2628 printk(" Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
2629 for(i=0;i<16;i++)
2630 temp_name[i] = readb(&(tb->ServerName[i]));
2631 temp_name[16] = '\0';
2632 printk(" Server Name = %s\n", temp_name);
2633 printk(" Heartbeat Counter = 0x%x\n\n\n",
2634 readl(&(tb->HeartBeat)));
2635 }
2636 #endif /* CCISS_DEBUG */
2637
2638 static void release_io_mem(ctlr_info_t *c)
2639 {
2640 /* if IO mem was not protected do nothing */
2641 if( c->io_mem_addr == 0)
2642 return;
2643 release_region(c->io_mem_addr, c->io_mem_length);
2644 c->io_mem_addr = 0;
2645 c->io_mem_length = 0;
2646 }
2647
2648 static int find_PCI_BAR_index(struct pci_dev *pdev,
2649 unsigned long pci_bar_addr)
2650 {
2651 int i, offset, mem_type, bar_type;
2652 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2653 return 0;
2654 offset = 0;
2655 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2656 bar_type = pci_resource_flags(pdev, i) &
2657 PCI_BASE_ADDRESS_SPACE;
2658 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2659 offset += 4;
2660 else {
2661 mem_type = pci_resource_flags(pdev, i) &
2662 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2663 switch (mem_type) {
2664 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2665 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2666 offset += 4; /* 32 bit */
2667 break;
2668 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2669 offset += 8;
2670 break;
2671 default: /* reserved in PCI 2.2 */
2672 printk(KERN_WARNING "Base address is invalid\n");
2673 return -1;
2674 break;
2675 }
2676 }
2677 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2678 return i+1;
2679 }
2680 return -1;
2681 }
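/*
 * Example of the mapping above (hypothetical layout): if BAR 0 is a
 * 64-bit memory BAR it occupies 8 bytes of config space, so a
 * pci_bar_addr of PCI_BASE_ADDRESS_0 + 8 (i.e. PCI_BASE_ADDRESS_2)
 * resolves to resource index 1.
 */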
2682
2683 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2684 * controllers that are capable. If not, we use IO-APIC mode.
2685 */
2686
2687 static void __devinit cciss_interrupt_mode(ctlr_info_t *c, struct pci_dev *pdev, __u32 board_id)
2688 {
2689 #ifdef CONFIG_PCI_MSI
2690 int err;
2691 struct msix_entry cciss_msix_entries[4] = {{0,0}, {0,1},
2692 {0,2}, {0,3}};
2693
2694 /* Some boards advertise MSI but don't really support it */
2695 if ((board_id == 0x40700E11) ||
2696 (board_id == 0x40800E11) ||
2697 (board_id == 0x40820E11) ||
2698 (board_id == 0x40830E11))
2699 goto default_int_mode;
2700
2701 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2702 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2703 if (!err) {
2704 c->intr[0] = cciss_msix_entries[0].vector;
2705 c->intr[1] = cciss_msix_entries[1].vector;
2706 c->intr[2] = cciss_msix_entries[2].vector;
2707 c->intr[3] = cciss_msix_entries[3].vector;
2708 c->msix_vector = 1;
2709 return;
2710 }
2711 if (err > 0) {
2712 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2713 "available\n", err);
2714 } else {
2715 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2716 err);
2717 }
2718 }
2719 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2720 if (!pci_enable_msi(pdev)) {
2721 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2722 c->msi_vector = 1;
2723 return;
2724 } else {
2725 printk(KERN_WARNING "cciss: MSI init failed\n");
2726 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2727 return;
2728 }
2729 }
2730 #endif /* CONFIG_PCI_MSI */
2731 /* if we get here we're going to use the default interrupt mode */
2732 default_int_mode:
2733 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2734 return;
2735 }
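/*
 * Note: even when MSI-X is enabled and four vectors are recorded above,
 * cciss_init_one() only requests c->intr[SIMPLE_MODE_INT]; the remaining
 * vectors are saved but are not wired up here.
 */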
2736
2737 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2738 {
2739 ushort subsystem_vendor_id, subsystem_device_id, command;
2740 __u32 board_id, scratchpad = 0;
2741 __u64 cfg_offset;
2742 __u32 cfg_base_addr;
2743 __u64 cfg_base_addr_index;
2744 int i;
2745
2746 /* check to see if controller has been disabled */
2747 /* BEFORE trying to enable it */
2748 (void) pci_read_config_word(pdev, PCI_COMMAND,&command);
2749 if(!(command & 0x02))
2750 {
2751 printk(KERN_WARNING "cciss: controller appears to be disabled\n");
2752 return(-1);
2753 }
2754
2755 if (pci_enable_device(pdev))
2756 {
2757 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2758 return( -1);
2759 }
2760
2761 subsystem_vendor_id = pdev->subsystem_vendor;
2762 subsystem_device_id = pdev->subsystem_device;
2763 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2764 subsystem_vendor_id);
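/* board_id packs the PCI subsystem IDs as (device << 16) | vendor, e.g. a
 * subsystem device of 0x4070 with vendor 0x0E11 yields 0x40700E11 -- the
 * same form checked against the quirk list in cciss_interrupt_mode(). */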
2765
2766 /* search for our IO range so we can protect it */
2767 for(i=0; i<DEVICE_COUNT_RESOURCE; i++)
2768 {
2769 /* is this an IO range */
2770 if( pci_resource_flags(pdev, i) & 0x01 ) {
2771 c->io_mem_addr = pci_resource_start(pdev, i);
2772 c->io_mem_length = pci_resource_end(pdev, i) -
2773 pci_resource_start(pdev, i) +1;
2774 #ifdef CCISS_DEBUG
2775 printk("IO value found base_addr[%d] %lx %lx\n", i,
2776 c->io_mem_addr, c->io_mem_length);
2777 #endif /* CCISS_DEBUG */
2778 /* register the IO range */
2779 if(!request_region( c->io_mem_addr,
2780 c->io_mem_length, "cciss"))
2781 {
2782 printk(KERN_WARNING "cciss I/O memory range already in use addr=%lx length=%ld\n",
2783 c->io_mem_addr, c->io_mem_length);
2784 c->io_mem_addr= 0;
2785 c->io_mem_length = 0;
2786 }
2787 break;
2788 }
2789 }
2790
2791 #ifdef CCISS_DEBUG
2792 printk("command = %x\n", command);
2793 printk("irq = %x\n", pdev->irq);
2794 printk("board_id = %x\n", board_id);
2795 #endif /* CCISS_DEBUG */
2796
2797 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2798 * else we use the IO-APIC interrupt assigned to us by system ROM.
2799 */
2800 cciss_interrupt_mode(c, pdev, board_id);
2801
2802 /*
2803 * The memory base address is the first address; the second points to
2804 * the config table.
2805 */
2806
2807 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2808 #ifdef CCISS_DEBUG
2809 printk("address 0 = %x\n", c->paddr);
2810 #endif /* CCISS_DEBUG */
2811 c->vaddr = remap_pci_mem(c->paddr, 200);
2812
2813 /* Wait for the board to become ready. (PCI hotplug needs this.)
2814 * We poll for up to 120 secs, once per 100ms. */
2815 for (i=0; i < 1200; i++) {
2816 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2817 if (scratchpad == CCISS_FIRMWARE_READY)
2818 break;
2819 set_current_state(TASK_INTERRUPTIBLE);
2820 schedule_timeout(HZ / 10); /* wait 100ms */
2821 }
2822 if (scratchpad != CCISS_FIRMWARE_READY) {
2823 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2824 return -1;
2825 }
2826
2827 /* get the address index number */
2828 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2829 cfg_base_addr &= (__u32) 0x0000ffff;
2830 #ifdef CCISS_DEBUG
2831 printk("cfg base address = %x\n", cfg_base_addr);
2832 #endif /* CCISS_DEBUG */
2833 cfg_base_addr_index =
2834 find_PCI_BAR_index(pdev, cfg_base_addr);
2835 #ifdef CCISS_DEBUG
2836 printk("cfg base address index = %x\n", cfg_base_addr_index);
2837 #endif /* CCISS_DEBUG */
2838 if (cfg_base_addr_index == -1) {
2839 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2840 release_io_mem(c);
2841 return -1;
2842 }
2843
2844 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2845 #ifdef CCISS_DEBUG
2846 printk("cfg offset = %x\n", cfg_offset);
2847 #endif /* CCISS_DEBUG */
2848 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2849 cfg_base_addr_index) + cfg_offset,
2850 sizeof(CfgTable_struct));
2851 c->board_id = board_id;
2852
2853 #ifdef CCISS_DEBUG
2854 print_cfg_table(c->cfgtable);
2855 #endif /* CCISS_DEBUG */
2856
2857 for(i=0; i<NR_PRODUCTS; i++) {
2858 if (board_id == products[i].board_id) {
2859 c->product_name = products[i].product_name;
2860 c->access = *(products[i].access);
2861 break;
2862 }
2863 }
2864 if (i == NR_PRODUCTS) {
2865 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2866 " to access the Smart Array controller %08lx\n",
2867 (unsigned long)board_id);
2868 return -1;
2869 }
2870 if ( (readb(&c->cfgtable->Signature[0]) != 'C') ||
2871 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2872 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2873 (readb(&c->cfgtable->Signature[3]) != 'S') )
2874 {
2875 printk("Does not appear to be a valid CISS config table\n");
2876 return -1;
2877 }
2878
2879 #ifdef CONFIG_X86
2880 {
2881 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2882 __u32 prefetch;
2883 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2884 prefetch |= 0x100;
2885 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2886 }
2887 #endif
2888
2889 #ifdef CCISS_DEBUG
2890 printk("Trying to put board into Simple mode\n");
2891 #endif /* CCISS_DEBUG */
2892 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2893 /* Update the field, and then ring the doorbell */
2894 writel( CFGTBL_Trans_Simple,
2895 &(c->cfgtable->HostWrite.TransportRequest));
2896 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
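/* The controller acknowledges the transport change by clearing
 * CFGTBL_ChangeReq in the doorbell register; the loop below polls for
 * that, and TransportActive is checked afterwards to confirm that
 * simple mode actually took effect. */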
2897
2898 /* under certain very rare conditions, this can take a while.
2899 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2900 * as we enter this code.) */
2901 for(i=0;i<MAX_CONFIG_WAIT;i++) {
2902 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2903 break;
2904 /* delay and try again */
2905 set_current_state(TASK_INTERRUPTIBLE);
2906 schedule_timeout(10);
2907 }
2908
2909 #ifdef CCISS_DEBUG
2910 printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL));
2911 #endif /* CCISS_DEBUG */
2912 #ifdef CCISS_DEBUG
2913 print_cfg_table(c->cfgtable);
2914 #endif /* CCISS_DEBUG */
2915
2916 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
2917 {
2918 printk(KERN_WARNING "cciss: unable to get board into"
2919 " simple mode\n");
2920 return -1;
2921 }
2922 return 0;
2923
2924 }
2925
2926 /*
2927 * Gets information about the local volumes attached to the controller.
2928 */
2929 static void cciss_getgeometry(int cntl_num)
2930 {
2931 ReportLunData_struct *ld_buff;
2932 ReadCapdata_struct *size_buff;
2933 InquiryData_struct *inq_buff;
2934 int return_code;
2935 int i;
2936 int listlength = 0;
2937 __u32 lunid = 0;
2938 int block_size;
2939 int total_size;
2940
2941 ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
2942 if (ld_buff == NULL)
2943 {
2944 printk(KERN_ERR "cciss: out of memory\n");
2945 return;
2946 }
2947 memset(ld_buff, 0, sizeof(ReportLunData_struct));
2948 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
2949 if (size_buff == NULL)
2950 {
2951 printk(KERN_ERR "cciss: out of memory\n");
2952 kfree(ld_buff);
2953 return;
2954 }
2955 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
2956 if (inq_buff == NULL)
2957 {
2958 printk(KERN_ERR "cciss: out of memory\n");
2959 kfree(ld_buff);
2960 kfree(size_buff);
2961 return;
2962 }
2963 /* Get the firmware version */
2964 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
2965 sizeof(InquiryData_struct), 0, 0 ,0, NULL, TYPE_CMD);
2966 if (return_code == IO_OK)
2967 {
2968 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
2969 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
2970 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
2971 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
2972 } else /* send command failed */
2973 {
2974 printk(KERN_WARNING "cciss: unable to determine firmware"
2975 " version of controller\n");
2976 }
2977 /* Get the number of logical volumes */
2978 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
2979 sizeof(ReportLunData_struct), 0, 0, 0, NULL, TYPE_CMD);
2980
2981 if( return_code == IO_OK)
2982 {
2983 #ifdef CCISS_DEBUG
2984 printk("LUN Data\n--------------------------\n");
2985 #endif /* CCISS_DEBUG */
2986
2987 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
2988 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
2989 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
2990 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
2991 } else /* reading number of logical volumes failed */
2992 {
2993 printk(KERN_WARNING "cciss: report logical volume"
2994 " command failed\n");
2995 listlength = 0;
2996 }
2997 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
2998 if (hba[cntl_num]->num_luns > CISS_MAX_LUN)
2999 {
3000 printk(KERN_ERR "ciss: only %d number of logical volumes supported\n",
3001 CISS_MAX_LUN);
3002 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3003 }
3004 #ifdef CCISS_DEBUG
3005 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
3006 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
3007 ld_buff->LUNListLength[3], hba[cntl_num]->num_luns);
3008 #endif /* CCISS_DEBUG */
3009
3010 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
3011 // for(i=0; i< hba[cntl_num]->num_luns; i++)
3012 for(i=0; i < CISS_MAX_LUN; i++)
3013 {
3014 if (i < hba[cntl_num]->num_luns){
3015 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3016 << 24;
3017 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3018 << 16;
3019 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3020 << 8;
3021 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3022
3023 hba[cntl_num]->drv[i].LunID = lunid;
3024
3025
3026 #ifdef CCISS_DEBUG
3027 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3028 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3029 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3030 hba[cntl_num]->drv[i].LunID);
3031 #endif /* CCISS_DEBUG */
3032 cciss_read_capacity(cntl_num, i, size_buff, 0,
3033 &total_size, &block_size);
3034 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3035 block_size, inq_buff, &hba[cntl_num]->drv[i]);
3036 } else {
3037 /* initialize raid_level to indicate a free space */
3038 hba[cntl_num]->drv[i].raid_level = -1;
3039 }
3040 }
3041 kfree(ld_buff);
3042 kfree(size_buff);
3043 kfree(inq_buff);
3044 }
3045
3046 /* Function to find the first free pointer into our hba[] array */
3047 /* Returns -1 if no free entries are left. */
3048 static int alloc_cciss_hba(void)
3049 {
3050 struct gendisk *disk[NWD];
3051 int i, n;
3052 for (n = 0; n < NWD; n++) {
3053 disk[n] = alloc_disk(1 << NWD_SHIFT);
3054 if (!disk[n])
3055 goto out;
3056 }
3057
3058 for(i=0; i< MAX_CTLR; i++) {
3059 if (!hba[i]) {
3060 ctlr_info_t *p;
3061 p = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3062 if (!p)
3063 goto Enomem;
3064 memset(p, 0, sizeof(ctlr_info_t));
3065 for (n = 0; n < NWD; n++)
3066 p->gendisk[n] = disk[n];
3067 hba[i] = p;
3068 return i;
3069 }
3070 }
3071 printk(KERN_WARNING "cciss: This driver supports a maximum"
3072 " of %d controllers.\n", MAX_CTLR);
3073 goto out;
3074 Enomem:
3075 printk(KERN_ERR "cciss: out of memory.\n");
3076 out:
3077 while (n--)
3078 put_disk(disk[n]);
3079 return -1;
3080 }
3081
3082 static void free_hba(int i)
3083 {
3084 ctlr_info_t *p = hba[i];
3085 int n;
3086
3087 hba[i] = NULL;
3088 for (n = 0; n < NWD; n++)
3089 put_disk(p->gendisk[n]);
3090 kfree(p);
3091 }
3092
3093 /*
3094 * This is it. Find all the controllers and register them. I really hate
3095 * stealing all these major device numbers.
3096 * Returns 1 if the controller was registered, -1 on failure.
3097 */
3098 static int __devinit cciss_init_one(struct pci_dev *pdev,
3099 const struct pci_device_id *ent)
3100 {
3101 request_queue_t *q;
3102 int i;
3103 int j;
3104 int rc;
3105
3106 printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
3107 " bus %d dev %d func %d\n",
3108 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
3109 PCI_FUNC(pdev->devfn));
3110 i = alloc_cciss_hba();
3111 if(i < 0)
3112 return (-1);
3113
3114 hba[i]->busy_initializing = 1;
3115
3116 if (cciss_pci_init(hba[i], pdev) != 0)
3117 goto clean1;
3118
3119 sprintf(hba[i]->devname, "cciss%d", i);
3120 hba[i]->ctlr = i;
3121 hba[i]->pdev = pdev;
3122
3123 /* configure PCI DMA stuff */
3124 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3125 printk("cciss: using DAC cycles\n");
3126 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3127 printk("cciss: not using DAC cycles\n");
3128 else {
3129 printk("cciss: no suitable DMA available\n");
3130 goto clean1;
3131 }
3132
3133 /*
3134 * register with the major number, or get a dynamic major number
3135 * by passing 0 as argument. This is done for greater than
3136 * 8 controller support.
3137 */
3138 if (i < MAX_CTLR_ORIG)
3139 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3140 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3141 if(rc == -EBUSY || rc == -EINVAL) {
3142 printk(KERN_ERR
3143 "cciss: Unable to get major number %d for %s "
3144 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3145 goto clean1;
3146 }
3147 else {
3148 if (i >= MAX_CTLR_ORIG)
3149 hba[i]->major = rc;
3150 }
3151
3152 /* make sure the board interrupts are off */
3153 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3154 if( request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3155 SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
3156 hba[i]->devname, hba[i])) {
3157 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3158 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3159 goto clean2;
3160 }
3161 hba[i]->cmd_pool_bits = kmalloc(((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long), GFP_KERNEL);
3162 hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
3163 hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3164 &(hba[i]->cmd_pool_dhandle));
3165 hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
3166 hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3167 &(hba[i]->errinfo_pool_dhandle));
3168 if((hba[i]->cmd_pool_bits == NULL)
3169 || (hba[i]->cmd_pool == NULL)
3170 || (hba[i]->errinfo_pool == NULL)) {
3171 printk( KERN_ERR "cciss: out of memory");
3172 goto clean4;
3173 }
3174 #ifdef CONFIG_CISS_SCSI_TAPE
3175 hba[i]->scsi_rejects.complete =
3176 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3177 (NR_CMDS + 5), GFP_KERNEL);
3178 if (hba[i]->scsi_rejects.complete == NULL) {
3179 printk( KERN_ERR "cciss: out of memory");
3180 goto clean4;
3181 }
3182 #endif
3183 spin_lock_init(&hba[i]->lock);
3184
3185 /* Initialize the pdev driver private data.
3186 have it point to hba[i]. */
3187 pci_set_drvdata(pdev, hba[i]);
3188 /* command and error info recs zeroed out before
3189 they are used */
3190 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
3191
3192 #ifdef CCISS_DEBUG
3193 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
3194 #endif /* CCISS_DEBUG */
3195
3196 cciss_getgeometry(i);
3197
3198 cciss_scsi_setup(i);
3199
3200 /* Turn the interrupts on so we can service requests */
3201 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3202
3203 cciss_procinit(i);
3204 hba[i]->busy_initializing = 0;
3205
3206 for(j=0; j < NWD; j++) { /* mfm */
3207 drive_info_struct *drv = &(hba[i]->drv[j]);
3208 struct gendisk *disk = hba[i]->gendisk[j];
3209
3210 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3211 if (!q) {
3212 printk(KERN_ERR
3213 "cciss: unable to allocate queue for disk %d\n",
3214 j);
3215 break;
3216 }
3217 drv->queue = q;
3218
3219 q->backing_dev_info.ra_pages = READ_AHEAD;
3220 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3221
3222 /* This is a hardware imposed limit. */
3223 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3224
3225 /* This is a limit in the driver and could be eliminated. */
3226 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3227
3228 blk_queue_max_sectors(q, 512);
3229
3230 blk_queue_softirq_done(q, cciss_softirq_done);
3231
3232 q->queuedata = hba[i];
3233 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3234 sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j);
3235 disk->major = hba[i]->major;
3236 disk->first_minor = j << NWD_SHIFT;
3237 disk->fops = &cciss_fops;
3238 disk->queue = q;
3239 disk->private_data = drv;
3240 /* we must register the controller even if no disks exist */
3241 /* this is for the online array utilities */
3242 if(!drv->heads && j)
3243 continue;
3244 blk_queue_hardsect_size(q, drv->block_size);
3245 set_capacity(disk, drv->nr_blocks);
3246 add_disk(disk);
3247 }
3248
3249 return(1);
3250
3251 clean4:
3252 #ifdef CONFIG_CISS_SCSI_TAPE
3253 if(hba[i]->scsi_rejects.complete)
3254 kfree(hba[i]->scsi_rejects.complete);
3255 #endif
3256 kfree(hba[i]->cmd_pool_bits);
3257 if(hba[i]->cmd_pool)
3258 pci_free_consistent(hba[i]->pdev,
3259 NR_CMDS * sizeof(CommandList_struct),
3260 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3261 if(hba[i]->errinfo_pool)
3262 pci_free_consistent(hba[i]->pdev,
3263 NR_CMDS * sizeof( ErrorInfo_struct),
3264 hba[i]->errinfo_pool,
3265 hba[i]->errinfo_pool_dhandle);
3266 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3267 clean2:
3268 unregister_blkdev(hba[i]->major, hba[i]->devname);
3269 clean1:
3270 hba[i]->busy_initializing = 0;
3271 release_io_mem(hba[i]);
3272 free_hba(i); /* clears hba[i], so this must come after the line above */
3273 return(-1);
3274 }
3275
3276 static void __devexit cciss_remove_one (struct pci_dev *pdev)
3277 {
3278 ctlr_info_t *tmp_ptr;
3279 int i, j;
3280 char flush_buf[4];
3281 int return_code;
3282
3283 if (pci_get_drvdata(pdev) == NULL)
3284 {
3285 printk( KERN_ERR "cciss: Unable to remove device \n");
3286 return;
3287 }
3288 tmp_ptr = pci_get_drvdata(pdev);
3289 i = tmp_ptr->ctlr;
3290 if (hba[i] == NULL)
3291 {
3292 printk(KERN_ERR "cciss: device appears to "
3293 "already be removed \n");
3294 return;
3295 }
3296 /* Turn board interrupts off and send the flush cache command */
3297 /* sendcmd will turn off interrupts and send the flush,
3298 * writing all data in the battery-backed cache to disk. */
3299 memset(flush_buf, 0, 4);
3300 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3301 TYPE_CMD);
3302 if(return_code != IO_OK)
3303 {
3304 printk(KERN_WARNING "Error Flushing cache on controller %d\n",
3305 i);
3306 }
3307 free_irq(hba[i]->intr[2], hba[i]);
3308
3309 #ifdef CONFIG_PCI_MSI
3310 if (hba[i]->msix_vector)
3311 pci_disable_msix(hba[i]->pdev);
3312 else if (hba[i]->msi_vector)
3313 pci_disable_msi(hba[i]->pdev);
3314 #endif /* CONFIG_PCI_MSI */
3315
3316 pci_set_drvdata(pdev, NULL);
3317 iounmap(hba[i]->vaddr);
3318 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3319 unregister_blkdev(hba[i]->major, hba[i]->devname);
3320 remove_proc_entry(hba[i]->devname, proc_cciss);
3321
3322 /* remove it from the disk list */
3323 for (j = 0; j < NWD; j++) {
3324 struct gendisk *disk = hba[i]->gendisk[j];
3325 if (disk) {
3326 request_queue_t *q = disk->queue;
3327
3328 if (disk->flags & GENHD_FL_UP)
3329 del_gendisk(disk);
3330 if (q)
3331 blk_cleanup_queue(q);
3332 }
3333 }
3334
3335 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3336 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3337 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3338 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3339 kfree(hba[i]->cmd_pool_bits);
3340 #ifdef CONFIG_CISS_SCSI_TAPE
3341 kfree(hba[i]->scsi_rejects.complete);
3342 #endif
3343 release_io_mem(hba[i]);
3344 free_hba(i);
3345 }
3346
3347 static struct pci_driver cciss_pci_driver = {
3348 .name = "cciss",
3349 .probe = cciss_init_one,
3350 .remove = __devexit_p(cciss_remove_one),
3351 .id_table = cciss_pci_device_id, /* id_table */
3352 };
3353
3354 /*
3355 * This is it. Register the PCI driver information for the cards we control;
3356 * the OS will call our registered routines when it finds one of our cards.
3357 */
3358 static int __init cciss_init(void)
3359 {
3360 printk(KERN_INFO DRIVER_NAME "\n");
3361
3362 /* Register for our PCI devices */
3363 return pci_register_driver(&cciss_pci_driver);
3364 }
3365
3366 static void __exit cciss_cleanup(void)
3367 {
3368 int i;
3369
3370 pci_unregister_driver(&cciss_pci_driver);
3371 /* double check that all controller entries have been removed */
3372 for (i=0; i< MAX_CTLR; i++)
3373 {
3374 if (hba[i] != NULL)
3375 {
3376 printk(KERN_WARNING "cciss: had to remove"
3377 " controller %d\n", i);
3378 cciss_remove_one(hba[i]->pdev);
3379 }
3380 }
3381 remove_proc_entry("cciss", proc_root_driver);
3382 }
3383
3384 static void fail_all_cmds(unsigned long ctlr)
3385 {
3386 /* If we get here, the board is apparently dead. */
3387 ctlr_info_t *h = hba[ctlr];
3388 CommandList_struct *c;
3389 unsigned long flags;
3390
3391 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3392 h->alive = 0; /* the controller apparently died... */
3393
3394 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3395
3396 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3397
3398 /* move everything off the request queue onto the completed queue */
3399 while( (c = h->reqQ) != NULL ) {
3400 removeQ(&(h->reqQ), c);
3401 h->Qdepth--;
3402 addQ (&(h->cmpQ), c);
3403 }
3404
3405 /* Now, fail everything on the completed queue with a HW error */
3406 while( (c = h->cmpQ) != NULL ) {
3407 removeQ(&h->cmpQ, c);
3408 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3409 if (c->cmd_type == CMD_RWREQ) {
3410 complete_command(h, c, 0);
3411 } else if (c->cmd_type == CMD_IOCTL_PEND)
3412 complete(c->waiting);
3413 #ifdef CONFIG_CISS_SCSI_TAPE
3414 else if (c->cmd_type == CMD_SCSI)
3415 complete_scsi_command(c, 0, 0);
3416 #endif
3417 }
3418 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3419 return;
3420 }
3421
3422 module_init(cciss_init);
3423 module_exit(cciss_cleanup);