Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _SCSI_SCSI_HOST_H |
2 | #define _SCSI_SCSI_HOST_H | |
3 | ||
4 | #include <linux/device.h> | |
5 | #include <linux/list.h> | |
6 | #include <linux/types.h> | |
7 | #include <linux/workqueue.h> | |
8 | ||
9 | struct block_device; | |
10 | struct module; | |
11 | struct scsi_cmnd; | |
12 | struct scsi_device; | |
a283bd37 | 13 | struct scsi_target; |
1da177e4 LT |
14 | struct Scsi_Host; |
15 | struct scsi_host_cmd_pool; | |
16 | struct scsi_transport_template; | |
17 | ||
18 | ||
19 | /* | |
20 | * The various choices mean: | |
21 | * NONE: Self evident. Host adapter is not capable of scatter-gather. | |
22 | * ALL: Means that the host adapter module can do scatter-gather, | |
23 | * and that there is no limit to the size of the table to which | |
24 | * we scatter/gather data. | |
25 | * Anything else: Indicates the maximum number of chains that can be | |
26 | * used in one scatter-gather request. | |
27 | */ | |
28 | #define SG_NONE 0 | |
29 | #define SG_ALL 0xff | |
30 | ||
31 | ||
32 | #define DISABLE_CLUSTERING 0 | |
33 | #define ENABLE_CLUSTERING 1 | |
34 | ||
35 | enum scsi_eh_timer_return { | |
36 | EH_NOT_HANDLED, | |
37 | EH_HANDLED, | |
38 | EH_RESET_TIMER, | |
39 | }; | |
40 | ||
41 | ||
42 | struct scsi_host_template { | |
43 | struct module *module; | |
44 | const char *name; | |
45 | ||
46 | /* | |
47 | * Used to initialize old-style drivers. For new-style drivers | |
48 | * just perform all work in your module initialization function. | |
49 | * | |
50 | * Status: OBSOLETE | |
51 | */ | |
52 | int (* detect)(struct scsi_host_template *); | |
53 | ||
54 | /* | |
55 | * Used as unload callback for hosts with old-style drivers. | |
56 | * | |
57 | * Status: OBSOLETE | |
58 | */ | |
59 | int (* release)(struct Scsi_Host *); | |
60 | ||
61 | /* | |
62 | * The info function will return whatever useful information the | |
63 | * developer sees fit. If not provided, then the name field will | |
64 | * be used instead. | |
65 | * | |
66 | * Status: OPTIONAL | |
67 | */ | |
68 | const char *(* info)(struct Scsi_Host *); | |
69 | ||
70 | /* | |
71 | * Ioctl interface | |
72 | * | |
73 | * Status: OPTIONAL | |
74 | */ | |
75 | int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg); | |
76 | ||
77 | ||
78 | #ifdef CONFIG_COMPAT | |
79 | /* | |
80 | * Compat handler. Handle 32bit ABI. | |
81 | * When unknown ioctl is passed return -ENOIOCTLCMD. | |
82 | * | |
83 | * Status: OPTIONAL | |
84 | */ | |
85 | int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg); | |
86 | #endif | |
87 | ||
88 | /* | |
89 | * The queuecommand function is used to queue up a scsi | |
90 | * command block to the LLDD. When the driver has finished | 
91 | * processing the command, the done callback is invoked. | 
92 | * | |
93 | * If queuecommand returns 0, then the HBA has accepted the | |
94 | * command. The done() function must be called on the command | |
95 | * when the driver has finished with it. (you may call done on the | |
96 | * command before queuecommand returns, but in this case you | |
97 | * *must* return 0 from queuecommand). | |
98 | * | |
99 | * Queuecommand may also reject the command, in which case it may | |
100 | * not touch the command and must not call done() for it. | |
101 | * | |
102 | * There are two possible rejection returns: | |
103 | * | |
104 | * SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but | |
105 | * allow commands to other devices serviced by this host. | |
106 | * | |
107 | * SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this | |
108 | * host temporarily. | |
109 | * | |
110 | * For compatibility, any other non-zero return is treated the | |
111 | * same as SCSI_MLQUEUE_HOST_BUSY. | |
112 | * | |
113 | * NOTE: "temporarily" means either until the next command for | 
114 | * this device/host completes, or a period of time determined by | |
115 | * I/O pressure in the system if there are no other outstanding | |
116 | * commands. | |
117 | * | |
118 | * STATUS: REQUIRED | |
119 | */ | |
120 | int (* queuecommand)(struct scsi_cmnd *, | |
121 | void (*done)(struct scsi_cmnd *)); | |
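/*
 * Illustrative sketch (not part of this header): a minimal queuecommand
 * implementation following the rules above.  struct mydrv_host and the
 * mydrv_hw_queue_full()/mydrv_hw_submit() helpers are hypothetical driver
 * code; SCSI_MLQUEUE_HOST_BUSY and DID_NO_CONNECT come from <scsi/scsi.h>.
 */
static int mydrv_queuecommand(struct scsi_cmnd *cmd,
			      void (*done)(struct scsi_cmnd *))
{
	struct mydrv_host *mh = (struct mydrv_host *)cmd->device->host->hostdata;

	if (mydrv_hw_queue_full(mh))
		return SCSI_MLQUEUE_HOST_BUSY;	/* rejected: cmd untouched, no done() */

	cmd->scsi_done = done;			/* completion path will call this */

	if (mydrv_hw_submit(mh, cmd) < 0) {
		cmd->result = DID_NO_CONNECT << 16;
		done(cmd);			/* done() before returning is fine... */
	}
	return 0;				/* ...as long as we still return 0 */
}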
122 | ||
123 | /* | |
124 | * This is an error handling strategy routine. You don't need to | |
125 | * define one of these if you don't want to - there is a default | |
126 | * routine that is present that should work in most cases. For those | |
127 | * driver authors that have the inclination and ability to write their | |
128 | * own strategy routine, this is where it is specified. Note - the | |
129 | * strategy routine is *ALWAYS* run in the context of the kernel eh | |
130 | * thread. Thus you are guaranteed to *NOT* be in an interrupt | |
131 | * handler when you execute this, and you are also guaranteed to | |
132 | * *NOT* have any other commands being queued while you are in the | |
133 | * strategy routine. When you return from this function, operations | |
134 | * return to normal. | |
135 | * | |
136 | * See scsi_error.c scsi_unjam_host for additional comments about | |
137 | * what this function should and should not be attempting to do. | |
138 | * | |
139 | * Status: REQUIRED (at least one of them) | |
140 | */ | |
141 | int (* eh_strategy_handler)(struct Scsi_Host *); | |
142 | int (* eh_abort_handler)(struct scsi_cmnd *); | |
143 | int (* eh_device_reset_handler)(struct scsi_cmnd *); | |
144 | int (* eh_bus_reset_handler)(struct scsi_cmnd *); | |
145 | int (* eh_host_reset_handler)(struct scsi_cmnd *); | |
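/*
 * Illustrative sketch (not part of this header): a typical fine-grained
 * error handler.  mydrv_hw_abort() and struct mydrv_host are hypothetical;
 * SUCCESS and FAILED are the midlayer return values from <scsi/scsi.h>.
 */
static int mydrv_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct mydrv_host *mh = (struct mydrv_host *)cmd->device->host->hostdata;

	if (mydrv_hw_abort(mh, cmd) == 0)
		return SUCCESS;		/* command is no longer in flight */
	return FAILED;			/* midlayer escalates to a reset */
}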
146 | ||
147 | /* | |
148 | * This is an optional routine to notify the host that the scsi | |
149 | * timer just fired. The returns tell the timer routine what to | |
150 | * do about this: | |
151 | * | |
152 | * EH_HANDLED: I fixed the error, please complete the command | |
153 | * EH_RESET_TIMER: I need more time, reset the timer and | |
154 | * begin counting again | |
155 | * EH_NOT_HANDLED: Begin normal error recovery | 
156 | * | |
157 | * Status: OPTIONAL | |
158 | */ | |
159 | enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *); | |
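/*
 * Illustrative sketch (not part of this header): an eh_timed_out hook that
 * grants more time while the (hypothetical) hardware still owns the command.
 */
static enum scsi_eh_timer_return mydrv_eh_timed_out(struct scsi_cmnd *cmd)
{
	struct mydrv_host *mh = (struct mydrv_host *)cmd->device->host->hostdata;

	if (mydrv_hw_cmd_in_flight(mh, cmd))	/* hypothetical helper */
		return EH_RESET_TIMER;		/* restart the timer, keep waiting */
	return EH_NOT_HANDLED;			/* begin normal error recovery */
}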
160 | ||
161 | /* | |
162 | * Before the mid layer attempts to scan for a new device where none | |
163 | * currently exists, it will call this entry in your driver. Should | |
164 | * your driver need to allocate any structs or perform any other init | |
165 | * items in order to send commands to a currently unused target/lun | |
166 | * combo, then this is where you can perform those allocations. This | |
167 | * is specifically so that drivers won't have to perform any kind of | |
168 | * "is this a new device" checks in their queuecommand routine, | |
169 | * thereby making the hot path a bit quicker. | |
170 | * | |
171 | * Return values: 0 on success, non-0 on failure | |
172 | * | |
173 | * Deallocation: If we didn't find any devices at this ID, you will | |
174 | * get an immediate call to slave_destroy(). If we find something | |
175 | * here then you will get a call to slave_configure(), then the | |
176 | * device will be used for however long it is kept around, then when | |
177 | * the device is removed from the system (or possibly at reboot | 
178 | * time), you will then get a call to slave_destroy(). This is | |
179 | * assuming you implement slave_configure and slave_destroy. | |
180 | * However, if you allocate memory and hang it off the device struct, | |
181 | * then you must implement the slave_destroy() routine at a minimum | |
182 | * in order to avoid leaking memory each time a device | 
183 | * is torn down. | 
184 | * | |
185 | * Status: OPTIONAL | |
186 | */ | |
187 | int (* slave_alloc)(struct scsi_device *); | |
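/*
 * Illustrative sketch (not part of this header): slave_alloc attaching a
 * hypothetical per-device structure to sdev->hostdata.  Assumes the usual
 * <linux/slab.h> and <scsi/scsi_device.h> are available.
 */
static int mydrv_slave_alloc(struct scsi_device *sdev)
{
	struct mydrv_device *md;

	md = kmalloc(sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;
	memset(md, 0, sizeof(*md));
	sdev->hostdata = md;		/* freed in slave_destroy() below */
	return 0;
}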
188 | ||
189 | /* | |
190 | * Once the device has responded to an INQUIRY and we know the | |
191 | * device is online, we call into the low level driver with the | |
192 | * struct scsi_device *. If the low level device driver implements | |
193 | * this function, it *must* perform the task of setting the queue | |
194 | * depth on the device. All other tasks are optional and depend | |
195 | * on what the driver supports and various implementation details. | |
196 | * | |
197 | * Things currently recommended to be handled at this time include: | |
198 | * | |
199 | * 1. Setting the device queue depth. Proper setting of this is | |
200 | * described in the comments for scsi_adjust_queue_depth. | |
201 | * 2. Determining if the device supports the various synchronous | |
202 | * negotiation protocols. The device struct will already have | |
203 | * responded to INQUIRY and the results of the standard items | |
204 | * will have been shoved into the various device flag bits, e.g. | 
205 | * device->sdtr will be true if the device supports SDTR messages. | |
206 | * 3. Allocating command structs that the device will need. | |
207 | * 4. Setting the default timeout on this device (if needed). | |
208 | * 5. Anything else the low level driver might want to do on a device | |
209 | * specific setup basis... | |
210 | * 6. Return 0 on success, non-0 on error. The device will be marked | |
211 | * as offline on error so that no access will occur. If you return | |
212 | * non-0, your slave_destroy routine will never get called for this | |
213 | * device, so don't leave any loose memory hanging around, clean | |
214 | * up after yourself before returning non-0 | |
215 | * | |
216 | * Status: OPTIONAL | |
217 | */ | |
218 | int (* slave_configure)(struct scsi_device *); | |
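/*
 * Illustrative sketch (not part of this header): a slave_configure that does
 * the one mandatory job, setting the queue depth, via the
 * scsi_adjust_queue_depth() routine referenced above.  MYDRV_TCQ_DEPTH and
 * MYDRV_UNTAGGED_DEPTH are hypothetical driver constants.
 */
static int mydrv_slave_configure(struct scsi_device *sdev)
{
	if (sdev->tagged_supported)
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, MYDRV_TCQ_DEPTH);
	else
		scsi_adjust_queue_depth(sdev, 0, MYDRV_UNTAGGED_DEPTH);
	return 0;		/* returning non-0 would offline the device */
}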
219 | ||
220 | /* | |
221 | * Immediately prior to deallocating the device and after all activity | |
222 | * has ceased, the mid layer calls this function so that the low level | 
223 | * driver may completely detach itself from the scsi device and vice | |
224 | * versa. The low level driver is responsible for freeing any memory | |
225 | * it allocated in the slave_alloc or slave_configure calls. | |
226 | * | |
227 | * Status: OPTIONAL | |
228 | */ | |
229 | void (* slave_destroy)(struct scsi_device *); | |
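/*
 * Illustrative sketch (not part of this header): the matching slave_destroy,
 * releasing whatever the slave_alloc() sketch above hung off the device.
 */
static void mydrv_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
	sdev->hostdata = NULL;
}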
230 | ||
a283bd37 JB |
231 | /* |
232 | * Before the mid layer attempts to scan for a new device attached | |
233 | * to a target where no target currently exists, it will call this | |
234 | * entry in your driver. Should your driver need to allocate any | |
235 | * structs or perform any other init items in order to send commands | |
236 | * to a currently unused target, then this is where you can perform | |
237 | * those allocations. | |
238 | * | |
239 | * Return values: 0 on success, non-0 on failure | |
240 | * | |
241 | * Status: OPTIONAL | |
242 | */ | |
243 | int (* target_alloc)(struct scsi_target *); | |
244 | ||
245 | /* | |
246 | * Immediately prior to deallocating the target structure, and | |
247 | * after all activity to attached scsi devices has ceased, the | |
248 | * midlayer calls this function so that the driver may deallocate | 
249 | * and terminate any references to the target. | |
250 | * | |
251 | * Status: OPTIONAL | |
252 | */ | |
253 | void (* target_destroy)(struct scsi_target *); | |
254 | ||
1da177e4 LT |
255 | /* |
256 | * Fill in this function to allow the queue depth of this host | 
257 | * to be changed (on a per-device basis). Returns either | 
258 | * the current queue depth setting (may be different from what | |
259 | * was passed in) or an error. An error should only be | |
260 | * returned if the requested depth is legal but the driver was | |
261 | * unable to set it. If the requested depth is illegal, the | |
262 | * driver should set and return the closest legal queue depth. | |
263 | * | |
264 | */ | |
265 | int (* change_queue_depth)(struct scsi_device *, int); | |
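/*
 * Illustrative sketch (not part of this header): clamp the requested depth
 * to a hypothetical hardware limit, apply it, and report the depth that was
 * actually set.  MYDRV_MAX_QUEUE_DEPTH is an assumption.
 */
static int mydrv_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth < 1)
		qdepth = 1;
	if (qdepth > MYDRV_MAX_QUEUE_DEPTH)
		qdepth = MYDRV_MAX_QUEUE_DEPTH;

	scsi_adjust_queue_depth(sdev, sdev->tagged_supported ?
				MSG_SIMPLE_TAG : 0, qdepth);
	return sdev->queue_depth;
}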
266 | ||
267 | /* | |
268 | * Fill in this function to allow the changing of tag types | 
269 | * (this also allows the enabling/disabling of tag command | |
270 | * queueing). An error should only be returned if something | |
271 | * went wrong in the driver while trying to set the tag type. | |
272 | * If the driver doesn't support the requested tag type, then | |
273 | * it should set the closest type it does support without | |
274 | * returning an error. Returns the actual tag type set. | |
275 | */ | |
276 | int (* change_queue_type)(struct scsi_device *, int); | |
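/*
 * Illustrative sketch (not part of this header), assuming the
 * <scsi/scsi_tcq.h> helpers of this era (scsi_set_tag_type,
 * scsi_activate_tcq, scsi_deactivate_tcq): switch tagging on or off and
 * return the tag type actually in effect.
 */
static int mydrv_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (!sdev->tagged_supported)
		return 0;			/* untagged is all we can do */

	scsi_set_tag_type(sdev, tag_type);
	if (tag_type)
		scsi_activate_tcq(sdev, sdev->queue_depth);
	else
		scsi_deactivate_tcq(sdev, sdev->queue_depth);
	return tag_type;
}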
277 | ||
278 | /* | |
279 | * This function determines the BIOS parameters for a given | 
280 | * hard disk. These tend to be numbers that are made up by | 
281 | * the host adapter. Parameters: | |
282 | * size, device, list (heads, sectors, cylinders) | |
283 | * | |
284 | * Status: OPTIONAL */ | |
285 | int (* bios_param)(struct scsi_device *, struct block_device *, | |
286 | sector_t, int []); | |
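/*
 * Illustrative sketch (not part of this header): fabricate a conventional
 * 64/32 geometry (255/63 for larger disks) from the capacity.  Casting
 * sector_t to unsigned long is an assumption made to keep the example short.
 */
static int mydrv_bios_param(struct scsi_device *sdev,
			    struct block_device *bdev,
			    sector_t capacity, int geom[])
{
	int heads = 64, sectors = 32;
	unsigned long cylinders = (unsigned long)capacity / (heads * sectors);

	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = (unsigned long)capacity / (heads * sectors);
	}
	geom[0] = heads;	/* heads */
	geom[1] = sectors;	/* sectors per track */
	geom[2] = cylinders;	/* cylinders */
	return 0;
}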
287 | ||
288 | /* | |
289 | * Can be used to export driver statistics and other information to | 
290 | * the world outside the kernel, i.e. userspace, and it also provides an | 
291 | * interface to feed the driver with information. | |
292 | * | |
293 | * Status: OBSOLETE | |
294 | */ | |
295 | int (*proc_info)(struct Scsi_Host *, char *, char **, off_t, int, int); | |
296 | ||
297 | /* | |
298 | * Name of proc directory | |
299 | */ | |
300 | char *proc_name; | |
301 | ||
302 | /* | |
303 | * Used to store the procfs directory if a driver implements the | |
304 | * proc_info method. | |
305 | */ | |
306 | struct proc_dir_entry *proc_dir; | |
307 | ||
308 | /* | |
309 | * This determines if we will use a non-interrupt driven | |
310 | * or an interrupt driven scheme. It is set to the maximum number | 
311 | * of simultaneous commands a given host adapter will accept. | |
312 | */ | |
313 | int can_queue; | |
314 | ||
315 | /* | |
316 | * In many instances, especially where disconnect / reconnect are | |
317 | * supported, our host also has an ID on the SCSI bus. If this is | |
318 | * the case, then it must be reserved. Please set this_id to -1 if | |
319 | * your setup is in single initiator mode, and the host lacks an | |
320 | * ID. | |
321 | */ | |
322 | int this_id; | |
323 | ||
324 | /* | |
325 | * This determines the degree to which the host adapter is capable | |
326 | * of scatter-gather. | |
327 | */ | |
328 | unsigned short sg_tablesize; | |
329 | ||
330 | /* | |
331 | * Set this if the host adapter has limitations besides the segment count. | 
332 | */ | |
333 | unsigned short max_sectors; | |
334 | ||
335 | /* | |
336 | * DMA scatter/gather segment boundary limit. A segment crossing this | 
337 | * boundary will be split in two. | |
338 | */ | |
339 | unsigned long dma_boundary; | |
340 | ||
341 | /* | |
342 | * This specifies "machine infinity" for host templates which don't | |
343 | * limit the transfer size. Note this limit represents an absolute | |
344 | * maximum, and may exceed the transfer limits allowed for | 
345 | * individual devices (e.g. 256 for SCSI-1) | |
346 | */ | |
347 | #define SCSI_DEFAULT_MAX_SECTORS 1024 | |
348 | ||
349 | /* | |
350 | * True if this host adapter can make good use of linked commands. | |
351 | * This will allow more than one command to be queued to a given | |
352 | * unit on a given host. Set this to the maximum number of command | |
353 | * blocks to be provided for each device. Set this to 1 for one | |
354 | * command block per lun, 2 for two, etc. Do not set this to 0. | |
355 | * You should make sure that the host adapter will do the right thing | |
356 | * before you try setting this above 1. | |
357 | */ | |
358 | short cmd_per_lun; | |
359 | ||
360 | /* | |
361 | * present contains a counter indicating how many boards of this | 
362 | * type were found when we did the scan. | |
363 | */ | |
364 | unsigned char present; | |
365 | ||
366 | /* | |
367 | * true if this host adapter uses unchecked DMA onto an ISA bus. | |
368 | */ | |
369 | unsigned unchecked_isa_dma:1; | |
370 | ||
371 | /* | |
372 | * true if this host adapter can make good use of clustering. | |
373 | * I originally thought that if the tablesize was large that it | |
374 | * was a waste of CPU cycles to prepare a cluster list, but | |
375 | * it works out that the Buslogic is faster if you use a smaller | |
376 | * number of segments (i.e. use clustering). I guess it is | |
377 | * inefficient. | |
378 | */ | |
379 | unsigned use_clustering:1; | |
380 | ||
381 | /* | |
382 | * True for emulated SCSI host adapters (e.g. ATAPI) | |
383 | */ | |
384 | unsigned emulated:1; | |
385 | ||
386 | /* | |
387 | * True if the low-level driver performs its own reset-settle delays. | |
388 | */ | |
389 | unsigned skip_settle_delay:1; | |
390 | ||
391 | /* | |
392 | * ordered write support | |
393 | */ | |
394 | unsigned ordered_flush:1; | |
395 | unsigned ordered_tag:1; | |
396 | ||
397 | /* | |
398 | * Countdown for host blocking with no commands outstanding | |
399 | */ | |
400 | unsigned int max_host_blocked; | |
401 | ||
402 | /* | |
403 | * Default value for the blocking. If the queue is empty, | |
404 | * host_blocked counts down in the request_fn until it restarts | |
405 | * host operations when zero is reached. | 
406 | * | |
407 | * FIXME: This should probably be a value in the template | |
408 | */ | |
409 | #define SCSI_DEFAULT_HOST_BLOCKED 7 | |
410 | ||
411 | /* | |
412 | * Pointer to the sysfs class properties for this host, NULL terminated. | |
413 | */ | |
414 | struct class_device_attribute **shost_attrs; | |
415 | ||
416 | /* | |
417 | * Pointer to the SCSI device properties for this host, NULL terminated. | |
418 | */ | |
419 | struct device_attribute **sdev_attrs; | |
420 | ||
421 | /* | |
422 | * List of hosts per template. | |
423 | * | |
424 | * This is only for use by scsi_module.c for legacy templates. | |
425 | * For these access to it is synchronized implicitly by | |
426 | * module_init/module_exit. | |
427 | */ | |
428 | struct list_head legacy_hosts; | |
429 | }; | |
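/*
 * Illustrative sketch (not part of this header): how a low-level driver
 * might fill in the template using the hypothetical mydrv_* callbacks from
 * the sketches above.  Assumes <linux/module.h> for THIS_MODULE.
 */
static struct scsi_host_template mydrv_template = {
	.module			= THIS_MODULE,
	.name			= "mydrv",
	.proc_name		= "mydrv",
	.queuecommand		= mydrv_queuecommand,
	.eh_abort_handler	= mydrv_eh_abort_handler,
	.eh_timed_out		= mydrv_eh_timed_out,
	.slave_alloc		= mydrv_slave_alloc,
	.slave_configure	= mydrv_slave_configure,
	.slave_destroy		= mydrv_slave_destroy,
	.change_queue_depth	= mydrv_change_queue_depth,
	.bios_param		= mydrv_bios_param,
	.can_queue		= 64,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.cmd_per_lun		= 4,
	.use_clustering		= ENABLE_CLUSTERING,
};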
430 | ||
431 | /* | |
d3301874 MA |
432 | * shost state: If you alter this, you also need to alter scsi_sysfs.c |
433 | * (for the ascii descriptions) and the state model enforcer: | |
434 | * scsi_host_set_state() | |
1da177e4 | 435 | */ |
d3301874 MA |
436 | enum scsi_host_state { |
437 | SHOST_CREATED = 1, | |
438 | SHOST_RUNNING, | |
1da177e4 | 439 | SHOST_CANCEL, |
d3301874 | 440 | SHOST_DEL, |
1da177e4 | 441 | SHOST_RECOVERY, |
939647ee JB |
442 | SHOST_CANCEL_RECOVERY, |
443 | SHOST_DEL_RECOVERY, | |
1da177e4 LT |
444 | }; |
445 | ||
446 | struct Scsi_Host { | |
447 | /* | |
448 | * __devices is protected by the host_lock, but you should | |
449 | * usually use scsi_device_lookup / shost_for_each_device | |
450 | * to access it and not worry about the locking yourself. | 
451 | * In the rare case of being in irq context you can use | 
452 | * their __ prefixed variants with the lock held. NEVER | |
453 | * access this list directly from a driver. | |
454 | */ | |
455 | struct list_head __devices; | |
456 | struct list_head __targets; | |
457 | ||
458 | struct scsi_host_cmd_pool *cmd_pool; | |
459 | spinlock_t free_list_lock; | |
460 | struct list_head free_list; /* backup store of cmd structs */ | |
461 | struct list_head starved_list; | |
462 | ||
463 | spinlock_t default_lock; | |
464 | spinlock_t *host_lock; | |
465 | ||
466 | struct semaphore scan_mutex;/* serialize scanning activity */ | |
467 | ||
468 | struct list_head eh_cmd_q; | |
469 | struct task_struct * ehandler; /* Error recovery thread. */ | |
1da177e4 LT |
470 | struct semaphore * eh_action; /* Wait for specific actions on the |
471 | host. */ | |
472 | unsigned int eh_active:1; /* Indicates the eh thread is awake and active if | |
473 | this is true. */ | |
1da177e4 LT |
474 | wait_queue_head_t host_wait; |
475 | struct scsi_host_template *hostt; | |
476 | struct scsi_transport_template *transportt; | |
06f81ea8 | 477 | |
478 | /* | |
479 | * The following two fields are protected with host_lock; | |
480 | * however, eh routines can safely access them during eh processing | 
481 | * without acquiring the lock. | |
482 | */ | |
483 | unsigned int host_busy; /* commands actually active on low-level */ | |
484 | unsigned int host_failed; /* commands that failed. */ | |
1da177e4 LT |
485 | |
486 | unsigned short host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */ | |
487 | int resetting; /* if set, it means that last_reset is a valid value */ | |
488 | unsigned long last_reset; | |
489 | ||
490 | /* | |
491 | * These three parameters can be used to allow for wide scsi, | |
492 | * and for host adapters that support multiple busses. | 
493 | * The first two should be set to 1 more than the actual max id | |
494 | * or lun (i.e. 8 for normal systems). | |
495 | */ | |
496 | unsigned int max_id; | |
497 | unsigned int max_lun; | |
498 | unsigned int max_channel; | |
499 | ||
500 | /* | |
501 | * This is a unique identifier that must be assigned so that we | |
502 | * have some way of identifying each detected host adapter properly | |
503 | * and uniquely. For hosts that do not support more than one card | |
504 | * in the system at one time, this does not need to be set. It is | |
505 | * initialized to 0 in scsi_register. | |
506 | */ | |
507 | unsigned int unique_id; | |
508 | ||
509 | /* | |
510 | * The maximum length of SCSI commands that this host can accept. | |
511 | * Probably 12 for most host adapters, but could be 16 for others. | |
512 | * For drivers that don't set this field, a value of 12 is | |
513 | * assumed. I am leaving this as a number rather than a bit | |
514 | * because you never know what subsequent SCSI standards might do | |
515 | * (i.e. could there be a 20 byte or a 24-byte command a few years | |
516 | * down the road?). | |
517 | */ | |
518 | unsigned char max_cmd_len; | |
519 | ||
520 | int this_id; | |
521 | int can_queue; | |
522 | short cmd_per_lun; | |
523 | short unsigned int sg_tablesize; | |
524 | short unsigned int max_sectors; | |
525 | unsigned long dma_boundary; | |
526 | /* | |
527 | * Used to assign serial numbers to the cmds. | |
528 | * Protected by the host lock. | |
529 | */ | |
530 | unsigned long cmd_serial_number, cmd_pid; | |
531 | ||
532 | unsigned unchecked_isa_dma:1; | |
533 | unsigned use_clustering:1; | |
534 | unsigned use_blk_tcq:1; | |
535 | ||
536 | /* | |
537 | * Host has requested that no further requests come through for the | |
538 | * time being. | |
539 | */ | |
540 | unsigned host_self_blocked:1; | |
541 | ||
542 | /* | |
543 | * Host uses correct SCSI ordering not PC ordering. The bit is | |
544 | * set for the minority of drivers whose authors actually read | |
545 | * the spec ;) | |
546 | */ | |
547 | unsigned reverse_ordering:1; | |
548 | ||
549 | /* | |
550 | * ordered write support | |
551 | */ | |
552 | unsigned ordered_flush:1; | |
553 | unsigned ordered_tag:1; | |
554 | ||
555 | /* | |
556 | * Optional work queue to be utilized by the transport | |
557 | */ | |
558 | char work_q_name[KOBJ_NAME_LEN]; | |
559 | struct workqueue_struct *work_q; | |
560 | ||
561 | /* | |
562 | * Host has rejected a command because it was busy. | |
563 | */ | |
564 | unsigned int host_blocked; | |
565 | ||
566 | /* | |
567 | * Value host_blocked counts down from | |
568 | */ | |
569 | unsigned int max_host_blocked; | |
570 | ||
571 | /* legacy crap */ | |
572 | unsigned long base; | |
573 | unsigned long io_port; | |
574 | unsigned char n_io_port; | |
575 | unsigned char dma_channel; | |
576 | unsigned int irq; | |
577 | ||
578 | ||
d3301874 | 579 | enum scsi_host_state shost_state; |
1da177e4 LT |
580 | |
581 | /* ldm bits */ | |
582 | struct device shost_gendev; | |
583 | struct class_device shost_classdev; | |
584 | ||
585 | /* | |
586 | * List of hosts per template. | |
587 | * | |
588 | * This is only for use by scsi_module.c for legacy templates. | |
589 | * For these access to it is synchronized implicitly by | |
590 | * module_init/module_exit. | |
591 | */ | |
592 | struct list_head sht_legacy_list; | |
593 | ||
594 | /* | |
595 | * Points to the transport data (if any) which is allocated | |
596 | * separately | |
597 | */ | |
598 | void *shost_data; | |
599 | ||
600 | /* | |
601 | * We should ensure that this is aligned, both for better performance | |
602 | * and also because some compilers (m68k) don't automatically force | |
603 | * alignment to a long boundary. | |
604 | */ | |
605 | unsigned long hostdata[0] /* Used for storage of host specific stuff */ | |
606 | __attribute__ ((aligned (sizeof(unsigned long)))); | |
607 | }; | |
608 | ||
609 | #define class_to_shost(d) \ | |
610 | container_of(d, struct Scsi_Host, shost_classdev) | |
611 | ||
9ccfc756 JB |
612 | #define shost_printk(prefix, shost, fmt, a...) \ |
613 | dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a) | |
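/*
 * Illustrative sketch (not part of this header): shost_printk() prefixes the
 * message with the host's generic device name via dev_printk().  The fault
 * code and the helper name are hypothetical.
 */
static void mydrv_report_fault(struct Scsi_Host *shost, unsigned int fault)
{
	shost_printk(KERN_ERR, shost, "controller fault 0x%x\n", fault);
}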
614 | ||
615 | ||
1da177e4 LT |
616 | int scsi_is_host_device(const struct device *); |
617 | ||
618 | static inline struct Scsi_Host *dev_to_shost(struct device *dev) | |
619 | { | |
620 | while (!scsi_is_host_device(dev)) { | |
621 | if (!dev->parent) | |
622 | return NULL; | |
623 | dev = dev->parent; | |
624 | } | |
625 | return container_of(dev, struct Scsi_Host, shost_gendev); | |
626 | } | |
627 | ||
939647ee JB |
628 | static inline int scsi_host_in_recovery(struct Scsi_Host *shost) |
629 | { | |
630 | return shost->shost_state == SHOST_RECOVERY || | |
631 | shost->shost_state == SHOST_CANCEL_RECOVERY || | |
632 | shost->shost_state == SHOST_DEL_RECOVERY; | |
633 | } | |
634 | ||
1da177e4 LT |
635 | extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *); |
636 | extern void scsi_flush_work(struct Scsi_Host *); | |
637 | ||
638 | extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int); | |
639 | extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *); | |
640 | extern void scsi_scan_host(struct Scsi_Host *); | |
641 | extern void scsi_scan_single_target(struct Scsi_Host *, unsigned int, | |
642 | unsigned int); | |
643 | extern void scsi_rescan_device(struct device *); | |
644 | extern void scsi_remove_host(struct Scsi_Host *); | |
645 | extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *); | |
646 | extern void scsi_host_put(struct Scsi_Host *t); | |
647 | extern struct Scsi_Host *scsi_host_lookup(unsigned short); | |
d3301874 | 648 | extern const char *scsi_host_state_name(enum scsi_host_state); |
1da177e4 LT |
649 | |
650 | extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *); | |
651 | ||
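/*
 * Illustrative sketch (not part of this header): the usual allocate ->
 * add -> scan lifecycle using the routines declared above.  mydrv_template,
 * struct mydrv_host and the parent device pointer are assumptions carried
 * over from the earlier sketches.
 */
static int mydrv_setup_host(struct device *parent)
{
	struct Scsi_Host *shost;
	int err;

	shost = scsi_host_alloc(&mydrv_template, sizeof(struct mydrv_host));
	if (!shost)
		return -ENOMEM;

	shost->max_id = 16;		/* one more than the highest target id */
	shost->max_lun = 8;
	shost->max_channel = 0;

	err = scsi_add_host(shost, parent);	/* parent, e.g. &pdev->dev */
	if (err) {
		scsi_host_put(shost);
		return err;
	}

	scsi_scan_host(shost);
	return 0;
}

/* Teardown mirrors this: scsi_remove_host(shost), then scsi_host_put(shost). */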
652 | static inline void scsi_assign_lock(struct Scsi_Host *shost, spinlock_t *lock) | |
653 | { | |
654 | shost->host_lock = lock; | |
655 | } | |
656 | ||
1da177e4 LT |
657 | static inline struct device *scsi_get_device(struct Scsi_Host *shost) |
658 | { | |
659 | return shost->shost_gendev.parent; | |
660 | } | |
661 | ||
82f29467 MA |
662 | /** |
663 | * scsi_host_scan_allowed - Is scanning of this host allowed | |
664 | * @shost: Pointer to Scsi_Host. | |
665 | **/ | |
666 | static inline int scsi_host_scan_allowed(struct Scsi_Host *shost) | |
667 | { | |
668 | return shost->shost_state == SHOST_RUNNING; | |
669 | } | |
670 | ||
1da177e4 LT |
671 | extern void scsi_unblock_requests(struct Scsi_Host *); |
672 | extern void scsi_block_requests(struct Scsi_Host *); | |
673 | ||
674 | struct class_container; | |
675 | /* | |
676 | * These two functions are used to allocate and free a pseudo device | |
677 | * which will connect to the host adapter itself rather than any | |
678 | * physical device. You must deallocate when you are done with the | |
679 | * thing. This pseudo-device is not a real physical device and won't | 
680 | * be available to any high-level drivers. | 
681 | */ | |
682 | extern void scsi_free_host_dev(struct scsi_device *); | |
683 | extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *); | |
684 | ||
685 | /* legacy interfaces */ | |
686 | extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int); | |
687 | extern void scsi_unregister(struct Scsi_Host *); | |
47ba39ee | 688 | extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state); |
1da177e4 LT |
689 | |
690 | #endif /* _SCSI_SCSI_HOST_H */ |