[SCSI] add inline functions for recognising created and blocked states
[deliverable/linux.git] / include / scsi / scsi_device.h
CommitLineData
1da177e4
LT
1#ifndef _SCSI_SCSI_DEVICE_H
2#define _SCSI_SCSI_DEVICE_H
3
4#include <linux/device.h>
5#include <linux/list.h>
6#include <linux/spinlock.h>
ffedb452 7#include <linux/workqueue.h>
a4d04a4c 8#include <linux/blkdev.h>
d211f052 9#include <scsi/scsi.h>
1da177e4
LT
10#include <asm/atomic.h>
11
12struct request_queue;
13struct scsi_cmnd;
e10fb91c 14struct scsi_lun;
ea73a9f2 15struct scsi_sense_hdr;
1da177e4 16
1cf72699
JB
/*
 * scsi_mode_data: parsed mode-parameter header, filled in for callers of
 * scsi_mode_sense()/scsi_mode_select() (declared later in this header).
 */
struct scsi_mode_data {
	__u32	length;			/* total length of the mode data */
	__u16	block_descriptor_length; /* length of block descriptor(s) */
	__u8	medium_type;
	__u8	device_specific;	/* device-specific parameter byte */
	__u8	header_length;		/* length of the mode parameter header */
	__u8	longlba:1;		/* set when long-LBA block descriptors
					 * are in use */
};
25
1da177e4
LT
26/*
27 * sdev state: If you alter this, you also need to alter scsi_sysfs.c
28 * (for the ascii descriptions) and the state model enforcer:
29 * scsi_lib:scsi_device_set_state().
30 */
/*
 * sdev state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_lib:scsi_device_set_state().
 */
enum scsi_device_state {
	SDEV_CREATED = 1,	/* device created but not added to sysfs
				 * Only internal commands allowed (for inq) */
	SDEV_RUNNING,		/* device properly configured
				 * All commands allowed */
	SDEV_CANCEL,		/* beginning to delete device
				 * Only error handler commands allowed */
	SDEV_DEL,		/* device deleted
				 * no commands allowed */
	SDEV_QUIESCE,		/* Device quiescent.  No block commands
				 * will be accepted, only specials (which
				 * originate in the mid-layer) */
	SDEV_OFFLINE,		/* Device offlined (by error handling or
				 * user request) */
	SDEV_BLOCK,		/* Device blocked by scsi lld.  No scsi
				 * commands from user or midlayer should be issued
				 * to the scsi lld. */
};
49
a341cd0f
JG
/*
 * Asynchronous events a device can raise towards userspace; see
 * sdev_evt_alloc()/sdev_evt_send() below.  SDEV_EVT_MAXBITS sizes the
 * supported_events bitmap in struct scsi_device.
 */
enum scsi_device_event {
	SDEV_EVT_MEDIA_CHANGE	= 1,	/* media has changed */

	SDEV_EVT_LAST		= SDEV_EVT_MEDIA_CHANGE,
	SDEV_EVT_MAXBITS	= SDEV_EVT_LAST + 1
};
56
/*
 * One queued device event; linked on scsi_device.event_list via @node
 * and delivered from scsi_device.event_work.
 */
struct scsi_event {
	enum scsi_device_event	evt_type;
	struct list_head	node;

	/* put union of data structures, for non-simple event types,
	 * here
	 */
};
65
1da177e4
LT
/*
 * struct scsi_device - mid-layer representation of a single SCSI
 * logical unit, addressed by (host, channel, id, lun).
 *
 * sdev_data[] is trailing variable-length storage (presumably for the
 * transport class, mirroring starget_data in struct scsi_target —
 * confirm at the allocation site); it must stay the last member.
 */
struct scsi_device {
	struct Scsi_Host *host;
	struct request_queue *request_queue;

	/* the next two are protected by the host->host_lock */
	struct list_head    siblings;   /* list of all devices on this host */
	struct list_head    same_target_siblings; /* just the devices sharing same target id */

	/* this is now protected by the request_queue->queue_lock */
	unsigned int device_busy;	/* commands actually active on
					 * low-level. protected by queue_lock. */
	spinlock_t list_lock;
	struct list_head cmd_list;	/* queue of in use SCSI Command structures */
	struct list_head starved_entry;
	struct scsi_cmnd *current_cmnd;	/* currently active command */
	unsigned short queue_depth;	/* How deep of a queue we want */
	unsigned short last_queue_full_depth; /* These two are used by */
	unsigned short last_queue_full_count; /* scsi_track_queue_full() */
	unsigned long last_queue_full_time;/* don't let QUEUE_FULLs on the same
					     jiffie count on our counter, they
					     could all be from the same event. */

	unsigned int id, lun, channel;

	unsigned int manufacturer;	/* Manufacturer of device, for using
					 * vendor-specific cmd's */
	unsigned sector_size;	/* size in bytes */

	void *hostdata;		/* available to low-level driver */
	char type;
	char scsi_level;
	char inq_periph_qual;	/* PQ from INQUIRY data */
	unsigned char inquiry_len;	/* valid bytes in 'inquiry' */
	unsigned char * inquiry;	/* INQUIRY response data */
	const char * vendor;		/* [back_compat] point into 'inquiry' ... */
	const char * model;		/* ... after scan; point to static string */
	const char * rev;		/* ... "nullnullnullnull" before scan */
	unsigned char current_tag;	/* current tag */
	struct scsi_target      *sdev_target;   /* used only for single_lun */

	unsigned int	sdev_bflags; /* black/white flags as also found in
				      * scsi_devinfo.[hc]. For now used only to
				      * pass settings from slave_alloc to scsi
				      * core. */
	unsigned writeable:1;
	unsigned removable:1;
	unsigned changed:1;	/* Data invalid due to media change */
	unsigned busy:1;	/* Used to prevent races */
	unsigned lockable:1;	/* Able to prevent media removal */
	unsigned locked:1;      /* Media removal disabled */
	unsigned borken:1;	/* Tell the Seagate driver to be
				 * painfully slow on this device */
	unsigned disconnect:1;	/* can disconnect */
	unsigned soft_reset:1;	/* Uses soft reset option */
	unsigned sdtr:1;	/* Device supports SDTR messages */
	unsigned wdtr:1;	/* Device supports WDTR messages */
	unsigned ppr:1;		/* Device supports PPR messages */
	unsigned tagged_supported:1;	/* Supports SCSI-II tagged queuing */
	unsigned simple_tags:1;	/* simple queue tag messages are enabled */
	unsigned ordered_tags:1;/* ordered queue tag messages are enabled */
	unsigned was_reset:1;	/* There was a bus reset on the bus for
				 * this device */
	unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN
				     * because we did a bus reset. */
	unsigned use_10_for_rw:1; /* first try 10-byte read / write */
	unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
	unsigned skip_ms_page_8:1;	/* do not use MODE SENSE page 0x08 */
	unsigned skip_ms_page_3f:1;	/* do not use MODE SENSE page 0x3f */
	unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
	unsigned no_start_on_add:1;	/* do not issue start on add */
	unsigned allow_restart:1; /* issue START_UNIT in error handler */
	unsigned manage_start_stop:1;	/* Let HLD (sd) manage start/stop */
	unsigned start_stop_pwr_cond:1;	/* Set power cond. in START_STOP_UNIT */
	unsigned no_uld_attach:1; /* disable connecting to upper level drivers */
	unsigned select_no_atn:1;
	unsigned fix_capacity:1;	/* READ_CAPACITY is too high by 1 */
	unsigned guess_capacity:1;	/* READ_CAPACITY might be too high by 1 */
	unsigned retry_hwerror:1;	/* Retry HARDWARE_ERROR */
	unsigned last_sector_bug:1;	/* do not use multisector accesses on
					   SD_LAST_BUGGY_SECTORS */

	DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
	struct list_head event_list;	/* asserted events */
	struct work_struct event_work;

	unsigned int device_blocked;	/* Device returned QUEUE_FULL. */

	unsigned int max_device_blocked; /* what device_blocked counts down from  */
#define SCSI_DEFAULT_DEVICE_BLOCKED	3

	atomic_t iorequest_cnt;
	atomic_t iodone_cnt;
	atomic_t ioerr_cnt;

	int timeout;

	struct device		sdev_gendev,	/* device-model node; also the
						 * anchor for to_scsi_device() */
				sdev_dev;	/* class device (class_to_sdev) */

	struct execute_work	ew; /* used to get process context on put */

	struct scsi_dh_data	*scsi_dh_data;	/* device-handler private data */
	enum scsi_device_state sdev_state;
	unsigned long		sdev_data[0];	/* must be last */
} __attribute__((aligned(sizeof(unsigned long))));
a6a8d9f8 171
765cbc6d
HR
/*
 * One vendor/model pair a device handler claims; handlers expose an
 * array of these via scsi_device_handler.devlist.
 */
struct scsi_dh_devlist {
	char *vendor;
	char *model;
};
176
a6a8d9f8
CS
/*
 * A hardware (multipath) device handler.  Registered with
 * scsi_register_device_handler() and matched against devices via
 * @devlist; all callbacks receive the scsi_device being handled.
 */
struct scsi_device_handler {
	/* Used by the infrastructure */
	struct list_head list; /* list of scsi_device_handlers */

	/* Filled by the hardware handler */
	struct module *module;
	const char *name;
	const struct scsi_dh_devlist *devlist;	/* vendor/model pairs handled */
	int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
	int (*attach)(struct scsi_device *);
	void (*detach)(struct scsi_device *);
	int (*activate)(struct scsi_device *);
	int (*prep_fn)(struct scsi_device *, struct request *);
};
191
/*
 * Per-device state for an attached device handler; buf[] is trailing
 * handler-private storage and must stay the last member.
 */
struct scsi_dh_data {
	struct scsi_device_handler *scsi_dh;
	char buf[0];
};
196
1da177e4
LT
/* map a struct device embedded in a scsi_device back to the scsi_device */
#define	to_scsi_device(d)	\
	container_of(d, struct scsi_device, sdev_gendev)
/* same, but from the class device (sdev_dev) */
#define	class_to_sdev(d)	\
	container_of(d, struct scsi_device, sdev_dev)
/* transport class devices hang off sdev_gendev as their parent */
#define transport_class_to_sdev(class_dev) \
	to_scsi_device(class_dev->parent)

/* log a message prefixed with the sdev's device name */
#define sdev_printk(prefix, sdev, fmt, a...)	\
	dev_printk(prefix, &(sdev)->sdev_gendev, fmt, ##a)
206
a4d04a4c
MP
/*
 * Log a message for the device owning @scmd, prefixed with the gendisk
 * name when the request already has a disk attached (easier to
 * attribute than the bare SCSI address).
 *
 * The expansion is wrapped in parentheses so the conditional operator
 * cannot misbind when the macro is used inside a larger expression.
 * Note: @scmd is evaluated more than once — do not pass an expression
 * with side effects.
 */
#define scmd_printk(prefix, scmd, fmt, a...)				\
	((scmd)->request->rq_disk ?					\
	 sdev_printk(prefix, (scmd)->device, "[%s] " fmt,		\
		     (scmd)->request->rq_disk->disk_name, ##a) :	\
	 sdev_printk(prefix, (scmd)->device, fmt, ##a))
01d7b3b8 212
/*
 * Lifecycle states of a scsi_target (see struct scsi_target.state).
 */
enum scsi_target_state {
	STARGET_CREATED = 1,	/* allocated, not yet visible */
	STARGET_RUNNING,	/* fully configured */
	STARGET_DEL,		/* being torn down */
};
218
1da177e4
LT
/*
 * scsi_target: representation of a scsi target, for now, this is only
 * used for single_lun devices. If no one has active IO to the target,
 * starget_sdev_user is NULL, else it points to the active sdev.
 */
struct scsi_target {
	struct scsi_device	*starget_sdev_user;
	struct list_head	siblings;
	struct list_head	devices;
	struct device		dev;
	unsigned int		reap_ref; /* protected by the host lock */
	unsigned int		channel;
	unsigned int		id; /* target id ... replace
				     * scsi_device.id eventually */
	unsigned int		create:1; /* signal that it needs to be added */
	unsigned int		single_lun:1;	/* Indicates we should only
						 * allow I/O to one of the luns
						 * for the device at a time. */
	unsigned int		pdt_1f_for_no_lun;	/* PDT = 0x1f */
						/* means no lun present */

	char			scsi_level;
	struct execute_work	ew;	/* used to get process context on put */
	enum scsi_target_state	state;
	void 			*hostdata; /* available to low-level driver */
	unsigned long		starget_data[0]; /* for the transport */
	/* starget_data must be the last element!!!! */
} __attribute__((aligned(sizeof(unsigned long))));
247
/* map a struct device embedded in a scsi_target back to the scsi_target */
#define to_scsi_target(d)	container_of(d, struct scsi_target, dev)

/* the target is always the parent of the sdev's generic device */
static inline struct scsi_target *scsi_target(struct scsi_device *sdev)
{
	return to_scsi_target(sdev->sdev_gendev.parent);
}
#define transport_class_to_starget(class_dev) \
	to_scsi_target(class_dev->parent)

/* log a message prefixed with the target's device name */
#define starget_printk(prefix, starget, fmt, a...)	\
	dev_printk(prefix, &(starget)->dev, fmt, ##a)
258
1da177e4
LT
/* device addition and removal */
extern struct scsi_device *__scsi_add_device(struct Scsi_Host *,
		uint, uint, uint, void *hostdata);
extern int scsi_add_device(struct Scsi_Host *host, uint channel,
			   uint target, uint lun);
extern int scsi_register_device_handler(struct scsi_device_handler *scsi_dh);
extern void scsi_remove_device(struct scsi_device *);
extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh);

/* reference counting and lookup (double-underscore variants are the
 * unlocked/no-refcount forms — see their definitions for the exact
 * locking contract) */
extern int scsi_device_get(struct scsi_device *);
extern void scsi_device_put(struct scsi_device *);
extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *,
					      uint, uint, uint);
extern struct scsi_device *__scsi_device_lookup(struct Scsi_Host *,
						uint, uint, uint);
extern struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *,
							uint);
extern struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *,
							  uint);
extern void starget_for_each_device(struct scsi_target *, void *,
		     void (*fn)(struct scsi_device *, void *));
extern void __starget_for_each_device(struct scsi_target *, void *,
				      void (*fn)(struct scsi_device *,
						 void *));

/* only exposed to implement shost_for_each_device */
extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
						  struct scsi_device *);

/**
 * shost_for_each_device - iterate over all devices of a host
 * @sdev: the &struct scsi_device to use as a cursor
 * @shost: the &struct scsi_host to iterate over
 *
 * Iterator that returns each device attached to @shost.  This loop
 * takes a reference on each device and releases it at the end.  If
 * you break out of the loop, you must call scsi_device_put(sdev).
 */
#define shost_for_each_device(sdev, shost) \
	for ((sdev) = __scsi_iterate_devices((shost), NULL); \
	     (sdev); \
	     (sdev) = __scsi_iterate_devices((shost), (sdev)))

/**
 * __shost_for_each_device - iterate over all devices of a host (UNLOCKED)
 * @sdev: the &struct scsi_device to use as a cursor
 * @shost: the &struct scsi_host to iterate over
 *
 * Iterator that returns each device attached to @shost.  It does _not_
 * take a reference on the scsi_device, so the whole loop must be
 * protected by shost->host_lock.
 *
 * Note: The only reason to use this is because you need to access the
 * device list in interrupt context.  Otherwise you really want to use
 * shost_for_each_device instead.
 */
#define __shost_for_each_device(sdev, shost) \
	list_for_each_entry((sdev), &((shost)->__devices), siblings)

/* queue depth tuning */
extern void scsi_adjust_queue_depth(struct scsi_device *, int, int);
extern int scsi_track_queue_full(struct scsi_device *, int);

extern int scsi_set_medium_removal(struct scsi_device *, char);

/* synchronous helpers for common commands */
extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
			   unsigned char *buffer, int len, int timeout,
			   int retries, struct scsi_mode_data *data,
			   struct scsi_sense_hdr *);
extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
			    int modepage, unsigned char *buffer, int len,
			    int timeout, int retries,
			    struct scsi_mode_data *data,
			    struct scsi_sense_hdr *);
extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
				int retries, struct scsi_sense_hdr *sshdr);

/* state machine and event delivery */
extern int scsi_device_set_state(struct scsi_device *sdev,
				 enum scsi_device_state state);
extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
					  gfp_t gfpflags);
extern void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt);
extern void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags);
extern int scsi_device_quiesce(struct scsi_device *sdev);
extern void scsi_device_resume(struct scsi_device *sdev);
extern void scsi_target_quiesce(struct scsi_target *);
extern void scsi_target_resume(struct scsi_target *);

/* scanning and target lifetime */
extern void scsi_scan_target(struct device *parent, unsigned int channel,
			     unsigned int id, unsigned int lun, int rescan);
extern void scsi_target_reap(struct scsi_target *);
extern void scsi_target_block(struct device *);
extern void scsi_target_unblock(struct device *);
extern void scsi_remove_target(struct device *);

/* LUN representation conversions */
extern void int_to_scsilun(unsigned int, struct scsi_lun *);
extern int scsilun_to_int(struct scsi_lun *);

extern const char *scsi_device_state_name(enum scsi_device_state);
extern int scsi_is_sdev_device(const struct device *);
extern int scsi_is_target_device(const struct device *);

/* command submission */
extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
			int data_direction, void *buffer, unsigned bufflen,
			unsigned char *sense, int timeout, int retries,
			int flag);
extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
			    int data_direction, void *buffer, unsigned bufflen,
			    struct scsi_sense_hdr *, int timeout, int retries);
extern int scsi_execute_async(struct scsi_device *sdev,
			      const unsigned char *cmd, int cmd_len, int data_direction,
			      void *buffer, unsigned bufflen, int use_sg,
			      int timeout, int retries, void *privdata,
			      void (*done)(void *, char *, int, int),
			      gfp_t gfp);
33aa687d 368
cb5d9e09 369static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev)
e28482c5 370{
cb5d9e09 371 return device_reprobe(&sdev->sdev_gendev);
e28482c5
JB
372}
373
01d7b3b8
JG
/* accessor for the sdev's channel number */
static inline unsigned int sdev_channel(struct scsi_device *sdev)
{
	return sdev->channel;
}

/* accessor for the sdev's target id */
static inline unsigned int sdev_id(struct scsi_device *sdev)
{
	return sdev->id;
}

/* same accessors, applied to the device owning a scsi_cmnd */
#define scmd_id(scmd) sdev_id((scmd)->device)
#define scmd_channel(scmd) sdev_channel((scmd)->device)
386
0f1d87a2
JB
387/*
388 * checks for positions of the SCSI state machine
389 */
1da177e4
LT
390static inline int scsi_device_online(struct scsi_device *sdev)
391{
392 return sdev->sdev_state != SDEV_OFFLINE;
393}
0f1d87a2
JB
394static inline int scsi_device_blocked(struct scsi_device *sdev)
395{
396 return sdev->sdev_state == SDEV_BLOCK;
397}
398static inline int scsi_device_created(struct scsi_device *sdev)
399{
400 return sdev->sdev_state == SDEV_CREATED;
401}
1da177e4
LT
402
/* accessor functions for the SCSI parameters */

/* device negotiated synchronous transfer (SDTR) */
static inline int scsi_device_sync(struct scsi_device *sdev)
{
	return sdev->sdtr;
}
/* device negotiated wide transfer (WDTR) */
static inline int scsi_device_wide(struct scsi_device *sdev)
{
	return sdev->wdtr;
}
/* device supports DT via PPR messages */
static inline int scsi_device_dt(struct scsi_device *sdev)
{
	return sdev->ppr;
}
416static inline int scsi_device_dt_only(struct scsi_device *sdev)
417{
418 if (sdev->inquiry_len < 57)
419 return 0;
420 return (sdev->inquiry[56] & 0x0c) == 0x04;
421}
422static inline int scsi_device_ius(struct scsi_device *sdev)
423{
424 if (sdev->inquiry_len < 57)
425 return 0;
426 return sdev->inquiry[56] & 0x01;
427}
428static inline int scsi_device_qas(struct scsi_device *sdev)
429{
430 if (sdev->inquiry_len < 57)
431 return 0;
432 return sdev->inquiry[56] & 0x02;
433}
b30c2fc1
JB
434static inline int scsi_device_enclosure(struct scsi_device *sdev)
435{
436 return sdev->inquiry[6] & (1<<6);
437}
d7b8bcb0 438
7027ad72
MP
439static inline int scsi_device_protection(struct scsi_device *sdev)
440{
d211f052 441 return sdev->scsi_level > SCSI_2 && sdev->inquiry[5] & (1<<0);
7027ad72
MP
442}
443
d7b8bcb0
MT
/* module alias by SCSI peripheral device type, e.g. "scsi:t-0x00*";
 * SCSI_DEVICE_MODALIAS_FMT is the matching MODALIAS format string */
#define MODULE_ALIAS_SCSI_DEVICE(type) \
	MODULE_ALIAS("scsi:t-" __stringify(type) "*")
#define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
447
1da177e4 448#endif /* _SCSI_SCSI_DEVICE_H */
This page took 0.33831 seconds and 5 git commands to generate.