1 /*
2 * Copyright 2003-2005 Red Hat, Inc. All rights reserved.
3 * Copyright 2003-2005 Jeff Garzik
4 *
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 *
20 *
21 * libata documentation is available via 'make {ps|pdf}docs',
22 * as Documentation/DocBook/libata.*
23 *
24 */
25
26 #ifndef __LINUX_LIBATA_H__
27 #define __LINUX_LIBATA_H__
28
29 #include <linux/delay.h>
30 #include <linux/interrupt.h>
31 #include <linux/pci.h>
32 #include <linux/dma-mapping.h>
33 #include <asm/io.h>
34 #include <linux/ata.h>
35 #include <linux/workqueue.h>
36 #include <scsi/scsi_host.h>
37
38 /*
39 * compile-time options: to be removed as soon as all the drivers are
40 * converted to the new debugging mechanism
41 */
42 #undef ATA_DEBUG /* debugging output */
43 #undef ATA_VERBOSE_DEBUG /* yet more debugging output */
44 #undef ATA_IRQ_TRAP /* define to ack screaming irqs */
45 #undef ATA_NDEBUG /* define to disable quick runtime checks */
46 #undef ATA_ENABLE_PATA /* define to enable PATA support in some
47 * low-level drivers */
48
49
50 /* note: prints function name for you */
51 #ifdef ATA_DEBUG
52 #define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
53 #ifdef ATA_VERBOSE_DEBUG
54 #define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
55 #else
56 #define VPRINTK(fmt, args...)
57 #endif /* ATA_VERBOSE_DEBUG */
58 #else
59 #define DPRINTK(fmt, args...)
60 #define VPRINTK(fmt, args...)
61 #endif /* ATA_DEBUG */
62
63 #define BPRINTK(fmt, args...) do { if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args); } while (0)
64
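/*
 * Illustrative sketch (not part of the API): with ATA_DEBUG defined, a
 * driver function can trace entry and exit without repeating its own
 * name, since the macros above prepend __FUNCTION__ automatically.
 * "foo_qc_prep" is a hypothetical driver function used only here.
 *
 *	static void foo_qc_prep(struct ata_queued_cmd *qc)
 *	{
 *		DPRINTK("ENTER\n");
 *		VPRINTK("EXIT, n_elem %u\n", qc->n_elem);
 *	}
 */
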
65 /* NEW: debug levels */
66 #define HAVE_LIBATA_MSG 1
67
68 enum {
69 ATA_MSG_DRV = 0x0001,
70 ATA_MSG_INFO = 0x0002,
71 ATA_MSG_PROBE = 0x0004,
72 ATA_MSG_WARN = 0x0008,
73 ATA_MSG_MALLOC = 0x0010,
74 ATA_MSG_CTL = 0x0020,
75 ATA_MSG_INTR = 0x0040,
76 ATA_MSG_ERR = 0x0080,
77 };
78
79 #define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
80 #define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
81 #define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
82 #define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
83 #define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
84 #define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
85 #define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
86 #define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
87
88 static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
89 {
90 if (dval < 0 || dval >= (sizeof(u32) * 8))
91 return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
92 if (!dval)
93 return 0;
94 return (1 << dval) - 1;
95 }
96
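/*
 * Illustrative sketch (names are hypothetical): a LLDD can expose a
 * debug-level module parameter, convert it with ata_msg_init() while
 * setting up the port, and then guard its printk()s with the
 * ata_msg_*() tests above.
 *
 *	static int dbg_level = 1;	// hypothetical module parameter
 *
 *	// in the driver's port-init path:
 *	ap->msg_enable = ata_msg_init(dbg_level, ATA_MSG_DRV);
 *
 *	if (ata_msg_probe(ap))
 *		printk(KERN_INFO "ata%u: probing devices\n", ap->id);
 */
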
97 /* defines only for the constants which don't work well as enums */
98 #define ATA_TAG_POISON 0xfafbfcfdU
99
100 /* move to PCI layer? */
101 static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
102 {
103 return &pdev->dev;
104 }
105
106 enum {
107 /* various global constants */
108 LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
109 ATA_MAX_PORTS = 8,
110 ATA_DEF_QUEUE = 1,
111 ATA_MAX_QUEUE = 1,
112 ATA_MAX_SECTORS = 200, /* FIXME */
113 ATA_MAX_BUS = 2,
114 ATA_DEF_BUSY_WAIT = 10000,
115 ATA_SHORT_PAUSE = (HZ >> 6) + 1,
116
117 ATA_SHT_EMULATED = 1,
118 ATA_SHT_CMD_PER_LUN = 1,
119 ATA_SHT_THIS_ID = -1,
120 ATA_SHT_USE_CLUSTERING = 1,
121
122 /* struct ata_device stuff */
123 ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */
124 ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */
125 ATA_DFLAG_CFG_MASK = (1 << 8) - 1,
126
127 ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */
128
129 ATA_DEV_UNKNOWN = 0, /* unknown device */
130 ATA_DEV_ATA = 1, /* ATA device */
131 ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */
132 ATA_DEV_ATAPI = 3, /* ATAPI device */
133 ATA_DEV_ATAPI_UNSUP = 4, /* ATAPI device (unsupported) */
134 ATA_DEV_NONE = 5, /* no device */
135
136 /* struct ata_port flags */
137 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
138 /* (doesn't imply presence) */
139 ATA_FLAG_SATA = (1 << 1),
140 ATA_FLAG_NO_LEGACY = (1 << 2), /* no legacy mode check */
141 ATA_FLAG_MMIO = (1 << 3), /* use MMIO, not PIO */
142 ATA_FLAG_SRST = (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
143 ATA_FLAG_SATA_RESET = (1 << 5), /* (obsolete) use COMRESET */
144 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
145 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
146 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
147 ATA_FLAG_IRQ_MASK = (1 << 9), /* Mask IRQ in PIO xfers */
148
149 ATA_FLAG_NOINTR = (1 << 16), /* FIXME: Remove this once
150 * proper HSM is in place. */
151 ATA_FLAG_DEBUGMSG = (1 << 17),
152 ATA_FLAG_FLUSH_PORT_TASK = (1 << 18), /* flush port task */
153
154 ATA_FLAG_DISABLED = (1 << 19), /* port is disabled, ignore it */
155 ATA_FLAG_SUSPENDED = (1 << 20), /* port is suspended */
156
157 /* bits 24:31 of ap->flags are reserved for LLDD specific flags */
158
159 /* struct ata_queued_cmd flags */
160 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi layer */
161 ATA_QCFLAG_SG = (1 << 1), /* have s/g table? */
162 ATA_QCFLAG_SINGLE = (1 << 2), /* no s/g, just a single buffer */
163 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
164 ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
165 ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
166
167 ATA_QCFLAG_EH_SCHEDULED = (1 << 16), /* EH scheduled */
168
169 /* host set flags */
170 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */
171
172 /* various lengths of time */
173 ATA_TMOUT_PIO = 30 * HZ,
174 ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
175 ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
176 ATA_TMOUT_CDB = 30 * HZ,
177 ATA_TMOUT_CDB_QUICK = 5 * HZ,
178 ATA_TMOUT_INTERNAL = 30 * HZ,
179 ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
180
181 /* ATA bus states */
182 BUS_UNKNOWN = 0,
183 BUS_DMA = 1,
184 BUS_IDLE = 2,
185 BUS_NOINTR = 3,
186 BUS_NODATA = 4,
187 BUS_TIMER = 5,
188 BUS_PIO = 6,
189 BUS_EDD = 7,
190 BUS_IDENTIFY = 8,
191 BUS_PACKET = 9,
192
193 /* SATA port states */
194 PORT_UNKNOWN = 0,
195 PORT_ENABLED = 1,
196 PORT_DISABLED = 2,
197
198 /* encoding various smaller bitmaps into a single
199 * unsigned int bitmap; an illustrative snippet follows this enum
200 */
201 ATA_BITS_PIO = 5,
202 ATA_BITS_MWDMA = 3,
203 ATA_BITS_UDMA = 8,
204
205 ATA_SHIFT_PIO = 0,
206 ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_BITS_PIO,
207 ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_BITS_MWDMA,
208
209 ATA_MASK_PIO = ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO,
210 ATA_MASK_MWDMA = ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA,
211 ATA_MASK_UDMA = ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA,
212
213 /* size of buffer to pad xfers ending on unaligned boundaries */
214 ATA_DMA_PAD_SZ = 4,
215 ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
216
217 /* masks for port functions */
218 ATA_PORT_PRIMARY = (1 << 0),
219 ATA_PORT_SECONDARY = (1 << 1),
220
221 /* how hard are we gonna try to probe/recover devices */
222 ATA_PROBE_MAX_TRIES = 3,
223 };
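
/*
 * Illustrative sketch: the PIO/MWDMA/UDMA capability bitmaps are packed
 * into one unsigned int using the ATA_BITS_*, ATA_SHIFT_* and ATA_MASK_*
 * constants above.  Packing and unpacking look roughly like this (the
 * variable names are hypothetical):
 *
 *	unsigned int xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = (pio << ATA_SHIFT_PIO) |
 *		    (mwdma << ATA_SHIFT_MWDMA) |
 *		    (udma << ATA_SHIFT_UDMA);
 *
 *	pio   = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
 *	mwdma = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
 *	udma  = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
 */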
224
225 enum hsm_task_states {
226 HSM_ST_UNKNOWN,
227 HSM_ST_IDLE,
228 HSM_ST_POLL,
229 HSM_ST_TMOUT,
230 HSM_ST,
231 HSM_ST_LAST,
232 HSM_ST_LAST_POLL,
233 HSM_ST_ERR,
234 };
235
236 enum ata_completion_errors {
237 AC_ERR_DEV = (1 << 0), /* device reported error */
238 AC_ERR_HSM = (1 << 1), /* host state machine violation */
239 AC_ERR_TIMEOUT = (1 << 2), /* timeout */
240 AC_ERR_MEDIA = (1 << 3), /* media error */
241 AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */
242 AC_ERR_HOST_BUS = (1 << 5), /* host bus error */
243 AC_ERR_SYSTEM = (1 << 6), /* system error */
244 AC_ERR_INVALID = (1 << 7), /* invalid argument */
245 AC_ERR_OTHER = (1 << 8), /* unknown */
246 };
247
248 /* forward declarations */
249 struct scsi_device;
250 struct ata_port_operations;
251 struct ata_port;
252 struct ata_queued_cmd;
253
254 /* typedefs */
255 typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
256 typedef void (*ata_probeinit_fn_t)(struct ata_port *);
257 typedef int (*ata_reset_fn_t)(struct ata_port *, unsigned int *);
258 typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *);
259
260 struct ata_ioports {
261 unsigned long cmd_addr;
262 unsigned long data_addr;
263 unsigned long error_addr;
264 unsigned long feature_addr;
265 unsigned long nsect_addr;
266 unsigned long lbal_addr;
267 unsigned long lbam_addr;
268 unsigned long lbah_addr;
269 unsigned long device_addr;
270 unsigned long status_addr;
271 unsigned long command_addr;
272 unsigned long altstatus_addr;
273 unsigned long ctl_addr;
274 unsigned long bmdma_addr;
275 unsigned long scr_addr;
276 };
277
278 struct ata_probe_ent {
279 struct list_head node;
280 struct device *dev;
281 const struct ata_port_operations *port_ops;
282 struct scsi_host_template *sht;
283 struct ata_ioports port[ATA_MAX_PORTS];
284 unsigned int n_ports;
285 unsigned int hard_port_no;
286 unsigned int pio_mask;
287 unsigned int mwdma_mask;
288 unsigned int udma_mask;
289 unsigned int legacy_mode;
290 unsigned long irq;
291 unsigned int irq_flags;
292 unsigned long host_flags;
293 unsigned long host_set_flags;
294 void __iomem *mmio_base;
295 void *private_data;
296 };
297
298 struct ata_host_set {
299 spinlock_t lock;
300 struct device *dev;
301 unsigned long irq;
302 void __iomem *mmio_base;
303 unsigned int n_ports;
304 void *private_data;
305 const struct ata_port_operations *ops;
306 unsigned long flags;
307 int simplex_claimed; /* Keep separate in case we
308 ever need to do this locked */
309 struct ata_port * ports[0];
310 };
311
312 struct ata_queued_cmd {
313 struct ata_port *ap;
314 struct ata_device *dev;
315
316 struct scsi_cmnd *scsicmd;
317 void (*scsidone)(struct scsi_cmnd *);
318
319 struct ata_taskfile tf;
320 u8 cdb[ATAPI_CDB_LEN];
321
322 unsigned long flags; /* ATA_QCFLAG_xxx */
323 unsigned int tag;
324 unsigned int n_elem;
325 unsigned int orig_n_elem;
326
327 int dma_dir;
328
329 unsigned int pad_len;
330
331 unsigned int nsect;
332 unsigned int cursect;
333
334 unsigned int nbytes;
335 unsigned int curbytes;
336
337 unsigned int cursg;
338 unsigned int cursg_ofs;
339
340 struct scatterlist sgent;
341 struct scatterlist pad_sgent;
342 void *buf_virt;
343
344 /* DO NOT iterate over __sg manually, use ata_for_each_sg() */
345 struct scatterlist *__sg;
346
347 unsigned int err_mask;
348 struct ata_taskfile result_tf;
349 ata_qc_cb_t complete_fn;
350
351 void *private_data;
352 };
353
354 struct ata_host_stats {
355 unsigned long unhandled_irq;
356 unsigned long idle_irq;
357 unsigned long rw_reqbuf;
358 };
359
360 struct ata_device {
361 u64 n_sectors; /* size of device, if ATA */
362 unsigned long flags; /* ATA_DFLAG_xxx */
363 unsigned int class; /* ATA_DEV_xxx */
364 unsigned int devno; /* 0 or 1 */
365 u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
366 u8 pio_mode;
367 u8 dma_mode;
368 u8 xfer_mode;
369 unsigned int xfer_shift; /* ATA_SHIFT_xxx */
370
371 unsigned int multi_count; /* sector count for
372 READ/WRITE MULTIPLE */
373 unsigned int max_sectors; /* per-device max sectors */
374 unsigned int cdb_len;
375
376 /* per-dev xfer mask */
377 unsigned int pio_mask;
378 unsigned int mwdma_mask;
379 unsigned int udma_mask;
380
381 /* for CHS addressing */
382 u16 cylinders; /* Number of cylinders */
383 u16 heads; /* Number of heads */
384 u16 sectors; /* Number of sectors per track */
385 };
386
387 struct ata_port {
388 struct Scsi_Host *host; /* our co-allocated scsi host */
389 const struct ata_port_operations *ops;
390 unsigned long flags; /* ATA_FLAG_xxx */
391 unsigned int id; /* unique id req'd by scsi midlayer */
392 unsigned int port_no; /* unique port #; from zero */
393 unsigned int hard_port_no; /* hardware port #; from zero */
394
395 struct ata_prd *prd; /* our SG list */
396 dma_addr_t prd_dma; /* and its DMA mapping */
397
398 void *pad; /* array of DMA pad buffers */
399 dma_addr_t pad_dma;
400
401 struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
402
403 u8 ctl; /* cache of ATA control register */
404 u8 last_ctl; /* Cache last written value */
405 unsigned int pio_mask;
406 unsigned int mwdma_mask;
407 unsigned int udma_mask;
408 unsigned int cbl; /* cable type; ATA_CBL_xxx */
409 unsigned int sata_spd_limit; /* SATA PHY speed limit */
410
411 struct ata_device device[ATA_MAX_DEVICES];
412
413 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
414 unsigned long qactive;
415 unsigned int active_tag;
416
417 struct ata_host_stats stats;
418 struct ata_host_set *host_set;
419 struct device *dev;
420
421 struct work_struct port_task;
422
423 unsigned int hsm_task_state;
424 unsigned long pio_task_timeout;
425
426 u32 msg_enable;
427 struct list_head eh_done_q;
428
429 void *private_data;
430
431 u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
432 };
433
434 struct ata_port_operations {
435 void (*port_disable) (struct ata_port *);
436
437 void (*dev_config) (struct ata_port *, struct ata_device *);
438
439 void (*set_piomode) (struct ata_port *, struct ata_device *);
440 void (*set_dmamode) (struct ata_port *, struct ata_device *);
441 unsigned long (*mode_filter) (const struct ata_port *, struct ata_device *, unsigned long);
442
443 void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
444 void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
445
446 void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
447 u8 (*check_status)(struct ata_port *ap);
448 u8 (*check_altstatus)(struct ata_port *ap);
449 void (*dev_select)(struct ata_port *ap, unsigned int device);
450
451 void (*phy_reset) (struct ata_port *ap); /* obsolete */
452 void (*set_mode) (struct ata_port *ap);
453 int (*probe_reset) (struct ata_port *ap, unsigned int *classes);
454
455 void (*post_set_mode) (struct ata_port *ap);
456
457 int (*check_atapi_dma) (struct ata_queued_cmd *qc);
458
459 void (*bmdma_setup) (struct ata_queued_cmd *qc);
460 void (*bmdma_start) (struct ata_queued_cmd *qc);
461
462 void (*qc_prep) (struct ata_queued_cmd *qc);
463 unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
464
465 void (*eng_timeout) (struct ata_port *ap);
466
467 irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
468 void (*irq_clear) (struct ata_port *);
469
470 u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
471 void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
472 u32 val);
473
474 int (*port_start) (struct ata_port *ap);
475 void (*port_stop) (struct ata_port *ap);
476
477 void (*host_stop) (struct ata_host_set *host_set);
478
479 void (*bmdma_stop) (struct ata_queued_cmd *qc);
480 u8 (*bmdma_status) (struct ata_port *ap);
481 };
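
/*
 * Illustrative sketch: a simple BMDMA-style LLDD can fill most of this
 * table with the default helpers exported further down in this header;
 * only the controller-specific hooks need driver code.  The "foo_"
 * names are hypothetical.
 *
 *	static const struct ata_port_operations foo_port_ops = {
 *		.port_disable	= ata_port_disable,
 *		.set_piomode	= foo_set_piomode,	// driver specific
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.check_status	= ata_check_status,
 *		.exec_command	= ata_exec_command,
 *		.dev_select	= ata_std_dev_select,
 *		.probe_reset	= ata_std_probe_reset,
 *		.bmdma_setup	= ata_bmdma_setup,
 *		.bmdma_start	= ata_bmdma_start,
 *		.bmdma_stop	= ata_bmdma_stop,
 *		.bmdma_status	= ata_bmdma_status,
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *		.eng_timeout	= ata_eng_timeout,
 *		.irq_handler	= ata_interrupt,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *		.port_start	= ata_port_start,
 *		.port_stop	= ata_port_stop,
 *		.host_stop	= ata_host_stop,
 *	};
 */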
482
483 struct ata_port_info {
484 struct scsi_host_template *sht;
485 unsigned long host_flags;
486 unsigned long pio_mask;
487 unsigned long mwdma_mask;
488 unsigned long udma_mask;
489 const struct ata_port_operations *port_ops;
490 void *private_data;
491 };
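
/*
 * Illustrative sketch: a matching ata_port_info entry for the
 * hypothetical driver above.  The mode masks (PIO 0-4, MWDMA 0-2,
 * UDMA 0-5) are examples, not requirements; foo_sht is the driver's
 * scsi_host_template.
 *
 *	static struct ata_port_info foo_port_info = {
 *		.sht		= &foo_sht,
 *		.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
 *		.pio_mask	= 0x1f,		// PIO modes 0-4
 *		.mwdma_mask	= 0x07,		// MWDMA modes 0-2
 *		.udma_mask	= 0x3f,		// UDMA modes 0-5
 *		.port_ops	= &foo_port_ops,
 *	};
 */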
492
493 struct ata_timing {
494 unsigned short mode; /* ATA mode */
495 unsigned short setup; /* t1 */
496 unsigned short act8b; /* t2 for 8-bit I/O */
497 unsigned short rec8b; /* t2i for 8-bit I/O */
498 unsigned short cyc8b; /* t0 for 8-bit I/O */
499 unsigned short active; /* t2 or tD */
500 unsigned short recover; /* t2i or tK */
501 unsigned short cycle; /* t0 */
502 unsigned short udma; /* t2CYCTYP/2 */
503 };
504
505 #define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin)
506
507 extern void ata_port_probe(struct ata_port *);
508 extern void __sata_phy_reset(struct ata_port *ap);
509 extern void sata_phy_reset(struct ata_port *ap);
510 extern void ata_bus_reset(struct ata_port *ap);
511 extern int sata_set_spd(struct ata_port *ap);
512 extern int ata_drive_probe_reset(struct ata_port *ap,
513 ata_probeinit_fn_t probeinit,
514 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
515 ata_postreset_fn_t postreset, unsigned int *classes);
516 extern void ata_std_probeinit(struct ata_port *ap);
517 extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
518 extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
519 extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
520 extern int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
521 int post_reset);
522 extern void ata_port_disable(struct ata_port *);
523 extern void ata_std_ports(struct ata_ioports *ioaddr);
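
/*
 * Illustrative sketch: a driver that needs its own hardreset can still
 * reuse the standard helpers above and let ata_drive_probe_reset() run
 * the sequence.  "foo_hardreset" is hypothetical; the rest are the
 * helpers declared above, and the result can be plugged into
 * ata_port_operations.probe_reset.
 *
 *	static int foo_probe_reset(struct ata_port *ap, unsigned int *classes)
 *	{
 *		return ata_drive_probe_reset(ap, ata_std_probeinit,
 *					     ata_std_softreset, foo_hardreset,
 *					     ata_std_postreset, classes);
 *	}
 */
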
524 #ifdef CONFIG_PCI
525 extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
526 unsigned int n_ports);
527 extern void ata_pci_remove_one (struct pci_dev *pdev);
528 extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
529 extern int ata_pci_device_resume(struct pci_dev *pdev);
530 extern int ata_pci_clear_simplex(struct pci_dev *pdev);
531 #endif /* CONFIG_PCI */
532 extern int ata_device_add(const struct ata_probe_ent *ent);
533 extern void ata_host_set_remove(struct ata_host_set *host_set);
534 extern int ata_scsi_detect(struct scsi_host_template *sht);
535 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
536 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
537 extern int ata_scsi_release(struct Scsi_Host *host);
538 extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
539 extern int sata_scr_valid(struct ata_port *ap);
540 extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
541 extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
542 extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
543 extern int ata_port_online(struct ata_port *ap);
544 extern int ata_port_offline(struct ata_port *ap);
545 extern int ata_scsi_device_resume(struct scsi_device *);
546 extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
547 extern int ata_device_resume(struct ata_port *, struct ata_device *);
548 extern int ata_device_suspend(struct ata_port *, struct ata_device *, pm_message_t state);
549 extern int ata_ratelimit(void);
550 extern unsigned int ata_busy_sleep(struct ata_port *ap,
551 unsigned long timeout_pat,
552 unsigned long timeout);
553 extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
554 void *data, unsigned long delay);
555 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
556 unsigned long interval_msec,
557 unsigned long timeout_msec);
558
559 /*
560 * Default driver ops implementations
561 */
562 extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
563 extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
564 extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
565 extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
566 extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
567 extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
568 extern u8 ata_check_status(struct ata_port *ap);
569 extern u8 ata_altstatus(struct ata_port *ap);
570 extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
571 extern int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes);
572 extern int ata_port_start (struct ata_port *ap);
573 extern void ata_port_stop (struct ata_port *ap);
574 extern void ata_host_stop (struct ata_host_set *host_set);
575 extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
576 extern void ata_qc_prep(struct ata_queued_cmd *qc);
577 extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
578 extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
579 extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
580 unsigned int buflen);
581 extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
582 unsigned int n_elem);
583 extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
584 extern void ata_id_string(const u16 *id, unsigned char *s,
585 unsigned int ofs, unsigned int len);
586 extern void ata_id_c_string(const u16 *id, unsigned char *s,
587 unsigned int ofs, unsigned int len);
588 extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
589 extern void ata_bmdma_start (struct ata_queued_cmd *qc);
590 extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
591 extern u8 ata_bmdma_status(struct ata_port *ap);
592 extern void ata_bmdma_irq_clear(struct ata_port *ap);
593 extern void __ata_qc_complete(struct ata_queued_cmd *qc);
594 extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
595 struct scsi_cmnd *cmd,
596 void (*done)(struct scsi_cmnd *));
597 extern int ata_std_bios_param(struct scsi_device *sdev,
598 struct block_device *bdev,
599 sector_t capacity, int geom[]);
600 extern int ata_scsi_slave_config(struct scsi_device *sdev);
601 extern struct ata_device *ata_dev_pair(struct ata_port *ap,
602 struct ata_device *adev);
603
604 /*
605 * Timing helpers
606 */
607
608 extern unsigned int ata_pio_need_iordy(const struct ata_device *);
609 extern int ata_timing_compute(struct ata_device *, unsigned short,
610 struct ata_timing *, int, int);
611 extern void ata_timing_merge(const struct ata_timing *,
612 const struct ata_timing *, struct ata_timing *,
613 unsigned int);
614
615 enum {
616 ATA_TIMING_SETUP = (1 << 0),
617 ATA_TIMING_ACT8B = (1 << 1),
618 ATA_TIMING_REC8B = (1 << 2),
619 ATA_TIMING_CYC8B = (1 << 3),
620 ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
621 ATA_TIMING_CYC8B,
622 ATA_TIMING_ACTIVE = (1 << 4),
623 ATA_TIMING_RECOVER = (1 << 5),
624 ATA_TIMING_CYCLE = (1 << 6),
625 ATA_TIMING_UDMA = (1 << 7),
626 ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
627 ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
628 ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
629 ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
630 };
631
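/*
 * Illustrative sketch (the meaning of the last two arguments is an
 * assumption based on typical LLDD usage: clock periods in ns): compute
 * quantised timings for a device's current PIO mode and merge them with
 * its cable sibling so the shared 8-bit taskfile timing is safe for both.
 *
 *	struct ata_device *pair = ata_dev_pair(ap, adev);
 *	struct ata_timing t, u;
 *
 *	if (ata_timing_compute(adev, adev->pio_mode, &t, 30, 30))
 *		return;
 *	if (pair && !ata_timing_compute(pair, pair->pio_mode, &u, 30, 30))
 *		ata_timing_merge(&u, &t, &t, ATA_TIMING_8BIT);
 */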
632
633 #ifdef CONFIG_PCI
634 struct pci_bits {
635 unsigned int reg; /* PCI config register to read */
636 unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
637 unsigned long mask;
638 unsigned long val;
639 };
640
641 extern void ata_pci_host_stop (struct ata_host_set *host_set);
642 extern struct ata_probe_ent *
643 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
644 extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
645 extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
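
/*
 * Illustrative sketch: pci_test_config_bits() reads the config register
 * described by a struct pci_bits and compares the masked value against
 * the expected one.  A driver might use it to check whether a legacy
 * port is enabled before registering it (the register offsets and masks
 * below are hypothetical):
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41, 1, 0x02, 0x02 },	// port 0
 *		{ 0x43, 1, 0x02, 0x02 },	// port 1
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[0]))
 *		return -ENODEV;		// port 0 disabled in config space
 */
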
646 #endif /* CONFIG_PCI */
647
648 /*
649 * EH
650 */
651 extern void ata_eng_timeout(struct ata_port *ap);
652 extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
653 extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
654
655
656 static inline int
657 ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
658 {
659 if (sg == &qc->pad_sgent)
660 return 1;
661 if (qc->pad_len)
662 return 0;
663 if (((sg - qc->__sg) + 1) == qc->n_elem)
664 return 1;
665 return 0;
666 }
667
668 static inline struct scatterlist *
669 ata_qc_first_sg(struct ata_queued_cmd *qc)
670 {
671 if (qc->n_elem)
672 return qc->__sg;
673 if (qc->pad_len)
674 return &qc->pad_sgent;
675 return NULL;
676 }
677
678 static inline struct scatterlist *
679 ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
680 {
681 if (sg == &qc->pad_sgent)
682 return NULL;
683 if (++sg - qc->__sg < qc->n_elem)
684 return sg;
685 if (qc->pad_len)
686 return &qc->pad_sgent;
687 return NULL;
688 }
689
690 #define ata_for_each_sg(sg, qc) \
691 for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))
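
/*
 * Illustrative sketch: per the note in struct ata_queued_cmd, walk a
 * command's scatterlist only via ata_for_each_sg(), which also covers
 * the pad entry when one is in use:
 *
 *	struct scatterlist *sg;
 *
 *	ata_for_each_sg(sg, qc) {
 *		dma_addr_t addr = sg_dma_address(sg);
 *		u32 len = sg_dma_len(sg);
 *		// ... fill one PRD/DMA descriptor entry ...
 *	}
 */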
692
693 static inline unsigned int ata_tag_valid(unsigned int tag)
694 {
695 return (tag < ATA_MAX_QUEUE) ? 1 : 0;
696 }
697
698 static inline unsigned int ata_class_enabled(unsigned int class)
699 {
700 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
701 }
702
703 static inline unsigned int ata_class_disabled(unsigned int class)
704 {
705 return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
706 }
707
708 static inline unsigned int ata_class_absent(unsigned int class)
709 {
710 return !ata_class_enabled(class) && !ata_class_disabled(class);
711 }
712
713 static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
714 {
715 return ata_class_enabled(dev->class);
716 }
717
718 static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
719 {
720 return ata_class_disabled(dev->class);
721 }
722
723 static inline unsigned int ata_dev_absent(const struct ata_device *dev)
724 {
725 return ata_class_absent(dev->class);
726 }
727
728 static inline u8 ata_chk_status(struct ata_port *ap)
729 {
730 return ap->ops->check_status(ap);
731 }
732
733
734 /**
735 * ata_pause - Flush writes and pause 400 nanoseconds.
736 * @ap: Port to wait for.
737 *
738 * LOCKING:
739 * Inherited from caller.
740 */
741
742 static inline void ata_pause(struct ata_port *ap)
743 {
744 ata_altstatus(ap);
745 ndelay(400);
746 }
747
748
749 /**
750 * ata_busy_wait - Wait for a port status register
751 * @ap: Port to wait for.
 * @bits: bits that must be clear
 * @max: number of 10 us waits
752 *
753 * Waits up to max*10 microseconds for the selected bits in the port's
754 * status register to be cleared.
755 * Returns final value of status register.
756 *
757 * LOCKING:
758 * Inherited from caller.
759 */
760
761 static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
762 unsigned int max)
763 {
764 u8 status;
765
766 do {
767 udelay(10);
768 status = ata_chk_status(ap);
769 max--;
770 } while ((status & bits) && (max > 0));
771
772 return status;
773 }
774
775
776 /**
777 * ata_wait_idle - Wait for a port to be idle.
778 * @ap: Port to wait for.
779 *
780 * Waits up to 10ms for port's BUSY and DRQ signals to clear.
781 * Returns final value of status register.
782 *
783 * LOCKING:
784 * Inherited from caller.
785 */
786
787 static inline u8 ata_wait_idle(struct ata_port *ap)
788 {
789 u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
790
791 if (status & (ATA_BUSY | ATA_DRQ)) {
792 unsigned long l = ap->ioaddr.status_addr;
793 if (ata_msg_warn(ap))
794 printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
795 status, l);
796 }
797
798 return status;
799 }
800
801 static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
802 {
803 qc->tf.ctl |= ATA_NIEN;
804 }
805
806 static inline struct ata_queued_cmd *ata_qc_from_tag (struct ata_port *ap,
807 unsigned int tag)
808 {
809 if (likely(ata_tag_valid(tag)))
810 return &ap->qcmd[tag];
811 return NULL;
812 }
813
814 static inline void ata_tf_init(struct ata_port *ap, struct ata_taskfile *tf, unsigned int device)
815 {
816 memset(tf, 0, sizeof(*tf));
817
818 tf->ctl = ap->ctl;
819 if (device == 0)
820 tf->device = ATA_DEVICE_OBS;
821 else
822 tf->device = ATA_DEVICE_OBS | ATA_DEV1;
823 }
824
825 static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
826 {
827 qc->__sg = NULL;
828 qc->flags = 0;
829 qc->cursect = qc->cursg = qc->cursg_ofs = 0;
830 qc->nsect = 0;
831 qc->nbytes = qc->curbytes = 0;
832 qc->err_mask = 0;
833
834 ata_tf_init(qc->ap, &qc->tf, qc->dev->devno);
835
836 /* init result_tf such that it indicates normal completion */
837 qc->result_tf.command = ATA_DRDY;
838 qc->result_tf.feature = 0;
839 }
840
841 /**
842 * ata_qc_complete - Complete an active ATA command
843 * @qc: Command to complete
845 *
846 * Indicate to the mid and upper layers that an ATA
847 * command has completed, with either an ok or not-ok status.
848 *
849 * LOCKING:
850 * spin_lock_irqsave(host_set lock)
851 */
852 static inline void ata_qc_complete(struct ata_queued_cmd *qc)
853 {
854 struct ata_port *ap = qc->ap;
855
856 if (unlikely(qc->flags & ATA_QCFLAG_EH_SCHEDULED))
857 return;
858
859 /* read result TF if failed or requested */
860 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
861 ap->ops->tf_read(ap, &qc->result_tf);
862
863 __ata_qc_complete(qc);
864 }
865
866 /**
867 * ata_irq_on - Enable interrupts on a port.
868 * @ap: Port on which interrupts are enabled.
869 *
870 * Enable interrupts on a legacy IDE device using MMIO or PIO,
871 * wait for idle, clear any pending interrupts.
872 *
873 * LOCKING:
874 * Inherited from caller.
875 */
876
877 static inline u8 ata_irq_on(struct ata_port *ap)
878 {
879 struct ata_ioports *ioaddr = &ap->ioaddr;
880 u8 tmp;
881
882 ap->ctl &= ~ATA_NIEN;
883 ap->last_ctl = ap->ctl;
884
885 if (ap->flags & ATA_FLAG_MMIO)
886 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
887 else
888 outb(ap->ctl, ioaddr->ctl_addr);
889 tmp = ata_wait_idle(ap);
890
891 ap->ops->irq_clear(ap);
892
893 return tmp;
894 }
895
896
897 /**
898 * ata_irq_ack - Acknowledge a device interrupt.
899 * @ap: Port on which interrupts are enabled.
900 *
901 * Wait up to 10 ms for legacy IDE device to become idle (BUSY
902 * or BUSY+DRQ clear). Obtain dma status and port status from
903 * device. Clear the interrupt. Return port status.
904 *
905 * LOCKING:
906 */
907
908 static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
909 {
910 unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
911 u8 host_stat, post_stat, status;
912
913 status = ata_busy_wait(ap, bits, 1000);
914 if (status & bits)
915 if (ata_msg_err(ap))
916 printk(KERN_ERR "abnormal status 0x%X\n", status);
917
918 /* get controller status; clear intr, err bits */
919 if (ap->flags & ATA_FLAG_MMIO) {
920 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
921 host_stat = readb(mmio + ATA_DMA_STATUS);
922 writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
923 mmio + ATA_DMA_STATUS);
924
925 post_stat = readb(mmio + ATA_DMA_STATUS);
926 } else {
927 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
928 outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
929 ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
930
931 post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
932 }
933
934 if (ata_msg_intr(ap))
935 printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
936 __FUNCTION__,
937 host_stat, post_stat, status);
938
939 return status;
940 }
941
942 static inline u32 scr_read(struct ata_port *ap, unsigned int reg)
943 {
944 return ap->ops->scr_read(ap, reg);
945 }
946
947 static inline void scr_write(struct ata_port *ap, unsigned int reg, u32 val)
948 {
949 ap->ops->scr_write(ap, reg, val);
950 }
951
952 static inline void scr_write_flush(struct ata_port *ap, unsigned int reg,
953 u32 val)
954 {
955 ap->ops->scr_write(ap, reg, val);
956 (void) ap->ops->scr_read(ap, reg);
957 }
958
959 static inline unsigned int sata_dev_present(struct ata_port *ap)
960 {
961 return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 1 : 0;
962 }
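
/*
 * Illustrative sketch: on SATA hosts the scr_*() accessors wrap the
 * driver's scr_read/scr_write hooks.  A common pattern is to snapshot
 * and clear SError after a link event (the SCR_* register indices come
 * from <linux/ata.h>):
 *
 *	if (sata_dev_present(ap)) {
 *		u32 serror = scr_read(ap, SCR_ERROR);
 *		scr_write(ap, SCR_ERROR, serror);	// write-1-to-clear
 *	}
 */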
963
964 static inline int ata_try_flush_cache(const struct ata_device *dev)
965 {
966 return ata_id_wcache_enabled(dev->id) ||
967 ata_id_has_flush(dev->id) ||
968 ata_id_has_flush_ext(dev->id);
969 }
970
971 static inline unsigned int ac_err_mask(u8 status)
972 {
973 if (status & ATA_BUSY)
974 return AC_ERR_HSM;
975 if (status & (ATA_ERR | ATA_DF))
976 return AC_ERR_DEV;
977 return 0;
978 }
979
980 static inline unsigned int __ac_err_mask(u8 status)
981 {
982 unsigned int mask = ac_err_mask(status);
983 if (mask == 0)
984 return AC_ERR_OTHER;
985 return mask;
986 }
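
/*
 * Illustrative sketch: a typical LLDD interrupt path reads the device
 * status, converts it into an err_mask and completes the qc, roughly as
 * ata_host_intr() does for the common case:
 *
 *	u8 status = ata_chk_status(ap);
 *
 *	qc->err_mask |= ac_err_mask(status);
 *	ata_qc_complete(qc);
 */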
987
988 static inline int ata_pad_alloc(struct ata_port *ap, struct device *dev)
989 {
990 ap->pad_dma = 0;
991 ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ,
992 &ap->pad_dma, GFP_KERNEL);
993 return (ap->pad == NULL) ? -ENOMEM : 0;
994 }
995
996 static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
997 {
998 dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
999 }
1000
1001 static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
1002 {
1003 return (struct ata_port *) &host->hostdata[0];
1004 }
1005
1006 #endif /* __LINUX_LIBATA_H__ */