2 libata-core.c - helper library for ATA
4 Copyright 2003-2004 Red Hat, Inc. All rights reserved.
5 Copyright 2003-2004 Jeff Garzik
7 The contents of this file are subject to the Open
8 Software License version 1.1 that can be found at
9 http://www.opensource.org/licenses/osl-1.1.txt and is included herein
12 Alternatively, the contents of this file may be used under the terms
13 of the GNU General Public License version 2 (the "GPL") as distributed
14 in the kernel source COPYING file, in which case the provisions of
15 the GPL are applicable instead of the above. If you wish to allow
16 the use of your version of this file only under the terms of the
17 GPL and not to allow others to use your version of this file under
18 the OSL, indicate your decision by deleting the provisions above and
19 replace them with the notice and other provisions required by the GPL.
20 If you do not delete the provisions above, a recipient may use your
21 version of this file under either the OSL or the GPL.
25 #include <linux/config.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/list.h>
32 #include <linux/highmem.h>
33 #include <linux/spinlock.h>
34 #include <linux/blkdev.h>
35 #include <linux/delay.h>
36 #include <linux/timer.h>
37 #include <linux/interrupt.h>
38 #include <linux/completion.h>
39 #include <linux/suspend.h>
40 #include <linux/workqueue.h>
41 #include <scsi/scsi.h>
43 #include "scsi_priv.h"
44 #include <scsi/scsi_host.h>
45 #include <linux/libata.h>
47 #include <asm/semaphore.h>
48 #include <asm/byteorder.h>
52 static unsigned int ata_busy_sleep (struct ata_port
*ap
,
53 unsigned long tmout_pat
,
55 static void ata_set_mode(struct ata_port
*ap
);
56 static void ata_dev_set_xfermode(struct ata_port
*ap
, struct ata_device
*dev
);
57 static unsigned int ata_get_mode_mask(struct ata_port
*ap
, int shift
);
58 static int fgb(u32 bitmap
);
59 static int ata_choose_xfer_mode(struct ata_port
*ap
,
61 unsigned int *xfer_shift_out
);
62 static int ata_qc_complete_noop(struct ata_queued_cmd
*qc
, u8 drv_stat
);
63 static void __ata_qc_complete(struct ata_queued_cmd
*qc
);
65 static unsigned int ata_unique_id
= 1;
66 static struct workqueue_struct
*ata_wq
;
68 MODULE_AUTHOR("Jeff Garzik");
69 MODULE_DESCRIPTION("Library module for ATA devices");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_VERSION
);
74 * ata_tf_load - send taskfile registers to host controller
75 * @ap: Port to which output is sent
76 * @tf: ATA taskfile register set
78 * Outputs ATA taskfile to standard ATA host controller.
81 * Inherited from caller.
84 static void ata_tf_load_pio(struct ata_port
*ap
, struct ata_taskfile
*tf
)
86 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
87 unsigned int is_addr
= tf
->flags
& ATA_TFLAG_ISADDR
;
89 if (tf
->ctl
!= ap
->last_ctl
) {
90 outb(tf
->ctl
, ioaddr
->ctl_addr
);
91 ap
->last_ctl
= tf
->ctl
;
95 if (is_addr
&& (tf
->flags
& ATA_TFLAG_LBA48
)) {
96 outb(tf
->hob_feature
, ioaddr
->feature_addr
);
97 outb(tf
->hob_nsect
, ioaddr
->nsect_addr
);
98 outb(tf
->hob_lbal
, ioaddr
->lbal_addr
);
99 outb(tf
->hob_lbam
, ioaddr
->lbam_addr
);
100 outb(tf
->hob_lbah
, ioaddr
->lbah_addr
);
101 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
110 outb(tf
->feature
, ioaddr
->feature_addr
);
111 outb(tf
->nsect
, ioaddr
->nsect_addr
);
112 outb(tf
->lbal
, ioaddr
->lbal_addr
);
113 outb(tf
->lbam
, ioaddr
->lbam_addr
);
114 outb(tf
->lbah
, ioaddr
->lbah_addr
);
115 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
123 if (tf
->flags
& ATA_TFLAG_DEVICE
) {
124 outb(tf
->device
, ioaddr
->device_addr
);
125 VPRINTK("device 0x%X\n", tf
->device
);
132 * ata_tf_load_mmio - send taskfile registers to host controller
133 * @ap: Port to which output is sent
134 * @tf: ATA taskfile register set
136 * Outputs ATA taskfile to standard ATA host controller using MMIO.
139 * Inherited from caller.
142 static void ata_tf_load_mmio(struct ata_port
*ap
, struct ata_taskfile
*tf
)
144 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
145 unsigned int is_addr
= tf
->flags
& ATA_TFLAG_ISADDR
;
147 if (tf
->ctl
!= ap
->last_ctl
) {
148 writeb(tf
->ctl
, (void __iomem
*) ap
->ioaddr
.ctl_addr
);
149 ap
->last_ctl
= tf
->ctl
;
153 if (is_addr
&& (tf
->flags
& ATA_TFLAG_LBA48
)) {
154 writeb(tf
->hob_feature
, (void __iomem
*) ioaddr
->feature_addr
);
155 writeb(tf
->hob_nsect
, (void __iomem
*) ioaddr
->nsect_addr
);
156 writeb(tf
->hob_lbal
, (void __iomem
*) ioaddr
->lbal_addr
);
157 writeb(tf
->hob_lbam
, (void __iomem
*) ioaddr
->lbam_addr
);
158 writeb(tf
->hob_lbah
, (void __iomem
*) ioaddr
->lbah_addr
);
159 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
168 writeb(tf
->feature
, (void __iomem
*) ioaddr
->feature_addr
);
169 writeb(tf
->nsect
, (void __iomem
*) ioaddr
->nsect_addr
);
170 writeb(tf
->lbal
, (void __iomem
*) ioaddr
->lbal_addr
);
171 writeb(tf
->lbam
, (void __iomem
*) ioaddr
->lbam_addr
);
172 writeb(tf
->lbah
, (void __iomem
*) ioaddr
->lbah_addr
);
173 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
181 if (tf
->flags
& ATA_TFLAG_DEVICE
) {
182 writeb(tf
->device
, (void __iomem
*) ioaddr
->device_addr
);
183 VPRINTK("device 0x%X\n", tf
->device
);
189 void ata_tf_load(struct ata_port
*ap
, struct ata_taskfile
*tf
)
191 if (ap
->flags
& ATA_FLAG_MMIO
)
192 ata_tf_load_mmio(ap
, tf
);
194 ata_tf_load_pio(ap
, tf
);
198 * ata_exec_command - issue ATA command to host controller
199 * @ap: port to which command is being issued
200 * @tf: ATA taskfile register set
202 * Issues PIO/MMIO write to ATA command register, with proper
203 * synchronization with interrupt handler / other threads.
206 * spin_lock_irqsave(host_set lock)
209 static void ata_exec_command_pio(struct ata_port
*ap
, struct ata_taskfile
*tf
)
211 DPRINTK("ata%u: cmd 0x%X\n", ap
->id
, tf
->command
);
213 outb(tf
->command
, ap
->ioaddr
.command_addr
);
219 * ata_exec_command_mmio - issue ATA command to host controller
220 * @ap: port to which command is being issued
221 * @tf: ATA taskfile register set
223 * Issues MMIO write to ATA command register, with proper
224 * synchronization with interrupt handler / other threads.
227 * spin_lock_irqsave(host_set lock)
230 static void ata_exec_command_mmio(struct ata_port
*ap
, struct ata_taskfile
*tf
)
232 DPRINTK("ata%u: cmd 0x%X\n", ap
->id
, tf
->command
);
234 writeb(tf
->command
, (void __iomem
*) ap
->ioaddr
.command_addr
);
238 void ata_exec_command(struct ata_port
*ap
, struct ata_taskfile
*tf
)
240 if (ap
->flags
& ATA_FLAG_MMIO
)
241 ata_exec_command_mmio(ap
, tf
);
243 ata_exec_command_pio(ap
, tf
);
247 * ata_exec - issue ATA command to host controller
248 * @ap: port to which command is being issued
249 * @tf: ATA taskfile register set
251 * Issues PIO/MMIO write to ATA command register, with proper
252 * synchronization with interrupt handler / other threads.
255 * Obtains host_set lock.
258 static inline void ata_exec(struct ata_port
*ap
, struct ata_taskfile
*tf
)
262 DPRINTK("ata%u: cmd 0x%X\n", ap
->id
, tf
->command
);
263 spin_lock_irqsave(&ap
->host_set
->lock
, flags
);
264 ap
->ops
->exec_command(ap
, tf
);
265 spin_unlock_irqrestore(&ap
->host_set
->lock
, flags
);
269 * ata_tf_to_host - issue ATA taskfile to host controller
270 * @ap: port to which command is being issued
271 * @tf: ATA taskfile register set
273 * Issues ATA taskfile register set to ATA host controller,
274 * with proper synchronization with interrupt handler and
278 * Obtains host_set lock.
281 static void ata_tf_to_host(struct ata_port
*ap
, struct ata_taskfile
*tf
)
283 ap
->ops
->tf_load(ap
, tf
);
289 * ata_tf_to_host_nolock - issue ATA taskfile to host controller
290 * @ap: port to which command is being issued
291 * @tf: ATA taskfile register set
293 * Issues ATA taskfile register set to ATA host controller,
294 * with proper synchronization with interrupt handler and
298 * spin_lock_irqsave(host_set lock)
301 void ata_tf_to_host_nolock(struct ata_port
*ap
, struct ata_taskfile
*tf
)
303 ap
->ops
->tf_load(ap
, tf
);
304 ap
->ops
->exec_command(ap
, tf
);
308 * ata_tf_read - input device's ATA taskfile shadow registers
309 * @ap: Port from which input is read
310 * @tf: ATA taskfile register set for storing input
312 * Reads ATA taskfile registers for currently-selected device
316 * Inherited from caller.
319 static void ata_tf_read_pio(struct ata_port
*ap
, struct ata_taskfile
*tf
)
321 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
323 tf
->nsect
= inb(ioaddr
->nsect_addr
);
324 tf
->lbal
= inb(ioaddr
->lbal_addr
);
325 tf
->lbam
= inb(ioaddr
->lbam_addr
);
326 tf
->lbah
= inb(ioaddr
->lbah_addr
);
327 tf
->device
= inb(ioaddr
->device_addr
);
329 if (tf
->flags
& ATA_TFLAG_LBA48
) {
330 outb(tf
->ctl
| ATA_HOB
, ioaddr
->ctl_addr
);
331 tf
->hob_feature
= inb(ioaddr
->error_addr
);
332 tf
->hob_nsect
= inb(ioaddr
->nsect_addr
);
333 tf
->hob_lbal
= inb(ioaddr
->lbal_addr
);
334 tf
->hob_lbam
= inb(ioaddr
->lbam_addr
);
335 tf
->hob_lbah
= inb(ioaddr
->lbah_addr
);
340 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
341 * @ap: Port from which input is read
342 * @tf: ATA taskfile register set for storing input
344 * Reads ATA taskfile registers for currently-selected device
348 * Inherited from caller.
351 static void ata_tf_read_mmio(struct ata_port
*ap
, struct ata_taskfile
*tf
)
353 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
355 tf
->nsect
= readb((void __iomem
*)ioaddr
->nsect_addr
);
356 tf
->lbal
= readb((void __iomem
*)ioaddr
->lbal_addr
);
357 tf
->lbam
= readb((void __iomem
*)ioaddr
->lbam_addr
);
358 tf
->lbah
= readb((void __iomem
*)ioaddr
->lbah_addr
);
359 tf
->device
= readb((void __iomem
*)ioaddr
->device_addr
);
361 if (tf
->flags
& ATA_TFLAG_LBA48
) {
362 writeb(tf
->ctl
| ATA_HOB
, (void __iomem
*) ap
->ioaddr
.ctl_addr
);
363 tf
->hob_feature
= readb((void __iomem
*)ioaddr
->error_addr
);
364 tf
->hob_nsect
= readb((void __iomem
*)ioaddr
->nsect_addr
);
365 tf
->hob_lbal
= readb((void __iomem
*)ioaddr
->lbal_addr
);
366 tf
->hob_lbam
= readb((void __iomem
*)ioaddr
->lbam_addr
);
367 tf
->hob_lbah
= readb((void __iomem
*)ioaddr
->lbah_addr
);
371 void ata_tf_read(struct ata_port
*ap
, struct ata_taskfile
*tf
)
373 if (ap
->flags
& ATA_FLAG_MMIO
)
374 ata_tf_read_mmio(ap
, tf
);
376 ata_tf_read_pio(ap
, tf
);
380 * ata_check_status_pio - Read device status reg & clear interrupt
381 * @ap: port where the device is
383 * Reads ATA taskfile status register for currently-selected device
384 * and return it's value. This also clears pending interrupts
388 * Inherited from caller.
390 static u8
ata_check_status_pio(struct ata_port
*ap
)
392 return inb(ap
->ioaddr
.status_addr
);
396 * ata_check_status_mmio - Read device status reg & clear interrupt
397 * @ap: port where the device is
399 * Reads ATA taskfile status register for currently-selected device
400 * via MMIO and return it's value. This also clears pending interrupts
404 * Inherited from caller.
406 static u8
ata_check_status_mmio(struct ata_port
*ap
)
408 return readb((void __iomem
*) ap
->ioaddr
.status_addr
);
411 u8
ata_check_status(struct ata_port
*ap
)
413 if (ap
->flags
& ATA_FLAG_MMIO
)
414 return ata_check_status_mmio(ap
);
415 return ata_check_status_pio(ap
);
418 u8
ata_altstatus(struct ata_port
*ap
)
420 if (ap
->ops
->check_altstatus
)
421 return ap
->ops
->check_altstatus(ap
);
423 if (ap
->flags
& ATA_FLAG_MMIO
)
424 return readb((void __iomem
*)ap
->ioaddr
.altstatus_addr
);
425 return inb(ap
->ioaddr
.altstatus_addr
);
428 u8
ata_chk_err(struct ata_port
*ap
)
430 if (ap
->ops
->check_err
)
431 return ap
->ops
->check_err(ap
);
433 if (ap
->flags
& ATA_FLAG_MMIO
) {
434 return readb((void __iomem
*) ap
->ioaddr
.error_addr
);
436 return inb(ap
->ioaddr
.error_addr
);
440 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
441 * @tf: Taskfile to convert
442 * @fis: Buffer into which data will output
443 * @pmp: Port multiplier port
445 * Converts a standard ATA taskfile to a Serial ATA
446 * FIS structure (Register - Host to Device).
449 * Inherited from caller.
452 void ata_tf_to_fis(struct ata_taskfile
*tf
, u8
*fis
, u8 pmp
)
454 fis
[0] = 0x27; /* Register - Host to Device FIS */
455 fis
[1] = (pmp
& 0xf) | (1 << 7); /* Port multiplier number,
456 bit 7 indicates Command FIS */
457 fis
[2] = tf
->command
;
458 fis
[3] = tf
->feature
;
465 fis
[8] = tf
->hob_lbal
;
466 fis
[9] = tf
->hob_lbam
;
467 fis
[10] = tf
->hob_lbah
;
468 fis
[11] = tf
->hob_feature
;
471 fis
[13] = tf
->hob_nsect
;
482 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
483 * @fis: Buffer from which data will be input
484 * @tf: Taskfile to output
486 * Converts a standard ATA taskfile to a Serial ATA
487 * FIS structure (Register - Host to Device).
490 * Inherited from caller.
493 void ata_tf_from_fis(u8
*fis
, struct ata_taskfile
*tf
)
495 tf
->command
= fis
[2]; /* status */
496 tf
->feature
= fis
[3]; /* error */
503 tf
->hob_lbal
= fis
[8];
504 tf
->hob_lbam
= fis
[9];
505 tf
->hob_lbah
= fis
[10];
508 tf
->hob_nsect
= fis
[13];
512 * ata_prot_to_cmd - determine which read/write opcodes to use
513 * @protocol: ATA_PROT_xxx taskfile protocol
514 * @lba48: true is lba48 is present
516 * Given necessary input, determine which read/write commands
517 * to use to transfer data.
522 static int ata_prot_to_cmd(int protocol
, int lba48
)
524 int rcmd
= 0, wcmd
= 0;
529 rcmd
= ATA_CMD_PIO_READ_EXT
;
530 wcmd
= ATA_CMD_PIO_WRITE_EXT
;
532 rcmd
= ATA_CMD_PIO_READ
;
533 wcmd
= ATA_CMD_PIO_WRITE
;
539 rcmd
= ATA_CMD_READ_EXT
;
540 wcmd
= ATA_CMD_WRITE_EXT
;
543 wcmd
= ATA_CMD_WRITE
;
551 return rcmd
| (wcmd
<< 8);
555 * ata_dev_set_protocol - set taskfile protocol and r/w commands
556 * @dev: device to examine and configure
558 * Examine the device configuration, after we have
559 * read the identify-device page and configured the
560 * data transfer mode. Set internal state related to
561 * the ATA taskfile protocol (pio, pio mult, dma, etc.)
562 * and calculate the proper read/write commands to use.
567 static void ata_dev_set_protocol(struct ata_device
*dev
)
569 int pio
= (dev
->flags
& ATA_DFLAG_PIO
);
570 int lba48
= (dev
->flags
& ATA_DFLAG_LBA48
);
574 proto
= dev
->xfer_protocol
= ATA_PROT_PIO
;
576 proto
= dev
->xfer_protocol
= ATA_PROT_DMA
;
578 cmd
= ata_prot_to_cmd(proto
, lba48
);
582 dev
->read_cmd
= cmd
& 0xff;
583 dev
->write_cmd
= (cmd
>> 8) & 0xff;
/* Human-readable names indexed by (xfer_mode - class base) + class shift;
 * ordering must match ATA_SHIFT_UDMA/MWDMA/PIO and xfer_mode_classes[]. */
static const char * xfer_mode_str[] = {
	"UDMA/16",
	"UDMA/25",
	"UDMA/33",
	"UDMA/44",
	"UDMA/66",
	"UDMA/100",
	"UDMA/133",
	"UDMA7",
	"MWDMA0",
	"MWDMA1",
	"MWDMA2",
	"PIO0",
	"PIO1",
	"PIO2",
	"PIO3",
	"PIO4",
};
606 * ata_udma_string - convert UDMA bit offset to string
607 * @mask: mask of bits supported; only highest bit counts.
609 * Determine string which represents the highest speed
610 * (highest bit in @udma_mask).
616 * Constant C string representing highest speed listed in
617 * @udma_mask, or the constant C string "<n/a>".
620 static const char *ata_mode_string(unsigned int mask
)
624 for (i
= 7; i
>= 0; i
--)
627 for (i
= ATA_SHIFT_MWDMA
+ 2; i
>= ATA_SHIFT_MWDMA
; i
--)
630 for (i
= ATA_SHIFT_PIO
+ 4; i
>= ATA_SHIFT_PIO
; i
--)
637 return xfer_mode_str
[i
];
641 * ata_pio_devchk - PATA device presence detection
642 * @ap: ATA channel to examine
643 * @device: Device to examine (starting at zero)
645 * This technique was originally described in
646 * Hale Landis's ATADRVR (www.ata-atapi.com), and
647 * later found its way into the ATA/ATAPI spec.
649 * Write a pattern to the ATA shadow registers,
650 * and if a device is present, it will respond by
651 * correctly storing and echoing back the
652 * ATA shadow register contents.
658 static unsigned int ata_pio_devchk(struct ata_port
*ap
,
661 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
664 ap
->ops
->dev_select(ap
, device
);
666 outb(0x55, ioaddr
->nsect_addr
);
667 outb(0xaa, ioaddr
->lbal_addr
);
669 outb(0xaa, ioaddr
->nsect_addr
);
670 outb(0x55, ioaddr
->lbal_addr
);
672 outb(0x55, ioaddr
->nsect_addr
);
673 outb(0xaa, ioaddr
->lbal_addr
);
675 nsect
= inb(ioaddr
->nsect_addr
);
676 lbal
= inb(ioaddr
->lbal_addr
);
678 if ((nsect
== 0x55) && (lbal
== 0xaa))
679 return 1; /* we found a device */
681 return 0; /* nothing found */
685 * ata_mmio_devchk - PATA device presence detection
686 * @ap: ATA channel to examine
687 * @device: Device to examine (starting at zero)
689 * This technique was originally described in
690 * Hale Landis's ATADRVR (www.ata-atapi.com), and
691 * later found its way into the ATA/ATAPI spec.
693 * Write a pattern to the ATA shadow registers,
694 * and if a device is present, it will respond by
695 * correctly storing and echoing back the
696 * ATA shadow register contents.
702 static unsigned int ata_mmio_devchk(struct ata_port
*ap
,
705 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
708 ap
->ops
->dev_select(ap
, device
);
710 writeb(0x55, (void __iomem
*) ioaddr
->nsect_addr
);
711 writeb(0xaa, (void __iomem
*) ioaddr
->lbal_addr
);
713 writeb(0xaa, (void __iomem
*) ioaddr
->nsect_addr
);
714 writeb(0x55, (void __iomem
*) ioaddr
->lbal_addr
);
716 writeb(0x55, (void __iomem
*) ioaddr
->nsect_addr
);
717 writeb(0xaa, (void __iomem
*) ioaddr
->lbal_addr
);
719 nsect
= readb((void __iomem
*) ioaddr
->nsect_addr
);
720 lbal
= readb((void __iomem
*) ioaddr
->lbal_addr
);
722 if ((nsect
== 0x55) && (lbal
== 0xaa))
723 return 1; /* we found a device */
725 return 0; /* nothing found */
729 * ata_devchk - PATA device presence detection
730 * @ap: ATA channel to examine
731 * @device: Device to examine (starting at zero)
733 * Dispatch ATA device presence detection, depending
734 * on whether we are using PIO or MMIO to talk to the
735 * ATA shadow registers.
741 static unsigned int ata_devchk(struct ata_port
*ap
,
744 if (ap
->flags
& ATA_FLAG_MMIO
)
745 return ata_mmio_devchk(ap
, device
);
746 return ata_pio_devchk(ap
, device
);
750 * ata_dev_classify - determine device type based on ATA-spec signature
751 * @tf: ATA taskfile register set for device to be identified
753 * Determine from taskfile register contents whether a device is
754 * ATA or ATAPI, as per "Signature and persistence" section
755 * of ATA/PI spec (volume 1, sect 5.14).
761 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
762 * the event of failure.
765 unsigned int ata_dev_classify(struct ata_taskfile
*tf
)
767 /* Apple's open source Darwin code hints that some devices only
768 * put a proper signature into the LBA mid/high registers,
769 * So, we only check those. It's sufficient for uniqueness.
772 if (((tf
->lbam
== 0) && (tf
->lbah
== 0)) ||
773 ((tf
->lbam
== 0x3c) && (tf
->lbah
== 0xc3))) {
774 DPRINTK("found ATA device by sig\n");
778 if (((tf
->lbam
== 0x14) && (tf
->lbah
== 0xeb)) ||
779 ((tf
->lbam
== 0x69) && (tf
->lbah
== 0x96))) {
780 DPRINTK("found ATAPI device by sig\n");
781 return ATA_DEV_ATAPI
;
784 DPRINTK("unknown device\n");
785 return ATA_DEV_UNKNOWN
;
789 * ata_dev_try_classify - Parse returned ATA device signature
790 * @ap: ATA channel to examine
791 * @device: Device to examine (starting at zero)
793 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
794 * an ATA/ATAPI-defined set of values is placed in the ATA
795 * shadow registers, indicating the results of device detection
798 * Select the ATA device, and read the values from the ATA shadow
799 * registers. Then parse according to the Error register value,
800 * and the spec-defined values examined by ata_dev_classify().
806 static u8
ata_dev_try_classify(struct ata_port
*ap
, unsigned int device
)
808 struct ata_device
*dev
= &ap
->device
[device
];
809 struct ata_taskfile tf
;
813 ap
->ops
->dev_select(ap
, device
);
815 memset(&tf
, 0, sizeof(tf
));
817 err
= ata_chk_err(ap
);
818 ap
->ops
->tf_read(ap
, &tf
);
820 dev
->class = ATA_DEV_NONE
;
822 /* see if device passed diags */
825 else if ((device
== 0) && (err
== 0x81))
830 /* determine if device if ATA or ATAPI */
831 class = ata_dev_classify(&tf
);
832 if (class == ATA_DEV_UNKNOWN
)
834 if ((class == ATA_DEV_ATA
) && (ata_chk_status(ap
) == 0))
843 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
844 * @id: IDENTIFY DEVICE results we will examine
845 * @s: string into which data is output
846 * @ofs: offset into identify device page
847 * @len: length of string to return. must be an even number.
849 * The strings in the IDENTIFY DEVICE page are broken up into
850 * 16-bit chunks. Run through the string, and output each
851 * 8-bit chunk linearly, regardless of platform.
857 void ata_dev_id_string(u16
*id
, unsigned char *s
,
858 unsigned int ofs
, unsigned int len
)
/* No-op device-select hook for controllers that do not need one. */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
881 * ata_std_dev_select - Select device 0/1 on ATA bus
882 * @ap: ATA channel to manipulate
883 * @device: ATA device (numbered from zero) to select
885 * Use the method defined in the ATA specification to
886 * make either device 0, or device 1, active on the
893 void ata_std_dev_select (struct ata_port
*ap
, unsigned int device
)
898 tmp
= ATA_DEVICE_OBS
;
900 tmp
= ATA_DEVICE_OBS
| ATA_DEV1
;
902 if (ap
->flags
& ATA_FLAG_MMIO
) {
903 writeb(tmp
, (void __iomem
*) ap
->ioaddr
.device_addr
);
905 outb(tmp
, ap
->ioaddr
.device_addr
);
907 ata_pause(ap
); /* needed; also flushes, for mmio */
911 * ata_dev_select - Select device 0/1 on ATA bus
912 * @ap: ATA channel to manipulate
913 * @device: ATA device (numbered from zero) to select
914 * @wait: non-zero to wait for Status register BSY bit to clear
915 * @can_sleep: non-zero if context allows sleeping
917 * Use the method defined in the ATA specification to
918 * make either device 0, or device 1, active on the
921 * This is a high-level version of ata_std_dev_select(),
922 * which additionally provides the services of inserting
923 * the proper pauses and status polling, where needed.
929 void ata_dev_select(struct ata_port
*ap
, unsigned int device
,
930 unsigned int wait
, unsigned int can_sleep
)
932 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
933 ap
->id
, device
, wait
);
938 ap
->ops
->dev_select(ap
, device
);
941 if (can_sleep
&& ap
->device
[device
].class == ATA_DEV_ATAPI
)
948 * ata_dump_id - IDENTIFY DEVICE info debugging output
949 * @dev: Device whose IDENTIFY DEVICE page we will dump
951 * Dump selected 16-bit words from a detected device's
952 * IDENTIFY PAGE page.
958 static inline void ata_dump_id(struct ata_device
*dev
)
960 DPRINTK("49==0x%04x "
970 DPRINTK("80==0x%04x "
980 DPRINTK("88==0x%04x "
987 * ata_dev_identify - obtain IDENTIFY x DEVICE page
988 * @ap: port on which device we wish to probe resides
989 * @device: device bus address, starting at zero
991 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
992 * command, and read back the 512-byte device information page.
993 * The device information page is fed to us via the standard
994 * PIO-IN protocol, but we hand-code it here. (TODO: investigate
995 * using standard PIO-IN paths)
997 * After reading the device information page, we use several
998 * bits of information from it to initialize data structures
999 * that will be used during the lifetime of the ata_device.
1000 * Other data from the info page is used to disqualify certain
1001 * older ATA devices we do not wish to support.
1004 * Inherited from caller. Some functions called by this function
1005 * obtain the host_set lock.
1008 static void ata_dev_identify(struct ata_port
*ap
, unsigned int device
)
1010 struct ata_device
*dev
= &ap
->device
[device
];
1013 unsigned long xfer_modes
;
1015 unsigned int using_edd
;
1016 DECLARE_COMPLETION(wait
);
1017 struct ata_queued_cmd
*qc
;
1018 unsigned long flags
;
1021 if (!ata_dev_present(dev
)) {
1022 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1027 if (ap
->flags
& (ATA_FLAG_SRST
| ATA_FLAG_SATA_RESET
))
1032 DPRINTK("ENTER, host %u, dev %u\n", ap
->id
, device
);
1034 assert (dev
->class == ATA_DEV_ATA
|| dev
->class == ATA_DEV_ATAPI
||
1035 dev
->class == ATA_DEV_NONE
);
1037 ata_dev_select(ap
, device
, 1, 1); /* select device 0/1 */
1039 qc
= ata_qc_new_init(ap
, dev
);
1042 ata_sg_init_one(qc
, dev
->id
, sizeof(dev
->id
));
1043 qc
->dma_dir
= DMA_FROM_DEVICE
;
1044 qc
->tf
.protocol
= ATA_PROT_PIO
;
1048 if (dev
->class == ATA_DEV_ATA
) {
1049 qc
->tf
.command
= ATA_CMD_ID_ATA
;
1050 DPRINTK("do ATA identify\n");
1052 qc
->tf
.command
= ATA_CMD_ID_ATAPI
;
1053 DPRINTK("do ATAPI identify\n");
1056 qc
->waiting
= &wait
;
1057 qc
->complete_fn
= ata_qc_complete_noop
;
1059 spin_lock_irqsave(&ap
->host_set
->lock
, flags
);
1060 rc
= ata_qc_issue(qc
);
1061 spin_unlock_irqrestore(&ap
->host_set
->lock
, flags
);
1066 wait_for_completion(&wait
);
1068 status
= ata_chk_status(ap
);
1069 if (status
& ATA_ERR
) {
1071 * arg! EDD works for all test cases, but seems to return
1072 * the ATA signature for some ATAPI devices. Until the
1073 * reason for this is found and fixed, we fix up the mess
1074 * here. If IDENTIFY DEVICE returns command aborted
1075 * (as ATAPI devices do), then we issue an
1076 * IDENTIFY PACKET DEVICE.
1078 * ATA software reset (SRST, the default) does not appear
1079 * to have this problem.
1081 if ((using_edd
) && (qc
->tf
.command
== ATA_CMD_ID_ATA
)) {
1082 u8 err
= ata_chk_err(ap
);
1083 if (err
& ATA_ABORTED
) {
1084 dev
->class = ATA_DEV_ATAPI
;
1095 swap_buf_le16(dev
->id
, ATA_ID_WORDS
);
1097 /* print device capabilities */
1098 printk(KERN_DEBUG
"ata%u: dev %u cfg "
1099 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1100 ap
->id
, device
, dev
->id
[49],
1101 dev
->id
[82], dev
->id
[83], dev
->id
[84],
1102 dev
->id
[85], dev
->id
[86], dev
->id
[87],
1106 * common ATA, ATAPI feature tests
1109 /* we require LBA and DMA support (bits 8 & 9 of word 49) */
1110 if (!ata_id_has_dma(dev
->id
) || !ata_id_has_lba(dev
->id
)) {
1111 printk(KERN_DEBUG
"ata%u: no dma/lba\n", ap
->id
);
1115 /* quick-n-dirty find max transfer mode; for printk only */
1116 xfer_modes
= dev
->id
[ATA_ID_UDMA_MODES
];
1118 xfer_modes
= (dev
->id
[ATA_ID_MWDMA_MODES
]) << ATA_SHIFT_MWDMA
;
1120 xfer_modes
= (dev
->id
[ATA_ID_PIO_MODES
]) << (ATA_SHIFT_PIO
+ 3);
1121 xfer_modes
|= (0x7 << ATA_SHIFT_PIO
);
1126 /* ATA-specific feature tests */
1127 if (dev
->class == ATA_DEV_ATA
) {
1128 if (!ata_id_is_ata(dev
->id
)) /* sanity check */
1131 tmp
= dev
->id
[ATA_ID_MAJOR_VER
];
1132 for (i
= 14; i
>= 1; i
--)
1136 /* we require at least ATA-3 */
1138 printk(KERN_DEBUG
"ata%u: no ATA-3\n", ap
->id
);
1142 if (ata_id_has_lba48(dev
->id
)) {
1143 dev
->flags
|= ATA_DFLAG_LBA48
;
1144 dev
->n_sectors
= ata_id_u64(dev
->id
, 100);
1146 dev
->n_sectors
= ata_id_u32(dev
->id
, 60);
1149 ap
->host
->max_cmd_len
= 16;
1151 /* print device info to dmesg */
1152 printk(KERN_INFO
"ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
1154 ata_mode_string(xfer_modes
),
1155 (unsigned long long)dev
->n_sectors
,
1156 dev
->flags
& ATA_DFLAG_LBA48
? " lba48" : "");
1159 /* ATAPI-specific feature tests */
1161 if (ata_id_is_ata(dev
->id
)) /* sanity check */
1164 rc
= atapi_cdb_len(dev
->id
);
1165 if ((rc
< 12) || (rc
> ATAPI_CDB_LEN
)) {
1166 printk(KERN_WARNING
"ata%u: unsupported CDB len\n", ap
->id
);
1169 ap
->cdb_len
= (unsigned int) rc
;
1170 ap
->host
->max_cmd_len
= (unsigned char) ap
->cdb_len
;
1172 /* print device info to dmesg */
1173 printk(KERN_INFO
"ata%u: dev %u ATAPI, max %s\n",
1175 ata_mode_string(xfer_modes
));
1178 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap
));
1182 printk(KERN_WARNING
"ata%u: dev %u not supported, ignoring\n",
1185 dev
->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1186 DPRINTK("EXIT, err\n");
1190 * ata_bus_probe - Reset and probe ATA bus
1193 * Master ATA bus probing function. Initiates a hardware-dependent
1194 * bus reset, then attempts to identify any devices found on
1198 * PCI/etc. bus probe sem.
1201 * Zero on success, non-zero on error.
1204 static int ata_bus_probe(struct ata_port
*ap
)
1206 unsigned int i
, found
= 0;
1208 ap
->ops
->phy_reset(ap
);
1209 if (ap
->flags
& ATA_FLAG_PORT_DISABLED
)
1212 for (i
= 0; i
< ATA_MAX_DEVICES
; i
++) {
1213 ata_dev_identify(ap
, i
);
1214 if (ata_dev_present(&ap
->device
[i
])) {
1216 if (ap
->ops
->dev_config
)
1217 ap
->ops
->dev_config(ap
, &ap
->device
[i
]);
1221 if ((!found
) || (ap
->flags
& ATA_FLAG_PORT_DISABLED
))
1222 goto err_out_disable
;
1225 if (ap
->flags
& ATA_FLAG_PORT_DISABLED
)
1226 goto err_out_disable
;
1231 ap
->ops
->port_disable(ap
);
1237 * ata_port_probe - Mark port as enabled
1238 * @ap: Port for which we indicate enablement
1240 * Modify @ap data structure such that the system
1241 * thinks that the entire port is enabled.
1243 * LOCKING: host_set lock, or some other form of
1247 void ata_port_probe(struct ata_port
*ap
)
1249 ap
->flags
&= ~ATA_FLAG_PORT_DISABLED
;
1253 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1254 * @ap: SATA port associated with target SATA PHY.
1256 * This function issues commands to standard SATA Sxxx
1257 * PHY registers, to wake up the phy (and device), and
1258 * clear any reset condition.
1261 * PCI/etc. bus probe sem.
1264 void __sata_phy_reset(struct ata_port
*ap
)
1267 unsigned long timeout
= jiffies
+ (HZ
* 5);
1269 if (ap
->flags
& ATA_FLAG_SATA_RESET
) {
1270 /* issue phy wake/reset */
1271 scr_write_flush(ap
, SCR_CONTROL
, 0x301);
1272 udelay(400); /* FIXME: a guess */
1274 scr_write_flush(ap
, SCR_CONTROL
, 0x300); /* phy wake/clear reset */
1276 /* wait for phy to become ready, if necessary */
1279 sstatus
= scr_read(ap
, SCR_STATUS
);
1280 if ((sstatus
& 0xf) != 1)
1282 } while (time_before(jiffies
, timeout
));
1284 /* TODO: phy layer with polling, timeouts, etc. */
1285 if (sata_dev_present(ap
))
1288 sstatus
= scr_read(ap
, SCR_STATUS
);
1289 printk(KERN_INFO
"ata%u: no device found (phy stat %08x)\n",
1291 ata_port_disable(ap
);
1294 if (ap
->flags
& ATA_FLAG_PORT_DISABLED
)
1297 if (ata_busy_sleep(ap
, ATA_TMOUT_BOOT_QUICK
, ATA_TMOUT_BOOT
)) {
1298 ata_port_disable(ap
);
1302 ap
->cbl
= ATA_CBL_SATA
;
1306 * sata_phy_reset - Reset SATA bus.
1307 * @ap: SATA port associated with target SATA PHY.
1309 * This function resets the SATA bus, and then probes
1310 * the bus for devices.
1313 * PCI/etc. bus probe sem.
1316 void sata_phy_reset(struct ata_port
*ap
)
1318 __sata_phy_reset(ap
);
1319 if (ap
->flags
& ATA_FLAG_PORT_DISABLED
)
1325 * ata_port_disable - Disable port.
1326 * @ap: Port to be disabled.
1328 * Modify @ap data structure such that the system
1329 * thinks that the entire port is disabled, and should
1330 * never attempt to probe or communicate with devices
1333 * LOCKING: host_set lock, or some other form of
1337 void ata_port_disable(struct ata_port
*ap
)
1339 ap
->device
[0].class = ATA_DEV_NONE
;
1340 ap
->device
[1].class = ATA_DEV_NONE
;
1341 ap
->flags
|= ATA_FLAG_PORT_DISABLED
;
1347 } xfer_mode_classes
[] = {
1348 { ATA_SHIFT_UDMA
, XFER_UDMA_0
},
1349 { ATA_SHIFT_MWDMA
, XFER_MW_DMA_0
},
1350 { ATA_SHIFT_PIO
, XFER_PIO_0
},
1353 static inline u8
base_from_shift(unsigned int shift
)
1357 for (i
= 0; i
< ARRAY_SIZE(xfer_mode_classes
); i
++)
1358 if (xfer_mode_classes
[i
].shift
== shift
)
1359 return xfer_mode_classes
[i
].base
;
1364 static void ata_dev_set_mode(struct ata_port
*ap
, struct ata_device
*dev
)
1369 if (!ata_dev_present(dev
) || (ap
->flags
& ATA_FLAG_PORT_DISABLED
))
1372 if (dev
->xfer_shift
== ATA_SHIFT_PIO
)
1373 dev
->flags
|= ATA_DFLAG_PIO
;
1375 ata_dev_set_xfermode(ap
, dev
);
1377 base
= base_from_shift(dev
->xfer_shift
);
1378 ofs
= dev
->xfer_mode
- base
;
1379 idx
= ofs
+ dev
->xfer_shift
;
1380 WARN_ON(idx
>= ARRAY_SIZE(xfer_mode_str
));
1382 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
1383 idx
, dev
->xfer_shift
, (int)dev
->xfer_mode
, (int)base
, ofs
);
1385 printk(KERN_INFO
"ata%u: dev %u configured for %s\n",
1386 ap
->id
, dev
->devno
, xfer_mode_str
[idx
]);
1389 static int ata_host_set_pio(struct ata_port
*ap
)
1395 mask
= ata_get_mode_mask(ap
, ATA_SHIFT_PIO
);
1398 printk(KERN_WARNING
"ata%u: no PIO support\n", ap
->id
);
1402 base
= base_from_shift(ATA_SHIFT_PIO
);
1403 xfer_mode
= base
+ x
;
1405 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1406 (int)base
, (int)xfer_mode
, mask
, x
);
1408 for (i
= 0; i
< ATA_MAX_DEVICES
; i
++) {
1409 struct ata_device
*dev
= &ap
->device
[i
];
1410 if (ata_dev_present(dev
)) {
1411 dev
->pio_mode
= xfer_mode
;
1412 dev
->xfer_mode
= xfer_mode
;
1413 dev
->xfer_shift
= ATA_SHIFT_PIO
;
1414 if (ap
->ops
->set_piomode
)
1415 ap
->ops
->set_piomode(ap
, dev
);
1422 static void ata_host_set_dma(struct ata_port
*ap
, u8 xfer_mode
,
1423 unsigned int xfer_shift
)
1427 for (i
= 0; i
< ATA_MAX_DEVICES
; i
++) {
1428 struct ata_device
*dev
= &ap
->device
[i
];
1429 if (ata_dev_present(dev
)) {
1430 dev
->dma_mode
= xfer_mode
;
1431 dev
->xfer_mode
= xfer_mode
;
1432 dev
->xfer_shift
= xfer_shift
;
1433 if (ap
->ops
->set_dmamode
)
1434 ap
->ops
->set_dmamode(ap
, dev
);
1440 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1441 * @ap: port on which timings will be programmed
1443 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1446 * PCI/etc. bus probe sem.
1449 static void ata_set_mode(struct ata_port
*ap
)
1451 unsigned int i
, xfer_shift
;
1455 /* step 1: always set host PIO timings */
1456 rc
= ata_host_set_pio(ap
);
1460 /* step 2: choose the best data xfer mode */
1461 xfer_mode
= xfer_shift
= 0;
1462 rc
= ata_choose_xfer_mode(ap
, &xfer_mode
, &xfer_shift
);
1466 /* step 3: if that xfer mode isn't PIO, set host DMA timings */
1467 if (xfer_shift
!= ATA_SHIFT_PIO
)
1468 ata_host_set_dma(ap
, xfer_mode
, xfer_shift
);
1470 /* step 4: update devices' xfer mode */
1471 ata_dev_set_mode(ap
, &ap
->device
[0]);
1472 ata_dev_set_mode(ap
, &ap
->device
[1]);
1474 if (ap
->flags
& ATA_FLAG_PORT_DISABLED
)
1477 if (ap
->ops
->post_set_mode
)
1478 ap
->ops
->post_set_mode(ap
);
1480 for (i
= 0; i
< 2; i
++) {
1481 struct ata_device
*dev
= &ap
->device
[i
];
1482 ata_dev_set_protocol(dev
);
1488 ata_port_disable(ap
);
1492 * ata_busy_sleep - sleep until BSY clears, or timeout
1493 * @ap: port containing status register to be polled
1494 * @tmout_pat: impatience timeout
1495 * @tmout: overall timeout
1497 * Sleep until ATA Status register bit BSY clears,
1498 * or a timeout occurs.
1504 static unsigned int ata_busy_sleep (struct ata_port
*ap
,
1505 unsigned long tmout_pat
,
1506 unsigned long tmout
)
1508 unsigned long timer_start
, timeout
;
1511 status
= ata_busy_wait(ap
, ATA_BUSY
, 300);
1512 timer_start
= jiffies
;
1513 timeout
= timer_start
+ tmout_pat
;
1514 while ((status
& ATA_BUSY
) && (time_before(jiffies
, timeout
))) {
1516 status
= ata_busy_wait(ap
, ATA_BUSY
, 3);
1519 if (status
& ATA_BUSY
)
1520 printk(KERN_WARNING
"ata%u is slow to respond, "
1521 "please be patient\n", ap
->id
);
1523 timeout
= timer_start
+ tmout
;
1524 while ((status
& ATA_BUSY
) && (time_before(jiffies
, timeout
))) {
1526 status
= ata_chk_status(ap
);
1529 if (status
& ATA_BUSY
) {
1530 printk(KERN_ERR
"ata%u failed to respond (%lu secs)\n",
1531 ap
->id
, tmout
/ HZ
);
1538 static void ata_bus_post_reset(struct ata_port
*ap
, unsigned int devmask
)
1540 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
1541 unsigned int dev0
= devmask
& (1 << 0);
1542 unsigned int dev1
= devmask
& (1 << 1);
1543 unsigned long timeout
;
1545 /* if device 0 was found in ata_devchk, wait for its
1549 ata_busy_sleep(ap
, ATA_TMOUT_BOOT_QUICK
, ATA_TMOUT_BOOT
);
1551 /* if device 1 was found in ata_devchk, wait for
1552 * register access, then wait for BSY to clear
1554 timeout
= jiffies
+ ATA_TMOUT_BOOT
;
1558 ap
->ops
->dev_select(ap
, 1);
1559 if (ap
->flags
& ATA_FLAG_MMIO
) {
1560 nsect
= readb((void __iomem
*) ioaddr
->nsect_addr
);
1561 lbal
= readb((void __iomem
*) ioaddr
->lbal_addr
);
1563 nsect
= inb(ioaddr
->nsect_addr
);
1564 lbal
= inb(ioaddr
->lbal_addr
);
1566 if ((nsect
== 1) && (lbal
== 1))
1568 if (time_after(jiffies
, timeout
)) {
1572 msleep(50); /* give drive a breather */
1575 ata_busy_sleep(ap
, ATA_TMOUT_BOOT_QUICK
, ATA_TMOUT_BOOT
);
1577 /* is all this really necessary? */
1578 ap
->ops
->dev_select(ap
, 0);
1580 ap
->ops
->dev_select(ap
, 1);
1582 ap
->ops
->dev_select(ap
, 0);
1586 * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
1587 * @ap: Port to reset and probe
1589 * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
1590 * probe the bus. Not often used these days.
1593 * PCI/etc. bus probe sem.
1597 static unsigned int ata_bus_edd(struct ata_port
*ap
)
1599 struct ata_taskfile tf
;
1601 /* set up execute-device-diag (bus reset) taskfile */
1602 /* also, take interrupts to a known state (disabled) */
1603 DPRINTK("execute-device-diag\n");
1604 ata_tf_init(ap
, &tf
, 0);
1606 tf
.command
= ATA_CMD_EDD
;
1607 tf
.protocol
= ATA_PROT_NODATA
;
1610 ata_tf_to_host(ap
, &tf
);
1612 /* spec says at least 2ms. but who knows with those
1613 * crazy ATAPI devices...
1617 return ata_busy_sleep(ap
, ATA_TMOUT_BOOT_QUICK
, ATA_TMOUT_BOOT
);
1620 static unsigned int ata_bus_softreset(struct ata_port
*ap
,
1621 unsigned int devmask
)
1623 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
1625 DPRINTK("ata%u: bus reset via SRST\n", ap
->id
);
1627 /* software reset. causes dev0 to be selected */
1628 if (ap
->flags
& ATA_FLAG_MMIO
) {
1629 writeb(ap
->ctl
, (void __iomem
*) ioaddr
->ctl_addr
);
1630 udelay(20); /* FIXME: flush */
1631 writeb(ap
->ctl
| ATA_SRST
, (void __iomem
*) ioaddr
->ctl_addr
);
1632 udelay(20); /* FIXME: flush */
1633 writeb(ap
->ctl
, (void __iomem
*) ioaddr
->ctl_addr
);
1635 outb(ap
->ctl
, ioaddr
->ctl_addr
);
1637 outb(ap
->ctl
| ATA_SRST
, ioaddr
->ctl_addr
);
1639 outb(ap
->ctl
, ioaddr
->ctl_addr
);
1642 /* spec mandates ">= 2ms" before checking status.
1643 * We wait 150ms, because that was the magic delay used for
1644 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
1645 * between when the ATA command register is written, and then
1646 * status is checked. Because waiting for "a while" before
1647 * checking status is fine, post SRST, we perform this magic
1648 * delay here as well.
1652 ata_bus_post_reset(ap
, devmask
);
1658 * ata_bus_reset - reset host port and associated ATA channel
1659 * @ap: port to reset
1661 * This is typically the first time we actually start issuing
1662 * commands to the ATA channel. We wait for BSY to clear, then
1663 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
1664 * result. Determine what devices, if any, are on the channel
1665 * by looking at the device 0/1 error register. Look at the signature
1666 * stored in each device's taskfile registers, to determine if
1667 * the device is ATA or ATAPI.
1670 * PCI/etc. bus probe sem.
1671 * Obtains host_set lock.
1674 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
1677 void ata_bus_reset(struct ata_port
*ap
)
1679 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
1680 unsigned int slave_possible
= ap
->flags
& ATA_FLAG_SLAVE_POSS
;
1682 unsigned int dev0
, dev1
= 0, rc
= 0, devmask
= 0;
1684 DPRINTK("ENTER, host %u, port %u\n", ap
->id
, ap
->port_no
);
1686 /* determine if device 0/1 are present */
1687 if (ap
->flags
& ATA_FLAG_SATA_RESET
)
1690 dev0
= ata_devchk(ap
, 0);
1692 dev1
= ata_devchk(ap
, 1);
1696 devmask
|= (1 << 0);
1698 devmask
|= (1 << 1);
1700 /* select device 0 again */
1701 ap
->ops
->dev_select(ap
, 0);
1703 /* issue bus reset */
1704 if (ap
->flags
& ATA_FLAG_SRST
)
1705 rc
= ata_bus_softreset(ap
, devmask
);
1706 else if ((ap
->flags
& ATA_FLAG_SATA_RESET
) == 0) {
1707 /* set up device control */
1708 if (ap
->flags
& ATA_FLAG_MMIO
)
1709 writeb(ap
->ctl
, (void __iomem
*) ioaddr
->ctl_addr
);
1711 outb(ap
->ctl
, ioaddr
->ctl_addr
);
1712 rc
= ata_bus_edd(ap
);
1719 * determine by signature whether we have ATA or ATAPI devices
1721 err
= ata_dev_try_classify(ap
, 0);
1722 if ((slave_possible
) && (err
!= 0x81))
1723 ata_dev_try_classify(ap
, 1);
1725 /* re-enable interrupts */
1726 if (ap
->ioaddr
.ctl_addr
) /* FIXME: hack. create a hook instead */
1729 /* is double-select really necessary? */
1730 if (ap
->device
[1].class != ATA_DEV_NONE
)
1731 ap
->ops
->dev_select(ap
, 1);
1732 if (ap
->device
[0].class != ATA_DEV_NONE
)
1733 ap
->ops
->dev_select(ap
, 0);
1735 /* if no devices were detected, disable this port */
1736 if ((ap
->device
[0].class == ATA_DEV_NONE
) &&
1737 (ap
->device
[1].class == ATA_DEV_NONE
))
1740 if (ap
->flags
& (ATA_FLAG_SATA_RESET
| ATA_FLAG_SRST
)) {
1741 /* set up device control for ATA_FLAG_SATA_RESET */
1742 if (ap
->flags
& ATA_FLAG_MMIO
)
1743 writeb(ap
->ctl
, (void __iomem
*) ioaddr
->ctl_addr
);
1745 outb(ap
->ctl
, ioaddr
->ctl_addr
);
1752 printk(KERN_ERR
"ata%u: disabling port\n", ap
->id
);
1753 ap
->ops
->port_disable(ap
);
1758 static void ata_pr_blacklisted(struct ata_port
*ap
, struct ata_device
*dev
)
1760 printk(KERN_WARNING
"ata%u: dev %u is on DMA blacklist, disabling DMA\n",
1761 ap
->id
, dev
->devno
);
/*
 * Model strings of devices known to misbehave with DMA enabled.
 * NOTE(review): the extraction of this file appears truncated -- the
 * upstream table of this vintage contains additional entries; only the
 * entries visible here are reproduced.  Verify against upstream
 * libata-core.c before relying on this list.
 */
static const char * ata_dma_blacklist [] = {
	"Toshiba CD-ROM XM-6202B",
	"E-IDE CD-ROM CR-840",
	"SAMSUNG CD-ROM SC-148C",
	"SAMSUNG CD-ROM SC",
	"SAMSUNG CD-ROM SN-124",
	"ATAPI CD-ROM DRIVE 40X MAXIMUM",
};
1796 static int ata_dma_blacklisted(struct ata_port
*ap
, struct ata_device
*dev
)
1798 unsigned char model_num
[40];
1803 ata_dev_id_string(dev
->id
, model_num
, ATA_ID_PROD_OFS
,
1806 len
= strnlen(s
, sizeof(model_num
));
1808 /* ATAPI specifies that empty space is blank-filled; remove blanks */
1809 while ((len
> 0) && (s
[len
- 1] == ' ')) {
1814 for (i
= 0; i
< ARRAY_SIZE(ata_dma_blacklist
); i
++)
1815 if (!strncmp(ata_dma_blacklist
[i
], s
, len
))
1821 static unsigned int ata_get_mode_mask(struct ata_port
*ap
, int shift
)
1823 struct ata_device
*master
, *slave
;
1826 master
= &ap
->device
[0];
1827 slave
= &ap
->device
[1];
1829 assert (ata_dev_present(master
) || ata_dev_present(slave
));
1831 if (shift
== ATA_SHIFT_UDMA
) {
1832 mask
= ap
->udma_mask
;
1833 if (ata_dev_present(master
)) {
1834 mask
&= (master
->id
[ATA_ID_UDMA_MODES
] & 0xff);
1835 if (ata_dma_blacklisted(ap
, master
)) {
1837 ata_pr_blacklisted(ap
, master
);
1840 if (ata_dev_present(slave
)) {
1841 mask
&= (slave
->id
[ATA_ID_UDMA_MODES
] & 0xff);
1842 if (ata_dma_blacklisted(ap
, slave
)) {
1844 ata_pr_blacklisted(ap
, slave
);
1848 else if (shift
== ATA_SHIFT_MWDMA
) {
1849 mask
= ap
->mwdma_mask
;
1850 if (ata_dev_present(master
)) {
1851 mask
&= (master
->id
[ATA_ID_MWDMA_MODES
] & 0x07);
1852 if (ata_dma_blacklisted(ap
, master
)) {
1854 ata_pr_blacklisted(ap
, master
);
1857 if (ata_dev_present(slave
)) {
1858 mask
&= (slave
->id
[ATA_ID_MWDMA_MODES
] & 0x07);
1859 if (ata_dma_blacklisted(ap
, slave
)) {
1861 ata_pr_blacklisted(ap
, slave
);
1865 else if (shift
== ATA_SHIFT_PIO
) {
1866 mask
= ap
->pio_mask
;
1867 if (ata_dev_present(master
)) {
1868 /* spec doesn't return explicit support for
1869 * PIO0-2, so we fake it
1871 u16 tmp_mode
= master
->id
[ATA_ID_PIO_MODES
] & 0x03;
1876 if (ata_dev_present(slave
)) {
1877 /* spec doesn't return explicit support for
1878 * PIO0-2, so we fake it
1880 u16 tmp_mode
= slave
->id
[ATA_ID_PIO_MODES
] & 0x03;
1887 mask
= 0xffffffff; /* shut up compiler warning */
1894 /* find greatest bit */
1895 static int fgb(u32 bitmap
)
1900 for (i
= 0; i
< 32; i
++)
1901 if (bitmap
& (1 << i
))
1908 * ata_choose_xfer_mode - attempt to find best transfer mode
1909 * @ap: Port for which an xfer mode will be selected
1910 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
1911 * @xfer_shift_out: (output) bit shift that selects this mode
1913 * Based on host and device capabilities, determine the
1914 * maximum transfer mode that is amenable to all.
1917 * PCI/etc. bus probe sem.
1920 * Zero on success, negative on error.
1923 static int ata_choose_xfer_mode(struct ata_port
*ap
,
1925 unsigned int *xfer_shift_out
)
1927 unsigned int mask
, shift
;
1930 for (i
= 0; i
< ARRAY_SIZE(xfer_mode_classes
); i
++) {
1931 shift
= xfer_mode_classes
[i
].shift
;
1932 mask
= ata_get_mode_mask(ap
, shift
);
1936 *xfer_mode_out
= xfer_mode_classes
[i
].base
+ x
;
1937 *xfer_shift_out
= shift
;
1946 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1947 * @ap: Port associated with device @dev
1948 * @dev: Device to which command will be sent
1950 * Issue SET FEATURES - XFER MODE command to device @dev
1954 * PCI/etc. bus probe sem.
1957 static void ata_dev_set_xfermode(struct ata_port
*ap
, struct ata_device
*dev
)
1959 DECLARE_COMPLETION(wait
);
1960 struct ata_queued_cmd
*qc
;
1962 unsigned long flags
;
1964 /* set up set-features taskfile */
1965 DPRINTK("set features - xfer mode\n");
1967 qc
= ata_qc_new_init(ap
, dev
);
1970 qc
->tf
.command
= ATA_CMD_SET_FEATURES
;
1971 qc
->tf
.feature
= SETFEATURES_XFER
;
1972 qc
->tf
.flags
|= ATA_TFLAG_ISADDR
| ATA_TFLAG_DEVICE
;
1973 qc
->tf
.protocol
= ATA_PROT_NODATA
;
1974 qc
->tf
.nsect
= dev
->xfer_mode
;
1976 qc
->waiting
= &wait
;
1977 qc
->complete_fn
= ata_qc_complete_noop
;
1979 spin_lock_irqsave(&ap
->host_set
->lock
, flags
);
1980 rc
= ata_qc_issue(qc
);
1981 spin_unlock_irqrestore(&ap
->host_set
->lock
, flags
);
1984 ata_port_disable(ap
);
1986 wait_for_completion(&wait
);
1992 * ata_sg_clean - Unmap DMA memory associated with command
1993 * @qc: Command containing DMA memory to be released
1995 * Unmap all mapped DMA memory associated with this command.
1998 * spin_lock_irqsave(host_set lock)
2001 static void ata_sg_clean(struct ata_queued_cmd
*qc
)
2003 struct ata_port
*ap
= qc
->ap
;
2004 struct scatterlist
*sg
= qc
->sg
;
2005 int dir
= qc
->dma_dir
;
2007 assert(qc
->flags
& ATA_QCFLAG_DMAMAP
);
2010 if (qc
->flags
& ATA_QCFLAG_SINGLE
)
2011 assert(qc
->n_elem
== 1);
2013 DPRINTK("unmapping %u sg elements\n", qc
->n_elem
);
2015 if (qc
->flags
& ATA_QCFLAG_SG
)
2016 dma_unmap_sg(ap
->host_set
->dev
, sg
, qc
->n_elem
, dir
);
2018 dma_unmap_single(ap
->host_set
->dev
, sg_dma_address(&sg
[0]),
2019 sg_dma_len(&sg
[0]), dir
);
2021 qc
->flags
&= ~ATA_QCFLAG_DMAMAP
;
2026 * ata_fill_sg - Fill PCI IDE PRD table
2027 * @qc: Metadata associated with taskfile to be transferred
2029 * Fill PCI IDE PRD (scatter-gather) table with segments
2030 * associated with the current disk command.
2033 * spin_lock_irqsave(host_set lock)
2036 static void ata_fill_sg(struct ata_queued_cmd
*qc
)
2038 struct scatterlist
*sg
= qc
->sg
;
2039 struct ata_port
*ap
= qc
->ap
;
2040 unsigned int idx
, nelem
;
2043 assert(qc
->n_elem
> 0);
2046 for (nelem
= qc
->n_elem
; nelem
; nelem
--,sg
++) {
2050 /* determine if physical DMA addr spans 64K boundary.
2051 * Note h/w doesn't support 64-bit, so we unconditionally
2052 * truncate dma_addr_t to u32.
2054 addr
= (u32
) sg_dma_address(sg
);
2055 sg_len
= sg_dma_len(sg
);
2058 offset
= addr
& 0xffff;
2060 if ((offset
+ sg_len
) > 0x10000)
2061 len
= 0x10000 - offset
;
2063 ap
->prd
[idx
].addr
= cpu_to_le32(addr
);
2064 ap
->prd
[idx
].flags_len
= cpu_to_le32(len
& 0xffff);
2065 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx
, addr
, len
);
2074 ap
->prd
[idx
- 1].flags_len
|= cpu_to_le32(ATA_PRD_EOT
);
2077 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2078 * @qc: Metadata associated with taskfile to check
2080 * Allow low-level driver to filter ATA PACKET commands, returning
2081 * a status indicating whether or not it is OK to use DMA for the
2082 * supplied PACKET command.
2085 * spin_lock_irqsave(host_set lock)
2087 * RETURNS: 0 when ATAPI DMA can be used
2090 int ata_check_atapi_dma(struct ata_queued_cmd
*qc
)
2092 struct ata_port
*ap
= qc
->ap
;
2093 int rc
= 0; /* Assume ATAPI DMA is OK by default */
2095 if (ap
->ops
->check_atapi_dma
)
2096 rc
= ap
->ops
->check_atapi_dma(qc
);
2101 * ata_qc_prep - Prepare taskfile for submission
2102 * @qc: Metadata associated with taskfile to be prepared
2104 * Prepare ATA taskfile for submission.
2107 * spin_lock_irqsave(host_set lock)
2109 void ata_qc_prep(struct ata_queued_cmd
*qc
)
2111 if (!(qc
->flags
& ATA_QCFLAG_DMAMAP
))
2118 * ata_sg_init_one - Associate command with memory buffer
2119 * @qc: Command to be associated
2120 * @buf: Memory buffer
2121 * @buflen: Length of memory buffer, in bytes.
2123 * Initialize the data-related elements of queued_cmd @qc
2124 * to point to a single memory buffer, @buf of byte length @buflen.
2127 * spin_lock_irqsave(host_set lock)
2130 void ata_sg_init_one(struct ata_queued_cmd
*qc
, void *buf
, unsigned int buflen
)
2132 struct scatterlist
*sg
;
2134 qc
->flags
|= ATA_QCFLAG_SINGLE
;
2136 memset(&qc
->sgent
, 0, sizeof(qc
->sgent
));
2137 qc
->sg
= &qc
->sgent
;
2142 sg
->page
= virt_to_page(buf
);
2143 sg
->offset
= (unsigned long) buf
& ~PAGE_MASK
;
2144 sg
->length
= buflen
;
2148 * ata_sg_init - Associate command with scatter-gather table.
2149 * @qc: Command to be associated
2150 * @sg: Scatter-gather table.
2151 * @n_elem: Number of elements in s/g table.
2153 * Initialize the data-related elements of queued_cmd @qc
2154 * to point to a scatter-gather table @sg, containing @n_elem
2158 * spin_lock_irqsave(host_set lock)
2161 void ata_sg_init(struct ata_queued_cmd
*qc
, struct scatterlist
*sg
,
2162 unsigned int n_elem
)
2164 qc
->flags
|= ATA_QCFLAG_SG
;
2166 qc
->n_elem
= n_elem
;
2170 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2171 * @qc: Command with memory buffer to be mapped.
2173 * DMA-map the memory buffer associated with queued_cmd @qc.
2176 * spin_lock_irqsave(host_set lock)
2179 * Zero on success, negative on error.
2182 static int ata_sg_setup_one(struct ata_queued_cmd
*qc
)
2184 struct ata_port
*ap
= qc
->ap
;
2185 int dir
= qc
->dma_dir
;
2186 struct scatterlist
*sg
= qc
->sg
;
2187 dma_addr_t dma_address
;
2189 dma_address
= dma_map_single(ap
->host_set
->dev
, qc
->buf_virt
,
2191 if (dma_mapping_error(dma_address
))
2194 sg_dma_address(sg
) = dma_address
;
2195 sg_dma_len(sg
) = sg
->length
;
2197 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg
),
2198 qc
->tf
.flags
& ATA_TFLAG_WRITE
? "write" : "read");
2204 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
2205 * @qc: Command with scatter-gather table to be mapped.
2207 * DMA-map the scatter-gather table associated with queued_cmd @qc.
2210 * spin_lock_irqsave(host_set lock)
2213 * Zero on success, negative on error.
2217 static int ata_sg_setup(struct ata_queued_cmd
*qc
)
2219 struct ata_port
*ap
= qc
->ap
;
2220 struct scatterlist
*sg
= qc
->sg
;
2223 VPRINTK("ENTER, ata%u\n", ap
->id
);
2224 assert(qc
->flags
& ATA_QCFLAG_SG
);
2227 n_elem
= dma_map_sg(ap
->host_set
->dev
, sg
, qc
->n_elem
, dir
);
2231 DPRINTK("%d sg elements mapped\n", n_elem
);
2233 qc
->n_elem
= n_elem
;
2243 * None. (executing in kernel thread context)
2249 static unsigned long ata_pio_poll(struct ata_port
*ap
)
2252 unsigned int poll_state
= PIO_ST_UNKNOWN
;
2253 unsigned int reg_state
= PIO_ST_UNKNOWN
;
2254 const unsigned int tmout_state
= PIO_ST_TMOUT
;
2256 switch (ap
->pio_task_state
) {
2259 poll_state
= PIO_ST_POLL
;
2263 case PIO_ST_LAST_POLL
:
2264 poll_state
= PIO_ST_LAST_POLL
;
2265 reg_state
= PIO_ST_LAST
;
2272 status
= ata_chk_status(ap
);
2273 if (status
& ATA_BUSY
) {
2274 if (time_after(jiffies
, ap
->pio_task_timeout
)) {
2275 ap
->pio_task_state
= tmout_state
;
2278 ap
->pio_task_state
= poll_state
;
2279 return ATA_SHORT_PAUSE
;
2282 ap
->pio_task_state
= reg_state
;
2287 * ata_pio_complete -
2291 * None. (executing in kernel thread context)
2294 static void ata_pio_complete (struct ata_port
*ap
)
2296 struct ata_queued_cmd
*qc
;
2300 * This is purely hueristic. This is a fast path.
2301 * Sometimes when we enter, BSY will be cleared in
2302 * a chk-status or two. If not, the drive is probably seeking
2303 * or something. Snooze for a couple msecs, then
2304 * chk-status again. If still busy, fall back to
2305 * PIO_ST_POLL state.
2307 drv_stat
= ata_busy_wait(ap
, ATA_BUSY
| ATA_DRQ
, 10);
2308 if (drv_stat
& (ATA_BUSY
| ATA_DRQ
)) {
2310 drv_stat
= ata_busy_wait(ap
, ATA_BUSY
| ATA_DRQ
, 10);
2311 if (drv_stat
& (ATA_BUSY
| ATA_DRQ
)) {
2312 ap
->pio_task_state
= PIO_ST_LAST_POLL
;
2313 ap
->pio_task_timeout
= jiffies
+ ATA_TMOUT_PIO
;
2318 drv_stat
= ata_wait_idle(ap
);
2319 if (!ata_ok(drv_stat
)) {
2320 ap
->pio_task_state
= PIO_ST_ERR
;
2324 qc
= ata_qc_from_tag(ap
, ap
->active_tag
);
2327 ap
->pio_task_state
= PIO_ST_IDLE
;
2331 ata_qc_complete(qc
, drv_stat
);
2334 void swap_buf_le16(u16
*buf
, unsigned int buf_words
)
2339 for (i
= 0; i
< buf_words
; i
++)
2340 buf
[i
] = le16_to_cpu(buf
[i
]);
2341 #endif /* __BIG_ENDIAN */
2344 static void ata_mmio_data_xfer(struct ata_port
*ap
, unsigned char *buf
,
2345 unsigned int buflen
, int write_data
)
2348 unsigned int words
= buflen
>> 1;
2349 u16
*buf16
= (u16
*) buf
;
2350 void __iomem
*mmio
= (void __iomem
*)ap
->ioaddr
.data_addr
;
2353 for (i
= 0; i
< words
; i
++)
2354 writew(le16_to_cpu(buf16
[i
]), mmio
);
2356 for (i
= 0; i
< words
; i
++)
2357 buf16
[i
] = cpu_to_le16(readw(mmio
));
2361 static void ata_pio_data_xfer(struct ata_port
*ap
, unsigned char *buf
,
2362 unsigned int buflen
, int write_data
)
2364 unsigned int dwords
= buflen
>> 1;
2367 outsw(ap
->ioaddr
.data_addr
, buf
, dwords
);
2369 insw(ap
->ioaddr
.data_addr
, buf
, dwords
);
2372 static void ata_data_xfer(struct ata_port
*ap
, unsigned char *buf
,
2373 unsigned int buflen
, int do_write
)
2375 if (ap
->flags
& ATA_FLAG_MMIO
)
2376 ata_mmio_data_xfer(ap
, buf
, buflen
, do_write
);
2378 ata_pio_data_xfer(ap
, buf
, buflen
, do_write
);
2381 static void ata_pio_sector(struct ata_queued_cmd
*qc
)
2383 int do_write
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
);
2384 struct scatterlist
*sg
= qc
->sg
;
2385 struct ata_port
*ap
= qc
->ap
;
2387 unsigned int offset
;
2390 if (qc
->cursect
== (qc
->nsect
- 1))
2391 ap
->pio_task_state
= PIO_ST_LAST
;
2393 page
= sg
[qc
->cursg
].page
;
2394 offset
= sg
[qc
->cursg
].offset
+ qc
->cursg_ofs
* ATA_SECT_SIZE
;
2396 /* get the current page and offset */
2397 page
= nth_page(page
, (offset
>> PAGE_SHIFT
));
2398 offset
%= PAGE_SIZE
;
2400 buf
= kmap(page
) + offset
;
2405 if ((qc
->cursg_ofs
* ATA_SECT_SIZE
) == (&sg
[qc
->cursg
])->length
) {
2410 DPRINTK("data %s\n", qc
->tf
.flags
& ATA_TFLAG_WRITE
? "write" : "read");
2412 /* do the actual data transfer */
2413 do_write
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
);
2414 ata_data_xfer(ap
, buf
, ATA_SECT_SIZE
, do_write
);
2419 static void __atapi_pio_bytes(struct ata_queued_cmd
*qc
, unsigned int bytes
)
2421 int do_write
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
);
2422 struct scatterlist
*sg
= qc
->sg
;
2423 struct ata_port
*ap
= qc
->ap
;
2426 unsigned int offset
, count
;
2428 if (qc
->curbytes
== qc
->nbytes
- bytes
)
2429 ap
->pio_task_state
= PIO_ST_LAST
;
2432 sg
= &qc
->sg
[qc
->cursg
];
2436 offset
= sg
->offset
+ qc
->cursg_ofs
;
2438 /* get the current page and offset */
2439 page
= nth_page(page
, (offset
>> PAGE_SHIFT
));
2440 offset
%= PAGE_SIZE
;
2442 count
= min(sg
->length
- qc
->cursg_ofs
, bytes
);
2444 /* don't cross page boundaries */
2445 count
= min(count
, (unsigned int)PAGE_SIZE
- offset
);
2447 buf
= kmap(page
) + offset
;
2450 qc
->curbytes
+= count
;
2451 qc
->cursg_ofs
+= count
;
2453 if (qc
->cursg_ofs
== sg
->length
) {
2458 DPRINTK("data %s\n", qc
->tf
.flags
& ATA_TFLAG_WRITE
? "write" : "read");
2460 /* do the actual data transfer */
2461 ata_data_xfer(ap
, buf
, count
, do_write
);
2466 if (qc
->cursg_ofs
< sg
->length
)
2472 static void atapi_pio_bytes(struct ata_queued_cmd
*qc
)
2474 struct ata_port
*ap
= qc
->ap
;
2475 struct ata_device
*dev
= qc
->dev
;
2476 unsigned int ireason
, bc_lo
, bc_hi
, bytes
;
2477 int i_write
, do_write
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
) ? 1 : 0;
2479 ap
->ops
->tf_read(ap
, &qc
->tf
);
2480 ireason
= qc
->tf
.nsect
;
2481 bc_lo
= qc
->tf
.lbam
;
2482 bc_hi
= qc
->tf
.lbah
;
2483 bytes
= (bc_hi
<< 8) | bc_lo
;
2485 /* shall be cleared to zero, indicating xfer of data */
2486 if (ireason
& (1 << 0))
2489 /* make sure transfer direction matches expected */
2490 i_write
= ((ireason
& (1 << 1)) == 0) ? 1 : 0;
2491 if (do_write
!= i_write
)
2494 __atapi_pio_bytes(qc
, bytes
);
2499 printk(KERN_INFO
"ata%u: dev %u: ATAPI check failed\n",
2500 ap
->id
, dev
->devno
);
2501 ap
->pio_task_state
= PIO_ST_ERR
;
2509 * None. (executing in kernel thread context)
2512 static void ata_pio_block(struct ata_port
*ap
)
2514 struct ata_queued_cmd
*qc
;
2518 * This is purely hueristic. This is a fast path.
2519 * Sometimes when we enter, BSY will be cleared in
2520 * a chk-status or two. If not, the drive is probably seeking
2521 * or something. Snooze for a couple msecs, then
2522 * chk-status again. If still busy, fall back to
2523 * PIO_ST_POLL state.
2525 status
= ata_busy_wait(ap
, ATA_BUSY
, 5);
2526 if (status
& ATA_BUSY
) {
2528 status
= ata_busy_wait(ap
, ATA_BUSY
, 10);
2529 if (status
& ATA_BUSY
) {
2530 ap
->pio_task_state
= PIO_ST_POLL
;
2531 ap
->pio_task_timeout
= jiffies
+ ATA_TMOUT_PIO
;
2536 qc
= ata_qc_from_tag(ap
, ap
->active_tag
);
2539 if (is_atapi_taskfile(&qc
->tf
)) {
2540 /* no more data to transfer or unsupported ATAPI command */
2541 if ((status
& ATA_DRQ
) == 0) {
2542 ap
->pio_task_state
= PIO_ST_IDLE
;
2546 ata_qc_complete(qc
, status
);
2550 atapi_pio_bytes(qc
);
2552 /* handle BSY=0, DRQ=0 as error */
2553 if ((status
& ATA_DRQ
) == 0) {
2554 ap
->pio_task_state
= PIO_ST_ERR
;
2562 static void ata_pio_error(struct ata_port
*ap
)
2564 struct ata_queued_cmd
*qc
;
2567 qc
= ata_qc_from_tag(ap
, ap
->active_tag
);
2570 drv_stat
= ata_chk_status(ap
);
2571 printk(KERN_WARNING
"ata%u: PIO error, drv_stat 0x%x\n",
2574 ap
->pio_task_state
= PIO_ST_IDLE
;
2578 ata_qc_complete(qc
, drv_stat
| ATA_ERR
);
2581 static void ata_pio_task(void *_data
)
2583 struct ata_port
*ap
= _data
;
2584 unsigned long timeout
= 0;
2586 switch (ap
->pio_task_state
) {
2595 ata_pio_complete(ap
);
2599 case PIO_ST_LAST_POLL
:
2600 timeout
= ata_pio_poll(ap
);
2610 queue_delayed_work(ata_wq
, &ap
->pio_task
,
2613 queue_work(ata_wq
, &ap
->pio_task
);
2616 static void atapi_request_sense(struct ata_port
*ap
, struct ata_device
*dev
,
2617 struct scsi_cmnd
*cmd
)
2619 DECLARE_COMPLETION(wait
);
2620 struct ata_queued_cmd
*qc
;
2621 unsigned long flags
;
2624 DPRINTK("ATAPI request sense\n");
2626 qc
= ata_qc_new_init(ap
, dev
);
2629 /* FIXME: is this needed? */
2630 memset(cmd
->sense_buffer
, 0, sizeof(cmd
->sense_buffer
));
2632 ata_sg_init_one(qc
, cmd
->sense_buffer
, sizeof(cmd
->sense_buffer
));
2633 qc
->dma_dir
= DMA_FROM_DEVICE
;
2635 memset(&qc
->cdb
, 0, ap
->cdb_len
);
2636 qc
->cdb
[0] = REQUEST_SENSE
;
2637 qc
->cdb
[4] = SCSI_SENSE_BUFFERSIZE
;
2639 qc
->tf
.flags
|= ATA_TFLAG_ISADDR
| ATA_TFLAG_DEVICE
;
2640 qc
->tf
.command
= ATA_CMD_PACKET
;
2642 qc
->tf
.protocol
= ATA_PROT_ATAPI
;
2643 qc
->tf
.lbam
= (8 * 1024) & 0xff;
2644 qc
->tf
.lbah
= (8 * 1024) >> 8;
2645 qc
->nbytes
= SCSI_SENSE_BUFFERSIZE
;
2647 qc
->waiting
= &wait
;
2648 qc
->complete_fn
= ata_qc_complete_noop
;
2650 spin_lock_irqsave(&ap
->host_set
->lock
, flags
);
2651 rc
= ata_qc_issue(qc
);
2652 spin_unlock_irqrestore(&ap
->host_set
->lock
, flags
);
2655 ata_port_disable(ap
);
2657 wait_for_completion(&wait
);
2663 * ata_qc_timeout - Handle timeout of queued command
2664 * @qc: Command that timed out
2666 * Some part of the kernel (currently, only the SCSI layer)
2667 * has noticed that the active command on port @ap has not
2668 * completed after a specified length of time. Handle this
2669 * condition by disabling DMA (if necessary) and completing
2670 * transactions, with error if necessary.
2672 * This also handles the case of the "lost interrupt", where
2673 * for some reason (possibly hardware bug, possibly driver bug)
2674 * an interrupt was not delivered to the driver, even though the
2675 * transaction completed successfully.
2678 * Inherited from SCSI layer (none, can sleep)
2681 static void ata_qc_timeout(struct ata_queued_cmd
*qc
)
2683 struct ata_port
*ap
= qc
->ap
;
2684 struct ata_device
*dev
= qc
->dev
;
2685 u8 host_stat
= 0, drv_stat
;
2689 /* FIXME: doesn't this conflict with timeout handling? */
2690 if (qc
->dev
->class == ATA_DEV_ATAPI
&& qc
->scsicmd
) {
2691 struct scsi_cmnd
*cmd
= qc
->scsicmd
;
2693 if (!scsi_eh_eflags_chk(cmd
, SCSI_EH_CANCEL_CMD
)) {
2695 /* finish completing original command */
2696 __ata_qc_complete(qc
);
2698 atapi_request_sense(ap
, dev
, cmd
);
2700 cmd
->result
= (CHECK_CONDITION
<< 1) | (DID_OK
<< 16);
2701 scsi_finish_command(cmd
);
2707 /* hack alert! We cannot use the supplied completion
2708 * function from inside the ->eh_strategy_handler() thread.
2709 * libata is the only user of ->eh_strategy_handler() in
2710 * any kernel, so the default scsi_done() assumes it is
2711 * not being called from the SCSI EH.
2713 qc
->scsidone
= scsi_finish_command
;
2715 switch (qc
->tf
.protocol
) {
2718 case ATA_PROT_ATAPI_DMA
:
2719 host_stat
= ap
->ops
->bmdma_status(ap
);
2721 /* before we do anything else, clear DMA-Start bit */
2722 ap
->ops
->bmdma_stop(ap
);
2728 drv_stat
= ata_chk_status(ap
);
2730 /* ack bmdma irq events */
2731 ap
->ops
->irq_clear(ap
);
2733 printk(KERN_ERR
"ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
2734 ap
->id
, qc
->tf
.command
, drv_stat
, host_stat
);
2736 /* complete taskfile transaction */
2737 ata_qc_complete(qc
, drv_stat
);
2745 * ata_eng_timeout - Handle timeout of queued command
2746 * @ap: Port on which timed-out command is active
2748 * Some part of the kernel (currently, only the SCSI layer)
2749 * has noticed that the active command on port @ap has not
2750 * completed after a specified length of time. Handle this
2751 * condition by disabling DMA (if necessary) and completing
2752 * transactions, with error if necessary.
2754 * This also handles the case of the "lost interrupt", where
2755 * for some reason (possibly hardware bug, possibly driver bug)
2756 * an interrupt was not delivered to the driver, even though the
2757 * transaction completed successfully.
2760 * Inherited from SCSI layer (none, can sleep)
2763 void ata_eng_timeout(struct ata_port
*ap
)
2765 struct ata_queued_cmd
*qc
;
2769 qc
= ata_qc_from_tag(ap
, ap
->active_tag
);
2771 printk(KERN_ERR
"ata%u: BUG: timeout without command\n",
2783 * ata_qc_new - Request an available ATA command, for queueing
2784 * @ap: Port associated with device @dev
2785 * @dev: Device from whom we request an available command structure
2791 static struct ata_queued_cmd
*ata_qc_new(struct ata_port
*ap
)
2793 struct ata_queued_cmd
*qc
= NULL
;
2796 for (i
= 0; i
< ATA_MAX_QUEUE
; i
++)
2797 if (!test_and_set_bit(i
, &ap
->qactive
)) {
2798 qc
= ata_qc_from_tag(ap
, i
);
2809 * ata_qc_new_init - Request an available ATA command, and initialize it
2810 * @ap: Port associated with device @dev
2811 * @dev: Device from whom we request an available command structure
2817 struct ata_queued_cmd
*ata_qc_new_init(struct ata_port
*ap
,
2818 struct ata_device
*dev
)
2820 struct ata_queued_cmd
*qc
;
2822 qc
= ata_qc_new(ap
);
2829 qc
->cursect
= qc
->cursg
= qc
->cursg_ofs
= 0;
2831 qc
->nbytes
= qc
->curbytes
= 0;
2833 ata_tf_init(ap
, &qc
->tf
, dev
->devno
);
2835 if (dev
->flags
& ATA_DFLAG_LBA48
)
2836 qc
->tf
.flags
|= ATA_TFLAG_LBA48
;
2842 static int ata_qc_complete_noop(struct ata_queued_cmd
*qc
, u8 drv_stat
)
2847 static void __ata_qc_complete(struct ata_queued_cmd
*qc
)
2849 struct ata_port
*ap
= qc
->ap
;
2850 unsigned int tag
, do_clear
= 0;
2854 if (likely(ata_tag_valid(tag
))) {
2855 if (tag
== ap
->active_tag
)
2856 ap
->active_tag
= ATA_TAG_POISON
;
2857 qc
->tag
= ATA_TAG_POISON
;
2862 struct completion
*waiting
= qc
->waiting
;
2867 if (likely(do_clear
))
2868 clear_bit(tag
, &ap
->qactive
);
2872 * ata_qc_free - free unused ata_queued_cmd
2873 * @qc: Command to complete
2875 * Designed to free unused ata_queued_cmd object
2876 * in case something prevents using it.
2879 * spin_lock_irqsave(host_set lock)
2882 void ata_qc_free(struct ata_queued_cmd
*qc
)
2884 assert(qc
!= NULL
); /* ata_qc_from_tag _might_ return NULL */
2885 assert(qc
->waiting
== NULL
); /* nothing should be waiting */
2887 __ata_qc_complete(qc
);
2891 * ata_qc_complete - Complete an active ATA command
2892 * @qc: Command to complete
2893 * @drv_stat: ATA Status register contents
2895 * Indicate to the mid and upper layers that an ATA
2896 * command has completed, with either an ok or not-ok status.
2899 * spin_lock_irqsave(host_set lock)
2903 void ata_qc_complete(struct ata_queued_cmd
*qc
, u8 drv_stat
)
2907 assert(qc
!= NULL
); /* ata_qc_from_tag _might_ return NULL */
2908 assert(qc
->flags
& ATA_QCFLAG_ACTIVE
);
2910 if (likely(qc
->flags
& ATA_QCFLAG_DMAMAP
))
2913 /* call completion callback */
2914 rc
= qc
->complete_fn(qc
, drv_stat
);
2915 qc
->flags
&= ~ATA_QCFLAG_ACTIVE
;
2917 /* if callback indicates not to complete command (non-zero),
2918 * return immediately
2923 __ata_qc_complete(qc
);
2928 static inline int ata_should_dma_map(struct ata_queued_cmd
*qc
)
2930 struct ata_port
*ap
= qc
->ap
;
2932 switch (qc
->tf
.protocol
) {
2934 case ATA_PROT_ATAPI_DMA
:
2937 case ATA_PROT_ATAPI
:
2939 case ATA_PROT_PIO_MULT
:
2940 if (ap
->flags
& ATA_FLAG_PIO_DMA
)
2953 * ata_qc_issue - issue taskfile to device
2954 * @qc: command to issue to device
2956 * Prepare an ATA command to submission to device.
2957 * This includes mapping the data into a DMA-able
2958 * area, filling in the S/G table, and finally
2959 * writing the taskfile to hardware, starting the command.
2962 * spin_lock_irqsave(host_set lock)
2965 * Zero on success, negative on error.
2968 int ata_qc_issue(struct ata_queued_cmd
*qc
)
2970 struct ata_port
*ap
= qc
->ap
;
2972 if (ata_should_dma_map(qc
)) {
2973 if (qc
->flags
& ATA_QCFLAG_SG
) {
2974 if (ata_sg_setup(qc
))
2976 } else if (qc
->flags
& ATA_QCFLAG_SINGLE
) {
2977 if (ata_sg_setup_one(qc
))
2981 qc
->flags
&= ~ATA_QCFLAG_DMAMAP
;
2984 ap
->ops
->qc_prep(qc
);
2986 qc
->ap
->active_tag
= qc
->tag
;
2987 qc
->flags
|= ATA_QCFLAG_ACTIVE
;
2989 return ap
->ops
->qc_issue(qc
);
2996 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
2997 * @qc: command to issue to device
2999 * Using various libata functions and hooks, this function
3000 * starts an ATA command. ATA commands are grouped into
3001 * classes called "protocols", and issuing each type of protocol
3002 * is slightly different.
3005 * spin_lock_irqsave(host_set lock)
3008 * Zero on success, negative on error.
3011 int ata_qc_issue_prot(struct ata_queued_cmd
*qc
)
3013 struct ata_port
*ap
= qc
->ap
;
3015 ata_dev_select(ap
, qc
->dev
->devno
, 1, 0);
3017 switch (qc
->tf
.protocol
) {
3018 case ATA_PROT_NODATA
:
3019 ata_tf_to_host_nolock(ap
, &qc
->tf
);
3023 ap
->ops
->tf_load(ap
, &qc
->tf
); /* load tf registers */
3024 ap
->ops
->bmdma_setup(qc
); /* set up bmdma */
3025 ap
->ops
->bmdma_start(qc
); /* initiate bmdma */
3028 case ATA_PROT_PIO
: /* load tf registers, initiate polling pio */
3029 ata_qc_set_polling(qc
);
3030 ata_tf_to_host_nolock(ap
, &qc
->tf
);
3031 ap
->pio_task_state
= PIO_ST
;
3032 queue_work(ata_wq
, &ap
->pio_task
);
3035 case ATA_PROT_ATAPI
:
3036 ata_qc_set_polling(qc
);
3037 ata_tf_to_host_nolock(ap
, &qc
->tf
);
3038 queue_work(ata_wq
, &ap
->packet_task
);
3041 case ATA_PROT_ATAPI_NODATA
:
3042 ata_tf_to_host_nolock(ap
, &qc
->tf
);
3043 queue_work(ata_wq
, &ap
->packet_task
);
3046 case ATA_PROT_ATAPI_DMA
:
3047 ap
->ops
->tf_load(ap
, &qc
->tf
); /* load tf registers */
3048 ap
->ops
->bmdma_setup(qc
); /* set up bmdma */
3049 queue_work(ata_wq
, &ap
->packet_task
);
3061 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
3062 * @qc: Info associated with this ATA transaction.
3065 * spin_lock_irqsave(host_set lock)
3068 static void ata_bmdma_setup_mmio (struct ata_queued_cmd
*qc
)
3070 struct ata_port
*ap
= qc
->ap
;
3071 unsigned int rw
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
);
3073 void __iomem
*mmio
= (void __iomem
*) ap
->ioaddr
.bmdma_addr
;
3075 /* load PRD table addr. */
3076 mb(); /* make sure PRD table writes are visible to controller */
3077 writel(ap
->prd_dma
, mmio
+ ATA_DMA_TABLE_OFS
);
3079 /* specify data direction, triple-check start bit is clear */
3080 dmactl
= readb(mmio
+ ATA_DMA_CMD
);
3081 dmactl
&= ~(ATA_DMA_WR
| ATA_DMA_START
);
3083 dmactl
|= ATA_DMA_WR
;
3084 writeb(dmactl
, mmio
+ ATA_DMA_CMD
);
3086 /* issue r/w command */
3087 ap
->ops
->exec_command(ap
, &qc
->tf
);
3091 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3092 * @qc: Info associated with this ATA transaction.
3095 * spin_lock_irqsave(host_set lock)
3098 static void ata_bmdma_start_mmio (struct ata_queued_cmd
*qc
)
3100 struct ata_port
*ap
= qc
->ap
;
3101 void __iomem
*mmio
= (void __iomem
*) ap
->ioaddr
.bmdma_addr
;
3104 /* start host DMA transaction */
3105 dmactl
= readb(mmio
+ ATA_DMA_CMD
);
3106 writeb(dmactl
| ATA_DMA_START
, mmio
+ ATA_DMA_CMD
);
3108 /* Strictly, one may wish to issue a readb() here, to
3109 * flush the mmio write. However, control also passes
3110 * to the hardware at this point, and it will interrupt
3111 * us when we are to resume control. So, in effect,
3112 * we don't care when the mmio write flushes.
3113 * Further, a read of the DMA status register _immediately_
3114 * following the write may not be what certain flaky hardware
3115 * is expected, so I think it is best to not add a readb()
3116 * without first all the MMIO ATA cards/mobos.
3117 * Or maybe I'm just being paranoid.
3122 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
3123 * @qc: Info associated with this ATA transaction.
3126 * spin_lock_irqsave(host_set lock)
3129 static void ata_bmdma_setup_pio (struct ata_queued_cmd
*qc
)
3131 struct ata_port
*ap
= qc
->ap
;
3132 unsigned int rw
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
);
3135 /* load PRD table addr. */
3136 outl(ap
->prd_dma
, ap
->ioaddr
.bmdma_addr
+ ATA_DMA_TABLE_OFS
);
3138 /* specify data direction, triple-check start bit is clear */
3139 dmactl
= inb(ap
->ioaddr
.bmdma_addr
+ ATA_DMA_CMD
);
3140 dmactl
&= ~(ATA_DMA_WR
| ATA_DMA_START
);
3142 dmactl
|= ATA_DMA_WR
;
3143 outb(dmactl
, ap
->ioaddr
.bmdma_addr
+ ATA_DMA_CMD
);
3145 /* issue r/w command */
3146 ap
->ops
->exec_command(ap
, &qc
->tf
);
3150 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
3151 * @qc: Info associated with this ATA transaction.
3154 * spin_lock_irqsave(host_set lock)
3157 static void ata_bmdma_start_pio (struct ata_queued_cmd
*qc
)
3159 struct ata_port
*ap
= qc
->ap
;
3162 /* start host DMA transaction */
3163 dmactl
= inb(ap
->ioaddr
.bmdma_addr
+ ATA_DMA_CMD
);
3164 outb(dmactl
| ATA_DMA_START
,
3165 ap
->ioaddr
.bmdma_addr
+ ATA_DMA_CMD
);
3168 void ata_bmdma_start(struct ata_queued_cmd
*qc
)
3170 if (qc
->ap
->flags
& ATA_FLAG_MMIO
)
3171 ata_bmdma_start_mmio(qc
);
3173 ata_bmdma_start_pio(qc
);
3176 void ata_bmdma_setup(struct ata_queued_cmd
*qc
)
3178 if (qc
->ap
->flags
& ATA_FLAG_MMIO
)
3179 ata_bmdma_setup_mmio(qc
);
3181 ata_bmdma_setup_pio(qc
);
3184 void ata_bmdma_irq_clear(struct ata_port
*ap
)
3186 if (ap
->flags
& ATA_FLAG_MMIO
) {
3187 void __iomem
*mmio
= ((void __iomem
*) ap
->ioaddr
.bmdma_addr
) + ATA_DMA_STATUS
;
3188 writeb(readb(mmio
), mmio
);
3190 unsigned long addr
= ap
->ioaddr
.bmdma_addr
+ ATA_DMA_STATUS
;
3191 outb(inb(addr
), addr
);
3196 u8
ata_bmdma_status(struct ata_port
*ap
)
3199 if (ap
->flags
& ATA_FLAG_MMIO
) {
3200 void __iomem
*mmio
= (void __iomem
*) ap
->ioaddr
.bmdma_addr
;
3201 host_stat
= readb(mmio
+ ATA_DMA_STATUS
);
3203 host_stat
= inb(ap
->ioaddr
.bmdma_addr
+ ATA_DMA_STATUS
);
3207 void ata_bmdma_stop(struct ata_port
*ap
)
3209 if (ap
->flags
& ATA_FLAG_MMIO
) {
3210 void __iomem
*mmio
= (void __iomem
*) ap
->ioaddr
.bmdma_addr
;
3212 /* clear start/stop bit */
3213 writeb(readb(mmio
+ ATA_DMA_CMD
) & ~ATA_DMA_START
,
3214 mmio
+ ATA_DMA_CMD
);
3216 /* clear start/stop bit */
3217 outb(inb(ap
->ioaddr
.bmdma_addr
+ ATA_DMA_CMD
) & ~ATA_DMA_START
,
3218 ap
->ioaddr
.bmdma_addr
+ ATA_DMA_CMD
);
3221 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3222 ata_altstatus(ap
); /* dummy read */
3226 * ata_host_intr - Handle host interrupt for given (port, task)
3227 * @ap: Port on which interrupt arrived (possibly...)
3228 * @qc: Taskfile currently active in engine
3230 * Handle host interrupt for given queued command. Currently,
3231 * only DMA interrupts are handled. All other commands are
3232 * handled via polling with interrupts disabled (nIEN bit).
3235 * spin_lock_irqsave(host_set lock)
3238 * One if interrupt was handled, zero if not (shared irq).
3241 inline unsigned int ata_host_intr (struct ata_port
*ap
,
3242 struct ata_queued_cmd
*qc
)
3244 u8 status
, host_stat
;
3246 switch (qc
->tf
.protocol
) {
3249 case ATA_PROT_ATAPI_DMA
:
3250 case ATA_PROT_ATAPI
:
3251 /* check status of DMA engine */
3252 host_stat
= ap
->ops
->bmdma_status(ap
);
3253 VPRINTK("ata%u: host_stat 0x%X\n", ap
->id
, host_stat
);
3255 /* if it's not our irq... */
3256 if (!(host_stat
& ATA_DMA_INTR
))
3259 /* before we do anything else, clear DMA-Start bit */
3260 ap
->ops
->bmdma_stop(ap
);
3264 case ATA_PROT_ATAPI_NODATA
:
3265 case ATA_PROT_NODATA
:
3266 /* check altstatus */
3267 status
= ata_altstatus(ap
);
3268 if (status
& ATA_BUSY
)
3271 /* check main status, clearing INTRQ */
3272 status
= ata_chk_status(ap
);
3273 if (unlikely(status
& ATA_BUSY
))
3275 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
3276 ap
->id
, qc
->tf
.protocol
, status
);
3278 /* ack bmdma irq events */
3279 ap
->ops
->irq_clear(ap
);
3281 /* complete taskfile transaction */
3282 ata_qc_complete(qc
, status
);
3289 return 1; /* irq handled */
3292 ap
->stats
.idle_irq
++;
3295 if ((ap
->stats
.idle_irq
% 1000) == 0) {
3297 ata_irq_ack(ap
, 0); /* debug trap */
3298 printk(KERN_WARNING
"ata%d: irq trap\n", ap
->id
);
3301 return 0; /* irq not handled */
3305 * ata_interrupt - Default ATA host interrupt handler
3306 * @irq: irq line (unused)
3307 * @dev_instance: pointer to our ata_host_set information structure
3310 * Default interrupt handler for PCI IDE devices. Calls
3311 * ata_host_intr() for each port that is not disabled.
3314 * Obtains host_set lock during operation.
3317 * IRQ_NONE or IRQ_HANDLED.
3321 irqreturn_t
ata_interrupt (int irq
, void *dev_instance
, struct pt_regs
*regs
)
3323 struct ata_host_set
*host_set
= dev_instance
;
3325 unsigned int handled
= 0;
3326 unsigned long flags
;
3328 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
3329 spin_lock_irqsave(&host_set
->lock
, flags
);
3331 for (i
= 0; i
< host_set
->n_ports
; i
++) {
3332 struct ata_port
*ap
;
3334 ap
= host_set
->ports
[i
];
3335 if (ap
&& (!(ap
->flags
& ATA_FLAG_PORT_DISABLED
))) {
3336 struct ata_queued_cmd
*qc
;
3338 qc
= ata_qc_from_tag(ap
, ap
->active_tag
);
3339 if (qc
&& (!(qc
->tf
.ctl
& ATA_NIEN
)) &&
3340 (qc
->flags
& ATA_QCFLAG_ACTIVE
))
3341 handled
|= ata_host_intr(ap
, qc
);
3345 spin_unlock_irqrestore(&host_set
->lock
, flags
);
3347 return IRQ_RETVAL(handled
);
3351 * atapi_packet_task - Write CDB bytes to hardware
3352 * @_data: Port to which ATAPI device is attached.
3354 * When device has indicated its readiness to accept
3355 * a CDB, this function is called. Send the CDB.
3356 * If DMA is to be performed, exit immediately.
3357 * Otherwise, we are in polling mode, so poll
3358 * status under operation succeeds or fails.
3361 * Kernel thread context (may sleep)
3364 static void atapi_packet_task(void *_data
)
3366 struct ata_port
*ap
= _data
;
3367 struct ata_queued_cmd
*qc
;
3370 qc
= ata_qc_from_tag(ap
, ap
->active_tag
);
3372 assert(qc
->flags
& ATA_QCFLAG_ACTIVE
);
3374 /* sleep-wait for BSY to clear */
3375 DPRINTK("busy wait\n");
3376 if (ata_busy_sleep(ap
, ATA_TMOUT_CDB_QUICK
, ATA_TMOUT_CDB
))
3379 /* make sure DRQ is set */
3380 status
= ata_chk_status(ap
);
3381 if ((status
& (ATA_BUSY
| ATA_DRQ
)) != ATA_DRQ
)
3385 DPRINTK("send cdb\n");
3386 assert(ap
->cdb_len
>= 12);
3387 ata_data_xfer(ap
, qc
->cdb
, ap
->cdb_len
, 1);
3389 /* if we are DMA'ing, irq handler takes over from here */
3390 if (qc
->tf
.protocol
== ATA_PROT_ATAPI_DMA
)
3391 ap
->ops
->bmdma_start(qc
); /* initiate bmdma */
3393 /* non-data commands are also handled via irq */
3394 else if (qc
->tf
.protocol
== ATA_PROT_ATAPI_NODATA
) {
3398 /* PIO commands are handled by polling */
3400 ap
->pio_task_state
= PIO_ST
;
3401 queue_work(ata_wq
, &ap
->pio_task
);
3407 ata_qc_complete(qc
, ATA_ERR
);
3410 int ata_port_start (struct ata_port
*ap
)
3412 struct device
*dev
= ap
->host_set
->dev
;
3414 ap
->prd
= dma_alloc_coherent(dev
, ATA_PRD_TBL_SZ
, &ap
->prd_dma
, GFP_KERNEL
);
3418 DPRINTK("prd alloc, virt %p, dma %llx\n", ap
->prd
, (unsigned long long) ap
->prd_dma
);
3423 void ata_port_stop (struct ata_port
*ap
)
3425 struct device
*dev
= ap
->host_set
->dev
;
3427 dma_free_coherent(dev
, ATA_PRD_TBL_SZ
, ap
->prd
, ap
->prd_dma
);
3431 * ata_host_remove - Unregister SCSI host structure with upper layers
3432 * @ap: Port to unregister
3433 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
3438 static void ata_host_remove(struct ata_port
*ap
, unsigned int do_unregister
)
3440 struct Scsi_Host
*sh
= ap
->host
;
3445 scsi_remove_host(sh
);
3447 ap
->ops
->port_stop(ap
);
3451 * ata_host_init - Initialize an ata_port structure
3452 * @ap: Structure to initialize
3453 * @host: associated SCSI mid-layer structure
3454 * @host_set: Collection of hosts to which @ap belongs
3455 * @ent: Probe information provided by low-level driver
3456 * @port_no: Port number associated with this ata_port
3458 * Initialize a new ata_port structure, and its associated
3462 * Inherited from caller.
3466 static void ata_host_init(struct ata_port
*ap
, struct Scsi_Host
*host
,
3467 struct ata_host_set
*host_set
,
3468 struct ata_probe_ent
*ent
, unsigned int port_no
)
3474 host
->max_channel
= 1;
3475 host
->unique_id
= ata_unique_id
++;
3476 host
->max_cmd_len
= 12;
3477 scsi_set_device(host
, ent
->dev
);
3478 scsi_assign_lock(host
, &host_set
->lock
);
3480 ap
->flags
= ATA_FLAG_PORT_DISABLED
;
3481 ap
->id
= host
->unique_id
;
3483 ap
->ctl
= ATA_DEVCTL_OBS
;
3484 ap
->host_set
= host_set
;
3485 ap
->port_no
= port_no
;
3487 ent
->legacy_mode
? ent
->hard_port_no
: port_no
;
3488 ap
->pio_mask
= ent
->pio_mask
;
3489 ap
->mwdma_mask
= ent
->mwdma_mask
;
3490 ap
->udma_mask
= ent
->udma_mask
;
3491 ap
->flags
|= ent
->host_flags
;
3492 ap
->ops
= ent
->port_ops
;
3493 ap
->cbl
= ATA_CBL_NONE
;
3494 ap
->active_tag
= ATA_TAG_POISON
;
3495 ap
->last_ctl
= 0xFF;
3497 INIT_WORK(&ap
->packet_task
, atapi_packet_task
, ap
);
3498 INIT_WORK(&ap
->pio_task
, ata_pio_task
, ap
);
3500 for (i
= 0; i
< ATA_MAX_DEVICES
; i
++)
3501 ap
->device
[i
].devno
= i
;
3504 ap
->stats
.unhandled_irq
= 1;
3505 ap
->stats
.idle_irq
= 1;
3508 memcpy(&ap
->ioaddr
, &ent
->port
[port_no
], sizeof(struct ata_ioports
));
3512 * ata_host_add - Attach low-level ATA driver to system
3513 * @ent: Information provided by low-level driver
3514 * @host_set: Collections of ports to which we add
3515 * @port_no: Port number associated with this host
3517 * Attach low-level ATA driver to system.
3520 * PCI/etc. bus probe sem.
3523 * New ata_port on success, for NULL on error.
3527 static struct ata_port
* ata_host_add(struct ata_probe_ent
*ent
,
3528 struct ata_host_set
*host_set
,
3529 unsigned int port_no
)
3531 struct Scsi_Host
*host
;
3532 struct ata_port
*ap
;
3536 host
= scsi_host_alloc(ent
->sht
, sizeof(struct ata_port
));
3540 ap
= (struct ata_port
*) &host
->hostdata
[0];
3542 ata_host_init(ap
, host
, host_set
, ent
, port_no
);
3544 rc
= ap
->ops
->port_start(ap
);
3551 scsi_host_put(host
);
3556 * ata_device_add - Register hardware device with ATA and SCSI layers
3557 * @ent: Probe information describing hardware device to be registered
3559 * This function processes the information provided in the probe
3560 * information struct @ent, allocates the necessary ATA and SCSI
3561 * host information structures, initializes them, and registers
3562 * everything with requisite kernel subsystems.
3564 * This function requests irqs, probes the ATA bus, and probes
3568 * PCI/etc. bus probe sem.
3571 * Number of ports registered. Zero on error (no ports registered).
3575 int ata_device_add(struct ata_probe_ent
*ent
)
3577 unsigned int count
= 0, i
;
3578 struct device
*dev
= ent
->dev
;
3579 struct ata_host_set
*host_set
;
3582 /* alloc a container for our list of ATA ports (buses) */
3583 host_set
= kmalloc(sizeof(struct ata_host_set
) +
3584 (ent
->n_ports
* sizeof(void *)), GFP_KERNEL
);
3587 memset(host_set
, 0, sizeof(struct ata_host_set
) + (ent
->n_ports
* sizeof(void *)));
3588 spin_lock_init(&host_set
->lock
);
3590 host_set
->dev
= dev
;
3591 host_set
->n_ports
= ent
->n_ports
;
3592 host_set
->irq
= ent
->irq
;
3593 host_set
->mmio_base
= ent
->mmio_base
;
3594 host_set
->private_data
= ent
->private_data
;
3595 host_set
->ops
= ent
->port_ops
;
3597 /* register each port bound to this device */
3598 for (i
= 0; i
< ent
->n_ports
; i
++) {
3599 struct ata_port
*ap
;
3600 unsigned long xfer_mode_mask
;
3602 ap
= ata_host_add(ent
, host_set
, i
);
3606 host_set
->ports
[i
] = ap
;
3607 xfer_mode_mask
=(ap
->udma_mask
<< ATA_SHIFT_UDMA
) |
3608 (ap
->mwdma_mask
<< ATA_SHIFT_MWDMA
) |
3609 (ap
->pio_mask
<< ATA_SHIFT_PIO
);
3611 /* print per-port info to dmesg */
3612 printk(KERN_INFO
"ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
3613 "bmdma 0x%lX irq %lu\n",
3615 ap
->flags
& ATA_FLAG_SATA
? 'S' : 'P',
3616 ata_mode_string(xfer_mode_mask
),
3617 ap
->ioaddr
.cmd_addr
,
3618 ap
->ioaddr
.ctl_addr
,
3619 ap
->ioaddr
.bmdma_addr
,
3623 host_set
->ops
->irq_clear(ap
);
3632 /* obtain irq, that is shared between channels */
3633 if (request_irq(ent
->irq
, ent
->port_ops
->irq_handler
, ent
->irq_flags
,
3634 DRV_NAME
, host_set
))
3637 /* perform each probe synchronously */
3638 DPRINTK("probe begin\n");
3639 for (i
= 0; i
< count
; i
++) {
3640 struct ata_port
*ap
;
3643 ap
= host_set
->ports
[i
];
3645 DPRINTK("ata%u: probe begin\n", ap
->id
);
3646 rc
= ata_bus_probe(ap
);
3647 DPRINTK("ata%u: probe end\n", ap
->id
);
3650 /* FIXME: do something useful here?
3651 * Current libata behavior will
3652 * tear down everything when
3653 * the module is removed
3654 * or the h/w is unplugged.
3658 rc
= scsi_add_host(ap
->host
, dev
);
3660 printk(KERN_ERR
"ata%u: scsi_add_host failed\n",
3662 /* FIXME: do something useful here */
3663 /* FIXME: handle unconditional calls to
3664 * scsi_scan_host and ata_host_remove, below,
3670 /* probes are done, now scan each port's disk(s) */
3671 DPRINTK("probe begin\n");
3672 for (i
= 0; i
< count
; i
++) {
3673 struct ata_port
*ap
= host_set
->ports
[i
];
3675 scsi_scan_host(ap
->host
);
3678 dev_set_drvdata(dev
, host_set
);
3680 VPRINTK("EXIT, returning %u\n", ent
->n_ports
);
3681 return ent
->n_ports
; /* success */
3684 for (i
= 0; i
< count
; i
++) {
3685 ata_host_remove(host_set
->ports
[i
], 1);
3686 scsi_host_put(host_set
->ports
[i
]->host
);
3689 VPRINTK("EXIT, returning 0\n");
3694 * ata_scsi_release - SCSI layer callback hook for host unload
3695 * @host: libata host to be unloaded
3697 * Performs all duties necessary to shut down a libata port...
3698 * Kill port kthread, disable port, and release resources.
3701 * Inherited from SCSI layer.
3707 int ata_scsi_release(struct Scsi_Host
*host
)
3709 struct ata_port
*ap
= (struct ata_port
*) &host
->hostdata
[0];
3713 ap
->ops
->port_disable(ap
);
3714 ata_host_remove(ap
, 0);
3721 * ata_std_ports - initialize ioaddr with standard port offsets.
3722 * @ioaddr: IO address structure to be initialized
3724 void ata_std_ports(struct ata_ioports
*ioaddr
)
3726 ioaddr
->data_addr
= ioaddr
->cmd_addr
+ ATA_REG_DATA
;
3727 ioaddr
->error_addr
= ioaddr
->cmd_addr
+ ATA_REG_ERR
;
3728 ioaddr
->feature_addr
= ioaddr
->cmd_addr
+ ATA_REG_FEATURE
;
3729 ioaddr
->nsect_addr
= ioaddr
->cmd_addr
+ ATA_REG_NSECT
;
3730 ioaddr
->lbal_addr
= ioaddr
->cmd_addr
+ ATA_REG_LBAL
;
3731 ioaddr
->lbam_addr
= ioaddr
->cmd_addr
+ ATA_REG_LBAM
;
3732 ioaddr
->lbah_addr
= ioaddr
->cmd_addr
+ ATA_REG_LBAH
;
3733 ioaddr
->device_addr
= ioaddr
->cmd_addr
+ ATA_REG_DEVICE
;
3734 ioaddr
->status_addr
= ioaddr
->cmd_addr
+ ATA_REG_STATUS
;
3735 ioaddr
->command_addr
= ioaddr
->cmd_addr
+ ATA_REG_CMD
;
3738 static struct ata_probe_ent
*
3739 ata_probe_ent_alloc(struct device
*dev
, struct ata_port_info
*port
)
3741 struct ata_probe_ent
*probe_ent
;
3743 probe_ent
= kmalloc(sizeof(*probe_ent
), GFP_KERNEL
);
3745 printk(KERN_ERR DRV_NAME
"(%s): out of memory\n",
3746 kobject_name(&(dev
->kobj
)));
3750 memset(probe_ent
, 0, sizeof(*probe_ent
));
3752 INIT_LIST_HEAD(&probe_ent
->node
);
3753 probe_ent
->dev
= dev
;
3755 probe_ent
->sht
= port
->sht
;
3756 probe_ent
->host_flags
= port
->host_flags
;
3757 probe_ent
->pio_mask
= port
->pio_mask
;
3758 probe_ent
->mwdma_mask
= port
->mwdma_mask
;
3759 probe_ent
->udma_mask
= port
->udma_mask
;
3760 probe_ent
->port_ops
= port
->port_ops
;
3766 struct ata_probe_ent
*
3767 ata_pci_init_native_mode(struct pci_dev
*pdev
, struct ata_port_info
**port
)
3769 struct ata_probe_ent
*probe_ent
=
3770 ata_probe_ent_alloc(pci_dev_to_dev(pdev
), port
[0]);
3774 probe_ent
->n_ports
= 2;
3775 probe_ent
->irq
= pdev
->irq
;
3776 probe_ent
->irq_flags
= SA_SHIRQ
;
3778 probe_ent
->port
[0].cmd_addr
= pci_resource_start(pdev
, 0);
3779 probe_ent
->port
[0].altstatus_addr
=
3780 probe_ent
->port
[0].ctl_addr
=
3781 pci_resource_start(pdev
, 1) | ATA_PCI_CTL_OFS
;
3782 probe_ent
->port
[0].bmdma_addr
= pci_resource_start(pdev
, 4);
3784 probe_ent
->port
[1].cmd_addr
= pci_resource_start(pdev
, 2);
3785 probe_ent
->port
[1].altstatus_addr
=
3786 probe_ent
->port
[1].ctl_addr
=
3787 pci_resource_start(pdev
, 3) | ATA_PCI_CTL_OFS
;
3788 probe_ent
->port
[1].bmdma_addr
= pci_resource_start(pdev
, 4) + 8;
3790 ata_std_ports(&probe_ent
->port
[0]);
3791 ata_std_ports(&probe_ent
->port
[1]);
3796 static struct ata_probe_ent
*
3797 ata_pci_init_legacy_mode(struct pci_dev
*pdev
, struct ata_port_info
**port
,
3798 struct ata_probe_ent
**ppe2
)
3800 struct ata_probe_ent
*probe_ent
, *probe_ent2
;
3802 probe_ent
= ata_probe_ent_alloc(pci_dev_to_dev(pdev
), port
[0]);
3805 probe_ent2
= ata_probe_ent_alloc(pci_dev_to_dev(pdev
), port
[1]);
3811 probe_ent
->n_ports
= 1;
3812 probe_ent
->irq
= 14;
3814 probe_ent
->hard_port_no
= 0;
3815 probe_ent
->legacy_mode
= 1;
3817 probe_ent2
->n_ports
= 1;
3818 probe_ent2
->irq
= 15;
3820 probe_ent2
->hard_port_no
= 1;
3821 probe_ent2
->legacy_mode
= 1;
3823 probe_ent
->port
[0].cmd_addr
= 0x1f0;
3824 probe_ent
->port
[0].altstatus_addr
=
3825 probe_ent
->port
[0].ctl_addr
= 0x3f6;
3826 probe_ent
->port
[0].bmdma_addr
= pci_resource_start(pdev
, 4);
3828 probe_ent2
->port
[0].cmd_addr
= 0x170;
3829 probe_ent2
->port
[0].altstatus_addr
=
3830 probe_ent2
->port
[0].ctl_addr
= 0x376;
3831 probe_ent2
->port
[0].bmdma_addr
= pci_resource_start(pdev
, 4)+8;
3833 ata_std_ports(&probe_ent
->port
[0]);
3834 ata_std_ports(&probe_ent2
->port
[0]);
3841 * ata_pci_init_one - Initialize/register PCI IDE host controller
3842 * @pdev: Controller to be initialized
3843 * @port_info: Information from low-level host driver
3844 * @n_ports: Number of ports attached to host controller
3847 * Inherited from PCI layer (may sleep).
3850 * Zero on success, negative on errno-based value on error.
3854 int ata_pci_init_one (struct pci_dev
*pdev
, struct ata_port_info
**port_info
,
3855 unsigned int n_ports
)
3857 struct ata_probe_ent
*probe_ent
, *probe_ent2
= NULL
;
3858 struct ata_port_info
*port
[2];
3860 unsigned int legacy_mode
= 0;
3861 int disable_dev_on_err
= 1;
3866 port
[0] = port_info
[0];
3868 port
[1] = port_info
[1];
3872 if ((port
[0]->host_flags
& ATA_FLAG_NO_LEGACY
) == 0
3873 && (pdev
->class >> 8) == PCI_CLASS_STORAGE_IDE
) {
3874 /* TODO: support transitioning to native mode? */
3875 pci_read_config_byte(pdev
, PCI_CLASS_PROG
, &tmp8
);
3876 mask
= (1 << 2) | (1 << 0);
3877 if ((tmp8
& mask
) != mask
)
3878 legacy_mode
= (1 << 3);
3882 if ((!legacy_mode
) && (n_ports
> 1)) {
3883 printk(KERN_ERR
"ata: BUG: native mode, n_ports > 1\n");
3887 rc
= pci_enable_device(pdev
);
3891 rc
= pci_request_regions(pdev
, DRV_NAME
);
3893 disable_dev_on_err
= 0;
3898 if (!request_region(0x1f0, 8, "libata")) {
3899 struct resource
*conflict
, res
;
3901 res
.end
= 0x1f0 + 8 - 1;
3902 conflict
= ____request_resource(&ioport_resource
, &res
);
3903 if (!strcmp(conflict
->name
, "libata"))
3904 legacy_mode
|= (1 << 0);
3906 disable_dev_on_err
= 0;
3907 printk(KERN_WARNING
"ata: 0x1f0 IDE port busy\n");
3910 legacy_mode
|= (1 << 0);
3912 if (!request_region(0x170, 8, "libata")) {
3913 struct resource
*conflict
, res
;
3915 res
.end
= 0x170 + 8 - 1;
3916 conflict
= ____request_resource(&ioport_resource
, &res
);
3917 if (!strcmp(conflict
->name
, "libata"))
3918 legacy_mode
|= (1 << 1);
3920 disable_dev_on_err
= 0;
3921 printk(KERN_WARNING
"ata: 0x170 IDE port busy\n");
3924 legacy_mode
|= (1 << 1);
3927 /* we have legacy mode, but all ports are unavailable */
3928 if (legacy_mode
== (1 << 3)) {
3930 goto err_out_regions
;
3933 rc
= pci_set_dma_mask(pdev
, ATA_DMA_MASK
);
3935 goto err_out_regions
;
3936 rc
= pci_set_consistent_dma_mask(pdev
, ATA_DMA_MASK
);
3938 goto err_out_regions
;
3941 probe_ent
= ata_pci_init_legacy_mode(pdev
, port
, &probe_ent2
);
3943 probe_ent
= ata_pci_init_native_mode(pdev
, port
);
3946 goto err_out_regions
;
3949 pci_set_master(pdev
);
3951 /* FIXME: check ata_device_add return */
3953 if (legacy_mode
& (1 << 0))
3954 ata_device_add(probe_ent
);
3955 if (legacy_mode
& (1 << 1))
3956 ata_device_add(probe_ent2
);
3958 ata_device_add(probe_ent
);
3966 if (legacy_mode
& (1 << 0))
3967 release_region(0x1f0, 8);
3968 if (legacy_mode
& (1 << 1))
3969 release_region(0x170, 8);
3970 pci_release_regions(pdev
);
3972 if (disable_dev_on_err
)
3973 pci_disable_device(pdev
);
3978 * ata_pci_remove_one - PCI layer callback for device removal
3979 * @pdev: PCI device that was removed
3981 * PCI layer indicates to libata via this hook that
3982 * hot-unplug or module unload event has occured.
3983 * Handle this by unregistering all objects associated
3984 * with this PCI device. Free those objects. Then finally
3985 * release PCI resources and disable device.
3988 * Inherited from PCI layer (may sleep).
3991 void ata_pci_remove_one (struct pci_dev
*pdev
)
3993 struct device
*dev
= pci_dev_to_dev(pdev
);
3994 struct ata_host_set
*host_set
= dev_get_drvdata(dev
);
3995 struct ata_port
*ap
;
3998 for (i
= 0; i
< host_set
->n_ports
; i
++) {
3999 ap
= host_set
->ports
[i
];
4001 scsi_remove_host(ap
->host
);
4004 free_irq(host_set
->irq
, host_set
);
4005 if (host_set
->ops
->host_stop
)
4006 host_set
->ops
->host_stop(host_set
);
4007 if (host_set
->mmio_base
)
4008 iounmap(host_set
->mmio_base
);
4010 for (i
= 0; i
< host_set
->n_ports
; i
++) {
4011 ap
= host_set
->ports
[i
];
4013 ata_scsi_release(ap
->host
);
4015 if ((ap
->flags
& ATA_FLAG_NO_LEGACY
) == 0) {
4016 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
4018 if (ioaddr
->cmd_addr
== 0x1f0)
4019 release_region(0x1f0, 8);
4020 else if (ioaddr
->cmd_addr
== 0x170)
4021 release_region(0x170, 8);
4024 scsi_host_put(ap
->host
);
4029 pci_release_regions(pdev
);
4030 pci_disable_device(pdev
);
4031 dev_set_drvdata(dev
, NULL
);
4034 /* move to PCI subsystem */
4035 int pci_test_config_bits(struct pci_dev
*pdev
, struct pci_bits
*bits
)
4037 unsigned long tmp
= 0;
4039 switch (bits
->width
) {
4042 pci_read_config_byte(pdev
, bits
->reg
, &tmp8
);
4048 pci_read_config_word(pdev
, bits
->reg
, &tmp16
);
4054 pci_read_config_dword(pdev
, bits
->reg
, &tmp32
);
4065 return (tmp
== bits
->val
) ? 1 : 0;
4067 #endif /* CONFIG_PCI */
4070 static int __init
ata_init(void)
4072 ata_wq
= create_workqueue("ata");
4076 printk(KERN_DEBUG
"libata version " DRV_VERSION
" loaded.\n");
4080 static void __exit
ata_exit(void)
4082 destroy_workqueue(ata_wq
);
module_init(ata_init);
module_exit(ata_exit);

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_chk_err);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_id_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#endif /* CONFIG_PCI */