/*
 *  libata-sff.c - helper library for PCI IDE BMDMA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2006 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/libata.h>
#include <linux/highmem.h>
const struct ata_port_operations ata_sff_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_prep		= ata_sff_qc_prep,
	.qc_issue		= ata_sff_qc_issue,
	.qc_fill_rtf		= ata_sff_qc_fill_rtf,

	.freeze			= ata_sff_freeze,
	.thaw			= ata_sff_thaw,
	.prereset		= ata_sff_prereset,
	.softreset		= ata_sff_softreset,
	.hardreset		= sata_sff_hardreset,
	.postreset		= ata_sff_postreset,
	.drain_fifo		= ata_sff_drain_fifo,
	.error_handler		= ata_sff_error_handler,
	.post_internal_cmd	= ata_sff_post_internal_cmd,

	.sff_dev_select		= ata_sff_dev_select,
	.sff_check_status	= ata_sff_check_status,
	.sff_tf_load		= ata_sff_tf_load,
	.sff_tf_read		= ata_sff_tf_read,
	.sff_exec_command	= ata_sff_exec_command,
	.sff_data_xfer		= ata_sff_data_xfer,
	.sff_irq_clear		= ata_sff_irq_clear,

	.lost_interrupt		= ata_sff_lost_interrupt,

	.port_start		= ata_sff_port_start,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);

const struct ata_port_operations ata_bmdma_port_ops = {
	.inherits		= &ata_sff_port_ops,

	.mode_filter		= ata_bmdma_mode_filter,

	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
};
EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);

const struct ata_port_operations ata_bmdma32_port_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.sff_data_xfer		= ata_sff_data_xfer32,
	.port_start		= ata_sff_port_start32,
};
EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
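/*
 * Usage note (illustrative sketch, not part of the original file): a
 * low-level driver normally does not fill in every operation itself.
 * Instead it declares its own ata_port_operations that inherits from one
 * of the tables above and overrides only what its hardware needs.  A
 * hypothetical PATA BMDMA driver might do:
 *
 *	static struct ata_port_operations my_pata_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *		.set_piomode	= my_set_piomode,
 *	};
 *
 * "my_pata_ops" and "my_set_piomode" are made-up names; .inherits,
 * .cable_detect and ata_cable_40wire are the real libata facilities used.
 */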
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[pi].addr = cpu_to_le32(addr);
			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
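/*
 * Worked example (added for illustration; the numbers are hypothetical):
 * a single 0x8000-byte segment starting at bus address 0x1E000 crosses a
 * 64K boundary, so the loop above emits two PRD entries:
 *
 *	PRD[0] = (0x0001E000, 0x2000)	up to the 64K boundary
 *	PRD[1] = (0x00020000, 0x6000)	remainder
 *
 * and ATA_PRD_EOT is then set in the flags_len word of the last entry.
 */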
/**
 *	ata_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command. Perform the fill
 *	so that we avoid writing any length 64K records for
 *	controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			ap->prd[pi].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec
				   says */
				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[pi].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
/**
 *	ata_sff_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sff_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg(qc);
}
EXPORT_SYMBOL_GPL(ata_sff_qc_prep);

/**
 *	ata_sff_dumb_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg_dumb(qc);
}
EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
/**
 *	ata_sff_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and return its value. This also clears pending interrupts
 *	from this register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_sff_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}
EXPORT_SYMBOL_GPL(ata_sff_check_status);

/**
 *	ata_sff_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile alternate status register for
 *	currently-selected device and return its value.
 *
 *	Note: may NOT be used as the check_altstatus() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_sff_altstatus(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		return ap->ops->sff_check_altstatus(ap);

	return ioread8(ap->ioaddr.altstatus_addr);
}
/**
 *	ata_sff_irq_status - Check if the device is busy
 *	@ap: port where the device is
 *
 *	Determine if the port is currently busy. Uses altstatus
 *	if available in order to avoid clearing shared IRQ status
 *	when finding an IRQ source. Non ctl capable devices don't
 *	share interrupt lines fortunately for us.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_sff_irq_status(struct ata_port *ap)
{
	u8 status;

	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		status = ata_sff_altstatus(ap);
		/* Not us: We are busy */
		if (status & ATA_BUSY)
			return status;
	}
	/* Clear INTRQ latch */
	status = ap->ops->sff_check_status(ap);
	return status;
}
/**
 *	ata_sff_sync - Flush writes
 *	@ap: Port to wait for.
 *
 *	CAUTION:
 *	If we have an mmio device with no ctl and no altstatus
 *	method this will fail. No such devices are known to exist.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_sff_sync(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		ap->ops->sff_check_altstatus(ap);
	else if (ap->ioaddr.altstatus_addr)
		ioread8(ap->ioaddr.altstatus_addr);
}
/**
 *	ata_sff_pause - Flush writes and wait 400nS
 *	@ap: Port to pause for.
 *
 *	CAUTION:
 *	If we have an mmio device with no ctl and no altstatus
 *	method this will fail. No such devices are known to exist.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_pause(struct ata_port *ap)
{
	ata_sff_sync(ap);
	ndelay(400);
}
EXPORT_SYMBOL_GPL(ata_sff_pause);
/**
 *	ata_sff_dma_pause - Pause before commencing DMA
 *	@ap: Port to pause for.
 *
 *	Perform I/O fencing and ensure sufficient cycle delays occur
 *	for the HDMA1:0 transition.
 */
void ata_sff_dma_pause(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		/* An altstatus read will cause the needed delay without
		   messing up the IRQ status */
		ata_sff_altstatus(ap);
		return;
	}
	/* There are no DMA controllers without ctl. BUG here to ensure
	   we never violate the HDMA1:0 transition timing and risk
	   corruption. */
	BUG();
}
EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
/**
 *	ata_sff_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout in msecs
 *	@tmout: overall timeout in msecs
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_sff_busy_sleep(struct ata_port *ap,
		       unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = ata_deadline(timer_start, tmout_pat);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = ata_deadline(timer_start, tmout);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ap->ops->sff_check_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				DIV_ROUND_UP(tmout, 1000), status);
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
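/*
 * Usage sketch (illustrative, not from the original file): both timeouts
 * are given in milliseconds.  A caller probing a possibly slow device
 * could warn after the short timeout and give up after the long one:
 *
 *	rc = ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
 *	if (rc)
 *		return rc;
 *
 * ATA_TMOUT_BOOT_QUICK and ATA_TMOUT_BOOT are the stock libata boot
 * timeouts; any other msec values work as well.
 */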
static int ata_sff_check_ready(struct ata_link *link)
{
	u8 status = link->ap->ops->sff_check_status(link->ap);

	return ata_check_ready(status);
}

/**
 *	ata_sff_wait_ready - sleep until BSY clears, or timeout
 *	@link: SFF link to wait ready status for
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
{
	return ata_wait_ready(link, deadline, ata_sff_check_ready);
}
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
/**
 *	ata_sff_set_devctl - Write device control reg
 *	@ap: port where the device is
 *	@ctl: value to write
 *
 *	Writes ATA taskfile device control register.
 *
 *	Note: may NOT be used as the sff_set_devctl() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
{
	if (ap->ops->sff_set_devctl)
		ap->ops->sff_set_devctl(ap, ctl);
	else
		iowrite8(ctl, ap->ioaddr.ctl_addr);
}
/**
 *	ata_sff_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
}
EXPORT_SYMBOL_GPL(ata_sff_dev_select);
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_sff_dev_select(), which
 *	additionally provides the services of inserting the proper
 *	pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->sff_dev_select(ap, device);

	if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
		msleep(150);
	if (wait)
		ata_wait_idle(ap);
}
/**
 *	ata_sff_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
 *	wait for idle, clear any pending interrupts.
 *
 *	Note: may NOT be used as the sff_irq_on() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	if (ap->ops->sff_irq_on) {
		ap->ops->sff_irq_on(ap);
		return;
	}

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
		ata_sff_set_devctl(ap, ap->ctl);
	ata_wait_idle(ap);

	ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);
/**
 *	ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
 *	@ap: Port associated with this ATA transaction.
 *
 *	Clear interrupt and error flags in DMA status register.
 *
 *	May be used as the irq_clear() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sff_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	if (!mmio)
		return;

	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
/**
 *	ata_sff_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		WARN_ON_ONCE(!ioaddr->ctl_addr);
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		iowrite8(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
/**
 *	ata_sff_tf_read - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf. Assumes the device has a fully SFF compliant task file
 *	layout and behaviour. If your device does not (eg has a different
 *	status method) then you will need to provide a replacement tf_read.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_sff_check_status(ap);
	tf->feature = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ioaddr->ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
			tf->hob_feature = ioread8(ioaddr->error_addr);
			tf->hob_nsect = ioread8(ioaddr->nsect_addr);
			tf->hob_lbal = ioread8(ioaddr->lbal_addr);
			tf->hob_lbam = ioread8(ioaddr->lbam_addr);
			tf->hob_lbah = ioread8(ioaddr->lbah_addr);
			iowrite8(tf->ctl, ioaddr->ctl_addr);
			ap->last_ctl = tf->ctl;
		} else
			WARN_ON_ONCE(1);
	}
}
EXPORT_SYMBOL_GPL(ata_sff_tf_read);
/**
 *	ata_sff_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA command, with proper synchronization with interrupt
 *	handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_sff_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_exec_command);
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->sff_tf_load(ap, tf);
	ap->ops->sff_exec_command(ap, tf);
}
/**
 *	ata_sff_data_xfer - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
			       unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing byte, if any. */
	if (unlikely(buflen & 0x01)) {
		unsigned char pad[2];

		/* Point buf to the tail of buffer */
		buf += buflen - 1;

		/*
		 * Use io*16_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			ioread16_rep(data_addr, pad, 1);
			*buf = pad[0];
		} else {
			pad[0] = *buf;
			iowrite16_rep(data_addr, pad, 1);
		}
		words++;
	}

	return words << 1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
/**
 *	ata_sff_data_xfer32 - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO using 32bit
 *	I/O operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 2;
	int slop = buflen & 3;

	if (!(ap->pflags & ATA_PFLAG_PIO32))
		return ata_sff_data_xfer(dev, buf, buflen, rw);

	/* Transfer multiple of 4 bytes */
	if (rw == READ)
		ioread32_rep(data_addr, buf, words);
	else
		iowrite32_rep(data_addr, buf, words);

	/* Transfer trailing bytes, if any */
	if (unlikely(slop)) {
		unsigned char pad[4];

		/* Point buf to the tail of buffer */
		buf += buflen - slop;

		/*
		 * Use io*_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			if (slop < 3)
				ioread16_rep(data_addr, pad, 1);
			else
				ioread32_rep(data_addr, pad, 1);
			memcpy(buf, pad, slop);
		} else {
			memcpy(pad, buf, slop);
			if (slop < 3)
				iowrite16_rep(data_addr, pad, 1);
			else
				iowrite32_rep(data_addr, pad, 1);
		}
	}
	return (buflen + 1) & ~1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
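/*
 * Note (added for clarity, not part of the original file): the 32-bit path
 * above is only taken when ATA_PFLAG_PIO32 is set on the port; otherwise it
 * falls back to the 16-bit ata_sff_data_xfer().  Drivers that wire up
 * ata_bmdma32_port_ops are expected to get this flag set for them through
 * the ata_sff_port_start32() port_start hook.
 */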
/**
 *	ata_sff_data_xfer_noirq - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
				     unsigned int buflen, int rw)
{
	unsigned long flags;
	unsigned int consumed;

	local_irq_save(flags);
	consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
	local_irq_restore(flags);

	return consumed;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
				       do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
				       do_write);
	}

	if (!do_write && !PageSlab(page))
		flush_dcache_page(page);

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}
/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON_ONCE(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_sff_sync(qc->ap); /* flush */
}
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON_ONCE(qc->dev->cdb_len < 12);

	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_sff_sync(ap);
	/* FIXME: If the CDB is for DMA do we need to do the transition delay
	   or is bmdma_start guaranteed to do it ? */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	default:
		BUG();
	}
}
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		consumed = ap->ops->sff_data_xfer(dev, buf + offset,
						  count, rw);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		consumed = ap->ops->sff_data_xfer(dev, buf + offset,
						  count, rw);
	}

	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/*
	 * There used to be a  WARN_ON_ONCE(qc->cursg && count != consumed);
	 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
	 * check correctly as it doesn't know if it is the last request being
	 * made. Somebody should implement a proper sanity check.
	 */
	if (bytes)
		goto next_sg;
	return 0;
}
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->sff_tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & (1 << 0)))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	if (unlikely(!bytes))
		goto atapi_check;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_sff_sync(ap); /* flush */

	return;

 atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
 err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
/**
 *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *
 *	RETURNS:
 *	1 if ok in workqueue, 0 otherwise.
 */
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		if (ata_is_atapi(qc->tf.protocol) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ata_sff_irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ata_sff_irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
/**
 *	ata_sff_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		     u8 status, int in_wq)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned long flags = 0;
	int poll_next;

	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_sff_qc_issue() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else {
				/* HSM violation. Let EH handle this */
				ata_ehi_push_desc(ehi,
					"ST_FIRST: !(DRQ|ERR|DF)");
				qc->err_mask |= AC_ERR_HSM;
			}

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_ehi_push_desc(ehi, "ST_FIRST: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_ehi_push_desc(ehi, "ST-ATAPI: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF))) {
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;

					/* If diagnostic failed and this is
					 * IDENTIFY, it's likely a phantom
					 * device.  Mark hint.
					 */
					if (qc->dev->horkage &
					    ATA_HORKAGE_DIAGNOSTIC)
						qc->err_mask |=
							AC_ERR_NODEV_HINT;
				} else {
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition.  Mark hint.
					 */
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"DRQ=0 without device error, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;
				}

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ)) {
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"BUSY|DRQ persists on ERR|DF, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM;
				}

				/* There are oddball controllers with
				 * status register stuck at 0x7f and
				 * lbal/m/h at zero which makes it
				 * pass all other presence detection
				 * mechanisms we have.  Set NODEV_HINT
				 * for it.  Kernel bz#7241.
				 */
				if (status == 0x7f)
					qc->err_mask |= AC_ERR_NODEV_HINT;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_sff_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
/**
 *	ata_sff_qc_issue - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATAPI_PROT_PIO:
		case ATAPI_PROT_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATAPI_PROT_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_pio_queue_task(ap, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);		/* set up bmdma */
		ap->ops->bmdma_start(qc);		/* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_pio_queue_task(ap, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_pio_queue_task(ap, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_pio_queue_task(ap, qc, 0);
		break;

	case ATAPI_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);		/* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_pio_queue_task(ap, qc, 0);
		break;

	default:
		WARN_ON_ONCE(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
/**
 *	ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
 *	@qc: qc to fill result TF for
 *
 *	@qc is finished and result TF needs to be filled.  Fill it
 *	using ->sff_tf_read.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	true indicating that result TF is successfully filled.
 */
bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
	return true;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
/**
 *	ata_sff_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_sff_host_intr(struct ata_port *ap,
			       struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;
	bool bmdma_stopped = false;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.  No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATAPI_PROT_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);
			bmdma_stopped = true;

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check main status, clearing INTRQ if needed */
	status = ata_sff_irq_status(ap);
	if (status & ATA_BUSY) {
		if (bmdma_stopped) {
			/* BMDMA engine is already stopped, we're screwed */
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
		} else
			goto idle_irq;
	}

	/* ack bmdma irq events */
	ap->ops->sff_irq_clear(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATAPI_PROT_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->sff_check_status(ap);
		ap->ops->sff_irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
EXPORT_SYMBOL_GPL(ata_sff_host_intr);
/**
 *	ata_sff_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_sff_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	bool retried = false;
	unsigned int i;
	unsigned int handled, idle, polling;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

retry:
	handled = idle = polling = 0;
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		if (unlikely(ap->flags & ATA_FLAG_DISABLED))
			continue;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc) {
			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
				handled |= ata_sff_host_intr(ap, qc);
			else
				polling |= 1 << i;
		} else
			idle |= 1 << i;
	}

	/*
	 * If no port was expecting IRQ but the controller is actually
	 * asserting IRQ line, nobody cared will ensue.  Check IRQ
	 * pending status if available and clear spurious IRQ.
	 */
	if (!handled && !retried) {
		bool retry = false;

		for (i = 0; i < host->n_ports; i++) {
			struct ata_port *ap = host->ports[i];

			if (polling & (1 << i))
				continue;

			if (!ap->ops->sff_irq_check ||
			    !ap->ops->sff_irq_check(ap))
				continue;

			if (idle & (1 << i)) {
				ap->ops->sff_check_status(ap);
				ap->ops->sff_irq_clear(ap);
			} else {
				/* clear INTRQ and check if BUSY cleared */
				if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
					retry |= true;
				/*
				 * With command in flight, we can't do
				 * sff_irq_clear() w/o racing with completion.
				 */
			}
		}

		if (retry) {
			retried = true;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
EXPORT_SYMBOL_GPL(ata_sff_interrupt);
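/*
 * Usage sketch (illustrative, not part of the original file): this handler
 * is what SFF drivers typically pass to ata_host_activate(), either
 * directly or indirectly via ata_pci_sff_activate_host(), e.g.:
 *
 *	rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
 *			       IRQF_SHARED, &my_sht);
 *
 * "my_sht" stands in for the driver's scsi_host_template; the remaining
 * identifiers are the real libata/PCI facilities involved.
 */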
/**
 *	ata_sff_lost_interrupt - Check for an apparent lost interrupt
 *	@ap: port that appears to have timed out
 *
 *	Called from the libata error handlers when the core code suspects
 *	an interrupt has been lost. If it has complete anything we can and
 *	then return. Interface must support altstatus for this faster
 *	recovery to occur.
 *
 *	Locking:
 *	Caller holds host lock
 */
void ata_sff_lost_interrupt(struct ata_port *ap)
{
	u8 status;
	struct ata_queued_cmd *qc;

	/* Only one outstanding command per SFF channel */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	/* Check we have a live one.. */
	if (qc == NULL || !(qc->flags & ATA_QCFLAG_ACTIVE))
		return;
	/* We cannot lose an interrupt on a polled command */
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return;
	/* See if the controller thinks it is still busy - if so the command
	   isn't a lost IRQ but is still in progress */
	status = ata_sff_altstatus(ap);
	if (status & ATA_BUSY)
		return;

	/* There was a command running, we are no longer busy and we have
	   no interrupt. */
	ata_port_printk(ap, KERN_WARNING, "lost interrupt (Status 0x%x)\n",
								status);
	/* Run the host interrupt logic as if the interrupt had not been
	   lost */
	ata_sff_host_intr(ap, qc);
}
EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
/**
 *	ata_sff_freeze - Freeze SFF controller port
 *	@ap: port to freeze
 *
 *	Freeze SFF controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_freeze(struct ata_port *ap)
{
	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
		ata_sff_set_devctl(ap, ap->ctl);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
	 */
	ap->ops->sff_check_status(ap);

	ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_freeze);
/**
 *	ata_sff_thaw - Thaw SFF controller port
 *	@ap: port to thaw
 *
 *	Thaw SFF controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ap->ops->sff_check_status(ap);
	ap->ops->sff_irq_clear(ap);
	ata_sff_irq_on(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_thaw);
/**
 *	ata_sff_prereset - prepare SFF link for reset
 *	@link: SFF link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	SFF link @link is about to be reset.  Initialize it.  It first
 *	calls ata_std_prereset() and wait for !BSY if the port is
 *	being softreset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = ata_std_prereset(link, deadline);
	if (rc)
		return rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* wait for !BSY if we don't know that no device is attached */
	if (!ata_link_offline(link)) {
		rc = ata_sff_wait_ready(link, deadline);
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_prereset);
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->sff_dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 *	ata_sff_dev_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->sff_dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->sff_tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: continue and warn later */
	if (err == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) &&
		   (ap->ops->sff_check_status(ap) == 0))
		class = ATA_DEV_NONE;

	return class;
}
EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
/**
 *	ata_sff_wait_after_reset - wait for devices to become ready after reset
 *	@link: SFF link which is just reset
 *	@devmask: mask of present devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Wait devices attached to SFF @link to become ready after
 *	reset.  It contains preceding 150ms wait to avoid accessing TF
 *	status register too early.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -ENODEV if some or all of devices in @devmask
 *	don't seem to exist.  -errno on other errors.
 */
int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	msleep(ATA_WAIT_AFTER_RESET);

	/* always check readiness of the master device */
	rc = ata_sff_wait_ready(link, deadline);
	/* -ENODEV means the odd clown forgot the D7 pulldown resistor
	 * and TF status is 0xff, bail out on it too.
	 */
	if (rc)
		return rc;

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->sff_dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_sff_wait_ready(link, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->sff_dev_select(ap, 0);
	if (dev1)
		ap->ops->sff_dev_select(ap, 1);
	if (dev0)
		ap->ops->sff_dev_select(ap, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	ap->last_ctl = ap->ctl;

	/* wait the port to become ready */
	return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
}
/**
 *	ata_sff_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->sff_dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_sff_dev_classify(&link->device[1],
						  devmask & (1 << 1), &err);

	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_softreset);
/**
 *	sata_sff_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	bool online;
	int rc;

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ata_sff_check_ready);
	if (online)
		*class = ata_sff_dev_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return rc;
}
EXPORT_SYMBOL_GPL(sata_sff_hardreset);
/**
 *	ata_sff_postreset - SFF postreset callback
 *	@link: the target SFF ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  It first
 *	calls ata_std_postreset() and performs SFF specific postreset
 *	processing.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;

	ata_std_postreset(link, classes);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->sff_dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->sff_dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
		ata_sff_set_devctl(ap, ap->ctl);
		ap->last_ctl = ap->ctl;
	}
}
EXPORT_SYMBOL_GPL(ata_sff_postreset);
/**
 *	ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
 *	@qc: command
 *
 *	Drain the FIFO and device of any stuck data following a command
 *	failing to complete. In some cases this is necessary before a
 *	reset will recover the device.
 *
 */
void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
{
	int count;
	struct ata_port *ap;

	/* We only need to flush incoming data when a command was running */
	if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
		return;

	ap = qc->ap;
	/* Drain up to 64K of data before we give up this recovery method */
	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
						&& count < 65536; count += 2)
		ioread16(ap->ioaddr.data_addr);

	/* Can become DEBUG later */
	if (count)
		ata_port_printk(ap, KERN_DEBUG,
			"drained %d bytes to clear DRQ.\n", count);

}
EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);

/**
 * ata_sff_error_handler - Stock error handler for SFF/BMDMA controller
 * @ap: port to handle error for
 *
 * Stock error handler for SFF controller.  It can handle both
 * PATA and SATA controllers.  Many controllers should be able to
 * use this EH as-is or with some added handling before and
 * after.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_sff_error_handler(struct ata_port *ap)
{
        ata_reset_fn_t softreset = ap->ops->softreset;
        ata_reset_fn_t hardreset = ap->ops->hardreset;
        struct ata_queued_cmd *qc;
        unsigned long flags;
        int thaw = 0;

        qc = __ata_qc_from_tag(ap, ap->link.active_tag);
        if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
                qc = NULL;

        /* reset PIO HSM and stop DMA engine */
        spin_lock_irqsave(ap->lock, flags);

        ap->hsm_task_state = HSM_ST_IDLE;

        if (ap->ioaddr.bmdma_addr &&
            qc && (qc->tf.protocol == ATA_PROT_DMA ||
                   qc->tf.protocol == ATAPI_PROT_DMA)) {
                u8 host_stat;

                host_stat = ap->ops->bmdma_status(ap);

                /* BMDMA controllers indicate host bus error by
                 * setting DMA_ERR bit and timing out.  As it wasn't
                 * really a timeout event, adjust error mask and
                 * cancel frozen state.
                 */
                if (qc->err_mask == AC_ERR_TIMEOUT
                                        && (host_stat & ATA_DMA_ERR)) {
                        qc->err_mask = AC_ERR_HOST_BUS;
                        thaw = 1;
                }

                ap->ops->bmdma_stop(qc);
        }

        /* if we're gonna thaw, make sure IRQ is clear */
        if (thaw) {
                ap->ops->sff_check_status(ap);
                ap->ops->sff_irq_clear(ap);

                spin_unlock_irqrestore(ap->lock, flags);
                ata_eh_thaw_port(ap);
                spin_lock_irqsave(ap->lock, flags);
        }

        /* We *MUST* do FIFO draining before we issue a reset as several
         * devices helpfully clear their internal state and will lock solid
         * if we touch the data port post reset. Pass qc in case anyone wants
         * to do different PIO/DMA recovery or has per command fixups.
         */
        if (ap->ops->drain_fifo)
                ap->ops->drain_fifo(qc);

        spin_unlock_irqrestore(ap->lock, flags);

        /* PIO and DMA engines have been stopped, perform recovery */

        /* Ignore ata_sff_softreset if ctl isn't accessible and
         * built-in hardresets if SCR access isn't available.
         */
        if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
                softreset = NULL;
        if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
                hardreset = NULL;

        ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
                  ap->ops->postreset);
}
EXPORT_SYMBOL_GPL(ata_sff_error_handler);
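
/*
 * Illustrative usage sketch (not part of the original file): as the comment
 * above notes, many drivers can use this EH as-is or wrap it.  A hypothetical
 * driver needing chip-specific quiescing before recovery might do:
 *
 *	static void pata_foo_error_handler(struct ata_port *ap)
 *	{
 *		pata_foo_quiesce_chip(ap);	(hypothetical helper)
 *		ata_sff_error_handler(ap);
 *	}
 *
 * and point .error_handler in its ata_port_operations at the wrapper.
 */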

/**
 * ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
 * @qc: internal command to clean up
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        unsigned long flags;

        spin_lock_irqsave(ap->lock, flags);

        ap->hsm_task_state = HSM_ST_IDLE;

        if (ap->ioaddr.bmdma_addr)
                ap->ops->bmdma_stop(qc);

        spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);

/**
 * ata_sff_port_start - Set port up for dma.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized.  Allocates space for PRD table if the device
 * is DMA capable SFF.
 *
 * May be used as the port_start() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
int ata_sff_port_start(struct ata_port *ap)
{
        if (ap->ioaddr.bmdma_addr)
                return ata_port_start(ap);
        return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_port_start);

/**
 * ata_sff_port_start32 - Set port up for dma.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized.  Allocates space for PRD table if the device
 * is DMA capable SFF.
 *
 * May be used as the port_start() entry in ata_port_operations for
 * devices that are capable of 32bit PIO.
 *
 * LOCKING:
 * Inherited from caller.
 */
int ata_sff_port_start32(struct ata_port *ap)
{
        ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
        if (ap->ioaddr.bmdma_addr)
                return ata_port_start(ap);
        return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_port_start32);
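
/*
 * Illustrative sketch (not part of the original file): drivers normally pick
 * this up by inheriting ata_bmdma32_port_ops rather than wiring the callback
 * directly; "pata_foo" below is a hypothetical example:
 *
 *	static struct ata_port_operations pata_foo_port_ops = {
 *		.inherits	= &ata_bmdma32_port_ops,
 *		.set_piomode	= pata_foo_set_piomode,	(hypothetical)
 *	};
 */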

/**
 * ata_sff_std_ports - initialize ioaddr with standard port offsets.
 * @ioaddr: IO address structure to be initialized
 *
 * Utility function which initializes data_addr, error_addr,
 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 * device_addr, status_addr, and command_addr to standard offsets
 * relative to cmd_addr.
 *
 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_sff_std_ports(struct ata_ioports *ioaddr)
{
        ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
        ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
        ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
        ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
        ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
        ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
        ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
        ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
        ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
        ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
EXPORT_SYMBOL_GPL(ata_sff_std_ports);
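
/*
 * Illustrative sketch (not part of the original file): a hypothetical legacy
 * or platform driver that has already mapped its command and control blocks
 * (cmd_base/ctl_base below are assumed names) would typically do:
 *
 *	ap->ioaddr.cmd_addr = cmd_base;
 *	ap->ioaddr.altstatus_addr =
 *	ap->ioaddr.ctl_addr = ctl_base;
 *	ata_sff_std_ports(&ap->ioaddr);
 */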

unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
                                    unsigned long xfer_mask)
{
        /* Filter out DMA modes if the device has been configured by
           the BIOS as PIO only */

        if (adev->link->ap->ioaddr.bmdma_addr == NULL)
                xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
        return xfer_mask;
}
EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
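
/*
 * Illustrative sketch (not part of the original file): a hypothetical driver
 * with extra transfer-mode restrictions would usually apply them and then
 * chain to ata_bmdma_mode_filter(), e.g.:
 *
 *	static unsigned long pata_foo_mode_filter(struct ata_device *adev,
 *						  unsigned long xfer_mask)
 *	{
 *		if (pata_foo_short_cable(adev))		(hypothetical check)
 *			xfer_mask &= ~ATA_MASK_UDMA;
 *		return ata_bmdma_mode_filter(adev, xfer_mask);
 *	}
 */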

/**
 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
        u8 dmactl;

        /* load PRD table addr. */
        mb();   /* make sure PRD table writes are visible to controller */
        iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

        /* specify data direction, triple-check start bit is clear */
        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
        dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
        if (!rw)
                dmactl |= ATA_DMA_WR;
        iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

        /* issue r/w command */
        ap->ops->sff_exec_command(ap, &qc->tf);
}
EXPORT_SYMBOL_GPL(ata_bmdma_setup);

/**
 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        u8 dmactl;

        /* start host DMA transaction */
        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
        iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

        /* Strictly, one may wish to issue an ioread8() here, to
         * flush the mmio write.  However, control also passes
         * to the hardware at this point, and it will interrupt
         * us when we are to resume control.  So, in effect,
         * we don't care when the mmio write flushes.
         * Further, a read of the DMA status register _immediately_
         * following the write may not be what certain flaky hardware
         * is expecting, so it is best not to add a readb() without
         * first testing all the MMIO ATA cards/mobos.
         * Or maybe I'm just being paranoid.
         *
         * FIXME: The posting of this write means I/O starts are
         * unnecessarily delayed for MMIO.
         */
}
EXPORT_SYMBOL_GPL(ata_bmdma_start);

/**
 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 * @qc: Command we are ending DMA for
 *
 * Clears the ATA_DMA_START flag in the dma control register
 *
 * May be used as the bmdma_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        void __iomem *mmio = ap->ioaddr.bmdma_addr;

        /* clear start/stop bit */
        iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
                 mmio + ATA_DMA_CMD);

        /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
        ata_sff_dma_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_stop);

/**
 * ata_bmdma_status - Read PCI IDE BMDMA status
 * @ap: Port associated with this ATA transaction.
 *
 * Read and return BMDMA status register.
 *
 * May be used as the bmdma_status() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
u8 ata_bmdma_status(struct ata_port *ap)
{
        return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ata_bmdma_status);
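
/*
 * Illustrative sketch (not part of the original file): callers typically test
 * the returned byte against the ATA_DMA_* bits from <linux/ata.h>:
 *
 *	u8 host_stat = ap->ops->bmdma_status(ap);
 *
 *	if (host_stat & ATA_DMA_ERR)
 *		... host-side transfer error ...
 *	if (host_stat & ATA_DMA_INTR)
 *		... controller has raised an interrupt ...
 */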

#ifdef CONFIG_PCI

/**
 * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
 * @pdev: PCI device
 *
 * Some PCI ATA devices report simplex mode but in fact can be told to
 * enter non simplex mode. This implements the necessary logic to
 * perform the task on such devices. Calling it on other devices will
 * have -undefined- behaviour.
 */
int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
{
        unsigned long bmdma = pci_resource_start(pdev, 4);
        u8 simplex;

        if (bmdma == 0)
                return -ENOENT;

        simplex = inb(bmdma + 0x02);
        outb(simplex & 0x60, bmdma + 0x02);
        simplex = inb(bmdma + 0x02);
        if (simplex & 0x80)
                return -EOPNOTSUPP;
        return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
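
/*
 * Illustrative sketch (not part of the original file): a hypothetical driver
 * that knows its controller merely advertises simplex could call this from
 * its probe path before activating the host:
 *
 *	if (ata_pci_bmdma_clear_simplex(pdev))
 *		dev_printk(KERN_WARNING, &pdev->dev,
 *			   "simplex flag could not be cleared\n");
 */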

/**
 * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
 * @host: target ATA host
 *
 * Acquire PCI BMDMA resources and initialize @host accordingly.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_pci_bmdma_init(struct ata_host *host)
{
        struct device *gdev = host->dev;
        struct pci_dev *pdev = to_pci_dev(gdev);
        int i, rc;

        /* No BAR4 allocation: No DMA */
        if (pci_resource_start(pdev, 4) == 0)
                return 0;

        /* TODO: If we get no DMA mask we should fall back to PIO */
        rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
        if (rc)
                return rc;
        rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
        if (rc)
                return rc;

        /* request and iomap DMA region */
        rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
        if (rc) {
                dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
                return -ENOMEM;
        }
        host->iomap = pcim_iomap_table(pdev);

        for (i = 0; i < 2; i++) {
                struct ata_port *ap = host->ports[i];
                void __iomem *bmdma = host->iomap[4] + 8 * i;

                if (ata_port_is_dummy(ap))
                        continue;

                ap->ioaddr.bmdma_addr = bmdma;
                if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
                    (ioread8(bmdma + 2) & 0x80))
                        host->flags |= ATA_HOST_SIMPLEX;

                ata_port_desc(ap, "bmdma 0x%llx",
                    (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);

static int ata_resources_present(struct pci_dev *pdev, int port)
{
        int i;

        /* Check the PCI resources for this channel are enabled */
        port = port * 2;
        for (i = 0; i < 2; i++) {
                if (pci_resource_start(pdev, port + i) == 0 ||
                    pci_resource_len(pdev, port + i) == 0)
                        return 0;
        }
        return 1;
}

/**
 * ata_pci_sff_init_host - acquire native PCI ATA resources and init host
 * @host: target ATA host
 *
 * Acquire native PCI ATA resources for @host and initialize the
 * first two ports of @host accordingly.  Ports marked dummy are
 * skipped and allocation failure makes the port dummy.
 *
 * Note that native PCI resources are valid even for legacy hosts
 * as we fix up pdev resources array early in boot, so this
 * function can be used for both native and legacy SFF hosts.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 if at least one port is initialized, -ENODEV if no port is
 * available.
 */
int ata_pci_sff_init_host(struct ata_host *host)
{
        struct device *gdev = host->dev;
        struct pci_dev *pdev = to_pci_dev(gdev);
        unsigned int mask = 0;
        int i, rc;

        /* request, iomap BARs and init port addresses accordingly */
        for (i = 0; i < 2; i++) {
                struct ata_port *ap = host->ports[i];
                int base = i * 2;
                void __iomem * const *iomap;

                if (ata_port_is_dummy(ap))
                        continue;

                /* Discard disabled ports.  Some controllers show
                 * their unused channels this way.  Disabled ports are
                 * made dummy.
                 */
                if (!ata_resources_present(pdev, i)) {
                        ap->ops = &ata_dummy_port_ops;
                        continue;
                }

                rc = pcim_iomap_regions(pdev, 0x3 << base,
                                        dev_driver_string(gdev));
                if (rc) {
                        dev_printk(KERN_WARNING, gdev,
                                   "failed to request/iomap BARs for port %d "
                                   "(errno=%d)\n", i, rc);
                        if (rc == -EBUSY)
                                pcim_pin_device(pdev);
                        ap->ops = &ata_dummy_port_ops;
                        continue;
                }
                host->iomap = iomap = pcim_iomap_table(pdev);

                ap->ioaddr.cmd_addr = iomap[base];
                ap->ioaddr.altstatus_addr =
                ap->ioaddr.ctl_addr = (void __iomem *)
                        ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
                ata_sff_std_ports(&ap->ioaddr);

                ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
                        (unsigned long long)pci_resource_start(pdev, base),
                        (unsigned long long)pci_resource_start(pdev, base + 1));

                mask |= 1 << i;
        }

        if (!mask) {
                dev_printk(KERN_ERR, gdev, "no available native port\n");
                return -ENODEV;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);

/**
 * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host
 * @pdev: target PCI device
 * @ppi: array of port_info, must be enough for two ports
 * @r_host: out argument for the initialized ATA host
 *
 * Helper to allocate ATA host for @pdev, acquire all native PCI
 * resources and initialize it accordingly in one go.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_pci_sff_prepare_host(struct pci_dev *pdev,
                             const struct ata_port_info * const *ppi,
                             struct ata_host **r_host)
{
        struct ata_host *host;
        int rc;

        if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
                return -ENOMEM;

        host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
        if (!host) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "failed to allocate ATA host\n");
                rc = -ENOMEM;
                goto err_out;
        }

        rc = ata_pci_sff_init_host(host);
        if (rc)
                goto err_out;

        /* init DMA related stuff */
        rc = ata_pci_bmdma_init(host);
        if (rc)
                goto err_bmdma;

        devres_remove_group(&pdev->dev, NULL);
        *r_host = host;
        return 0;

err_bmdma:
        /* This is necessary because PCI and iomap resources are
         * merged and releasing the top group won't release the
         * acquired resources if some of those have been acquired
         * before entering this function.
         */
        pcim_iounmap_regions(pdev, 0xf);
err_out:
        devres_release_group(&pdev->dev, NULL);
        return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);

/**
 * ata_pci_sff_activate_host - start SFF host, request IRQ and register it
 * @host: target SFF ATA host
 * @irq_handler: irq_handler used when requesting IRQ(s)
 * @sht: scsi_host_template to use when registering the host
 *
 * This is the counterpart of ata_host_activate() for SFF ATA
 * hosts.  This separate helper is necessary because SFF hosts
 * use two separate interrupts in legacy mode.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_pci_sff_activate_host(struct ata_host *host,
                              irq_handler_t irq_handler,
                              struct scsi_host_template *sht)
{
        struct device *dev = host->dev;
        struct pci_dev *pdev = to_pci_dev(dev);
        const char *drv_name = dev_driver_string(host->dev);
        int legacy_mode = 0, rc;

        rc = ata_host_start(host);
        if (rc)
                return rc;

        if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
                u8 tmp8, mask;

                /* TODO: What if one channel is in native mode ... */
                pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
                mask = (1 << 2) | (1 << 0);
                if ((tmp8 & mask) != mask)
                        legacy_mode = 1;
#if defined(CONFIG_NO_ATA_LEGACY)
                /* Some platforms with PCI limits cannot address compat
                   port space. In that case we punt if their firmware has
                   left a device in compatibility mode */
                if (legacy_mode) {
                        printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
                        return -EOPNOTSUPP;
                }
#endif
        }

        if (!devres_open_group(dev, NULL, GFP_KERNEL))
                return -ENOMEM;

        if (!legacy_mode && pdev->irq) {
                rc = devm_request_irq(dev, pdev->irq, irq_handler,
                                      IRQF_SHARED, drv_name, host);
                if (rc)
                        goto out;

                ata_port_desc(host->ports[0], "irq %d", pdev->irq);
                ata_port_desc(host->ports[1], "irq %d", pdev->irq);
        } else if (legacy_mode) {
                if (!ata_port_is_dummy(host->ports[0])) {
                        rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
                                              irq_handler, IRQF_SHARED,
                                              drv_name, host);
                        if (rc)
                                goto out;

                        ata_port_desc(host->ports[0], "irq %d",
                                      ATA_PRIMARY_IRQ(pdev));
                }

                if (!ata_port_is_dummy(host->ports[1])) {
                        rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
                                              irq_handler, IRQF_SHARED,
                                              drv_name, host);
                        if (rc)
                                goto out;

                        ata_port_desc(host->ports[1], "irq %d",
                                      ATA_SECONDARY_IRQ(pdev));
                }
        }

        rc = ata_host_register(host, sht);
out:
        if (rc == 0)
                devres_remove_group(dev, NULL);
        else
                devres_release_group(dev, NULL);

        return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
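
/*
 * Illustrative sketch (not part of the original file): a hypothetical driver
 * that must touch the host between allocation and registration pairs
 * ata_pci_sff_prepare_host() with this function instead of using
 * ata_pci_sff_init_one(); foo_priv and foo_sht below are assumed names:
 *
 *	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
 *	if (rc)
 *		return rc;
 *	host->private_data = foo_priv;
 *	pci_set_master(pdev);
 *	return ata_pci_sff_activate_host(host, ata_sff_interrupt, &foo_sht);
 */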

/**
 * ata_pci_sff_init_one - Initialize/register PCI IDE host controller
 * @pdev: Controller to be initialized
 * @ppi: array of port_info, must be enough for two ports
 * @sht: scsi_host_template to use when registering the host
 * @host_priv: host private_data
 * @hflag: host flags
 *
 * This is a helper function which can be called from a driver's
 * xxx_init_one() probe function if the hardware uses traditional
 * IDE taskfile registers.
 *
 * This function calls pcim_enable_device(), reserves its register
 * regions, sets the DMA mask, enables bus master mode, and then
 * prepares and activates the host.
 *
 * ASSUMPTION:
 * Nobody makes a single channel controller that appears solely as
 * the secondary legacy port on PCI.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 * Zero on success, negative errno-based value on error.
 */
int ata_pci_sff_init_one(struct pci_dev *pdev,
                         const struct ata_port_info * const *ppi,
                         struct scsi_host_template *sht, void *host_priv,
                         int hflag)
{
        struct device *dev = &pdev->dev;
        const struct ata_port_info *pi = NULL;
        struct ata_host *host = NULL;
        int i, rc;

        DPRINTK("ENTER\n");

        /* look up the first valid port_info */
        for (i = 0; i < 2 && ppi[i]; i++) {
                if (ppi[i]->port_ops != &ata_dummy_port_ops) {
                        pi = ppi[i];
                        break;
                }
        }

        if (!pi) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "no valid port_info specified\n");
                return -EINVAL;
        }

        if (!devres_open_group(dev, NULL, GFP_KERNEL))
                return -ENOMEM;

        rc = pcim_enable_device(pdev);
        if (rc)
                goto out;

        /* prepare and activate SFF host */
        rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
        if (rc)
                goto out;
        host->private_data = host_priv;
        host->flags |= hflag;

        pci_set_master(pdev);
        rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
out:
        if (rc == 0)
                devres_remove_group(&pdev->dev, NULL);
        else
                devres_release_group(&pdev->dev, NULL);

        return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
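
/*
 * Illustrative sketch (not part of the original file): the typical
 * single-call form from a hypothetical driver's probe routine
 * ("pata_foo" and pata_foo_sht are assumed names):
 *
 *	static const struct ata_port_info pata_foo_port_info = {
 *		.flags		= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= ATA_PIO4,
 *		.mwdma_mask	= ATA_MWDMA2,
 *		.udma_mask	= ATA_UDMA5,
 *		.port_ops	= &ata_bmdma_port_ops,
 *	};
 *
 *	static int pata_foo_init_one(struct pci_dev *pdev,
 *				     const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] =
 *			{ &pata_foo_port_info, NULL };
 *
 *		return ata_pci_sff_init_one(pdev, ppi, &pata_foo_sht, NULL, 0);
 *	}
 */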

#endif /* CONFIG_PCI */