/*
 *	Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/uaccess.h>

static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, unsigned int nr_bytes, int dequeue)
{
	int ret = 1;
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = rq->hard_nr_sectors << 9;

	if (!blk_fs_request(rq) && error && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	if (!blk_end_request(rq, error, nr_bytes))
		ret = 0;

	if (ret == 0 && dequeue)
		drive->hwif->rq = NULL;

	return ret;
}

/**
 *	ide_end_request		-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@uptodate: progress indicator (zero or negative means failure)
 *	@nr_sectors: number of sectors completed
 *
 *	This is our end_request wrapper function. We complete the I/O,
 *	update the random number input and dequeue the request, which if
 *	it was tagged may be out of order.
 */

int ide_end_request(ide_drive_t *drive, int uptodate, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;
	struct request *rq = drive->hwif->rq;

	if (!nr_bytes) {
		if (blk_pc_request(rq))
			nr_bytes = rq->data_len;
		else
			nr_bytes = rq->hard_cur_sectors << 9;
	}

	return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
}
EXPORT_SYMBOL(ide_end_request);
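
/*
 * Example (illustrative only, not part of the original file): a minimal
 * sketch of how a subdriver's PIO completion handler might use
 * ide_end_request().  The handler name and the one-sector transfer are
 * hypothetical.
 *
 *	static ide_startstop_t example_read_intr(ide_drive_t *drive)
 *	{
 *		// ... transfer one 512-byte sector by PIO ...
 *		ide_end_request(drive, 1, 1);	// success, 1 sector done
 *		return ide_stopped;
 *	}
 */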

/**
 *	ide_end_dequeued_request	-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@rq: the request that was dequeued
 *	@uptodate: progress indicator (zero or negative means failure)
 *	@nr_sectors: number of sectors completed
 *
 *	Complete an I/O that is no longer on the request queue. This
 *	typically occurs when we pull the request and issue a REQUEST_SENSE.
 *	We must still finish the old request but we must not tamper with the
 *	queue in the meantime.
 *
 *	NOTE: This path does not handle barrier, but barrier is not supported
 *	on ide-cd anyway.
 */

int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	BUG_ON(!blk_rq_started(rq));

	return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
}
EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
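
/*
 * Example (illustrative only): completing a request that has already been
 * taken off the queue, e.g. after it was replaced by a REQUEST_SENSE.  The
 * helper name and saved request pointer below are hypothetical.
 *
 *	static void example_finish_old_rq(ide_drive_t *drive,
 *					  struct request *old_rq)
 *	{
 *		// old_rq is no longer drive->hwif->rq, so the dequeued
 *		// variant must be used to finish it in full
 *		ide_end_dequeued_request(drive, old_rq, 1,
 *					 old_rq->hard_nr_sectors);
 *	}
 */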

void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
{
	struct ide_taskfile *tf = &cmd->tf;
	struct request *rq = cmd->rq;

	tf->error = err;
	tf->status = stat;

	drive->hwif->tp_ops->tf_read(drive, cmd);

	if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
		memcpy(rq->special, cmd, sizeof(*cmd));

	if (cmd->tf_flags & IDE_TFLAG_DYN)
		kfree(cmd);
}

void ide_complete_rq(ide_drive_t *drive, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;

	hwif->rq = NULL;

	rq->errors = err;

	if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
				     blk_rq_bytes(rq))))
		BUG();
}
EXPORT_SYMBOL(ide_complete_rq);
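
/*
 * Example (illustrative only): finishing the request that currently owns
 * the port once a command has completed without error; a non-zero second
 * argument would be stored in rq->errors instead.
 *
 *	ide_complete_rq(drive, 0);
 */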

void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	u8 drv_req = blk_special_request(rq) && rq->rq_disk;
	u8 media = drive->media;

	drive->failed_pc = NULL;

	if ((media == ide_floppy && drv_req) || media == ide_tape)
		rq->errors = IDE_DRV_ERROR_GENERAL;

	if ((media == ide_floppy || media == ide_tape) && drv_req)
		ide_complete_rq(drive, 0);
	else
		ide_end_request(drive, 0, 0);
}

static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->lbal    = drive->sect;
	tf->lbam    = drive->cyl;
	tf->lbah    = drive->cyl >> 8;
	tf->device  = (drive->head - 1) | drive->select;
	tf->command = ATA_CMD_INIT_DEV_PARAMS;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->command = ATA_CMD_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->mult_req;
	tf->command = ATA_CMD_SET_MULTI;
}

static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	struct ide_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.protocol = ATA_PROT_NODATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_tf_set_specify_cmd(drive, &cmd.tf);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_tf_set_restore_cmd(drive, &cmd.tf);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		ide_tf_set_setmult_cmd(drive, &cmd.tf);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n",
			drive->name, special);
		return ide_stopped;
	}

	cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
		       IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &cmd);

	return ide_started;
}

/*
 * do_special		-	issue some special commands
 * @drive: drive the command is for
 *
 * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
 *
 * It used to do much more, but has been scaled back.
 */

static ide_startstop_t do_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (drive->media == ide_disk)
		return ide_disk_special(drive);

	s->all = 0;
	drive->mult_req = 0;

	return ide_stopped;
}

void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;
	struct scatterlist *sg = hwif->sg_table;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		cmd->sg_nents = 1;
	} else if (!rq->bio) {
		sg_init_one(sg, rq->data, rq->data_len);
		cmd->sg_nents = 1;
	} else
		cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
}
EXPORT_SYMBOL_GPL(ide_map_sg);

void ide_init_sg_cmd(struct ide_cmd *cmd, int nsect)
{
	cmd->nsect = cmd->nleft = nsect;
}
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
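
/*
 * Example (illustrative only): how a taskfile submission path sets up the
 * scatter/gather state for a PIO data command before issuing it; this
 * mirrors what execute_drive_cmd() below does for REQ_TYPE_ATA_TASKFILE
 * requests.  The request is assumed to be the one currently owned by the
 * port.
 *
 *	if (cmd->protocol == ATA_PROT_PIO) {
 *		ide_init_sg_cmd(cmd, rq->nr_sectors);
 *		ide_map_sg(drive, rq);
 *	}
 */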

/**
 *	execute_drive_cmd	-	issue special drive command
 *	@drive: the drive to issue the command on
 *	@rq: the request structure holding the command
 *
 *	execute_drive_cmd() issues a special drive command, usually
 *	initiated by ioctl() from the external hdparm program. The
 *	command can be a drive command, drive task or taskfile
 *	operation. Weirdly you can call it with NULL to wait for
 *	all commands to finish. Don't do this as that is due to change.
 */

static ide_startstop_t execute_drive_cmd(ide_drive_t *drive,
					 struct request *rq)
{
	struct ide_cmd *cmd = rq->special;

	if (cmd) {
		if (cmd->protocol == ATA_PROT_PIO) {
			ide_init_sg_cmd(cmd, rq->nr_sectors);
			ide_map_sg(drive, rq);
		}

		return do_rw_taskfile(drive, cmd);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_complete_rq(drive, 0);

	return ide_stopped;
}

static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	u8 cmd = rq->cmd[0];

	switch (cmd) {
	case REQ_PARK_HEADS:
	case REQ_UNPARK_HEADS:
		return ide_do_park_unpark(drive, rq);
	case REQ_DEVSET_EXEC:
		return ide_do_devset(drive, rq);
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		blk_dump_rq_flags(rq, "ide_special_rq - bad request");
		ide_end_request(drive, 0, 0);
		return ide_stopped;
	}
}

/*
 * start_request - start of I/O and command issuing for IDE
 *
 * start_request() initiates handling of a new I/O request. It
 * accepts commands and I/O (read/write) requests.
 *
 * FIXME: this function needs a rename
 */

static ide_startstop_t start_request(ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		drive->hwif->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n",
			drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		struct ide_driver *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_rq(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && blk_special_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		drv = *(struct ide_driver **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, rq->sector);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}

/**
 *	ide_stall_queue		-	pause an IDE device
 *	@drive: drive to stall
 *	@timeout: time to stall for (jiffies)
 *
 *	ide_stall_queue() can be used by a drive to give excess bandwidth back
 *	to the port by sleeping for timeout jiffies.
 */

void ide_stall_queue(ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->dev_flags |= IDE_DFLAG_SLEEPING;
}
EXPORT_SYMBOL(ide_stall_queue);
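
/*
 * Example (illustrative only): a hypothetical subdriver handler yielding
 * the port for half a second before its next request is serviced.
 *
 *	static void example_defer_retry(ide_drive_t *drive)
 *	{
 *		// sleep for up to HZ/2 jiffies before the next request
 *		// is taken from this drive's queue
 *		ide_stall_queue(drive, HZ / 2);
 *	}
 */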

static inline int ide_lock_port(ide_hwif_t *hwif)
{
	if (hwif->busy)
		return 1;

	hwif->busy = 1;

	return 0;
}

static inline void ide_unlock_port(ide_hwif_t *hwif)
{
	hwif->busy = 0;
}

static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
{
	int rc = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
		if (rc == 0) {
			if (host->get_lock)
				host->get_lock(ide_intr, hwif);
		}
	}
	return rc;
}

static inline void ide_unlock_host(struct ide_host *host)
{
	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (host->release_lock)
			host->release_lock();
		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
	}
}

/*
 * Issue a new request to a device.
 */
void do_ide_request(struct request_queue *q)
{
	ide_drive_t	*drive = q->queuedata;
	ide_hwif_t	*hwif = drive->hwif;
	struct ide_host *host = hwif->host;
	struct request	*rq = NULL;
	ide_startstop_t	startstop;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(q))
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(q);

	spin_unlock_irq(q->queue_lock);

	if (ide_lock_host(host, hwif))
		goto plug_device_2;

	spin_lock_irq(&hwif->lock);

	if (!ide_lock_port(hwif)) {
		ide_hwif_t *prev_port;
repeat:
		prev_port = hwif->host->cur_port;

		if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
			if (time_before(drive->sleep, jiffies)) {
				ide_unlock_port(hwif);
				goto plug_device;
			}
		}

		if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
		    hwif != prev_port) {
			/*
			 * set nIEN for previous port, drives in the
			 * quirk_list may not like intr setups/cleanups
			 */
			if (prev_port && prev_port->cur_dev->quirk_list == 0)
				prev_port->tp_ops->set_irq(prev_port, 0);

			hwif->host->cur_port = hwif;
		}
		hwif->cur_dev = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

		spin_unlock_irq(&hwif->lock);
		spin_lock_irq(q->queue_lock);
		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		spin_unlock_irq(q->queue_lock);
		spin_lock_irq(&hwif->lock);

		if (!rq) {
			ide_unlock_port(hwif);
			goto out;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the elv_next_request()
		 * above to return us whatever is in the queue. Since we call
		 * ide_do_request() ourselves, we end up taking requests while
		 * the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * though. I hope that doesn't happen too much, hopefully not
		 * unless the subdriver triggers such a thing in its own PM
		 * state machine.
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    blk_pm_request(rq) == 0 &&
		    (rq->cmd_flags & REQ_PREEMPT) == 0) {
			/* there should be no pending command at this point */
			ide_unlock_port(hwif);
			goto plug_device;
		}

		hwif->rq = rq;

		spin_unlock_irq(&hwif->lock);
		startstop = start_request(drive, rq);
		spin_lock_irq(&hwif->lock);

		if (startstop == ide_stopped)
			goto repeat;
	} else
		goto plug_device;
out:
	spin_unlock_irq(&hwif->lock);
	if (rq == NULL)
		ide_unlock_host(host);
	spin_lock_irq(q->queue_lock);
	return;

plug_device:
	spin_unlock_irq(&hwif->lock);
	ide_unlock_host(host);
plug_device_2:
	spin_lock_irq(q->queue_lock);

	if (!elv_queue_empty(q))
		blk_plug_device(q);
}

static void ide_plug_device(ide_drive_t *drive)
{
	struct request_queue *q = drive->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!elv_queue_empty(q))
		blk_plug_device(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static int drive_is_ready(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat = 0;

	if (drive->waiting_for_dma)
		return hwif->dma_ops->dma_test_irq(drive);

	if (hwif->io_ports.ctl_addr &&
	    (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
		stat = hwif->tp_ops->read_altstatus(hwif);
	else
		/* Note: this may clear a pending IRQ!! */
		stat = hwif->tp_ops->read_status(hwif);

	if (stat & ATA_BUSY)
		/* drive busy: definitely not interrupting */
		return 0;

	/* drive ready: *might* be interrupting */
	return 1;
}

/**
 *	ide_timer_expiry	-	handle lack of an IDE interrupt
 *	@data: timer callback magic (hwif)
 *
 *	An IDE command has timed out before the expected drive return
 *	occurred. At this point we attempt to clean up the current
 *	mess. If the current handler includes an expiry handler then
 *	we invoke the expiry handler, and providing it is happy the
 *	work is done. If that fails we apply generic recovery rules
 *	invoking the handler and checking the drive DMA status. We
 *	have an excessively incestuous relationship with the DMA
 *	logic that wants cleaning up.
 */

void ide_timer_expiry(unsigned long data)
{
	ide_hwif_t	*hwif = (ide_hwif_t *)data;
	ide_drive_t	*uninitialized_var(drive);
	ide_handler_t	*handler;
	unsigned long	flags;
	int		wait = -1;
	int		plug_device = 0;

	spin_lock_irqsave(&hwif->lock, flags);

	handler = hwif->handler;

	if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
	} else {
		ide_expiry_t *expiry = hwif->expiry;
		ide_startstop_t startstop = ide_stopped;

		drive = hwif->cur_dev;

		if (expiry) {
			wait = expiry(drive);
			if (wait > 0) { /* continue */
				/* reset timer */
				hwif->timer.expires = jiffies + wait;
				hwif->req_gen_timer = hwif->req_gen;
				add_timer(&hwif->timer);
				spin_unlock_irqrestore(&hwif->lock, flags);
				return;
			}
		}
		hwif->handler = NULL;
		/*
		 * We need to simulate a real interrupt when invoking
		 * the handler() function, which means we need to
		 * globally mask the specific IRQ:
		 */
		spin_unlock(&hwif->lock);
		/* disable_irq_nosync ?? */
		disable_irq(hwif->irq);
		/* local CPU only, as if we were handling an interrupt */
		local_irq_disable();
		if (hwif->polling) {
			startstop = handler(drive);
		} else if (drive_is_ready(drive)) {
			if (drive->waiting_for_dma)
				hwif->dma_ops->dma_lost_irq(drive);
			if (hwif->ack_intr)
				hwif->ack_intr(hwif);
			printk(KERN_WARNING "%s: lost interrupt\n",
				drive->name);
			startstop = handler(drive);
		} else {
			if (drive->waiting_for_dma)
				startstop = ide_dma_timeout_retry(drive, wait);
			else
				startstop = ide_error(drive, "irq timeout",
					hwif->tp_ops->read_status(hwif));
		}
		spin_lock_irq(&hwif->lock);
		enable_irq(hwif->irq);
		if (startstop == ide_stopped) {
			ide_unlock_port(hwif);
			plug_device = 1;
		}
	}
	spin_unlock_irqrestore(&hwif->lock, flags);

	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_plug_device(drive);
	}
}

/**
 *	unexpected_intr		-	handle an unexpected IDE interrupt
 *	@irq: interrupt line
 *	@hwif: port being processed
 *
 *	There's nothing really useful we can do with an unexpected interrupt,
 *	other than reading the status register (to clear it), and logging it.
 *	There should be no way that an irq can happen before we're ready for it,
 *	so we needn't worry much about losing an "important" interrupt here.
 *
 *	On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 *	the drive enters "idle", "standby", or "sleep" mode, so if the status
 *	looks "good", we just ignore the interrupt completely.
 *
 *	This routine assumes __cli() is in effect when called.
 *
 *	If an unexpected interrupt happens on irq15 while we are handling irq14
 *	and if the two interfaces are "serialized" (CMD640), then it looks like
 *	we could screw up by interfering with a new request being set up for
 *	irq15.
 *
 *	In reality, this is a non-issue. The new command is not sent unless
 *	the drive is ready to accept one, in which case we know the drive is
 *	not trying to interrupt us. And ide_set_handler() is always invoked
 *	before completing the issuance of any new drive command, so we will not
 *	be accidentally invoked as a result of any valid command completion
 *	interrupt.
 */

static void unexpected_intr(int irq, ide_hwif_t *hwif)
{
	u8 stat = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		/* Try to not flood the console with msgs */
		static unsigned long last_msgtime, count;
		++count;

		if (time_after(jiffies, last_msgtime + HZ)) {
			last_msgtime = jiffies;
			printk(KERN_ERR "%s: unexpected interrupt, "
				"status=0x%02x, count=%ld\n",
				hwif->name, stat, count);
		}
	}
}

/**
 *	ide_intr	-	default IDE interrupt handler
 *	@irq: interrupt number
 *	@dev_id: pointer to the handling port (ide_hwif_t)
 *
 *	This is the default IRQ handler for the IDE layer. You should
 *	not need to override it. If you do be aware it is subtle in
 *	places.
 *
 *	hwif is the interface in the group currently performing
 *	a command. hwif->cur_dev is the drive and hwif->handler is
 *	the IRQ handler to call. As we issue a command the handlers
 *	step through multiple states, reassigning the handler to the
 *	next step in the process. Unlike a smart SCSI controller IDE
 *	expects the main processor to sequence the various transfer
 *	stages. We also manage a poll timer to catch up with most
 *	timeout situations. There are still a few where the handlers
 *	don't ever decide to give up.
 *
 *	The handler eventually returns ide_stopped to indicate the
 *	request completed. At this point we issue the next request
 *	on the port and the process begins again.
 */

irqreturn_t ide_intr(int irq, void *dev_id)
{
	ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
	struct ide_host *host = hwif->host;
	ide_drive_t *uninitialized_var(drive);
	ide_handler_t *handler;
	unsigned long flags;
	ide_startstop_t startstop;
	irqreturn_t irq_ret = IRQ_NONE;
	int plug_device = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (hwif != host->cur_port)
			goto out_early;
	}

	spin_lock_irqsave(&hwif->lock, flags);

	if (hwif->ack_intr && hwif->ack_intr(hwif) == 0)
		goto out;

	handler = hwif->handler;

	if (handler == NULL || hwif->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 */
		if ((host->irq_flags & IRQF_SHARED) == 0) {
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwif);
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
		}
		goto out;
	}

	drive = hwif->cur_dev;

	if (!drive_is_ready(drive))
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device. Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date. Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		goto out;

	hwif->handler = NULL;
	hwif->req_gen++;
	del_timer(&hwif->timer);
	spin_unlock(&hwif->lock);

	if (hwif->port_ops && hwif->port_ops->clear_irq)
		hwif->port_ops->clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();

	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);

	spin_lock_irq(&hwif->lock);
	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	if (startstop == ide_stopped) {
		BUG_ON(hwif->handler);
		ide_unlock_port(hwif);
		plug_device = 1;
	}
	irq_ret = IRQ_HANDLED;
out:
	spin_unlock_irqrestore(&hwif->lock, flags);
out_early:
	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_plug_device(drive);
	}

	return irq_ret;
}
EXPORT_SYMBOL_GPL(ide_intr);
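
/*
 * Example (illustrative only): a host driver with its own interrupt source
 * could delegate to ide_intr() once it has decided the interrupt belongs to
 * its IDE port.  The handler and controller check below are hypothetical;
 * most drivers simply let the core register ide_intr() for them.
 *
 *	static irqreturn_t example_host_intr(int irq, void *dev_id)
 *	{
 *		ide_hwif_t *hwif = dev_id;
 *
 *		// ... check/clear controller-specific interrupt status ...
 *		return ide_intr(irq, hwif);
 *	}
 */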

void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 buf[4] = { 0 };

	while (len > 0) {
		if (write)
			hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
		else
			hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
		len -= 4;
	}
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);
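
/*
 * Example (illustrative only): an ATAPI PIO path might use
 * ide_pad_transfer() to drain bytes the device still wants to send after
 * the request's buffer has been exhausted.  The length variable below is
 * hypothetical.
 *
 *	// device reports more data than we have buffer space for:
 *	// read and discard the excess, 4 bytes at a time
 *	ide_pad_transfer(drive, 0, excess_len);
 */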