1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
58 #include <asm/io.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
61
62 #include "libata.h"
63
64 static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
65 static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
66 static void ata_set_mode(struct ata_port *ap);
67 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
68 static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift);
69 static int fgb(u32 bitmap);
70 static int ata_choose_xfer_mode(const struct ata_port *ap,
71 u8 *xfer_mode_out,
72 unsigned int *xfer_shift_out);
73 static void ata_pio_error(struct ata_port *ap);
74
75 static unsigned int ata_unique_id = 1;
76 static struct workqueue_struct *ata_wq;
77
78 int atapi_enabled = 0;
79 module_param(atapi_enabled, int, 0444);
80 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
81
82 MODULE_AUTHOR("Jeff Garzik");
83 MODULE_DESCRIPTION("Library module for ATA devices");
84 MODULE_LICENSE("GPL");
85 MODULE_VERSION(DRV_VERSION);
86
87 /**
88 * ata_tf_load_pio - send taskfile registers to host controller
89 * @ap: Port to which output is sent
90 * @tf: ATA taskfile register set
91 *
92 * Outputs ATA taskfile to standard ATA host controller.
93 *
94 * LOCKING:
95 * Inherited from caller.
96 */
97
98 static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
99 {
100 struct ata_ioports *ioaddr = &ap->ioaddr;
101 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
102
103 if (tf->ctl != ap->last_ctl) {
104 outb(tf->ctl, ioaddr->ctl_addr);
105 ap->last_ctl = tf->ctl;
106 ata_wait_idle(ap);
107 }
108
109 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
110 outb(tf->hob_feature, ioaddr->feature_addr);
111 outb(tf->hob_nsect, ioaddr->nsect_addr);
112 outb(tf->hob_lbal, ioaddr->lbal_addr);
113 outb(tf->hob_lbam, ioaddr->lbam_addr);
114 outb(tf->hob_lbah, ioaddr->lbah_addr);
115 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
116 tf->hob_feature,
117 tf->hob_nsect,
118 tf->hob_lbal,
119 tf->hob_lbam,
120 tf->hob_lbah);
121 }
122
123 if (is_addr) {
124 outb(tf->feature, ioaddr->feature_addr);
125 outb(tf->nsect, ioaddr->nsect_addr);
126 outb(tf->lbal, ioaddr->lbal_addr);
127 outb(tf->lbam, ioaddr->lbam_addr);
128 outb(tf->lbah, ioaddr->lbah_addr);
129 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
130 tf->feature,
131 tf->nsect,
132 tf->lbal,
133 tf->lbam,
134 tf->lbah);
135 }
136
137 if (tf->flags & ATA_TFLAG_DEVICE) {
138 outb(tf->device, ioaddr->device_addr);
139 VPRINTK("device 0x%X\n", tf->device);
140 }
141
142 ata_wait_idle(ap);
143 }
144
145 /**
146 * ata_tf_load_mmio - send taskfile registers to host controller
147 * @ap: Port to which output is sent
148 * @tf: ATA taskfile register set
149 *
150 * Outputs ATA taskfile to standard ATA host controller using MMIO.
151 *
152 * LOCKING:
153 * Inherited from caller.
154 */
155
156 static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
157 {
158 struct ata_ioports *ioaddr = &ap->ioaddr;
159 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
160
161 if (tf->ctl != ap->last_ctl) {
162 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
163 ap->last_ctl = tf->ctl;
164 ata_wait_idle(ap);
165 }
166
167 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
168 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
169 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
170 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
171 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
172 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
173 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
174 tf->hob_feature,
175 tf->hob_nsect,
176 tf->hob_lbal,
177 tf->hob_lbam,
178 tf->hob_lbah);
179 }
180
181 if (is_addr) {
182 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
183 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
184 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
185 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
186 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
187 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
188 tf->feature,
189 tf->nsect,
190 tf->lbal,
191 tf->lbam,
192 tf->lbah);
193 }
194
195 if (tf->flags & ATA_TFLAG_DEVICE) {
196 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
197 VPRINTK("device 0x%X\n", tf->device);
198 }
199
200 ata_wait_idle(ap);
201 }
202
203
204 /**
205 * ata_tf_load - send taskfile registers to host controller
206 * @ap: Port to which output is sent
207 * @tf: ATA taskfile register set
208 *
209 * Outputs ATA taskfile to standard ATA host controller using MMIO
210 * or PIO as indicated by the ATA_FLAG_MMIO flag.
211 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
212 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
213 * hob_lbal, hob_lbam, and hob_lbah.
214 *
215 * This function waits for idle (!BUSY and !DRQ) after writing
216 * registers. If the control register has a new value, this
217 * function also waits for idle after writing control and before
218 * writing the remaining registers.
219 *
220 * May be used as the tf_load() entry in ata_port_operations.
221 *
222 * LOCKING:
223 * Inherited from caller.
224 */
225 void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
226 {
227 if (ap->flags & ATA_FLAG_MMIO)
228 ata_tf_load_mmio(ap, tf);
229 else
230 ata_tf_load_pio(ap, tf);
231 }
232
233 /**
234 * ata_exec_command_pio - issue ATA command to host controller
235 * @ap: port to which command is being issued
236 * @tf: ATA taskfile register set
237 *
238 * Issues PIO write to ATA command register, with proper
239 * synchronization with interrupt handler / other threads.
240 *
241 * LOCKING:
242 * spin_lock_irqsave(host_set lock)
243 */
244
245 static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
246 {
247 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
248
249 outb(tf->command, ap->ioaddr.command_addr);
250 ata_pause(ap);
251 }
252
253
254 /**
255 * ata_exec_command_mmio - issue ATA command to host controller
256 * @ap: port to which command is being issued
257 * @tf: ATA taskfile register set
258 *
259 * Issues MMIO write to ATA command register, with proper
260 * synchronization with interrupt handler / other threads.
261 *
262 * LOCKING:
263 * spin_lock_irqsave(host_set lock)
264 */
265
266 static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
267 {
268 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
269
270 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
271 ata_pause(ap);
272 }
273
274
275 /**
276 * ata_exec_command - issue ATA command to host controller
277 * @ap: port to which command is being issued
278 * @tf: ATA taskfile register set
279 *
280 * Issues PIO/MMIO write to ATA command register, with proper
281 * synchronization with interrupt handler / other threads.
282 *
283 * LOCKING:
284 * spin_lock_irqsave(host_set lock)
285 */
286 void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
287 {
288 if (ap->flags & ATA_FLAG_MMIO)
289 ata_exec_command_mmio(ap, tf);
290 else
291 ata_exec_command_pio(ap, tf);
292 }
293
294 /**
295 * ata_tf_to_host - issue ATA taskfile to host controller
296 * @ap: port to which command is being issued
297 * @tf: ATA taskfile register set
298 *
299 * Issues ATA taskfile register set to ATA host controller,
300 * with proper synchronization with interrupt handler and
301 * other threads.
302 *
303 * LOCKING:
304 * spin_lock_irqsave(host_set lock)
305 */
306
307 static inline void ata_tf_to_host(struct ata_port *ap,
308 const struct ata_taskfile *tf)
309 {
310 ap->ops->tf_load(ap, tf);
311 ap->ops->exec_command(ap, tf);
312 }
313
314 /**
315 * ata_tf_read_pio - input device's ATA taskfile shadow registers
316 * @ap: Port from which input is read
317 * @tf: ATA taskfile register set for storing input
318 *
319 * Reads ATA taskfile registers for currently-selected device
320 * into @tf.
321 *
322 * LOCKING:
323 * Inherited from caller.
324 */
325
326 static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
327 {
328 struct ata_ioports *ioaddr = &ap->ioaddr;
329
330 tf->command = ata_check_status(ap);
331 tf->feature = inb(ioaddr->error_addr);
332 tf->nsect = inb(ioaddr->nsect_addr);
333 tf->lbal = inb(ioaddr->lbal_addr);
334 tf->lbam = inb(ioaddr->lbam_addr);
335 tf->lbah = inb(ioaddr->lbah_addr);
336 tf->device = inb(ioaddr->device_addr);
337
338 if (tf->flags & ATA_TFLAG_LBA48) {
339 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
340 tf->hob_feature = inb(ioaddr->error_addr);
341 tf->hob_nsect = inb(ioaddr->nsect_addr);
342 tf->hob_lbal = inb(ioaddr->lbal_addr);
343 tf->hob_lbam = inb(ioaddr->lbam_addr);
344 tf->hob_lbah = inb(ioaddr->lbah_addr);
345 }
346 }
347
348 /**
349 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
350 * @ap: Port from which input is read
351 * @tf: ATA taskfile register set for storing input
352 *
353 * Reads ATA taskfile registers for currently-selected device
354 * into @tf via MMIO.
355 *
356 * LOCKING:
357 * Inherited from caller.
358 */
359
360 static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
361 {
362 struct ata_ioports *ioaddr = &ap->ioaddr;
363
364 tf->command = ata_check_status(ap);
365 tf->feature = readb((void __iomem *)ioaddr->error_addr);
366 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
367 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
368 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
369 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
370 tf->device = readb((void __iomem *)ioaddr->device_addr);
371
372 if (tf->flags & ATA_TFLAG_LBA48) {
373 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
374 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
375 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
376 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
377 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
378 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
379 }
380 }
381
382
383 /**
384 * ata_tf_read - input device's ATA taskfile shadow registers
385 * @ap: Port from which input is read
386 * @tf: ATA taskfile register set for storing input
387 *
388 * Reads ATA taskfile registers for currently-selected device
389 * into @tf.
390 *
391 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
392 * is set, also reads the hob registers.
393 *
394 * May be used as the tf_read() entry in ata_port_operations.
395 *
396 * LOCKING:
397 * Inherited from caller.
398 */
399 void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
400 {
401 if (ap->flags & ATA_FLAG_MMIO)
402 ata_tf_read_mmio(ap, tf);
403 else
404 ata_tf_read_pio(ap, tf);
405 }
406
407 /**
408 * ata_check_status_pio - Read device status reg & clear interrupt
409 * @ap: port where the device is
410 *
411 * Reads ATA taskfile status register for currently-selected device
412 * and return its value. This also clears pending interrupts
413 * from this device
414 *
415 * LOCKING:
416 * Inherited from caller.
417 */
418 static u8 ata_check_status_pio(struct ata_port *ap)
419 {
420 return inb(ap->ioaddr.status_addr);
421 }
422
423 /**
424 * ata_check_status_mmio - Read device status reg & clear interrupt
425 * @ap: port where the device is
426 *
427 * Reads ATA taskfile status register for currently-selected device
428 * via MMIO and return its value. This also clears pending interrupts
429 * from this device
430 *
431 * LOCKING:
432 * Inherited from caller.
433 */
434 static u8 ata_check_status_mmio(struct ata_port *ap)
435 {
436 return readb((void __iomem *) ap->ioaddr.status_addr);
437 }
438
439
440 /**
441 * ata_check_status - Read device status reg & clear interrupt
442 * @ap: port where the device is
443 *
444 * Reads ATA taskfile status register for currently-selected device
445 * and return its value. This also clears pending interrupts
446 * from this device
447 *
448 * May be used as the check_status() entry in ata_port_operations.
449 *
450 * LOCKING:
451 * Inherited from caller.
452 */
453 u8 ata_check_status(struct ata_port *ap)
454 {
455 if (ap->flags & ATA_FLAG_MMIO)
456 return ata_check_status_mmio(ap);
457 return ata_check_status_pio(ap);
458 }
459
460
461 /**
462 * ata_altstatus - Read device alternate status reg
463 * @ap: port where the device is
464 *
465 * Reads ATA taskfile alternate status register for
466 * currently-selected device and return its value.
467 *
468 * Note: may NOT be used as the check_altstatus() entry in
469 * ata_port_operations.
470 *
471 * LOCKING:
472 * Inherited from caller.
473 */
474 u8 ata_altstatus(struct ata_port *ap)
475 {
476 if (ap->ops->check_altstatus)
477 return ap->ops->check_altstatus(ap);
478
479 if (ap->flags & ATA_FLAG_MMIO)
480 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
481 return inb(ap->ioaddr.altstatus_addr);
482 }
483
484
485 /**
486 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
487 * @tf: Taskfile to convert
488 * @fis: Buffer into which data will be output
489 * @pmp: Port multiplier port
490 *
491 * Converts a standard ATA taskfile to a Serial ATA
492 * FIS structure (Register - Host to Device).
493 *
494 * LOCKING:
495 * Inherited from caller.
496 */
497
498 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
499 {
500 fis[0] = 0x27; /* Register - Host to Device FIS */
501 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
502 bit 7 indicates Command FIS */
503 fis[2] = tf->command;
504 fis[3] = tf->feature;
505
506 fis[4] = tf->lbal;
507 fis[5] = tf->lbam;
508 fis[6] = tf->lbah;
509 fis[7] = tf->device;
510
511 fis[8] = tf->hob_lbal;
512 fis[9] = tf->hob_lbam;
513 fis[10] = tf->hob_lbah;
514 fis[11] = tf->hob_feature;
515
516 fis[12] = tf->nsect;
517 fis[13] = tf->hob_nsect;
518 fis[14] = 0;
519 fis[15] = tf->ctl;
520
521 fis[16] = 0;
522 fis[17] = 0;
523 fis[18] = 0;
524 fis[19] = 0;
525 }
526
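/*
 * Illustrative sketch (not part of the driver): building a Register -
 * Host to Device FIS for a hypothetical READ DMA EXT command with
 * ata_tf_to_fis(). All taskfile field values below are made up.
 */
#if 0
static void example_build_fis(struct ata_port *ap)
{
	struct ata_taskfile tf;
	u8 fis[20];

	ata_tf_init(ap, &tf, 0);
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_READ_EXT;
	tf.nsect = 8;			/* transfer 8 sectors */
	tf.lbal = 0x00;			/* LBA 0x100000 */
	tf.lbam = 0x00;
	tf.lbah = 0x10;

	ata_tf_to_fis(&tf, fis, 0);	/* pmp 0: no port multiplier */
	/* now fis[0] == 0x27 and fis[1] == 0x80 (Command FIS bit set) */
}
#endif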
527 /**
528 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
529 * @fis: Buffer from which data will be input
530 * @tf: Taskfile to output
531 *
532 * Converts a serial ATA FIS structure to a standard ATA taskfile.
533 *
534 * LOCKING:
535 * Inherited from caller.
536 */
537
538 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
539 {
540 tf->command = fis[2]; /* status */
541 tf->feature = fis[3]; /* error */
542
543 tf->lbal = fis[4];
544 tf->lbam = fis[5];
545 tf->lbah = fis[6];
546 tf->device = fis[7];
547
548 tf->hob_lbal = fis[8];
549 tf->hob_lbam = fis[9];
550 tf->hob_lbah = fis[10];
551
552 tf->nsect = fis[12];
553 tf->hob_nsect = fis[13];
554 }
555
556 static const u8 ata_rw_cmds[] = {
557 /* pio multi */
558 ATA_CMD_READ_MULTI,
559 ATA_CMD_WRITE_MULTI,
560 ATA_CMD_READ_MULTI_EXT,
561 ATA_CMD_WRITE_MULTI_EXT,
562 0,
563 0,
564 0,
565 ATA_CMD_WRITE_MULTI_FUA_EXT,
566 /* pio */
567 ATA_CMD_PIO_READ,
568 ATA_CMD_PIO_WRITE,
569 ATA_CMD_PIO_READ_EXT,
570 ATA_CMD_PIO_WRITE_EXT,
571 0,
572 0,
573 0,
574 0,
575 /* dma */
576 ATA_CMD_READ,
577 ATA_CMD_WRITE,
578 ATA_CMD_READ_EXT,
579 ATA_CMD_WRITE_EXT,
580 0,
581 0,
582 0,
583 ATA_CMD_WRITE_FUA_EXT
584 };
585
586 /**
587 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
588 * @qc: command to examine and configure
589 *
590 * Examine the device configuration and tf->flags to calculate
591 * the proper read/write commands and protocol to use.
592 *
593 * LOCKING:
594 * caller.
595 */
596 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
597 {
598 struct ata_taskfile *tf = &qc->tf;
599 struct ata_device *dev = qc->dev;
600 u8 cmd;
601
602 int index, fua, lba48, write;
603
604 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
605 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
606 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
607
608 if (dev->flags & ATA_DFLAG_PIO) {
609 tf->protocol = ATA_PROT_PIO;
610 index = dev->multi_count ? 0 : 8;
611 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
612 /* Unable to use DMA due to host limitation */
613 tf->protocol = ATA_PROT_PIO;
614 index = dev->multi_count ? 0 : 8;
615 } else {
616 tf->protocol = ATA_PROT_DMA;
617 index = 16;
618 }
619
620 cmd = ata_rw_cmds[index + fua + lba48 + write];
621 if (cmd) {
622 tf->command = cmd;
623 return 0;
624 }
625 return -1;
626 }
627
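/*
 * Worked example (illustrative, not part of the driver): how the
 * ata_rw_cmds[] index above is composed. A DMA, LBA48, FUA write
 * yields index 16 + 4 + 2 + 1 = 23, i.e. ATA_CMD_WRITE_FUA_EXT.
 */
#if 0
static u8 example_rw_cmd_lookup(int dma, int fua, int lba48, int write)
{
	int index = dma ? 16 : 8;	/* 0 would select the pio-multi block */

	return ata_rw_cmds[index + (fua ? 4 : 0) + (lba48 ? 2 : 0) +
			   (write ? 1 : 0)];
}
#endif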
628 static const char * const xfer_mode_str[] = {
629 "UDMA/16",
630 "UDMA/25",
631 "UDMA/33",
632 "UDMA/44",
633 "UDMA/66",
634 "UDMA/100",
635 "UDMA/133",
636 "UDMA7",
637 "MWDMA0",
638 "MWDMA1",
639 "MWDMA2",
640 "PIO0",
641 "PIO1",
642 "PIO2",
643 "PIO3",
644 "PIO4",
645 };
646
647 /**
648 * ata_mode_string - convert transfer mode bitmask to string
649 * @mask: mask of bits supported; only highest bit counts.
650 *
651 * Determine string which represents the highest speed
652 * (highest bit in @mask).
653 *
654 * LOCKING:
655 * None.
656 *
657 * RETURNS:
658 * Constant C string representing highest speed listed in
659 * @mask, or the constant C string "<n/a>".
660 */
661
662 static const char *ata_mode_string(unsigned int mask)
663 {
664 int i;
665
666 for (i = 7; i >= 0; i--)
667 if (mask & (1 << i))
668 goto out;
669 for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
670 if (mask & (1 << i))
671 goto out;
672 for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
673 if (mask & (1 << i))
674 goto out;
675
676 return "<n/a>";
677
678 out:
679 return xfer_mode_str[i];
680 }
681
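/*
 * Example (illustrative): a mask whose highest set UDMA bit is bit 5
 * makes ata_mode_string() above return "UDMA/100"; an all-zero mask
 * returns "<n/a>".
 */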
682 /**
683 * ata_pio_devchk - PATA device presence detection
684 * @ap: ATA channel to examine
685 * @device: Device to examine (starting at zero)
686 *
687 * This technique was originally described in
688 * Hale Landis's ATADRVR (www.ata-atapi.com), and
689 * later found its way into the ATA/ATAPI spec.
690 *
691 * Write a pattern to the ATA shadow registers,
692 * and if a device is present, it will respond by
693 * correctly storing and echoing back the
694 * ATA shadow register contents.
695 *
696 * LOCKING:
697 * caller.
698 */
699
700 static unsigned int ata_pio_devchk(struct ata_port *ap,
701 unsigned int device)
702 {
703 struct ata_ioports *ioaddr = &ap->ioaddr;
704 u8 nsect, lbal;
705
706 ap->ops->dev_select(ap, device);
707
708 outb(0x55, ioaddr->nsect_addr);
709 outb(0xaa, ioaddr->lbal_addr);
710
711 outb(0xaa, ioaddr->nsect_addr);
712 outb(0x55, ioaddr->lbal_addr);
713
714 outb(0x55, ioaddr->nsect_addr);
715 outb(0xaa, ioaddr->lbal_addr);
716
717 nsect = inb(ioaddr->nsect_addr);
718 lbal = inb(ioaddr->lbal_addr);
719
720 if ((nsect == 0x55) && (lbal == 0xaa))
721 return 1; /* we found a device */
722
723 return 0; /* nothing found */
724 }
725
726 /**
727 * ata_mmio_devchk - PATA device presence detection
728 * @ap: ATA channel to examine
729 * @device: Device to examine (starting at zero)
730 *
731 * This technique was originally described in
732 * Hale Landis's ATADRVR (www.ata-atapi.com), and
733 * later found its way into the ATA/ATAPI spec.
734 *
735 * Write a pattern to the ATA shadow registers,
736 * and if a device is present, it will respond by
737 * correctly storing and echoing back the
738 * ATA shadow register contents.
739 *
740 * LOCKING:
741 * caller.
742 */
743
744 static unsigned int ata_mmio_devchk(struct ata_port *ap,
745 unsigned int device)
746 {
747 struct ata_ioports *ioaddr = &ap->ioaddr;
748 u8 nsect, lbal;
749
750 ap->ops->dev_select(ap, device);
751
752 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
753 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
754
755 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
756 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
757
758 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
759 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
760
761 nsect = readb((void __iomem *) ioaddr->nsect_addr);
762 lbal = readb((void __iomem *) ioaddr->lbal_addr);
763
764 if ((nsect == 0x55) && (lbal == 0xaa))
765 return 1; /* we found a device */
766
767 return 0; /* nothing found */
768 }
769
770 /**
771 * ata_devchk - PATA device presence detection
772 * @ap: ATA channel to examine
773 * @device: Device to examine (starting at zero)
774 *
775 * Dispatch ATA device presence detection, depending
776 * on whether we are using PIO or MMIO to talk to the
777 * ATA shadow registers.
778 *
779 * LOCKING:
780 * caller.
781 */
782
783 static unsigned int ata_devchk(struct ata_port *ap,
784 unsigned int device)
785 {
786 if (ap->flags & ATA_FLAG_MMIO)
787 return ata_mmio_devchk(ap, device);
788 return ata_pio_devchk(ap, device);
789 }
790
791 /**
792 * ata_dev_classify - determine device type based on ATA-spec signature
793 * @tf: ATA taskfile register set for device to be identified
794 *
795 * Determine from taskfile register contents whether a device is
796 * ATA or ATAPI, as per "Signature and persistence" section
797 * of ATA/PI spec (volume 1, sect 5.14).
798 *
799 * LOCKING:
800 * None.
801 *
802 * RETURNS:
803 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
804 * in the event of failure.
805 */
806
807 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
808 {
809 /* Apple's open source Darwin code hints that some devices only
810 * put a proper signature into the LBA mid/high registers,
811 * so we check only those. It's sufficient for uniqueness.
812 */
813
814 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
815 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
816 DPRINTK("found ATA device by sig\n");
817 return ATA_DEV_ATA;
818 }
819
820 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
821 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
822 DPRINTK("found ATAPI device by sig\n");
823 return ATA_DEV_ATAPI;
824 }
825
826 DPRINTK("unknown device\n");
827 return ATA_DEV_UNKNOWN;
828 }
829
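/*
 * Illustrative sketch (not part of the driver): classifying a device
 * from its reset signature. 0x14/0xeb in the LBA mid/high registers is
 * the ATAPI signature per the checks above.
 */
#if 0
static void example_classify(void)
{
	struct ata_taskfile tf;

	memset(&tf, 0, sizeof(tf));
	tf.lbam = 0x14;
	tf.lbah = 0xeb;
	WARN_ON(ata_dev_classify(&tf) != ATA_DEV_ATAPI);
}
#endif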
830 /**
831 * ata_dev_try_classify - Parse returned ATA device signature
832 * @ap: ATA channel to examine
833 * @device: Device to examine (starting at zero)
834 * @r_err: Value of error register on completion
835 *
836 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
837 * an ATA/ATAPI-defined set of values is placed in the ATA
838 * shadow registers, indicating the results of device detection
839 * and diagnostics.
840 *
841 * Select the ATA device, and read the values from the ATA shadow
842 * registers. Then parse according to the Error register value,
843 * and the spec-defined values examined by ata_dev_classify().
844 *
845 * LOCKING:
846 * caller.
847 *
848 * RETURNS:
849 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
850 */
851
852 static unsigned int
853 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
854 {
855 struct ata_taskfile tf;
856 unsigned int class;
857 u8 err;
858
859 ap->ops->dev_select(ap, device);
860
861 memset(&tf, 0, sizeof(tf));
862
863 ap->ops->tf_read(ap, &tf);
864 err = tf.feature;
865 if (r_err)
866 *r_err = err;
867
868 /* see if device passed diags */
869 if (err == 1)
870 /* do nothing */ ;
871 else if ((device == 0) && (err == 0x81))
872 /* do nothing */ ;
873 else
874 return ATA_DEV_NONE;
875
876 /* determine if device is ATA or ATAPI */
877 class = ata_dev_classify(&tf);
878
879 if (class == ATA_DEV_UNKNOWN)
880 return ATA_DEV_NONE;
881 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
882 return ATA_DEV_NONE;
883 return class;
884 }
885
886 /**
887 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
888 * @id: IDENTIFY DEVICE results we will examine
889 * @s: string into which data is output
890 * @ofs: offset into identify device page
891 * @len: length of string to return. Must be an even number.
892 *
893 * The strings in the IDENTIFY DEVICE page are broken up into
894 * 16-bit chunks. Run through the string, and output each
895 * 8-bit chunk linearly, regardless of platform.
896 *
897 * LOCKING:
898 * caller.
899 */
900
901 void ata_dev_id_string(const u16 *id, unsigned char *s,
902 unsigned int ofs, unsigned int len)
903 {
904 unsigned int c;
905
906 while (len > 0) {
907 c = id[ofs] >> 8;
908 *s = c;
909 s++;
910
911 c = id[ofs] & 0xff;
912 *s = c;
913 s++;
914
915 ofs++;
916 len -= 2;
917 }
918 }
919
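/*
 * Illustrative sketch (not part of the driver): pulling the model
 * string (words 27-46 of the IDENTIFY page, 40 bytes) into a
 * NUL-terminated C string. ATA_ID_PROD_OFS is assumed to name the
 * model-string word offset as in <linux/ata.h>.
 */
#if 0
static void example_model_string(const u16 *id)
{
	unsigned char model[41];

	ata_dev_id_string(id, model, ATA_ID_PROD_OFS, 40);
	model[40] = '\0';
	printk(KERN_DEBUG "model: %s\n", model);
}
#endif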
920
921 /**
922 * ata_noop_dev_select - Select device 0/1 on ATA bus
923 * @ap: ATA channel to manipulate
924 * @device: ATA device (numbered from zero) to select
925 *
926 * This function intentionally performs no action; it is a no-op.
927 *
928 * May be used as the dev_select() entry in ata_port_operations.
929 *
930 * LOCKING:
931 * caller.
932 */
933 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
934 {
935 }
936
937
938 /**
939 * ata_std_dev_select - Select device 0/1 on ATA bus
940 * @ap: ATA channel to manipulate
941 * @device: ATA device (numbered from zero) to select
942 *
943 * Use the method defined in the ATA specification to
944 * make either device 0, or device 1, active on the
945 * ATA channel. Works with both PIO and MMIO.
946 *
947 * May be used as the dev_select() entry in ata_port_operations.
948 *
949 * LOCKING:
950 * caller.
951 */
952
953 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
954 {
955 u8 tmp;
956
957 if (device == 0)
958 tmp = ATA_DEVICE_OBS;
959 else
960 tmp = ATA_DEVICE_OBS | ATA_DEV1;
961
962 if (ap->flags & ATA_FLAG_MMIO) {
963 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
964 } else {
965 outb(tmp, ap->ioaddr.device_addr);
966 }
967 ata_pause(ap); /* needed; also flushes, for mmio */
968 }
969
970 /**
971 * ata_dev_select - Select device 0/1 on ATA bus
972 * @ap: ATA channel to manipulate
973 * @device: ATA device (numbered from zero) to select
974 * @wait: non-zero to wait for Status register BSY bit to clear
975 * @can_sleep: non-zero if context allows sleeping
976 *
977 * Use the method defined in the ATA specification to
978 * make either device 0, or device 1, active on the
979 * ATA channel.
980 *
981 * This is a high-level version of ata_std_dev_select(),
982 * which additionally provides the services of inserting
983 * the proper pauses and status polling, where needed.
984 *
985 * LOCKING:
986 * caller.
987 */
988
989 void ata_dev_select(struct ata_port *ap, unsigned int device,
990 unsigned int wait, unsigned int can_sleep)
991 {
992 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
993 ap->id, device, wait);
994
995 if (wait)
996 ata_wait_idle(ap);
997
998 ap->ops->dev_select(ap, device);
999
1000 if (wait) {
1001 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
1002 msleep(150);
1003 ata_wait_idle(ap);
1004 }
1005 }
1006
1007 /**
1008 * ata_dump_id - IDENTIFY DEVICE info debugging output
1009 * @dev: Device whose IDENTIFY DEVICE page we will dump
1010 *
1011 * Dump selected 16-bit words from a detected device's
1012 * IDENTIFY DEVICE page.
1013 *
1014 * LOCKING:
1015 * caller.
1016 */
1017
1018 static inline void ata_dump_id(const struct ata_device *dev)
1019 {
1020 DPRINTK("49==0x%04x "
1021 "53==0x%04x "
1022 "63==0x%04x "
1023 "64==0x%04x "
1024 "75==0x%04x \n",
1025 dev->id[49],
1026 dev->id[53],
1027 dev->id[63],
1028 dev->id[64],
1029 dev->id[75]);
1030 DPRINTK("80==0x%04x "
1031 "81==0x%04x "
1032 "82==0x%04x "
1033 "83==0x%04x "
1034 "84==0x%04x \n",
1035 dev->id[80],
1036 dev->id[81],
1037 dev->id[82],
1038 dev->id[83],
1039 dev->id[84]);
1040 DPRINTK("88==0x%04x "
1041 "93==0x%04x\n",
1042 dev->id[88],
1043 dev->id[93]);
1044 }
1045
1046 /*
1047 * Compute the PIO modes available for this device. This is not as
1048 * trivial as it seems if we must consider early devices correctly.
1049 *
1050 * FIXME: pre-IDE drive timing (do we care?).
1051 */
1052
1053 static unsigned int ata_pio_modes(const struct ata_device *adev)
1054 {
1055 u16 modes;
1056
1057 /* Usual case. Word 53 indicates word 64 is valid */
1058 if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1059 modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
1060 modes <<= 3;
1061 modes |= 0x7;
1062 return modes;
1063 }
1064
1065 /* If word 64 isn't valid then Word 51 high byte holds the PIO timing
1066 number for the maximum. Turn it into a mask and return it */
1067 modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1 ;
1068 return modes;
1069 /* But wait.. there's more. Design your standards by committee and
1070 you too can get a free iordy field to process. However it's the
1071 speeds, not the modes, that are supported... Note drivers using the
1072 timing API will get this right anyway */
1073 }
1074
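/*
 * Worked example (illustrative): with word 53 bit 1 set and word 64 ==
 * 0x0003, the code above computes (0x03 << 3) | 0x7 == 0x1f, i.e. a
 * mask covering PIO modes 0 through 4.
 */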
1075 static inline void
1076 ata_queue_pio_task(struct ata_port *ap)
1077 {
1078 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
1079 queue_work(ata_wq, &ap->pio_task);
1080 }
1081
1082 static inline void
1083 ata_queue_delayed_pio_task(struct ata_port *ap, unsigned long delay)
1084 {
1085 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
1086 queue_delayed_work(ata_wq, &ap->pio_task, delay);
1087 }
1088
1089 /**
1090 * ata_flush_pio_tasks - Flush pio_task
1091 * @ap: the target ata_port
1092 *
1093 * After this function completes, pio_task is
1094 * guaranteed not to be running or scheduled.
1095 *
1096 * LOCKING:
1097 * Kernel thread context (may sleep)
1098 */
1099
1100 static void ata_flush_pio_tasks(struct ata_port *ap)
1101 {
1102 int tmp = 0;
1103 unsigned long flags;
1104
1105 DPRINTK("ENTER\n");
1106
1107 spin_lock_irqsave(&ap->host_set->lock, flags);
1108 ap->flags |= ATA_FLAG_FLUSH_PIO_TASK;
1109 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1110
1111 DPRINTK("flush #1\n");
1112 flush_workqueue(ata_wq);
1113
1114 /*
1115 * At this point, if a task is running, it's guaranteed to see
1116 * the FLUSH flag; thus, it will never queue pio tasks again.
1117 * Cancel and flush.
1118 */
1119 tmp |= cancel_delayed_work(&ap->pio_task);
1120 if (!tmp) {
1121 DPRINTK("flush #2\n");
1122 flush_workqueue(ata_wq);
1123 }
1124
1125 spin_lock_irqsave(&ap->host_set->lock, flags);
1126 ap->flags &= ~ATA_FLAG_FLUSH_PIO_TASK;
1127 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1128
1129 DPRINTK("EXIT\n");
1130 }
1131
1132 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1133 {
1134 struct completion *waiting = qc->private_data;
1135
1136 qc->ap->ops->tf_read(qc->ap, &qc->tf);
1137 complete(waiting);
1138 }
1139
1140 /**
1141 * ata_exec_internal - execute libata internal command
1142 * @ap: Port to which the command is sent
1143 * @dev: Device to which the command is sent
1144 * @tf: Taskfile registers for the command and the result
1145 * @dma_dir: Data transfer direction of the command
1146 * @buf: Data buffer of the command
1147 * @buflen: Length of data buffer
1148 *
1149 * Executes libata internal command with timeout. @tf contains
1150 * command on entry and result on return. Timeout and error
1151 * conditions are reported via return value. No recovery action
1152 * is taken after a command times out. It is the caller's duty to
1153 * clean up after timeout.
1154 *
1155 * LOCKING:
1156 * None. Should be called with kernel context, might sleep.
1157 */
1158
1159 static unsigned
1160 ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1161 struct ata_taskfile *tf,
1162 int dma_dir, void *buf, unsigned int buflen)
1163 {
1164 u8 command = tf->command;
1165 struct ata_queued_cmd *qc;
1166 DECLARE_COMPLETION(wait);
1167 unsigned long flags;
1168 unsigned int err_mask;
1169
1170 spin_lock_irqsave(&ap->host_set->lock, flags);
1171
1172 qc = ata_qc_new_init(ap, dev);
1173 BUG_ON(qc == NULL);
1174
1175 qc->tf = *tf;
1176 qc->dma_dir = dma_dir;
1177 if (dma_dir != DMA_NONE) {
1178 ata_sg_init_one(qc, buf, buflen);
1179 qc->nsect = buflen / ATA_SECT_SIZE;
1180 }
1181
1182 qc->private_data = &wait;
1183 qc->complete_fn = ata_qc_complete_internal;
1184
1185 qc->err_mask = ata_qc_issue(qc);
1186 if (qc->err_mask)
1187 ata_qc_complete(qc);
1188
1189 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1190
1191 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
1192 spin_lock_irqsave(&ap->host_set->lock, flags);
1193
1194 /* We're racing with irq here. If we lose, the
1195 * following test prevents us from completing the qc
1196 * again. If completion irq occurs after here but
1197 * before the caller cleans up, it will result in a
1198 * spurious interrupt. We can live with that.
1199 */
1200 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1201 qc->err_mask = AC_ERR_TIMEOUT;
1202 ata_qc_complete(qc);
1203 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1204 ap->id, command);
1205 }
1206
1207 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1208 }
1209
1210 *tf = qc->tf;
1211 err_mask = qc->err_mask;
1212
1213 ata_qc_free(qc);
1214
1215 return err_mask;
1216 }
1217
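/*
 * Illustrative sketch (not part of the driver): issuing IDENTIFY
 * DEVICE through ata_exec_internal(), mirroring what
 * ata_dev_identify() below does.
 */
#if 0
static unsigned int example_identify(struct ata_port *ap,
				     struct ata_device *dev, u16 *id)
{
	struct ata_taskfile tf;

	ata_tf_init(ap, &tf, dev->devno);
	tf.command = ATA_CMD_ID_ATA;
	tf.protocol = ATA_PROT_PIO;

	/* non-zero return is an AC_ERR_* mask; tf holds the result */
	return ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
				 id, ATA_ID_WORDS * sizeof(u16));
}
#endif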
1218 /**
1219 * ata_pio_need_iordy - check if iordy needed
1220 * @adev: ATA device
1221 *
1222 * Check if the current speed of the device requires IORDY. Used
1223 * by various controllers for chip configuration.
1224 */
1225
1226 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1227 {
1228 int pio;
1229 int speed = adev->pio_mode - XFER_PIO_0;
1230
1231 if (speed < 2)
1232 return 0;
1233 if (speed > 2)
1234 return 1;
1235
1236 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1237
1238 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1239 pio = adev->id[ATA_ID_EIDE_PIO];
1240 /* Is the speed faster than the drive allows non-IORDY? */
1241 if (pio) {
1242 /* These are cycle times, not frequencies - watch the logic! */
1243 if (pio > 240) /* PIO2 is 240 ns per cycle */
1244 return 1;
1245 return 0;
1246 }
1247 }
1248 return 0;
1249 }
1250
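/*
 * Example (illustrative): at PIO2, a drive reporting 383 ns in word 67
 * (ATA_ID_EIDE_PIO) exceeds the 240 ns PIO2 cycle, so the check above
 * returns 1 (IORDY needed); a drive reporting 240 ns or less gets 0.
 */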
1251 /**
1252 * ata_dev_identify - obtain IDENTIFY [PACKET] DEVICE page
1253 * @ap: port on which device we wish to probe resides
1254 * @device: device bus address, starting at zero
1255 *
1256 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
1257 * command, and read back the 512-byte device information page.
1258 * The device information page is fed to us via the standard
1259 * PIO-IN protocol, but we hand-code it here. (TODO: investigate
1260 * using standard PIO-IN paths)
1261 *
1262 * After reading the device information page, we use several
1263 * bits of information from it to initialize data structures
1264 * that will be used during the lifetime of the ata_device.
1265 * Other data from the info page is used to disqualify certain
1266 * older ATA devices we do not wish to support.
1267 *
1268 * LOCKING:
1269 * Inherited from caller. Some functions called by this function
1270 * obtain the host_set lock.
1271 */
1272
1273 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
1274 {
1275 struct ata_device *dev = &ap->device[device];
1276 unsigned int major_version;
1277 u16 tmp;
1278 unsigned long xfer_modes;
1279 unsigned int using_edd;
1280 struct ata_taskfile tf;
1281 unsigned int err_mask;
1282 int rc;
1283
1284 if (!ata_dev_present(dev)) {
1285 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1286 ap->id, device);
1287 return;
1288 }
1289
1290 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1291 using_edd = 0;
1292 else
1293 using_edd = 1;
1294
1295 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
1296
1297 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
1298 dev->class == ATA_DEV_NONE);
1299
1300 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
1301
1302 retry:
1303 ata_tf_init(ap, &tf, device);
1304
1305 if (dev->class == ATA_DEV_ATA) {
1306 tf.command = ATA_CMD_ID_ATA;
1307 DPRINTK("do ATA identify\n");
1308 } else {
1309 tf.command = ATA_CMD_ID_ATAPI;
1310 DPRINTK("do ATAPI identify\n");
1311 }
1312
1313 tf.protocol = ATA_PROT_PIO;
1314
1315 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1316 dev->id, sizeof(dev->id));
1317
1318 if (err_mask) {
1319 if (err_mask & ~AC_ERR_DEV)
1320 goto err_out;
1321
1322 /*
1323 * arg! EDD works for all test cases, but seems to return
1324 * the ATA signature for some ATAPI devices. Until the
1325 * reason for this is found and fixed, we fix up the mess
1326 * here. If IDENTIFY DEVICE returns command aborted
1327 * (as ATAPI devices do), then we issue an
1328 * IDENTIFY PACKET DEVICE.
1329 *
1330 * ATA software reset (SRST, the default) does not appear
1331 * to have this problem.
1332 */
1333 if ((using_edd) && (dev->class == ATA_DEV_ATA)) {
1334 u8 err = tf.feature;
1335 if (err & ATA_ABORTED) {
1336 dev->class = ATA_DEV_ATAPI;
1337 goto retry;
1338 }
1339 }
1340 goto err_out;
1341 }
1342
1343 swap_buf_le16(dev->id, ATA_ID_WORDS);
1344
1345 /* print device capabilities */
1346 printk(KERN_DEBUG "ata%u: dev %u cfg "
1347 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1348 ap->id, device, dev->id[49],
1349 dev->id[82], dev->id[83], dev->id[84],
1350 dev->id[85], dev->id[86], dev->id[87],
1351 dev->id[88]);
1352
1353 /*
1354 * common ATA, ATAPI feature tests
1355 */
1356
1357 /* we require DMA support (bit 8 of word 49) */
1358 if (!ata_id_has_dma(dev->id)) {
1359 printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
1360 goto err_out_nosup;
1361 }
1362
1363 /* quick-n-dirty find max transfer mode; for printk only */
1364 xfer_modes = dev->id[ATA_ID_UDMA_MODES];
1365 if (!xfer_modes)
1366 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
1367 if (!xfer_modes)
1368 xfer_modes = ata_pio_modes(dev);
1369
1370 ata_dump_id(dev);
1371
1372 /* ATA-specific feature tests */
1373 if (dev->class == ATA_DEV_ATA) {
1374 if (!ata_id_is_ata(dev->id)) /* sanity check */
1375 goto err_out_nosup;
1376
1377 /* get major version */
1378 tmp = dev->id[ATA_ID_MAJOR_VER];
1379 for (major_version = 14; major_version >= 1; major_version--)
1380 if (tmp & (1 << major_version))
1381 break;
1382
1383 /*
1384 * The exact sequence expected by certain pre-ATA4 drives is:
1385 * SRST RESET
1386 * IDENTIFY
1387 * INITIALIZE DEVICE PARAMETERS
1388 * anything else..
1389 * Some drives were very specific about that exact sequence.
1390 */
1391 if (major_version < 4 || (!ata_id_has_lba(dev->id))) {
1392 ata_dev_init_params(ap, dev);
1393
1394 /* current CHS translation info (id[53-58]) might be
1395 * changed. reread the identify device info.
1396 */
1397 ata_dev_reread_id(ap, dev);
1398 }
1399
1400 if (ata_id_has_lba(dev->id)) {
1401 dev->flags |= ATA_DFLAG_LBA;
1402
1403 if (ata_id_has_lba48(dev->id)) {
1404 dev->flags |= ATA_DFLAG_LBA48;
1405 dev->n_sectors = ata_id_u64(dev->id, 100);
1406 } else {
1407 dev->n_sectors = ata_id_u32(dev->id, 60);
1408 }
1409
1410 /* print device info to dmesg */
1411 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
1412 ap->id, device,
1413 major_version,
1414 ata_mode_string(xfer_modes),
1415 (unsigned long long)dev->n_sectors,
1416 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
1417 } else {
1418 /* CHS */
1419
1420 /* Default translation */
1421 dev->cylinders = dev->id[1];
1422 dev->heads = dev->id[3];
1423 dev->sectors = dev->id[6];
1424 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;
1425
1426 if (ata_id_current_chs_valid(dev->id)) {
1427 /* Current CHS translation is valid. */
1428 dev->cylinders = dev->id[54];
1429 dev->heads = dev->id[55];
1430 dev->sectors = dev->id[56];
1431
1432 dev->n_sectors = ata_id_u32(dev->id, 57);
1433 }
1434
1435 /* print device info to dmesg */
1436 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
1437 ap->id, device,
1438 major_version,
1439 ata_mode_string(xfer_modes),
1440 (unsigned long long)dev->n_sectors,
1441 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
1442
1443 }
1444
1445 if (dev->id[59] & 0x100) {
1446 dev->multi_count = dev->id[59] & 0xff;
1447 DPRINTK("ata%u: dev %u multi count %u\n",
1448 ap->id, device, dev->multi_count);
1449 }
1450
1451 ap->host->max_cmd_len = 16;
1452 }
1453
1454 /* ATAPI-specific feature tests */
1455 else if (dev->class == ATA_DEV_ATAPI) {
1456 if (ata_id_is_ata(dev->id)) /* sanity check */
1457 goto err_out_nosup;
1458
1459 rc = atapi_cdb_len(dev->id);
1460 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1461 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1462 goto err_out_nosup;
1463 }
1464 ap->cdb_len = (unsigned int) rc;
1465 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1466
1467 if (ata_id_cdb_intr(dev->id))
1468 dev->flags |= ATA_DFLAG_CDB_INTR;
1469
1470 /* print device info to dmesg */
1471 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1472 ap->id, device,
1473 ata_mode_string(xfer_modes));
1474 }
1475
1476 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1477 return;
1478
1479 err_out_nosup:
1480 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1481 ap->id, device);
1482 err_out:
1483 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1484 DPRINTK("EXIT, err\n");
1485 }
1486
1487
1488 static inline u8 ata_dev_knobble(const struct ata_port *ap)
1489 {
1490 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
1491 }
1492
1493 /**
1494 * ata_dev_config - Run device specific handlers & check for SATA->PATA bridges
1495 * @ap: Bus
1496 * @i: Device
1497 *
1498 * LOCKING:
1499 */
1500
1501 void ata_dev_config(struct ata_port *ap, unsigned int i)
1502 {
1503 /* limit bridge transfers to udma5, 200 sectors */
1504 if (ata_dev_knobble(ap)) {
1505 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1506 ap->id, ap->device->devno);
1507 ap->udma_mask &= ATA_UDMA5;
1508 ap->host->max_sectors = ATA_MAX_SECTORS;
1509 ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
1510 ap->device[i].flags |= ATA_DFLAG_LOCK_SECTORS;
1511 }
1512
1513 if (ap->ops->dev_config)
1514 ap->ops->dev_config(ap, &ap->device[i]);
1515 }
1516
1517 /**
1518 * ata_bus_probe - Reset and probe ATA bus
1519 * @ap: Bus to probe
1520 *
1521 * Master ATA bus probing function. Initiates a hardware-dependent
1522 * bus reset, then attempts to identify any devices found on
1523 * the bus.
1524 *
1525 * LOCKING:
1526 * PCI/etc. bus probe sem.
1527 *
1528 * RETURNS:
1529 * Zero on success, non-zero on error.
1530 */
1531
1532 static int ata_bus_probe(struct ata_port *ap)
1533 {
1534 unsigned int i, found = 0;
1535
1536 if (ap->ops->probe_reset) {
1537 unsigned int classes[ATA_MAX_DEVICES];
1538 int rc;
1539
1540 ata_port_probe(ap);
1541
1542 rc = ap->ops->probe_reset(ap, classes);
1543 if (rc == 0) {
1544 for (i = 0; i < ATA_MAX_DEVICES; i++)
1545 ap->device[i].class = classes[i];
1546 } else {
1547 printk(KERN_ERR "ata%u: probe reset failed, "
1548 "disabling port\n", ap->id);
1549 ata_port_disable(ap);
1550 }
1551 } else
1552 ap->ops->phy_reset(ap);
1553
1554 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1555 goto err_out;
1556
1557 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1558 ata_dev_identify(ap, i);
1559 if (ata_dev_present(&ap->device[i])) {
1560 found = 1;
1561 ata_dev_config(ap,i);
1562 }
1563 }
1564
1565 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1566 goto err_out_disable;
1567
1568 ata_set_mode(ap);
1569 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1570 goto err_out_disable;
1571
1572 return 0;
1573
1574 err_out_disable:
1575 ap->ops->port_disable(ap);
1576 err_out:
1577 return -1;
1578 }
1579
1580 /**
1581 * ata_port_probe - Mark port as enabled
1582 * @ap: Port for which we indicate enablement
1583 *
1584 * Modify @ap data structure such that the system
1585 * thinks that the entire port is enabled.
1586 *
1587 * LOCKING: host_set lock, or some other form of
1588 * serialization.
1589 */
1590
1591 void ata_port_probe(struct ata_port *ap)
1592 {
1593 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1594 }
1595
1596 /**
1597 * sata_print_link_status - Print SATA link status
1598 * @ap: SATA port to printk link status about
1599 *
1600 * This function prints link speed and status of a SATA link.
1601 *
1602 * LOCKING:
1603 * None.
1604 */
1605 static void sata_print_link_status(struct ata_port *ap)
1606 {
1607 u32 sstatus, tmp;
1608 const char *speed;
1609
1610 if (!ap->ops->scr_read)
1611 return;
1612
1613 sstatus = scr_read(ap, SCR_STATUS);
1614
1615 if (sata_dev_present(ap)) {
1616 tmp = (sstatus >> 4) & 0xf;
1617 if (tmp & (1 << 0))
1618 speed = "1.5";
1619 else if (tmp & (1 << 1))
1620 speed = "3.0";
1621 else
1622 speed = "<unknown>";
1623 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1624 ap->id, speed, sstatus);
1625 } else {
1626 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1627 ap->id, sstatus);
1628 }
1629 }
1630
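/*
 * Example (illustrative): SStatus 0x113 means DET == 3 (device present,
 * phy communication established) and SPD == 1, so the code above prints
 * a "SATA link up 1.5 Gbps" message.
 */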
1631 /**
1632 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1633 * @ap: SATA port associated with target SATA PHY.
1634 *
1635 * This function issues commands to standard SATA Sxxx
1636 * PHY registers, to wake up the phy (and device), and
1637 * clear any reset condition.
1638 *
1639 * LOCKING:
1640 * PCI/etc. bus probe sem.
1641 *
1642 */
1643 void __sata_phy_reset(struct ata_port *ap)
1644 {
1645 u32 sstatus;
1646 unsigned long timeout = jiffies + (HZ * 5);
1647
1648 if (ap->flags & ATA_FLAG_SATA_RESET) {
1649 /* issue phy wake/reset */
1650 scr_write_flush(ap, SCR_CONTROL, 0x301);
1651 /* Couldn't find anything in SATA I/II specs, but
1652 * AHCI-1.1 10.4.2 says at least 1 ms. */
1653 mdelay(1);
1654 }
1655 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1656
1657 /* wait for phy to become ready, if necessary */
1658 do {
1659 msleep(200);
1660 sstatus = scr_read(ap, SCR_STATUS);
1661 if ((sstatus & 0xf) != 1)
1662 break;
1663 } while (time_before(jiffies, timeout));
1664
1665 /* print link status */
1666 sata_print_link_status(ap);
1667
1668 /* TODO: phy layer with polling, timeouts, etc. */
1669 if (sata_dev_present(ap))
1670 ata_port_probe(ap);
1671 else
1672 ata_port_disable(ap);
1673
1674 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1675 return;
1676
1677 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1678 ata_port_disable(ap);
1679 return;
1680 }
1681
1682 ap->cbl = ATA_CBL_SATA;
1683 }
1684
1685 /**
1686 * sata_phy_reset - Reset SATA bus.
1687 * @ap: SATA port associated with target SATA PHY.
1688 *
1689 * This function resets the SATA bus, and then probes
1690 * the bus for devices.
1691 *
1692 * LOCKING:
1693 * PCI/etc. bus probe sem.
1694 *
1695 */
1696 void sata_phy_reset(struct ata_port *ap)
1697 {
1698 __sata_phy_reset(ap);
1699 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1700 return;
1701 ata_bus_reset(ap);
1702 }
1703
1704 /**
1705 * ata_port_disable - Disable port.
1706 * @ap: Port to be disabled.
1707 *
1708 * Modify @ap data structure such that the system
1709 * thinks that the entire port is disabled, and should
1710 * never attempt to probe or communicate with devices
1711 * on this port.
1712 *
1713 * LOCKING: host_set lock, or some other form of
1714 * serialization.
1715 */
1716
1717 void ata_port_disable(struct ata_port *ap)
1718 {
1719 ap->device[0].class = ATA_DEV_NONE;
1720 ap->device[1].class = ATA_DEV_NONE;
1721 ap->flags |= ATA_FLAG_PORT_DISABLED;
1722 }
1723
1724 /*
1725 * This mode timing computation functionality is ported over from
1726 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1727 */
1728 /*
1729 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1730 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1731 * for PIO 5, which is a nonstandard extension and UDMA6, which
1732 * is currently supported only by Maxtor drives.
1733 */
1734
1735 static const struct ata_timing ata_timing[] = {
1736
1737 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1738 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1739 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1740 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1741
1742 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1743 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1744 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1745
1746 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1747
1748 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1749 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1750 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1751
1752 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1753 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1754 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1755
1756 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1757 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1758 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1759
1760 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1761 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1762 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1763
1764 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1765
1766 { 0xFF }
1767 };
1768
1769 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1770 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1771
1772 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1773 {
1774 q->setup = EZ(t->setup * 1000, T);
1775 q->act8b = EZ(t->act8b * 1000, T);
1776 q->rec8b = EZ(t->rec8b * 1000, T);
1777 q->cyc8b = EZ(t->cyc8b * 1000, T);
1778 q->active = EZ(t->active * 1000, T);
1779 q->recover = EZ(t->recover * 1000, T);
1780 q->cycle = EZ(t->cycle * 1000, T);
1781 q->udma = EZ(t->udma * 1000, UT);
1782 }
1783
1784 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1785 struct ata_timing *m, unsigned int what)
1786 {
1787 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1788 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1789 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1790 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1791 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1792 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1793 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1794 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1795 }
1796
1797 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1798 {
1799 const struct ata_timing *t;
1800
1801 for (t = ata_timing; t->mode != speed; t++)
1802 if (t->mode == 0xFF)
1803 return NULL;
1804 return t;
1805 }
1806
1807 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1808 struct ata_timing *t, int T, int UT)
1809 {
1810 const struct ata_timing *s;
1811 struct ata_timing p;
1812
1813 /*
1814 * Find the mode.
1815 */
1816
1817 if (!(s = ata_timing_find_mode(speed)))
1818 return -EINVAL;
1819
1820 memcpy(t, s, sizeof(*s));
1821
1822 /*
1823 * If the drive is an EIDE drive, it can tell us it needs extended
1824 * PIO/MW_DMA cycle timing.
1825 */
1826
1827 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1828 memset(&p, 0, sizeof(p));
1829 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1830 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1831 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1832 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1833 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1834 }
1835 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1836 }
1837
1838 /*
1839 * Convert the timing to bus clock counts.
1840 */
1841
1842 ata_timing_quantize(t, t, T, UT);
1843
1844 /*
1845 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1846 * S.M.A.R.T. and some other commands. We have to ensure that the
1847 * DMA cycle timing is slower than or equal to the fastest PIO timing.
1848 */
1849
1850 if (speed > XFER_PIO_4) {
1851 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1852 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1853 }
1854
1855 /*
1856 * Lengthen active & recovery time so that cycle time is correct.
1857 */
1858
1859 if (t->act8b + t->rec8b < t->cyc8b) {
1860 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1861 t->rec8b = t->cyc8b - t->act8b;
1862 }
1863
1864 if (t->active + t->recover < t->cycle) {
1865 t->active += (t->cycle - (t->active + t->recover)) / 2;
1866 t->recover = t->cycle - t->active;
1867 }
1868
1869 return 0;
1870 }
1871
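/*
 * Illustrative sketch (not part of the driver): quantizing MWDMA2
 * timings to a 30 ns controller clock. T and UT are assumed to be
 * clock periods in picoseconds, matching the "* 1000" ns-to-ps
 * scaling in ata_timing_quantize() above.
 */
#if 0
static void example_timing(struct ata_device *adev)
{
	struct ata_timing t;

	if (ata_timing_compute(adev, XFER_MW_DMA_2, &t, 30000, 30000) == 0)
		/* 120 ns MWDMA2 cycle -> 4 clocks of 30 ns each */
		printk(KERN_DEBUG "cycle: %d clocks\n", t.cycle);
}
#endif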
1872 static const struct {
1873 unsigned int shift;
1874 u8 base;
1875 } xfer_mode_classes[] = {
1876 { ATA_SHIFT_UDMA, XFER_UDMA_0 },
1877 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
1878 { ATA_SHIFT_PIO, XFER_PIO_0 },
1879 };
1880
1881 static u8 base_from_shift(unsigned int shift)
1882 {
1883 int i;
1884
1885 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1886 if (xfer_mode_classes[i].shift == shift)
1887 return xfer_mode_classes[i].base;
1888
1889 return 0xff;
1890 }
1891
1892 static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1893 {
1894 int ofs, idx;
1895 u8 base;
1896
1897 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1898 return;
1899
1900 if (dev->xfer_shift == ATA_SHIFT_PIO)
1901 dev->flags |= ATA_DFLAG_PIO;
1902
1903 ata_dev_set_xfermode(ap, dev);
1904
1905 base = base_from_shift(dev->xfer_shift);
1906 ofs = dev->xfer_mode - base;
1907 idx = ofs + dev->xfer_shift;
1908 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));
1909
1910 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
1911 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);
1912
1913 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1914 ap->id, dev->devno, xfer_mode_str[idx]);
1915 }
1916
1917 static int ata_host_set_pio(struct ata_port *ap)
1918 {
1919 unsigned int mask;
1920 int x, i;
1921 u8 base, xfer_mode;
1922
1923 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
1924 x = fgb(mask);
1925 if (x < 0) {
1926 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1927 return -1;
1928 }
1929
1930 base = base_from_shift(ATA_SHIFT_PIO);
1931 xfer_mode = base + x;
1932
1933 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1934 (int)base, (int)xfer_mode, mask, x);
1935
1936 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1937 struct ata_device *dev = &ap->device[i];
1938 if (ata_dev_present(dev)) {
1939 dev->pio_mode = xfer_mode;
1940 dev->xfer_mode = xfer_mode;
1941 dev->xfer_shift = ATA_SHIFT_PIO;
1942 if (ap->ops->set_piomode)
1943 ap->ops->set_piomode(ap, dev);
1944 }
1945 }
1946
1947 return 0;
1948 }
1949
1950 static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1951 unsigned int xfer_shift)
1952 {
1953 int i;
1954
1955 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1956 struct ata_device *dev = &ap->device[i];
1957 if (ata_dev_present(dev)) {
1958 dev->dma_mode = xfer_mode;
1959 dev->xfer_mode = xfer_mode;
1960 dev->xfer_shift = xfer_shift;
1961 if (ap->ops->set_dmamode)
1962 ap->ops->set_dmamode(ap, dev);
1963 }
1964 }
1965 }
1966
1967 /**
1968 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1969 * @ap: port on which timings will be programmed
1970 *
1971 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1972 *
1973 * LOCKING:
1974 * PCI/etc. bus probe sem.
1975 */
1976 static void ata_set_mode(struct ata_port *ap)
1977 {
1978 unsigned int xfer_shift;
1979 u8 xfer_mode;
1980 int rc;
1981
1982 /* step 1: always set host PIO timings */
1983 rc = ata_host_set_pio(ap);
1984 if (rc)
1985 goto err_out;
1986
1987 /* step 2: choose the best data xfer mode */
1988 xfer_mode = xfer_shift = 0;
1989 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
1990 if (rc)
1991 goto err_out;
1992
1993 /* step 3: if that xfer mode isn't PIO, set host DMA timings */
1994 if (xfer_shift != ATA_SHIFT_PIO)
1995 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1996
1997 /* step 4: update devices' xfer mode */
1998 ata_dev_set_mode(ap, &ap->device[0]);
1999 ata_dev_set_mode(ap, &ap->device[1]);
2000
2001 if (ap->flags & ATA_FLAG_PORT_DISABLED)
2002 return;
2003
2004 if (ap->ops->post_set_mode)
2005 ap->ops->post_set_mode(ap);
2006
2007 return;
2008
2009 err_out:
2010 ata_port_disable(ap);
2011 }
2012
2013 /**
2014 * ata_busy_sleep - sleep until BSY clears, or timeout
2015 * @ap: port containing status register to be polled
2016 * @tmout_pat: impatience timeout
2017 * @tmout: overall timeout
2018 *
2019 * Sleep until ATA Status register bit BSY clears,
2020 * or a timeout occurs.
2021 *
2022 * LOCKING: None.
2023 */
2024
2025 unsigned int ata_busy_sleep(struct ata_port *ap,
2026 unsigned long tmout_pat, unsigned long tmout)
2027 {
2028 unsigned long timer_start, timeout;
2029 u8 status;
2030
2031 status = ata_busy_wait(ap, ATA_BUSY, 300);
2032 timer_start = jiffies;
2033 timeout = timer_start + tmout_pat;
2034 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2035 msleep(50);
2036 status = ata_busy_wait(ap, ATA_BUSY, 3);
2037 }
2038
2039 if (status & ATA_BUSY)
2040 printk(KERN_WARNING "ata%u is slow to respond, "
2041 "please be patient\n", ap->id);
2042
2043 timeout = timer_start + tmout;
2044 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2045 msleep(50);
2046 status = ata_chk_status(ap);
2047 }
2048
2049 if (status & ATA_BUSY) {
2050 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
2051 ap->id, tmout / HZ);
2052 return 1;
2053 }
2054
2055 return 0;
2056 }
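
/* Minimal usage sketch, mirroring the reset paths below: poll politely
 * for ATA_TMOUT_BOOT_QUICK, let ata_busy_sleep() print its warning,
 * then keep polling up to ATA_TMOUT_BOOT:
 *
 *	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
 *		(error path: the device never cleared BSY)
 */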
2057
2058 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2059 {
2060 struct ata_ioports *ioaddr = &ap->ioaddr;
2061 unsigned int dev0 = devmask & (1 << 0);
2062 unsigned int dev1 = devmask & (1 << 1);
2063 unsigned long timeout;
2064
2065 /* if device 0 was found in ata_devchk, wait for its
2066 * BSY bit to clear
2067 */
2068 if (dev0)
2069 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2070
2071 /* if device 1 was found in ata_devchk, wait for
2072 * register access, then wait for BSY to clear
2073 */
2074 timeout = jiffies + ATA_TMOUT_BOOT;
2075 while (dev1) {
2076 u8 nsect, lbal;
2077
2078 ap->ops->dev_select(ap, 1);
2079 if (ap->flags & ATA_FLAG_MMIO) {
2080 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2081 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2082 } else {
2083 nsect = inb(ioaddr->nsect_addr);
2084 lbal = inb(ioaddr->lbal_addr);
2085 }
2086 if ((nsect == 1) && (lbal == 1))
2087 break;
2088 if (time_after(jiffies, timeout)) {
2089 dev1 = 0;
2090 break;
2091 }
2092 msleep(50); /* give drive a breather */
2093 }
2094 if (dev1)
2095 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2096
2097 /* is all this really necessary? */
2098 ap->ops->dev_select(ap, 0);
2099 if (dev1)
2100 ap->ops->dev_select(ap, 1);
2101 if (dev0)
2102 ap->ops->dev_select(ap, 0);
2103 }
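
/* Note on the 1/1 check above: after a reset, a device presents the
 * standard signature, 0x01 in both the sector count and LBA low
 * registers, so nsect == 1 && lbal == 1 means device 1 has come far
 * enough out of reset to accept register accesses.
 */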
2104
2105 /**
2106 * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
2107 * @ap: Port to reset and probe
2108 *
2109 * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
2110 * probe the bus. Not often used these days.
2111 *
2112 * LOCKING:
2113 * PCI/etc. bus probe sem.
2114 * Obtains host_set lock.
2115 *
2116 */
2117
2118 static unsigned int ata_bus_edd(struct ata_port *ap)
2119 {
2120 struct ata_taskfile tf;
2121 unsigned long flags;
2122
2123 /* set up execute-device-diag (bus reset) taskfile */
2124 /* also, take interrupts to a known state (disabled) */
2125 DPRINTK("execute-device-diag\n");
2126 ata_tf_init(ap, &tf, 0);
2127 tf.ctl |= ATA_NIEN;
2128 tf.command = ATA_CMD_EDD;
2129 tf.protocol = ATA_PROT_NODATA;
2130
2131 /* do bus reset */
2132 spin_lock_irqsave(&ap->host_set->lock, flags);
2133 ata_tf_to_host(ap, &tf);
2134 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2135
2136 	/* Spec says at least 2 ms, but who knows with those
2137 	 * crazy ATAPI devices...
2138 */
2139 msleep(150);
2140
2141 return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2142 }
2143
2144 static unsigned int ata_bus_softreset(struct ata_port *ap,
2145 unsigned int devmask)
2146 {
2147 struct ata_ioports *ioaddr = &ap->ioaddr;
2148
2149 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2150
2151 /* software reset. causes dev0 to be selected */
2152 if (ap->flags & ATA_FLAG_MMIO) {
2153 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2154 udelay(20); /* FIXME: flush */
2155 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2156 udelay(20); /* FIXME: flush */
2157 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2158 } else {
2159 outb(ap->ctl, ioaddr->ctl_addr);
2160 udelay(10);
2161 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2162 udelay(10);
2163 outb(ap->ctl, ioaddr->ctl_addr);
2164 }
2165
2166 /* spec mandates ">= 2ms" before checking status.
2167 * We wait 150ms, because that was the magic delay used for
2168 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2169 * between when the ATA command register is written, and then
2170 * status is checked. Because waiting for "a while" before
2171 * checking status is fine, post SRST, we perform this magic
2172 * delay here as well.
2173 */
2174 msleep(150);
2175
2176 ata_bus_post_reset(ap, devmask);
2177
2178 return 0;
2179 }
2180
2181 /**
2182 * ata_bus_reset - reset host port and associated ATA channel
2183 * @ap: port to reset
2184 *
2185 * This is typically the first time we actually start issuing
2186 * commands to the ATA channel. We wait for BSY to clear, then
2187 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2188 * result. Determine what devices, if any, are on the channel
2189 * by looking at the device 0/1 error register. Look at the signature
2190 * stored in each device's taskfile registers, to determine if
2191 * the device is ATA or ATAPI.
2192 *
2193 * LOCKING:
2194 * PCI/etc. bus probe sem.
2195 * Obtains host_set lock.
2196 *
2197 * SIDE EFFECTS:
2198 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
2199 */
2200
2201 void ata_bus_reset(struct ata_port *ap)
2202 {
2203 struct ata_ioports *ioaddr = &ap->ioaddr;
2204 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2205 u8 err;
2206 unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
2207
2208 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2209
2210 /* determine if device 0/1 are present */
2211 if (ap->flags & ATA_FLAG_SATA_RESET)
2212 dev0 = 1;
2213 else {
2214 dev0 = ata_devchk(ap, 0);
2215 if (slave_possible)
2216 dev1 = ata_devchk(ap, 1);
2217 }
2218
2219 if (dev0)
2220 devmask |= (1 << 0);
2221 if (dev1)
2222 devmask |= (1 << 1);
2223
2224 /* select device 0 again */
2225 ap->ops->dev_select(ap, 0);
2226
2227 /* issue bus reset */
2228 if (ap->flags & ATA_FLAG_SRST)
2229 rc = ata_bus_softreset(ap, devmask);
2230 else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
2231 /* set up device control */
2232 if (ap->flags & ATA_FLAG_MMIO)
2233 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2234 else
2235 outb(ap->ctl, ioaddr->ctl_addr);
2236 rc = ata_bus_edd(ap);
2237 }
2238
2239 if (rc)
2240 goto err_out;
2241
2242 /*
2243 * determine by signature whether we have ATA or ATAPI devices
2244 */
2245 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2246 if ((slave_possible) && (err != 0x81))
2247 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2248
2249 /* re-enable interrupts */
2250 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2251 ata_irq_on(ap);
2252
2253 /* is double-select really necessary? */
2254 if (ap->device[1].class != ATA_DEV_NONE)
2255 ap->ops->dev_select(ap, 1);
2256 if (ap->device[0].class != ATA_DEV_NONE)
2257 ap->ops->dev_select(ap, 0);
2258
2259 /* if no devices were detected, disable this port */
2260 if ((ap->device[0].class == ATA_DEV_NONE) &&
2261 (ap->device[1].class == ATA_DEV_NONE))
2262 goto err_out;
2263
2264 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2265 /* set up device control for ATA_FLAG_SATA_RESET */
2266 if (ap->flags & ATA_FLAG_MMIO)
2267 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2268 else
2269 outb(ap->ctl, ioaddr->ctl_addr);
2270 }
2271
2272 DPRINTK("EXIT\n");
2273 return;
2274
2275 err_out:
2276 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
2277 ap->ops->port_disable(ap);
2278
2279 DPRINTK("EXIT\n");
2280 }
2281
2282 static int sata_phy_resume(struct ata_port *ap)
2283 {
2284 unsigned long timeout = jiffies + (HZ * 5);
2285 u32 sstatus;
2286
2287 scr_write_flush(ap, SCR_CONTROL, 0x300);
2288
2289 /* Wait for phy to become ready, if necessary. */
2290 do {
2291 msleep(200);
2292 sstatus = scr_read(ap, SCR_STATUS);
2293 if ((sstatus & 0xf) != 1)
2294 return 0;
2295 } while (time_before(jiffies, timeout));
2296
2297 return -1;
2298 }
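
/* DET decoding for the loop above: the low nibble of SStatus is the
 * DET field; 0x1 means a device was detected but phy communication is
 * not yet established, 0x3 means communication is up.  We therefore
 * keep waiting only while DET == 0x1.
 */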
2299
2300 /**
2301 * ata_std_probeinit - initialize probing
2302 * @ap: port to be probed
2303 *
2304 * @ap is about to be probed. Initialize it. This function is
2305 * to be used as standard callback for ata_drive_probe_reset().
2306 */
2307 void ata_std_probeinit(struct ata_port *ap)
2308 {
2309 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2310 sata_phy_resume(ap);
2311 }
2312
2313 /**
2314 * ata_std_softreset - reset host port via ATA SRST
2315 * @ap: port to reset
2316 * @verbose: fail verbosely
2317 * @classes: resulting classes of attached devices
2318 *
2319 * Reset host port using ATA SRST. This function is to be used
2320 * as standard callback for ata_drive_*_reset() functions.
2321 *
2322 * LOCKING:
2323 * Kernel thread context (may sleep)
2324 *
2325 * RETURNS:
2326 * 0 on success, -errno otherwise.
2327 */
2328 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2329 {
2330 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2331 unsigned int devmask = 0, err_mask;
2332 u8 err;
2333
2334 DPRINTK("ENTER\n");
2335
2336 /* determine if device 0/1 are present */
2337 if (ata_devchk(ap, 0))
2338 devmask |= (1 << 0);
2339 if (slave_possible && ata_devchk(ap, 1))
2340 devmask |= (1 << 1);
2341
2342 	/* On most SATA controllers, devchk reports device presence
2343 	 * even when no device is attached.  Check SStatus and clear
2344 	 * devmask if the link is offline.  Note that we should
2345 	 * continue resetting even when there seems to be no device.
2346 	 */
2347 if (ap->ops->scr_read && !sata_dev_present(ap))
2348 devmask = 0;
2349
2350 /* select device 0 again */
2351 ap->ops->dev_select(ap, 0);
2352
2353 /* issue bus reset */
2354 DPRINTK("about to softreset, devmask=%x\n", devmask);
2355 err_mask = ata_bus_softreset(ap, devmask);
2356 if (err_mask) {
2357 if (verbose)
2358 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2359 ap->id, err_mask);
2360 else
2361 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2362 err_mask);
2363 return -EIO;
2364 }
2365
2366 /* determine by signature whether we have ATA or ATAPI devices */
2367 classes[0] = ata_dev_try_classify(ap, 0, &err);
2368 if (slave_possible && err != 0x81)
2369 classes[1] = ata_dev_try_classify(ap, 1, &err);
2370
2371 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2372 return 0;
2373 }
2374
2375 /**
2376 * sata_std_hardreset - reset host port via SATA phy reset
2377 * @ap: port to reset
2378 * @verbose: fail verbosely
2379 * @class: resulting class of attached device
2380 *
2381 * SATA phy-reset host port using DET bits of SControl register.
2382 * This function is to be used as standard callback for
2383 * ata_drive_*_reset().
2384 *
2385 * LOCKING:
2386 * Kernel thread context (may sleep)
2387 *
2388 * RETURNS:
2389 * 0 on success, -errno otherwise.
2390 */
2391 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2392 {
2393 u32 serror;
2394
2395 DPRINTK("ENTER\n");
2396
2397 /* Issue phy wake/reset */
2398 scr_write_flush(ap, SCR_CONTROL, 0x301);
2399
2400 /*
2401 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2402 * 10.4.2 says at least 1 ms.
2403 */
2404 msleep(1);
2405
2406 /* Bring phy back */
2407 sata_phy_resume(ap);
2408
2409 /* Clear SError */
2410 serror = scr_read(ap, SCR_ERROR);
2411 scr_write(ap, SCR_ERROR, serror);
2412
2413 /* TODO: phy layer with polling, timeouts, etc. */
2414 if (!sata_dev_present(ap)) {
2415 *class = ATA_DEV_NONE;
2416 DPRINTK("EXIT, link offline\n");
2417 return 0;
2418 }
2419
2420 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2421 if (verbose)
2422 printk(KERN_ERR "ata%u: COMRESET failed "
2423 "(device not ready)\n", ap->id);
2424 else
2425 DPRINTK("EXIT, device not ready\n");
2426 return -EIO;
2427 }
2428
2429 *class = ata_dev_try_classify(ap, 0, NULL);
2430
2431 DPRINTK("EXIT, class=%u\n", *class);
2432 return 0;
2433 }
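
/* SControl decoding for the writes above and in sata_phy_resume():
 * bits 3:0 are DET and bits 11:8 are IPM, so 0x301 asserts COMRESET
 * (DET = 1) with Partial/Slumber power states disabled (IPM = 3),
 * and 0x300 releases the reset (DET = 0) with the same IPM setting.
 */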
2434
2435 /**
2436 * ata_std_postreset - standard postreset callback
2437 * @ap: the target ata_port
2438 * @classes: classes of attached devices
2439 *
2440 * This function is invoked after a successful reset. Note that
2441 * the device might have been reset more than once using
2442 * different reset methods before postreset is invoked.
2443 * postreset is also responsible for setting cable type.
2444 *
2445 * This function is to be used as standard callback for
2446 * ata_drive_*_reset().
2447 *
2448 * LOCKING:
2449 * Kernel thread context (may sleep)
2450 */
2451 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2452 {
2453 DPRINTK("ENTER\n");
2454
2455 /* set cable type */
2456 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2457 ap->cbl = ATA_CBL_SATA;
2458
2459 /* print link status */
2460 if (ap->cbl == ATA_CBL_SATA)
2461 sata_print_link_status(ap);
2462
2463 /* bail out if no device is present */
2464 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2465 DPRINTK("EXIT, no device\n");
2466 return;
2467 }
2468
2469 /* is double-select really necessary? */
2470 	if (classes[1] != ATA_DEV_NONE)
2471 		ap->ops->dev_select(ap, 1);
2472 	if (classes[0] != ATA_DEV_NONE)
2473 		ap->ops->dev_select(ap, 0);
2474
2475 /* re-enable interrupts & set up device control */
2476 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2477 ata_irq_on(ap);
2478
2479 DPRINTK("EXIT\n");
2480 }
2481
2482 /**
2483 * ata_std_probe_reset - standard probe reset method
2484 * @ap: port on which to perform probe-reset
2485 * @classes: resulting classes of attached devices
2486 *
2487 * The stock off-the-shelf ->probe_reset method.
2488 *
2489 * LOCKING:
2490 * Kernel thread context (may sleep)
2491 *
2492 * RETURNS:
2493 * 0 on success, -errno otherwise.
2494 */
2495 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2496 {
2497 ata_reset_fn_t hardreset;
2498
2499 hardreset = NULL;
2500 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2501 hardreset = sata_std_hardreset;
2502
2503 return ata_drive_probe_reset(ap, ata_std_probeinit,
2504 ata_std_softreset, hardreset,
2505 ata_std_postreset, classes);
2506 }
2507
2508 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2509 ata_postreset_fn_t postreset,
2510 unsigned int *classes)
2511 {
2512 int i, rc;
2513
2514 for (i = 0; i < ATA_MAX_DEVICES; i++)
2515 classes[i] = ATA_DEV_UNKNOWN;
2516
2517 rc = reset(ap, 0, classes);
2518 if (rc)
2519 return rc;
2520
2521 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2522 * is complete and convert all ATA_DEV_UNKNOWN to
2523 * ATA_DEV_NONE.
2524 */
2525 for (i = 0; i < ATA_MAX_DEVICES; i++)
2526 if (classes[i] != ATA_DEV_UNKNOWN)
2527 break;
2528
2529 if (i < ATA_MAX_DEVICES)
2530 for (i = 0; i < ATA_MAX_DEVICES; i++)
2531 if (classes[i] == ATA_DEV_UNKNOWN)
2532 classes[i] = ATA_DEV_NONE;
2533
2534 if (postreset)
2535 postreset(ap, classes);
2536
2537 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2538 }
2539
2540 /**
2541 * ata_drive_probe_reset - Perform probe reset with given methods
2542 * @ap: port to reset
2543 * @probeinit: probeinit method (can be NULL)
2544 * @softreset: softreset method (can be NULL)
2545 * @hardreset: hardreset method (can be NULL)
2546 * @postreset: postreset method (can be NULL)
2547 * @classes: resulting classes of attached devices
2548 *
2549 * Reset the specified port and classify attached devices using
2550 * given methods. This function prefers softreset but tries all
2551 * possible reset sequences to reset and classify devices.  It is
2552 * intended to be used by low-level drivers when constructing the
2553 * ->probe_reset callback.
2554 *
2555 * Reset methods should follow the following rules.
2556 *
2557 * - Return 0 on success, -errno on failure.
2558 * - If classification is supported, fill classes[] with
2559 * recognized class codes.
2560 * - If classification is not supported, leave classes[] alone.
2561 * - If verbose is non-zero, print error message on failure;
2562 * otherwise, shut up.
2563 *
2564 * LOCKING:
2565 * Kernel thread context (may sleep)
2566 *
2567 * RETURNS:
2568 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2569 * if classification fails, and any error code from reset
2570 * methods.
2571 */
2572 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2573 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2574 ata_postreset_fn_t postreset, unsigned int *classes)
2575 {
2576 int rc = -EINVAL;
2577
2578 if (probeinit)
2579 probeinit(ap);
2580
2581 if (softreset) {
2582 rc = do_probe_reset(ap, softreset, postreset, classes);
2583 if (rc == 0)
2584 return 0;
2585 }
2586
2587 if (!hardreset)
2588 return rc;
2589
2590 rc = do_probe_reset(ap, hardreset, postreset, classes);
2591 if (rc == 0 || rc != -ENODEV)
2592 return rc;
2593
2594 if (softreset)
2595 rc = do_probe_reset(ap, softreset, postreset, classes);
2596
2597 return rc;
2598 }
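
/* A hypothetical low-level driver with only a controller-specific
 * hardreset could build its ->probe_reset much like
 * ata_std_probe_reset() above (the foo_* names are placeholders, not
 * real drivers):
 *
 *	static int foo_probe_reset(struct ata_port *ap, unsigned int *classes)
 *	{
 *		return ata_drive_probe_reset(ap, NULL, NULL, foo_hardreset,
 *					     ata_std_postreset, classes);
 *	}
 */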
2599
2600 static void ata_pr_blacklisted(const struct ata_port *ap,
2601 const struct ata_device *dev)
2602 {
2603 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
2604 ap->id, dev->devno);
2605 }
2606
2607 static const char * const ata_dma_blacklist [] = {
2608 "WDC AC11000H",
2609 "WDC AC22100H",
2610 "WDC AC32500H",
2611 "WDC AC33100H",
2612 "WDC AC31600H",
2613 "WDC AC32100H",
2614 "WDC AC23200L",
2615 "Compaq CRD-8241B",
2616 "CRD-8400B",
2617 "CRD-8480B",
2618 "CRD-8482B",
2619 "CRD-84",
2620 "SanDisk SDP3B",
2621 "SanDisk SDP3B-64",
2622 "SANYO CD-ROM CRD",
2623 "HITACHI CDR-8",
2624 "HITACHI CDR-8335",
2625 "HITACHI CDR-8435",
2626 "Toshiba CD-ROM XM-6202B",
2627 "TOSHIBA CD-ROM XM-1702BC",
2628 "CD-532E-A",
2629 "E-IDE CD-ROM CR-840",
2630 "CD-ROM Drive/F5A",
2631 "WPI CDD-820",
2632 "SAMSUNG CD-ROM SC-148C",
2633 "SAMSUNG CD-ROM SC",
2634 "SanDisk SDP3B-64",
2635 "ATAPI CD-ROM DRIVE 40X MAXIMUM",
2636 "_NEC DV5800A",
2637 };
2638
2639 static int ata_dma_blacklisted(const struct ata_device *dev)
2640 {
2641 unsigned char model_num[40];
2642 char *s;
2643 unsigned int len;
2644 int i;
2645
2646 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2647 sizeof(model_num));
2648 s = &model_num[0];
2649 len = strnlen(s, sizeof(model_num));
2650
2651 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2652 while ((len > 0) && (s[len - 1] == ' ')) {
2653 len--;
2654 s[len] = 0;
2655 }
2656
2657 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
2658 if (!strncmp(ata_dma_blacklist[i], s, len))
2659 return 1;
2660
2661 return 0;
2662 }
2663
2664 static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
2665 {
2666 const struct ata_device *master, *slave;
2667 unsigned int mask;
2668
2669 master = &ap->device[0];
2670 slave = &ap->device[1];
2671
2672 	assert(ata_dev_present(master) || ata_dev_present(slave));
2673
2674 if (shift == ATA_SHIFT_UDMA) {
2675 mask = ap->udma_mask;
2676 if (ata_dev_present(master)) {
2677 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
2678 if (ata_dma_blacklisted(master)) {
2679 mask = 0;
2680 ata_pr_blacklisted(ap, master);
2681 }
2682 }
2683 if (ata_dev_present(slave)) {
2684 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
2685 if (ata_dma_blacklisted(slave)) {
2686 mask = 0;
2687 ata_pr_blacklisted(ap, slave);
2688 }
2689 }
2690 }
2691 else if (shift == ATA_SHIFT_MWDMA) {
2692 mask = ap->mwdma_mask;
2693 if (ata_dev_present(master)) {
2694 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
2695 if (ata_dma_blacklisted(master)) {
2696 mask = 0;
2697 ata_pr_blacklisted(ap, master);
2698 }
2699 }
2700 if (ata_dev_present(slave)) {
2701 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
2702 if (ata_dma_blacklisted(slave)) {
2703 mask = 0;
2704 ata_pr_blacklisted(ap, slave);
2705 }
2706 }
2707 }
2708 else if (shift == ATA_SHIFT_PIO) {
2709 mask = ap->pio_mask;
2710 if (ata_dev_present(master)) {
2711 /* spec doesn't return explicit support for
2712 * PIO0-2, so we fake it
2713 */
2714 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
2715 tmp_mode <<= 3;
2716 tmp_mode |= 0x7;
2717 mask &= tmp_mode;
2718 }
2719 if (ata_dev_present(slave)) {
2720 /* spec doesn't return explicit support for
2721 * PIO0-2, so we fake it
2722 */
2723 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
2724 tmp_mode <<= 3;
2725 tmp_mode |= 0x7;
2726 mask &= tmp_mode;
2727 }
2728 }
2729 else {
2730 mask = 0xffffffff; /* shut up compiler warning */
2731 BUG();
2732 }
2733
2734 return mask;
2735 }
2736
2737 /* find greatest bit */
2738 static int fgb(u32 bitmap)
2739 {
2740 unsigned int i;
2741 int x = -1;
2742
2743 for (i = 0; i < 32; i++)
2744 if (bitmap & (1 << i))
2745 x = i;
2746
2747 return x;
2748 }
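
/* Example: fgb(0x07) == 2, so a combined UDMA mask of 0x07 (modes 0-2
 * supported) makes the callers below pick XFER_UDMA_0 + 2, i.e. UDMA/33.
 */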
2749
2750 /**
2751 * ata_choose_xfer_mode - attempt to find best transfer mode
2752 * @ap: Port for which an xfer mode will be selected
2753 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
2754 * @xfer_shift_out: (output) bit shift that selects this mode
2755 *
2756 * Based on host and device capabilities, determine the
2757 * maximum transfer mode that is amenable to all.
2758 *
2759 * LOCKING:
2760 * PCI/etc. bus probe sem.
2761 *
2762 * RETURNS:
2763 * Zero on success, negative on error.
2764 */
2765
2766 static int ata_choose_xfer_mode(const struct ata_port *ap,
2767 u8 *xfer_mode_out,
2768 unsigned int *xfer_shift_out)
2769 {
2770 unsigned int mask, shift;
2771 int x, i;
2772
2773 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
2774 shift = xfer_mode_classes[i].shift;
2775 mask = ata_get_mode_mask(ap, shift);
2776
2777 x = fgb(mask);
2778 if (x >= 0) {
2779 *xfer_mode_out = xfer_mode_classes[i].base + x;
2780 *xfer_shift_out = shift;
2781 return 0;
2782 }
2783 }
2784
2785 return -1;
2786 }
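
/* Worked example (ignoring the per-device masks for brevity): with
 * ap->udma_mask == 0 and ap->mwdma_mask == 0x07, the UDMA class yields
 * an empty mask, so the loop falls through to MWDMA and returns
 * XFER_MW_DMA_0 + fgb(0x07) == XFER_MW_DMA_2, with *xfer_shift_out set
 * to ATA_SHIFT_MWDMA.
 */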
2787
2788 /**
2789 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2790 * @ap: Port associated with device @dev
2791 * @dev: Device to which command will be sent
2792 *
2793 * Issue SET FEATURES - XFER MODE command to device @dev
2794 * on port @ap.
2795 *
2796 * LOCKING:
2797 * PCI/etc. bus probe sem.
2798 */
2799
2800 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2801 {
2802 struct ata_taskfile tf;
2803
2804 /* set up set-features taskfile */
2805 DPRINTK("set features - xfer mode\n");
2806
2807 ata_tf_init(ap, &tf, dev->devno);
2808 tf.command = ATA_CMD_SET_FEATURES;
2809 tf.feature = SETFEATURES_XFER;
2810 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2811 tf.protocol = ATA_PROT_NODATA;
2812 tf.nsect = dev->xfer_mode;
2813
2814 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
2815 printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n",
2816 ap->id);
2817 ata_port_disable(ap);
2818 }
2819
2820 DPRINTK("EXIT\n");
2821 }
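
/* Example: programming UDMA/100 issues SET FEATURES with feature 0x03
 * (SETFEATURES_XFER) and sector count 0x45 (XFER_UDMA_5); the code
 * above takes that value directly from dev->xfer_mode.
 */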
2822
2823 /**
2824 * ata_dev_reread_id - re-read a device's IDENTIFY data
2825 * @ap: port where the device is
2826 * @dev: device whose IDENTIFY data is to be re-read
2827 *
2828 * LOCKING:
 *	PCI/etc. bus probe sem.
2829 */
2830
2831 static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev)
2832 {
2833 struct ata_taskfile tf;
2834
2835 ata_tf_init(ap, &tf, dev->devno);
2836
2837 if (dev->class == ATA_DEV_ATA) {
2838 tf.command = ATA_CMD_ID_ATA;
2839 DPRINTK("do ATA identify\n");
2840 } else {
2841 tf.command = ATA_CMD_ID_ATAPI;
2842 DPRINTK("do ATAPI identify\n");
2843 }
2844
2845 tf.flags |= ATA_TFLAG_DEVICE;
2846 tf.protocol = ATA_PROT_PIO;
2847
2848 if (ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
2849 dev->id, sizeof(dev->id)))
2850 goto err_out;
2851
2852 swap_buf_le16(dev->id, ATA_ID_WORDS);
2853
2854 ata_dump_id(dev);
2855
2856 DPRINTK("EXIT\n");
2857
2858 return;
2859 err_out:
2860 printk(KERN_ERR "ata%u: failed to reread ID, disabled\n", ap->id);
2861 ata_port_disable(ap);
2862 }
2863
2864 /**
2865 * ata_dev_init_params - Issue INIT DEV PARAMS command
2866 * @ap: Port associated with device @dev
2867 * @dev: Device to which command will be sent
2868 *
2869 * LOCKING:
 *	PCI/etc. bus probe sem.
2870 */
2871
2872 static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2873 {
2874 struct ata_taskfile tf;
2875 u16 sectors = dev->id[6];
2876 u16 heads = dev->id[3];
2877
2878 /* Number of sectors per track 1-255. Number of heads 1-16 */
2879 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2880 return;
2881
2882 /* set up init dev params taskfile */
2883 DPRINTK("init dev params \n");
2884
2885 ata_tf_init(ap, &tf, dev->devno);
2886 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2887 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2888 tf.protocol = ATA_PROT_NODATA;
2889 tf.nsect = sectors;
2890 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2891
2892 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
2893 printk(KERN_ERR "ata%u: failed to init parameters, disabled\n",
2894 ap->id);
2895 ata_port_disable(ap);
2896 }
2897
2898 DPRINTK("EXIT\n");
2899 }
2900
2901 /**
2902 * ata_sg_clean - Unmap DMA memory associated with command
2903 * @qc: Command containing DMA memory to be released
2904 *
2905 * Unmap all mapped DMA memory associated with this command.
2906 *
2907 * LOCKING:
2908 * spin_lock_irqsave(host_set lock)
2909 */
2910
2911 static void ata_sg_clean(struct ata_queued_cmd *qc)
2912 {
2913 struct ata_port *ap = qc->ap;
2914 struct scatterlist *sg = qc->__sg;
2915 int dir = qc->dma_dir;
2916 void *pad_buf = NULL;
2917
2918 assert(qc->flags & ATA_QCFLAG_DMAMAP);
2919 assert(sg != NULL);
2920
2921 if (qc->flags & ATA_QCFLAG_SINGLE)
2922 assert(qc->n_elem == 1);
2923
2924 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2925
2926 /* if we padded the buffer out to 32-bit bound, and data
2927 * xfer direction is from-device, we must copy from the
2928 * pad buffer back into the supplied buffer
2929 */
2930 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2931 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2932
2933 if (qc->flags & ATA_QCFLAG_SG) {
2934 if (qc->n_elem)
2935 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2936 /* restore last sg */
2937 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2938 if (pad_buf) {
2939 struct scatterlist *psg = &qc->pad_sgent;
2940 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2941 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2942 kunmap_atomic(addr, KM_IRQ0);
2943 }
2944 } else {
2945 if (sg_dma_len(&sg[0]) > 0)
2946 dma_unmap_single(ap->host_set->dev,
2947 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2948 dir);
2949 /* restore sg */
2950 sg->length += qc->pad_len;
2951 if (pad_buf)
2952 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2953 pad_buf, qc->pad_len);
2954 }
2955
2956 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2957 qc->__sg = NULL;
2958 }
2959
2960 /**
2961 * ata_fill_sg - Fill PCI IDE PRD table
2962 * @qc: Metadata associated with taskfile to be transferred
2963 *
2964 * Fill PCI IDE PRD (scatter-gather) table with segments
2965 * associated with the current disk command.
2966 *
2967 * LOCKING:
2968 * spin_lock_irqsave(host_set lock)
2969 *
2970 */
2971 static void ata_fill_sg(struct ata_queued_cmd *qc)
2972 {
2973 struct ata_port *ap = qc->ap;
2974 struct scatterlist *sg;
2975 unsigned int idx;
2976
2977 assert(qc->__sg != NULL);
2978 assert(qc->n_elem > 0);
2979
2980 idx = 0;
2981 ata_for_each_sg(sg, qc) {
2982 u32 addr, offset;
2983 u32 sg_len, len;
2984
2985 /* determine if physical DMA addr spans 64K boundary.
2986 * Note h/w doesn't support 64-bit, so we unconditionally
2987 * truncate dma_addr_t to u32.
2988 */
2989 addr = (u32) sg_dma_address(sg);
2990 sg_len = sg_dma_len(sg);
2991
2992 while (sg_len) {
2993 offset = addr & 0xffff;
2994 len = sg_len;
2995 if ((offset + sg_len) > 0x10000)
2996 len = 0x10000 - offset;
2997
2998 ap->prd[idx].addr = cpu_to_le32(addr);
2999 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3000 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3001
3002 idx++;
3003 sg_len -= len;
3004 addr += len;
3005 }
3006 }
3007
3008 if (idx)
3009 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3010 }
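
/* PRD layout reminder (illustrative, not extra code): each entry is a
 * pair of little-endian 32-bit words { addr, flags_len }.  A zero in
 * the low 16 bits of flags_len means a full 64KiB count (hence the
 * "len & 0xffff" above), and bit 31 (ATA_PRD_EOT) marks the final
 * entry.  A single 4KiB buffer at bus address 0x1000 would become:
 *
 *	prd[0].addr      = cpu_to_le32(0x1000);
 *	prd[0].flags_len = cpu_to_le32(0x1000 | ATA_PRD_EOT);
 */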

3011 /**
3012 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3013 * @qc: Metadata associated with taskfile to check
3014 *
3015 * Allow low-level driver to filter ATA PACKET commands, returning
3016 * a status indicating whether or not it is OK to use DMA for the
3017 * supplied PACKET command.
3018 *
3019 * LOCKING:
3020 * spin_lock_irqsave(host_set lock)
3021 *
3022 * RETURNS: 0 when ATAPI DMA can be used
3023 * nonzero otherwise
3024 */
3025 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3026 {
3027 struct ata_port *ap = qc->ap;
3028 int rc = 0; /* Assume ATAPI DMA is OK by default */
3029
3030 if (ap->ops->check_atapi_dma)
3031 rc = ap->ops->check_atapi_dma(qc);
3032
3033 return rc;
3034 }

3035 /**
3036 * ata_qc_prep - Prepare taskfile for submission
3037 * @qc: Metadata associated with taskfile to be prepared
3038 *
3039 * Prepare ATA taskfile for submission.
3040 *
3041 * LOCKING:
3042 * spin_lock_irqsave(host_set lock)
3043 */
3044 void ata_qc_prep(struct ata_queued_cmd *qc)
3045 {
3046 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3047 return;
3048
3049 ata_fill_sg(qc);
3050 }
3051
3052 /**
3053 * ata_sg_init_one - Associate command with memory buffer
3054 * @qc: Command to be associated
3055 * @buf: Memory buffer
3056 * @buflen: Length of memory buffer, in bytes.
3057 *
3058 * Initialize the data-related elements of queued_cmd @qc
3059 * to point to a single memory buffer, @buf of byte length @buflen.
3060 *
3061 * LOCKING:
3062 * spin_lock_irqsave(host_set lock)
3063 */
3064
3065 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3066 {
3067 struct scatterlist *sg;
3068
3069 qc->flags |= ATA_QCFLAG_SINGLE;
3070
3071 memset(&qc->sgent, 0, sizeof(qc->sgent));
3072 qc->__sg = &qc->sgent;
3073 qc->n_elem = 1;
3074 qc->orig_n_elem = 1;
3075 qc->buf_virt = buf;
3076
3077 sg = qc->__sg;
3078 sg_init_one(sg, buf, buflen);
3079 }
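
/* Usage sketch (hypothetical caller): a 512-byte IDENTIFY-style
 * transfer would do
 *
 *	ata_sg_init_one(qc, dev->id, sizeof(dev->id));
 *	qc->dma_dir = DMA_FROM_DEVICE;
 *
 * before issue; ata_sg_setup_one() below then DMA-maps the buffer.
 */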
3080
3081 /**
3082 * ata_sg_init - Associate command with scatter-gather table.
3083 * @qc: Command to be associated
3084 * @sg: Scatter-gather table.
3085 * @n_elem: Number of elements in s/g table.
3086 *
3087 * Initialize the data-related elements of queued_cmd @qc
3088 * to point to a scatter-gather table @sg, containing @n_elem
3089 * elements.
3090 *
3091 * LOCKING:
3092 * spin_lock_irqsave(host_set lock)
3093 */
3094
3095 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3096 unsigned int n_elem)
3097 {
3098 qc->flags |= ATA_QCFLAG_SG;
3099 qc->__sg = sg;
3100 qc->n_elem = n_elem;
3101 qc->orig_n_elem = n_elem;
3102 }
3103
3104 /**
3105 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3106 * @qc: Command with memory buffer to be mapped.
3107 *
3108 * DMA-map the memory buffer associated with queued_cmd @qc.
3109 *
3110 * LOCKING:
3111 * spin_lock_irqsave(host_set lock)
3112 *
3113 * RETURNS:
3114 * Zero on success, negative on error.
3115 */
3116
3117 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3118 {
3119 struct ata_port *ap = qc->ap;
3120 int dir = qc->dma_dir;
3121 struct scatterlist *sg = qc->__sg;
3122 dma_addr_t dma_address;
3123
3124 /* we must lengthen transfers to end on a 32-bit boundary */
3125 qc->pad_len = sg->length & 3;
3126 if (qc->pad_len) {
3127 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3128 struct scatterlist *psg = &qc->pad_sgent;
3129
3130 assert(qc->dev->class == ATA_DEV_ATAPI);
3131
3132 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3133
3134 if (qc->tf.flags & ATA_TFLAG_WRITE)
3135 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3136 qc->pad_len);
3137
3138 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3139 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3140 /* trim sg */
3141 sg->length -= qc->pad_len;
3142
3143 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3144 sg->length, qc->pad_len);
3145 }
3146
3147 if (!sg->length) {
3148 sg_dma_address(sg) = 0;
3149 goto skip_map;
3150 }
3151
3152 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
3153 sg->length, dir);
3154 if (dma_mapping_error(dma_address)) {
3155 /* restore sg */
3156 sg->length += qc->pad_len;
3157 return -1;
3158 }
3159
3160 sg_dma_address(sg) = dma_address;
3161 skip_map:
3162 sg_dma_len(sg) = sg->length;
3163
3164 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3165 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3166
3167 return 0;
3168 }
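
/* Padding example: an ATAPI transfer of 510 bytes has pad_len ==
 * (510 & 3) == 2, so the main sg entry is trimmed to 508 bytes and the
 * final two bytes travel through the per-tag 4-byte pad buffer,
 * keeping every DMA segment a multiple of 32 bits.
 */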
3169
3170 /**
3171 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3172 * @qc: Command with scatter-gather table to be mapped.
3173 *
3174 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3175 *
3176 * LOCKING:
3177 * spin_lock_irqsave(host_set lock)
3178 *
3179 * RETURNS:
3180 * Zero on success, negative on error.
3181 *
3182 */
3183
3184 static int ata_sg_setup(struct ata_queued_cmd *qc)
3185 {
3186 struct ata_port *ap = qc->ap;
3187 struct scatterlist *sg = qc->__sg;
3188 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3189 int n_elem, pre_n_elem, dir, trim_sg = 0;
3190
3191 VPRINTK("ENTER, ata%u\n", ap->id);
3192 assert(qc->flags & ATA_QCFLAG_SG);
3193
3194 /* we must lengthen transfers to end on a 32-bit boundary */
3195 qc->pad_len = lsg->length & 3;
3196 if (qc->pad_len) {
3197 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3198 struct scatterlist *psg = &qc->pad_sgent;
3199 unsigned int offset;
3200
3201 assert(qc->dev->class == ATA_DEV_ATAPI);
3202
3203 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3204
3205 /*
3206 * psg->page/offset are used to copy to-be-written
3207 * data in this function or read data in ata_sg_clean.
3208 */
3209 offset = lsg->offset + lsg->length - qc->pad_len;
3210 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3211 psg->offset = offset_in_page(offset);
3212
3213 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3214 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3215 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3216 kunmap_atomic(addr, KM_IRQ0);
3217 }
3218
3219 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3220 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3221 /* trim last sg */
3222 lsg->length -= qc->pad_len;
3223 if (lsg->length == 0)
3224 trim_sg = 1;
3225
3226 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3227 qc->n_elem - 1, lsg->length, qc->pad_len);
3228 }
3229
3230 pre_n_elem = qc->n_elem;
3231 if (trim_sg && pre_n_elem)
3232 pre_n_elem--;
3233
3234 if (!pre_n_elem) {
3235 n_elem = 0;
3236 goto skip_map;
3237 }
3238
3239 dir = qc->dma_dir;
3240 n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
3241 if (n_elem < 1) {
3242 /* restore last sg */
3243 lsg->length += qc->pad_len;
3244 return -1;
3245 }
3246
3247 DPRINTK("%d sg elements mapped\n", n_elem);
3248
3249 skip_map:
3250 qc->n_elem = n_elem;
3251
3252 return 0;
3253 }
3254
3255 /**
3256 * ata_poll_qc_complete - turn irq back on and finish qc
3257 * @qc: Command to complete
3259 *
3260 * LOCKING:
3261 * None. (grabs host lock)
3262 */
3263
3264 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3265 {
3266 struct ata_port *ap = qc->ap;
3267 unsigned long flags;
3268
3269 spin_lock_irqsave(&ap->host_set->lock, flags);
3270 ata_irq_on(ap);
3271 ata_qc_complete(qc);
3272 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3273 }
3274
3275 /**
3276 * ata_pio_poll - poll using PIO, depending on current state
3277 * @ap: the target ata_port
3278 *
3279 * LOCKING:
3280 * None. (executing in kernel thread context)
3281 *
3282 * RETURNS:
3283 * timeout value to use
3284 */
3285
3286 static unsigned long ata_pio_poll(struct ata_port *ap)
3287 {
3288 struct ata_queued_cmd *qc;
3289 u8 status;
3290 unsigned int poll_state = HSM_ST_UNKNOWN;
3291 unsigned int reg_state = HSM_ST_UNKNOWN;
3292
3293 qc = ata_qc_from_tag(ap, ap->active_tag);
3294 assert(qc != NULL);
3295
3296 switch (ap->hsm_task_state) {
3297 case HSM_ST:
3298 case HSM_ST_POLL:
3299 poll_state = HSM_ST_POLL;
3300 reg_state = HSM_ST;
3301 break;
3302 case HSM_ST_LAST:
3303 case HSM_ST_LAST_POLL:
3304 poll_state = HSM_ST_LAST_POLL;
3305 reg_state = HSM_ST_LAST;
3306 break;
3307 default:
3308 BUG();
3309 break;
3310 }
3311
3312 status = ata_chk_status(ap);
3313 if (status & ATA_BUSY) {
3314 if (time_after(jiffies, ap->pio_task_timeout)) {
3315 qc->err_mask |= AC_ERR_TIMEOUT;
3316 ap->hsm_task_state = HSM_ST_TMOUT;
3317 return 0;
3318 }
3319 ap->hsm_task_state = poll_state;
3320 return ATA_SHORT_PAUSE;
3321 }
3322
3323 ap->hsm_task_state = reg_state;
3324 return 0;
3325 }
3326
3327 /**
3328 * ata_pio_complete - check if drive is busy or idle
3329 * @ap: the target ata_port
3330 *
3331 * LOCKING:
3332 * None. (executing in kernel thread context)
3333 *
3334 * RETURNS:
3335 * Zero if qc completed.
3336 * Non-zero if has next.
3337 */
3338
3339 static int ata_pio_complete(struct ata_port *ap)
3340 {
3341 struct ata_queued_cmd *qc;
3342 u8 drv_stat;
3343
3344 /*
3345 * This is purely heuristic. This is a fast path. Sometimes when
3346 * we enter, BSY will be cleared in a chk-status or two. If not,
3347 * the drive is probably seeking or something. Snooze for a couple
3348 * msecs, then chk-status again. If still busy, fall back to
3349 * HSM_ST_LAST_POLL state.
3350 */
3351 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3352 if (drv_stat & ATA_BUSY) {
3353 msleep(2);
3354 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3355 if (drv_stat & ATA_BUSY) {
3356 ap->hsm_task_state = HSM_ST_LAST_POLL;
3357 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3358 return 1;
3359 }
3360 }
3361
3362 qc = ata_qc_from_tag(ap, ap->active_tag);
3363 assert(qc != NULL);
3364
3365 drv_stat = ata_wait_idle(ap);
3366 if (!ata_ok(drv_stat)) {
3367 qc->err_mask |= __ac_err_mask(drv_stat);
3368 ap->hsm_task_state = HSM_ST_ERR;
3369 return 1;
3370 }
3371
3372 ap->hsm_task_state = HSM_ST_IDLE;
3373
3374 assert(qc->err_mask == 0);
3375 ata_poll_qc_complete(qc);
3376
3377 /* another command may start at this point */
3378
3379 return 0;
3380 }
3381
3382
3383 /**
3384 * swap_buf_le16 - swap halves of 16-bit words in place
3385 * @buf: Buffer to swap
3386 * @buf_words: Number of 16-bit words in buffer.
3387 *
3388 * Swap halves of 16-bit words if needed to convert from
3389 * little-endian byte order to native cpu byte order, or
3390 * vice-versa.
3391 *
3392 * LOCKING:
3393 * Inherited from caller.
3394 */
3395 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3396 {
3397 #ifdef __BIG_ENDIAN
3398 unsigned int i;
3399
3400 for (i = 0; i < buf_words; i++)
3401 buf[i] = le16_to_cpu(buf[i]);
3402 #endif /* __BIG_ENDIAN */
3403 }
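
/* Example: IDENTIFY data arrives from the device as 256 little-endian
 * words, so the probe paths call swap_buf_le16(dev->id, ATA_ID_WORDS);
 * on little-endian CPUs this compiles away to nothing.
 */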
3404
3405 /**
3406 * ata_mmio_data_xfer - Transfer data by MMIO
3407 * @ap: port to read/write
3408 * @buf: data buffer
3409 * @buflen: buffer length
3410 * @write_data: read/write
3411 *
3412 * Transfer data from/to the device data register by MMIO.
3413 *
3414 * LOCKING:
3415 * Inherited from caller.
3416 */
3417
3418 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3419 unsigned int buflen, int write_data)
3420 {
3421 unsigned int i;
3422 unsigned int words = buflen >> 1;
3423 u16 *buf16 = (u16 *) buf;
3424 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3425
3426 /* Transfer multiple of 2 bytes */
3427 if (write_data) {
3428 for (i = 0; i < words; i++)
3429 writew(le16_to_cpu(buf16[i]), mmio);
3430 } else {
3431 for (i = 0; i < words; i++)
3432 buf16[i] = cpu_to_le16(readw(mmio));
3433 }
3434
3435 /* Transfer trailing 1 byte, if any. */
3436 if (unlikely(buflen & 0x01)) {
3437 u16 align_buf[1] = { 0 };
3438 unsigned char *trailing_buf = buf + buflen - 1;
3439
3440 if (write_data) {
3441 memcpy(align_buf, trailing_buf, 1);
3442 writew(le16_to_cpu(align_buf[0]), mmio);
3443 } else {
3444 align_buf[0] = cpu_to_le16(readw(mmio));
3445 memcpy(trailing_buf, align_buf, 1);
3446 }
3447 }
3448 }
3449
3450 /**
3451 * ata_pio_data_xfer - Transfer data by PIO
3452 * @ap: port to read/write
3453 * @buf: data buffer
3454 * @buflen: buffer length
3455 * @write_data: read/write
3456 *
3457 * Transfer data from/to the device data register by PIO.
3458 *
3459 * LOCKING:
3460 * Inherited from caller.
3461 */
3462
3463 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3464 unsigned int buflen, int write_data)
3465 {
3466 unsigned int words = buflen >> 1;
3467
3468 /* Transfer multiple of 2 bytes */
3469 if (write_data)
3470 outsw(ap->ioaddr.data_addr, buf, words);
3471 else
3472 insw(ap->ioaddr.data_addr, buf, words);
3473
3474 /* Transfer trailing 1 byte, if any. */
3475 if (unlikely(buflen & 0x01)) {
3476 u16 align_buf[1] = { 0 };
3477 unsigned char *trailing_buf = buf + buflen - 1;
3478
3479 if (write_data) {
3480 memcpy(align_buf, trailing_buf, 1);
3481 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3482 } else {
3483 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3484 memcpy(trailing_buf, align_buf, 1);
3485 }
3486 }
3487 }
3488
3489 /**
3490 * ata_data_xfer - Transfer data from/to the data register.
3491 * @ap: port to read/write
3492 * @buf: data buffer
3493 * @buflen: buffer length
3494 * @do_write: read/write
3495 *
3496 * Transfer data from/to the device data register.
3497 *
3498 * LOCKING:
3499 * Inherited from caller.
3500 */
3501
3502 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3503 unsigned int buflen, int do_write)
3504 {
3505 	/* Make the crap hardware pay the costs, not the good stuff */
3506 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3507 unsigned long flags;
3508 local_irq_save(flags);
3509 if (ap->flags & ATA_FLAG_MMIO)
3510 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3511 else
3512 ata_pio_data_xfer(ap, buf, buflen, do_write);
3513 local_irq_restore(flags);
3514 } else {
3515 if (ap->flags & ATA_FLAG_MMIO)
3516 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3517 else
3518 ata_pio_data_xfer(ap, buf, buflen, do_write);
3519 }
3520 }
3521
3522 /**
3523 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3524 * @qc: Command on going
3525 *
3526 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3527 *
3528 * LOCKING:
3529 * Inherited from caller.
3530 */
3531
3532 static void ata_pio_sector(struct ata_queued_cmd *qc)
3533 {
3534 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3535 struct scatterlist *sg = qc->__sg;
3536 struct ata_port *ap = qc->ap;
3537 struct page *page;
3538 unsigned int offset;
3539 unsigned char *buf;
3540
3541 if (qc->cursect == (qc->nsect - 1))
3542 ap->hsm_task_state = HSM_ST_LAST;
3543
3544 page = sg[qc->cursg].page;
3545 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3546
3547 /* get the current page and offset */
3548 page = nth_page(page, (offset >> PAGE_SHIFT));
3549 offset %= PAGE_SIZE;
3550
3551 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3552
3553 if (PageHighMem(page)) {
3554 unsigned long flags;
3555
3556 local_irq_save(flags);
3557 buf = kmap_atomic(page, KM_IRQ0);
3558
3559 /* do the actual data transfer */
3560 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3561
3562 kunmap_atomic(buf, KM_IRQ0);
3563 local_irq_restore(flags);
3564 } else {
3565 buf = page_address(page);
3566 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3567 }
3568
3569 qc->cursect++;
3570 qc->cursg_ofs++;
3571
3572 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3573 qc->cursg++;
3574 qc->cursg_ofs = 0;
3575 }
3576 }
3577
3578 /**
3579 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3580 * @qc: Command on going
3581 *
3582 * Transfer one or many ATA_SECT_SIZE of data from/to the
3583 * ATA device for the DRQ request.
3584 *
3585 * LOCKING:
3586 * Inherited from caller.
3587 */
3588
3589 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3590 {
3591 if (is_multi_taskfile(&qc->tf)) {
3592 /* READ/WRITE MULTIPLE */
3593 unsigned int nsect;
3594
3595 assert(qc->dev->multi_count);
3596
3597 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3598 while (nsect--)
3599 ata_pio_sector(qc);
3600 } else
3601 ata_pio_sector(qc);
3602 }
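
/* Example: for READ MULTIPLE with dev->multi_count == 8 and 10 sectors
 * remaining, one DRQ block moves min(10, 8) == 8 sectors and the next
 * (final) DRQ block moves the remaining 2.
 */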
3603
3604 /**
3605 * atapi_send_cdb - Write CDB bytes to hardware
3606 * @ap: Port to which ATAPI device is attached.
3607 * @qc: Taskfile currently active
3608 *
3609 * When device has indicated its readiness to accept
3610 * a CDB, this function is called. Send the CDB.
3611 *
3612 * LOCKING:
3613 * Inherited from caller.
3614 */
3615
3616 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3617 {
3618 /* send SCSI cdb */
3619 DPRINTK("send cdb\n");
3620 assert(ap->cdb_len >= 12);
3621
3622 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3623 ata_altstatus(ap); /* flush */
3624
3625 switch (qc->tf.protocol) {
3626 case ATA_PROT_ATAPI:
3627 ap->hsm_task_state = HSM_ST;
3628 break;
3629 case ATA_PROT_ATAPI_NODATA:
3630 ap->hsm_task_state = HSM_ST_LAST;
3631 break;
3632 case ATA_PROT_ATAPI_DMA:
3633 ap->hsm_task_state = HSM_ST_LAST;
3634 /* initiate bmdma */
3635 ap->ops->bmdma_start(qc);
3636 break;
3637 }
3638 }
3639
3640 /**
3641 * ata_pio_first_block - Write first data block to hardware
3642 * @ap: Port to which ATA/ATAPI device is attached.
3643 *
3644 * When device has indicated its readiness to accept
3645 * the data, this function sends out the CDB or
3646 * the first data block by PIO.
3647 * After this,
3648 * - If polling, ata_pio_task() handles the rest.
3649 * - Otherwise, interrupt handler takes over.
3650 *
3651 * LOCKING:
3652 * Kernel thread context (may sleep)
3653 *
3654 * RETURNS:
3655 * Zero if irq handler takes over
3656 * Non-zero if has next (polling).
3657 */
3658
3659 static int ata_pio_first_block(struct ata_port *ap)
3660 {
3661 struct ata_queued_cmd *qc;
3662 u8 status;
3663 unsigned long flags;
3664 int has_next;
3665
3666 qc = ata_qc_from_tag(ap, ap->active_tag);
3667 assert(qc != NULL);
3668 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3669
3670 /* if polling, we will stay in the work queue after sending the data.
3671 * otherwise, interrupt handler takes over after sending the data.
3672 */
3673 has_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3674
3675 /* sleep-wait for BSY to clear */
3676 DPRINTK("busy wait\n");
3677 if (ata_busy_sleep(ap, ATA_TMOUT_DATAOUT_QUICK, ATA_TMOUT_DATAOUT)) {
3678 qc->err_mask |= AC_ERR_TIMEOUT;
3679 ap->hsm_task_state = HSM_ST_TMOUT;
3680 goto err_out;
3681 }
3682
3683 /* make sure DRQ is set */
3684 status = ata_chk_status(ap);
3685 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3686 /* device status error */
3687 qc->err_mask |= AC_ERR_HSM;
3688 ap->hsm_task_state = HSM_ST_ERR;
3689 goto err_out;
3690 }
3691
3692 /* Send the CDB (atapi) or the first data block (ata pio out).
3693 * During the state transition, interrupt handler shouldn't
3694 * be invoked before the data transfer is complete and
3695 * hsm_task_state is changed. Hence, the following locking.
3696 */
3697 spin_lock_irqsave(&ap->host_set->lock, flags);
3698
3699 if (qc->tf.protocol == ATA_PROT_PIO) {
3700 /* PIO data out protocol.
3701 * send first data block.
3702 */
3703
3704 /* ata_pio_sectors() might change the state to HSM_ST_LAST.
3705 * so, the state is changed here before ata_pio_sectors().
3706 */
3707 ap->hsm_task_state = HSM_ST;
3708 ata_pio_sectors(qc);
3709 ata_altstatus(ap); /* flush */
3710 } else
3711 /* send CDB */
3712 atapi_send_cdb(ap, qc);
3713
3714 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3715
3716 /* if polling, ata_pio_task() handles the rest.
3717 * otherwise, interrupt handler takes over from here.
3718 */
3719 return has_next;
3720
3721 err_out:
3722 return 1; /* has next */
3723 }
3724
3725 /**
3726 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3727 * @qc: Command on going
3728 * @bytes: number of bytes
3729 *
3730 * Transfer data from/to the ATAPI device.
3731 *
3732 * LOCKING:
3733 * Inherited from caller.
3734 *
3735 */
3736
3737 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3738 {
3739 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3740 struct scatterlist *sg = qc->__sg;
3741 struct ata_port *ap = qc->ap;
3742 struct page *page;
3743 unsigned char *buf;
3744 unsigned int offset, count;
3745
3746 if (qc->curbytes + bytes >= qc->nbytes)
3747 ap->hsm_task_state = HSM_ST_LAST;
3748
3749 next_sg:
3750 if (unlikely(qc->cursg >= qc->n_elem)) {
3751 /*
3752 * The end of qc->sg is reached and the device expects
3753 * more data to transfer. In order not to overrun qc->sg
3754 * and fulfill length specified in the byte count register,
3755 * - for read case, discard trailing data from the device
3756 * - for write case, padding zero data to the device
3757 */
3758 u16 pad_buf[1] = { 0 };
3759 unsigned int words = bytes >> 1;
3760 unsigned int i;
3761
3762 if (words) /* warning if bytes > 1 */
3763 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3764 ap->id, bytes);
3765
3766 for (i = 0; i < words; i++)
3767 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3768
3769 ap->hsm_task_state = HSM_ST_LAST;
3770 return;
3771 }
3772
3773 sg = &qc->__sg[qc->cursg];
3774
3775 page = sg->page;
3776 offset = sg->offset + qc->cursg_ofs;
3777
3778 /* get the current page and offset */
3779 page = nth_page(page, (offset >> PAGE_SHIFT));
3780 offset %= PAGE_SIZE;
3781
3782 /* don't overrun current sg */
3783 count = min(sg->length - qc->cursg_ofs, bytes);
3784
3785 /* don't cross page boundaries */
3786 count = min(count, (unsigned int)PAGE_SIZE - offset);
3787
3788 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3789
3790 if (PageHighMem(page)) {
3791 unsigned long flags;
3792
3793 local_irq_save(flags);
3794 buf = kmap_atomic(page, KM_IRQ0);
3795
3796 /* do the actual data transfer */
3797 ata_data_xfer(ap, buf + offset, count, do_write);
3798
3799 kunmap_atomic(buf, KM_IRQ0);
3800 local_irq_restore(flags);
3801 } else {
3802 buf = page_address(page);
3803 ata_data_xfer(ap, buf + offset, count, do_write);
3804 }
3805
3806 bytes -= count;
3807 qc->curbytes += count;
3808 qc->cursg_ofs += count;
3809
3810 if (qc->cursg_ofs == sg->length) {
3811 qc->cursg++;
3812 qc->cursg_ofs = 0;
3813 }
3814
3815 if (bytes)
3816 goto next_sg;
3817 }
3818
3819 /**
3820 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3821 * @qc: Command on going
3822 *
3823 * Transfer data from/to the ATAPI device.
3824 *
3825 * LOCKING:
3826 * Inherited from caller.
3827 */
3828
3829 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3830 {
3831 struct ata_port *ap = qc->ap;
3832 struct ata_device *dev = qc->dev;
3833 unsigned int ireason, bc_lo, bc_hi, bytes;
3834 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3835
3836 ap->ops->tf_read(ap, &qc->tf);
3837 ireason = qc->tf.nsect;
3838 bc_lo = qc->tf.lbam;
3839 bc_hi = qc->tf.lbah;
3840 bytes = (bc_hi << 8) | bc_lo;
3841
3842 /* shall be cleared to zero, indicating xfer of data */
3843 if (ireason & (1 << 0))
3844 goto err_out;
3845
3846 /* make sure transfer direction matches expected */
3847 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3848 if (do_write != i_write)
3849 goto err_out;
3850
3851 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3852
3853 __atapi_pio_bytes(qc, bytes);
3854
3855 return;
3856
3857 err_out:
3858 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3859 ap->id, dev->devno);
3860 qc->err_mask |= AC_ERR_HSM;
3861 ap->hsm_task_state = HSM_ST_ERR;
3862 }
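
/* Interrupt-reason decoding used above (per the ATAPI protocol): bit 0
 * of ireason (CoD) must be 0 for a data phase, and bit 1 (IO) gives the
 * direction, IO == 1 meaning device-to-host.  The byte count is simply
 * (lbah << 8) | lbam, e.g. lbah == 0x02, lbam == 0x00 -> 512 bytes.
 */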
3863
3864 /**
3865 * ata_pio_block - start PIO on a block
3866 * @ap: the target ata_port
3867 *
3868 * LOCKING:
3869 * None. (executing in kernel thread context)
3870 */
3871
3872 static void ata_pio_block(struct ata_port *ap)
3873 {
3874 struct ata_queued_cmd *qc;
3875 u8 status;
3876
3877 /*
3878 * This is purely heuristic. This is a fast path.
3879 * Sometimes when we enter, BSY will be cleared in
3880 * a chk-status or two. If not, the drive is probably seeking
3881 * or something. Snooze for a couple msecs, then
3882 * chk-status again. If still busy, fall back to
3883 * HSM_ST_POLL state.
3884 */
3885 status = ata_busy_wait(ap, ATA_BUSY, 5);
3886 if (status & ATA_BUSY) {
3887 msleep(2);
3888 status = ata_busy_wait(ap, ATA_BUSY, 10);
3889 if (status & ATA_BUSY) {
3890 ap->hsm_task_state = HSM_ST_POLL;
3891 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3892 return;
3893 }
3894 }
3895
3896 qc = ata_qc_from_tag(ap, ap->active_tag);
3897 assert(qc != NULL);
3898
3899 /* check error */
3900 if (status & (ATA_ERR | ATA_DF)) {
3901 qc->err_mask |= AC_ERR_DEV;
3902 ap->hsm_task_state = HSM_ST_ERR;
3903 return;
3904 }
3905
3906 /* transfer data if any */
3907 if (is_atapi_taskfile(&qc->tf)) {
3908 /* DRQ=0 means no more data to transfer */
3909 if ((status & ATA_DRQ) == 0) {
3910 ap->hsm_task_state = HSM_ST_LAST;
3911 return;
3912 }
3913
3914 atapi_pio_bytes(qc);
3915 } else {
3916 /* handle BSY=0, DRQ=0 as error */
3917 if ((status & ATA_DRQ) == 0) {
3918 qc->err_mask |= AC_ERR_HSM;
3919 ap->hsm_task_state = HSM_ST_ERR;
3920 return;
3921 }
3922
3923 ata_pio_sectors(qc);
3924 }
3925
3926 ata_altstatus(ap); /* flush */
3927 }
3928
3929 static void ata_pio_error(struct ata_port *ap)
3930 {
3931 struct ata_queued_cmd *qc;
3932
3933 qc = ata_qc_from_tag(ap, ap->active_tag);
3934 assert(qc != NULL);
3935
3936 if (qc->tf.command != ATA_CMD_PACKET)
3937 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3938
3939 /* make sure qc->err_mask is available to
3940 * know what's wrong and recover
3941 */
3942 assert(qc->err_mask);
3943
3944 ap->hsm_task_state = HSM_ST_IDLE;
3945
3946 ata_poll_qc_complete(qc);
3947 }
3948
3949 static void ata_pio_task(void *_data)
3950 {
3951 struct ata_port *ap = _data;
3952 unsigned long timeout;
3953 int has_next;
3954
3955 fsm_start:
3956 timeout = 0;
3957 has_next = 1;
3958
3959 switch (ap->hsm_task_state) {
3960 case HSM_ST_FIRST:
3961 has_next = ata_pio_first_block(ap);
3962 break;
3963
3964 case HSM_ST:
3965 ata_pio_block(ap);
3966 break;
3967
3968 case HSM_ST_LAST:
3969 has_next = ata_pio_complete(ap);
3970 break;
3971
3972 case HSM_ST_POLL:
3973 case HSM_ST_LAST_POLL:
3974 timeout = ata_pio_poll(ap);
3975 break;
3976
3977 case HSM_ST_TMOUT:
3978 case HSM_ST_ERR:
3979 ata_pio_error(ap);
3980 return;
3981
3982 default:
3983 BUG();
3984 return;
3985 }
3986
3987 if (timeout)
3988 ata_queue_delayed_pio_task(ap, timeout);
3989 else if (has_next)
3990 goto fsm_start;
3991 }
3992
3993 /**
3994 * ata_qc_timeout - Handle timeout of queued command
3995 * @qc: Command that timed out
3996 *
3997 * Some part of the kernel (currently, only the SCSI layer)
3998 * has noticed that the active command on port @ap has not
3999 * completed after a specified length of time. Handle this
4000 * condition by disabling DMA (if necessary) and completing
4001 * transactions, with error if necessary.
4002 *
4003 * This also handles the case of the "lost interrupt", where
4004 * for some reason (possibly hardware bug, possibly driver bug)
4005 * an interrupt was not delivered to the driver, even though the
4006 * transaction completed successfully.
4007 *
4008 * LOCKING:
4009 * Inherited from SCSI layer (none, can sleep)
4010 */
4011
4012 static void ata_qc_timeout(struct ata_queued_cmd *qc)
4013 {
4014 struct ata_port *ap = qc->ap;
4015 struct ata_host_set *host_set = ap->host_set;
4016 u8 host_stat = 0, drv_stat;
4017 unsigned long flags;
4018
4019 DPRINTK("ENTER\n");
4020
4021 ata_flush_pio_tasks(ap);
4022 ap->hsm_task_state = HSM_ST_IDLE;
4023
4024 spin_lock_irqsave(&host_set->lock, flags);
4025
4026 switch (qc->tf.protocol) {
4027
4028 case ATA_PROT_DMA:
4029 case ATA_PROT_ATAPI_DMA:
4030 host_stat = ap->ops->bmdma_status(ap);
4031
4032 /* before we do anything else, clear DMA-Start bit */
4033 ap->ops->bmdma_stop(qc);
4034
4035 /* fall through */
4036
4037 default:
4038 ata_altstatus(ap);
4039 drv_stat = ata_chk_status(ap);
4040
4041 /* ack bmdma irq events */
4042 ap->ops->irq_clear(ap);
4043
4044 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
4045 ap->id, qc->tf.command, drv_stat, host_stat);
4046
4047 ap->hsm_task_state = HSM_ST_IDLE;
4048
4049 /* complete taskfile transaction */
4050 qc->err_mask |= AC_ERR_TIMEOUT;
4051 break;
4052 }
4053
4054 spin_unlock_irqrestore(&host_set->lock, flags);
4055
4056 ata_eh_qc_complete(qc);
4057
4058 DPRINTK("EXIT\n");
4059 }
4060
4061 /**
4062 * ata_eng_timeout - Handle timeout of queued command
4063 * @ap: Port on which timed-out command is active
4064 *
4065 * Some part of the kernel (currently, only the SCSI layer)
4066 * has noticed that the active command on port @ap has not
4067 * completed after a specified length of time. Handle this
4068 * condition by disabling DMA (if necessary) and completing
4069 * transactions, with error if necessary.
4070 *
4071 * This also handles the case of the "lost interrupt", where
4072 * for some reason (possibly hardware bug, possibly driver bug)
4073 * an interrupt was not delivered to the driver, even though the
4074 * transaction completed successfully.
4075 *
4076 * LOCKING:
4077 * Inherited from SCSI layer (none, can sleep)
4078 */
4079
4080 void ata_eng_timeout(struct ata_port *ap)
4081 {
4082 struct ata_queued_cmd *qc;
4083
4084 DPRINTK("ENTER\n");
4085
4086 qc = ata_qc_from_tag(ap, ap->active_tag);
4087 if (qc)
4088 ata_qc_timeout(qc);
4089 else {
4090 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
4091 ap->id);
4092 goto out;
4093 }
4094
4095 out:
4096 DPRINTK("EXIT\n");
4097 }
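
/*
 * Rough call path, for reference (a sketch only; the EH plumbing is
 * in transition in this tree, so the exact hooks may differ):
 *
 *	SCSI error handler -> ata_scsi_error(host)
 *	  -> ap->ops->eng_timeout(ap)    (commonly ata_eng_timeout)
 *	    -> ata_qc_timeout(qc)        for the port's active tag
 */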
4098
4099 /**
4100 * ata_qc_new - Request an available ATA command, for queueing
4101 * @ap: Port from which to allocate a queued command structure
4103 *
4104 * LOCKING:
4105 * None.
4106 */
4107
4108 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4109 {
4110 struct ata_queued_cmd *qc = NULL;
4111 unsigned int i;
4112
4113 for (i = 0; i < ATA_MAX_QUEUE; i++)
4114 if (!test_and_set_bit(i, &ap->qactive)) {
4115 qc = ata_qc_from_tag(ap, i);
4116 break;
4117 }
4118
4119 if (qc)
4120 qc->tag = i;
4121
4122 return qc;
4123 }
4124
4125 /**
4126 * ata_qc_new_init - Request an available ATA command, and initialize it
4127 * @ap: Port associated with device @dev
4128 * @dev: Device from which we request an available command structure
4129 *
4130 * LOCKING:
4131 * None.
4132 */
4133
4134 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
4135 struct ata_device *dev)
4136 {
4137 struct ata_queued_cmd *qc;
4138
4139 qc = ata_qc_new(ap);
4140 if (qc) {
4141 qc->scsicmd = NULL;
4142 qc->ap = ap;
4143 qc->dev = dev;
4144
4145 ata_qc_reinit(qc);
4146 }
4147
4148 return qc;
4149 }
4150
4151 /**
4152 * ata_qc_free - free unused ata_queued_cmd
4153 * @qc: Command to free
4154 *
4155 * Designed to free unused ata_queued_cmd object
4156 * in case something prevents using it.
4157 *
4158 * LOCKING:
4159 * spin_lock_irqsave(host_set lock)
4160 */
4161 void ata_qc_free(struct ata_queued_cmd *qc)
4162 {
4163 struct ata_port *ap;
4164 unsigned int tag;
4165
4166 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
4167 ap = qc->ap;
4168 qc->flags = 0;
4169 tag = qc->tag;
4170 if (likely(ata_tag_valid(tag))) {
4171 if (tag == ap->active_tag)
4172 ap->active_tag = ATA_TAG_POISON;
4173 qc->tag = ATA_TAG_POISON;
4174 clear_bit(tag, &ap->qactive);
4175 }
4176 }
4177
4178 /**
4179 * ata_qc_complete - Complete an active ATA command
4180 * @qc: Command to complete
4182 *
4183 * Indicate to the mid and upper layers that an ATA
4184 * command has completed, with either an ok or not-ok status.
4185 *
4186 * LOCKING:
4187 * spin_lock_irqsave(host_set lock)
4188 */
4189
4190 void ata_qc_complete(struct ata_queued_cmd *qc)
4191 {
4192 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
4193 assert(qc->flags & ATA_QCFLAG_ACTIVE);
4194
4195 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4196 ata_sg_clean(qc);
4197
4198 /* atapi: mark qc as inactive so the interrupt handler cannot
4199 * complete the command a second time before the error handler
4200 * runs (needed when rc != 0 and an atapi request sense follows)
4201 */
4202 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4203
4204 /* call completion callback */
4205 qc->complete_fn(qc);
4206 }
4207
4208 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4209 {
4210 struct ata_port *ap = qc->ap;
4211
4212 switch (qc->tf.protocol) {
4213 case ATA_PROT_DMA:
4214 case ATA_PROT_ATAPI_DMA:
4215 return 1;
4216
4217 case ATA_PROT_ATAPI:
4218 case ATA_PROT_PIO:
4219 case ATA_PROT_PIO_MULT:
4220 if (ap->flags & ATA_FLAG_PIO_DMA)
4221 return 1;
4222
4223 /* fall through */
4224
4225 default:
4226 return 0;
4227 }
4228
4229 /* never reached */
4230 }
4231
4232 /**
4233 * ata_qc_issue - issue taskfile to device
4234 * @qc: command to issue to device
4235 *
4236 * Prepare an ATA command for submission to the device.
4237 * This includes mapping the data into a DMA-able
4238 * area, filling in the S/G table, and finally
4239 * writing the taskfile to hardware, starting the command.
4240 *
4241 * LOCKING:
4242 * spin_lock_irqsave(host_set lock)
4243 *
4244 * RETURNS:
4245 * Zero on success, AC_ERR_* mask on failure
4246 */
4247
4248 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
4249 {
4250 struct ata_port *ap = qc->ap;
4251
4252 if (ata_should_dma_map(qc)) {
4253 if (qc->flags & ATA_QCFLAG_SG) {
4254 if (ata_sg_setup(qc))
4255 goto sg_err;
4256 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4257 if (ata_sg_setup_one(qc))
4258 goto sg_err;
4259 }
4260 } else {
4261 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4262 }
4263
4264 ap->ops->qc_prep(qc);
4265
4266 qc->ap->active_tag = qc->tag;
4267 qc->flags |= ATA_QCFLAG_ACTIVE;
4268
4269 return ap->ops->qc_issue(qc);
4270
4271 sg_err:
4272 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4273 return AC_ERR_SYSTEM;
4274 }
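
/*
 * Example of the allocate/setup/issue flow (an illustrative sketch
 * modeled on ata_exec_internal(); my_done_fn, buf and buflen are
 * hypothetical, not part of libata):
 *
 *	struct ata_queued_cmd *qc;
 *
 *	qc = ata_qc_new_init(ap, dev);		// grab a free tag
 *	if (!qc)
 *		return;				// no free tag, try later
 *	ata_tf_init(ap, &qc->tf, dev->devno);
 *	qc->tf.command = ATA_CMD_ID_ATA;	// example command only
 *	qc->tf.protocol = ATA_PROT_PIO;
 *	ata_sg_init_one(qc, buf, buflen);	// single-buffer data phase
 *	qc->complete_fn = my_done_fn;		// hypothetical callback
 *	if (ata_qc_issue(qc))			// nonzero = AC_ERR_* mask
 *		ata_qc_free(qc);
 */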
4275
4276
4277 /**
4278 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4279 * @qc: command to issue to device
4280 *
4281 * Using various libata functions and hooks, this function
4282 * starts an ATA command. ATA commands are grouped into
4283 * classes called "protocols", and issuing each type of protocol
4284 * is slightly different.
4285 *
4286 * May be used as the qc_issue() entry in ata_port_operations.
4287 *
4288 * LOCKING:
4289 * spin_lock_irqsave(host_set lock)
4290 *
4291 * RETURNS:
4292 * Zero on success, AC_ERR_* mask on failure
4293 */
4294
4295 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4296 {
4297 struct ata_port *ap = qc->ap;
4298
4299 /* Use polling PIO if the LLD doesn't handle
4300 * interrupt-driven PIO and the atapi CDB interrupt.
4301 */
4302 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4303 switch (qc->tf.protocol) {
4304 case ATA_PROT_PIO:
4305 case ATA_PROT_ATAPI:
4306 case ATA_PROT_ATAPI_NODATA:
4307 qc->tf.flags |= ATA_TFLAG_POLLING;
4308 break;
4309 case ATA_PROT_ATAPI_DMA:
4310 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4311 BUG();
4312 break;
4313 default:
4314 break;
4315 }
4316 }
4317
4318 /* select the device */
4319 ata_dev_select(ap, qc->dev->devno, 1, 0);
4320
4321 /* start the command */
4322 switch (qc->tf.protocol) {
4323 case ATA_PROT_NODATA:
4324 if (qc->tf.flags & ATA_TFLAG_POLLING)
4325 ata_qc_set_polling(qc);
4326
4327 ata_tf_to_host(ap, &qc->tf);
4328 ap->hsm_task_state = HSM_ST_LAST;
4329
4330 if (qc->tf.flags & ATA_TFLAG_POLLING)
4331 ata_queue_pio_task(ap);
4332
4333 break;
4334
4335 case ATA_PROT_DMA:
4336 assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
4337
4338 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4339 ap->ops->bmdma_setup(qc); /* set up bmdma */
4340 ap->ops->bmdma_start(qc); /* initiate bmdma */
4341 ap->hsm_task_state = HSM_ST_LAST;
4342 break;
4343
4344 case ATA_PROT_PIO:
4345 if (qc->tf.flags & ATA_TFLAG_POLLING)
4346 ata_qc_set_polling(qc);
4347
4348 ata_tf_to_host(ap, &qc->tf);
4349
4350 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4351 /* PIO data out protocol */
4352 ap->hsm_task_state = HSM_ST_FIRST;
4353 ata_queue_pio_task(ap);
4354
4355 /* always send first data block using
4356 * the ata_pio_task() codepath.
4357 */
4358 } else {
4359 /* PIO data in protocol */
4360 ap->hsm_task_state = HSM_ST;
4361
4362 if (qc->tf.flags & ATA_TFLAG_POLLING)
4363 ata_queue_pio_task(ap);
4364
4365 /* if polling, ata_pio_task() handles the rest.
4366 * otherwise, interrupt handler takes over from here.
4367 */
4368 }
4369
4370 break;
4371
4372 case ATA_PROT_ATAPI:
4373 case ATA_PROT_ATAPI_NODATA:
4374 if (qc->tf.flags & ATA_TFLAG_POLLING)
4375 ata_qc_set_polling(qc);
4376
4377 ata_tf_to_host(ap, &qc->tf);
4378
4379 ap->hsm_task_state = HSM_ST_FIRST;
4380
4381 /* send cdb by polling if no cdb interrupt */
4382 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4383 (qc->tf.flags & ATA_TFLAG_POLLING))
4384 ata_queue_pio_task(ap);
4385 break;
4386
4387 case ATA_PROT_ATAPI_DMA:
4388 assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
4389
4390 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4391 ap->ops->bmdma_setup(qc); /* set up bmdma */
4392 ap->hsm_task_state = HSM_ST_FIRST;
4393
4394 /* send cdb by polling if no cdb interrupt */
4395 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4396 ata_queue_pio_task(ap);
4397 break;
4398
4399 default:
4400 WARN_ON(1);
4401 return AC_ERR_SYSTEM;
4402 }
4403
4404 return 0;
4405 }
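
/*
 * Summary of the protocol dispatch above, for reference:
 *
 *	protocol		next HSM state	first data/CDB sent by
 *	ATA_PROT_NODATA		HSM_ST_LAST	(no data phase)
 *	ATA_PROT_DMA		HSM_ST_LAST	bmdma engine
 *	ATA_PROT_PIO, write	HSM_ST_FIRST	ata_pio_task()
 *	ATA_PROT_PIO, read	HSM_ST		irq handler or pio task
 *	ATA_PROT_ATAPI*		HSM_ST_FIRST	CDB via irq or pio task
 */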
4406
4407 /**
4408 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
4409 * @qc: Info associated with this ATA transaction.
4410 *
4411 * LOCKING:
4412 * spin_lock_irqsave(host_set lock)
4413 */
4414
4415 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
4416 {
4417 struct ata_port *ap = qc->ap;
4418 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
4419 u8 dmactl;
4420 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4421
4422 /* load PRD table addr. */
4423 mb(); /* make sure PRD table writes are visible to controller */
4424 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
4425
4426 /* specify data direction, triple-check start bit is clear */
4427 dmactl = readb(mmio + ATA_DMA_CMD);
4428 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
4429 if (!rw)
4430 dmactl |= ATA_DMA_WR; /* bus master writes memory, i.e. an ATA read */
4431 writeb(dmactl, mmio + ATA_DMA_CMD);
4432
4433 /* issue r/w command */
4434 ap->ops->exec_command(ap, &qc->tf);
4435 }
4436
4437 /**
4438 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
4439 * @qc: Info associated with this ATA transaction.
4440 *
4441 * LOCKING:
4442 * spin_lock_irqsave(host_set lock)
4443 */
4444
4445 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
4446 {
4447 struct ata_port *ap = qc->ap;
4448 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4449 u8 dmactl;
4450
4451 /* start host DMA transaction */
4452 dmactl = readb(mmio + ATA_DMA_CMD);
4453 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
4454
4455 /* Strictly, one may wish to issue a readb() here, to
4456 * flush the mmio write. However, control also passes
4457 * to the hardware at this point, and it will interrupt
4458 * us when we are to resume control. So, in effect,
4459 * we don't care when the mmio write flushes.
4460 * Further, a read of the DMA status register _immediately_
4461 * following the write may not be what certain flaky hardware
4462 * is expecting, so I think it is best not to add a readb()
4463 * without first testing all the MMIO ATA cards/mobos.
4464 * Or maybe I'm just being paranoid.
4465 */
4466 }
4467
4468 /**
4469 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
4470 * @qc: Info associated with this ATA transaction.
4471 *
4472 * LOCKING:
4473 * spin_lock_irqsave(host_set lock)
4474 */
4475
4476 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
4477 {
4478 struct ata_port *ap = qc->ap;
4479 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
4480 u8 dmactl;
4481
4482 /* load PRD table addr. */
4483 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
4484
4485 /* specify data direction, triple-check start bit is clear */
4486 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4487 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
4488 if (!rw)
4489 dmactl |= ATA_DMA_WR; /* bus master writes memory, i.e. an ATA read */
4490 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4491
4492 /* issue r/w command */
4493 ap->ops->exec_command(ap, &qc->tf);
4494 }
4495
4496 /**
4497 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
4498 * @qc: Info associated with this ATA transaction.
4499 *
4500 * LOCKING:
4501 * spin_lock_irqsave(host_set lock)
4502 */
4503
4504 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
4505 {
4506 struct ata_port *ap = qc->ap;
4507 u8 dmactl;
4508
4509 /* start host DMA transaction */
4510 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4511 outb(dmactl | ATA_DMA_START,
4512 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4513 }
4514
4515
4516 /**
4517 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
4518 * @qc: Info associated with this ATA transaction.
4519 *
4520 * Writes the ATA_DMA_START flag to the DMA command register.
4521 *
4522 * May be used as the bmdma_start() entry in ata_port_operations.
4523 *
4524 * LOCKING:
4525 * spin_lock_irqsave(host_set lock)
4526 */
4527 void ata_bmdma_start(struct ata_queued_cmd *qc)
4528 {
4529 if (qc->ap->flags & ATA_FLAG_MMIO)
4530 ata_bmdma_start_mmio(qc);
4531 else
4532 ata_bmdma_start_pio(qc);
4533 }
4534
4535
4536 /**
4537 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
4538 * @qc: Info associated with this ATA transaction.
4539 *
4540 * Writes address of PRD table to device's PRD Table Address
4541 * register, sets the DMA control register, and calls
4542 * ops->exec_command() to start the transfer.
4543 *
4544 * May be used as the bmdma_setup() entry in ata_port_operations.
4545 *
4546 * LOCKING:
4547 * spin_lock_irqsave(host_set lock)
4548 */
4549 void ata_bmdma_setup(struct ata_queued_cmd *qc)
4550 {
4551 if (qc->ap->flags & ATA_FLAG_MMIO)
4552 ata_bmdma_setup_mmio(qc);
4553 else
4554 ata_bmdma_setup_pio(qc);
4555 }
4556
4557
4558 /**
4559 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
4560 * @ap: Port associated with this ATA transaction.
4561 *
4562 * Clear interrupt and error flags in DMA status register.
4563 *
4564 * May be used as the irq_clear() entry in ata_port_operations.
4565 *
4566 * LOCKING:
4567 * spin_lock_irqsave(host_set lock)
4568 */
4569
4570 void ata_bmdma_irq_clear(struct ata_port *ap)
4571 {
4572 if (ap->flags & ATA_FLAG_MMIO) {
4573 void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
4574 writeb(readb(mmio), mmio);
4575 } else {
4576 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
4577 outb(inb(addr), addr);
4578 }
4579
4580 }
4581
4582
4583 /**
4584 * ata_bmdma_status - Read PCI IDE BMDMA status
4585 * @ap: Port associated with this ATA transaction.
4586 *
4587 * Read and return BMDMA status register.
4588 *
4589 * May be used as the bmdma_status() entry in ata_port_operations.
4590 *
4591 * LOCKING:
4592 * spin_lock_irqsave(host_set lock)
4593 */
4594
4595 u8 ata_bmdma_status(struct ata_port *ap)
4596 {
4597 u8 host_stat;
4598 if (ap->flags & ATA_FLAG_MMIO) {
4599 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4600 host_stat = readb(mmio + ATA_DMA_STATUS);
4601 } else
4602 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
4603 return host_stat;
4604 }
4605
4606
4607 /**
4608 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
4609 * @qc: Command we are ending DMA for
4610 *
4611 * Clears the ATA_DMA_START flag in the dma control register
4612 *
4613 * May be used as the bmdma_stop() entry in ata_port_operations.
4614 *
4615 * LOCKING:
4616 * spin_lock_irqsave(host_set lock)
4617 */
4618
4619 void ata_bmdma_stop(struct ata_queued_cmd *qc)
4620 {
4621 struct ata_port *ap = qc->ap;
4622 if (ap->flags & ATA_FLAG_MMIO) {
4623 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4624
4625 /* clear start/stop bit */
4626 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
4627 mmio + ATA_DMA_CMD);
4628 } else {
4629 /* clear start/stop bit */
4630 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
4631 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4632 }
4633
4634 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
4635 ata_altstatus(ap); /* dummy read */
4636 }
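
/*
 * Example (an illustrative sketch; my_ops is hypothetical): the BMDMA
 * helpers above are designed to be plugged straight into a LLD's
 * ata_port_operations:
 *
 *	static struct ata_port_operations my_ops = {
 *		...
 *		.bmdma_setup	= ata_bmdma_setup,
 *		.bmdma_start	= ata_bmdma_start,
 *		.bmdma_stop	= ata_bmdma_stop,
 *		.bmdma_status	= ata_bmdma_status,
 *		.irq_handler	= ata_interrupt,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *		...
 *	};
 */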
4637
4638 /**
4639 * ata_host_intr - Handle host interrupt for given (port, task)
4640 * @ap: Port on which interrupt arrived (possibly...)
4641 * @qc: Taskfile currently active in engine
4642 *
4643 * Handle host interrupt for given queued command. Currently,
4644 * only DMA interrupts are handled. All other commands are
4645 * handled via polling with interrupts disabled (nIEN bit).
4646 *
4647 * LOCKING:
4648 * spin_lock_irqsave(host_set lock)
4649 *
4650 * RETURNS:
4651 * One if interrupt was handled, zero if not (shared irq).
4652 */
4653
4654 inline unsigned int ata_host_intr (struct ata_port *ap,
4655 struct ata_queued_cmd *qc)
4656 {
4657 u8 status, host_stat = 0;
4658
4659 VPRINTK("ata%u: protocol %d task_state %d\n",
4660 ap->id, qc->tf.protocol, ap->hsm_task_state);
4661
4662 /* Check whether we are expecting an interrupt in this state */
4663 switch (ap->hsm_task_state) {
4664 case HSM_ST_FIRST:
4665 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4666 * The flag was turned on only for atapi devices.
4667 * No need to check is_atapi_taskfile(&qc->tf) again.
4668 */
4669 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4670 goto idle_irq;
4671 break;
4672 case HSM_ST_LAST:
4673 if (qc->tf.protocol == ATA_PROT_DMA ||
4674 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4675 /* check status of DMA engine */
4676 host_stat = ap->ops->bmdma_status(ap);
4677 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4678
4679 /* if it's not our irq... */
4680 if (!(host_stat & ATA_DMA_INTR))
4681 goto idle_irq;
4682
4683 /* before we do anything else, clear DMA-Start bit */
4684 ap->ops->bmdma_stop(qc);
4685
4686 if (unlikely(host_stat & ATA_DMA_ERR)) {
4687 /* error when transferring data to/from memory */
4688 qc->err_mask |= AC_ERR_HOST_BUS;
4689 ap->hsm_task_state = HSM_ST_ERR;
4690 }
4691 }
4692 break;
4693 case HSM_ST:
4694 break;
4695 default:
4696 goto idle_irq;
4697 }
4698
4699 /* check altstatus */
4700 status = ata_altstatus(ap);
4701 if (status & ATA_BUSY)
4702 goto idle_irq;
4703
4704 /* check main status, clearing INTRQ */
4705 status = ata_chk_status(ap);
4706 if (unlikely(status & ATA_BUSY))
4707 goto idle_irq;
4708
4709 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4710 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4711
4712 /* ack bmdma irq events */
4713 ap->ops->irq_clear(ap);
4714
4715 /* check error */
4716 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4717 qc->err_mask |= AC_ERR_DEV;
4718 ap->hsm_task_state = HSM_ST_ERR;
4719 }
4720
4721 fsm_start:
4722 switch (ap->hsm_task_state) {
4723 case HSM_ST_FIRST:
4724 /* Some pre-ATAPI-4 devices assert INTRQ
4725 * in this state when ready to receive CDB.
4726 */
4727
4728 /* check device status */
4729 if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
4730 /* Wrong status. Let EH handle this */
4731 qc->err_mask |= AC_ERR_HSM;
4732 ap->hsm_task_state = HSM_ST_ERR;
4733 goto fsm_start;
4734 }
4735
4736 atapi_send_cdb(ap, qc);
4737
4738 break;
4739
4740 case HSM_ST:
4741 /* complete command or read/write the data register */
4742 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4743 /* ATAPI PIO protocol */
4744 if ((status & ATA_DRQ) == 0) {
4745 /* no more data to transfer */
4746 ap->hsm_task_state = HSM_ST_LAST;
4747 goto fsm_start;
4748 }
4749
4750 atapi_pio_bytes(qc);
4751
4752 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4753 /* bad ireason reported by device */
4754 goto fsm_start;
4755
4756 } else {
4757 /* ATA PIO protocol */
4758 if (unlikely((status & ATA_DRQ) == 0)) {
4759 /* handle BSY=0, DRQ=0 as error */
4760 qc->err_mask |= AC_ERR_HSM;
4761 ap->hsm_task_state = HSM_ST_ERR;
4762 goto fsm_start;
4763 }
4764
4765 ata_pio_sectors(qc);
4766
4767 if (ap->hsm_task_state == HSM_ST_LAST &&
4768 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4769 /* all data read */
4770 ata_altstatus(ap);
4771 status = ata_chk_status(ap);
4772 goto fsm_start;
4773 }
4774 }
4775
4776 ata_altstatus(ap); /* flush */
4777 break;
4778
4779 case HSM_ST_LAST:
4780 if (unlikely(status & ATA_DRQ)) {
4781 /* handle DRQ=1 as error */
4782 qc->err_mask |= AC_ERR_HSM;
4783 ap->hsm_task_state = HSM_ST_ERR;
4784 goto fsm_start;
4785 }
4786
4787 /* no more data to transfer */
4788 DPRINTK("ata%u: command complete, drv_stat 0x%x\n",
4789 ap->id, status);
4790
4791 ap->hsm_task_state = HSM_ST_IDLE;
4792
4793 /* complete taskfile transaction */
4794 qc->err_mask |= ac_err_mask(status);
4795 ata_qc_complete(qc);
4796 break;
4797
4798 case HSM_ST_ERR:
4799 if (qc->tf.command != ATA_CMD_PACKET)
4800 printk(KERN_ERR "ata%u: command error, drv_stat 0x%x host_stat 0x%x\n",
4801 ap->id, status, host_stat);
4802
4803 /* make sure qc->err_mask is set, so EH knows
4804 * what went wrong and can recover
4805 */
4806 assert(qc->err_mask);
4807
4808 ap->hsm_task_state = HSM_ST_IDLE;
4809 ata_qc_complete(qc);
4810 break;
4811 default:
4812 goto idle_irq;
4813 }
4814
4815 return 1; /* irq handled */
4816
4817 idle_irq:
4818 ap->stats.idle_irq++;
4819
4820 #ifdef ATA_IRQ_TRAP
4821 if ((ap->stats.idle_irq % 1000) == 0) {
4822 ata_irq_ack(ap, 0); /* debug trap */
4823 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4824 return 1; /* treat the trapped irq as handled */
4825 }
4826 #endif
4827 return 0; /* irq not handled */
4828 }
4829
4830 /**
4831 * ata_interrupt - Default ATA host interrupt handler
4832 * @irq: irq line (unused)
4833 * @dev_instance: pointer to our ata_host_set information structure
4834 * @regs: unused
4835 *
4836 * Default interrupt handler for PCI IDE devices. Calls
4837 * ata_host_intr() for each port that is not disabled.
4838 *
4839 * LOCKING:
4840 * Obtains host_set lock during operation.
4841 *
4842 * RETURNS:
4843 * IRQ_NONE or IRQ_HANDLED.
4844 */
4845
4846 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4847 {
4848 struct ata_host_set *host_set = dev_instance;
4849 unsigned int i;
4850 unsigned int handled = 0;
4851 unsigned long flags;
4852
4853 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4854 spin_lock_irqsave(&host_set->lock, flags);
4855
4856 for (i = 0; i < host_set->n_ports; i++) {
4857 struct ata_port *ap;
4858
4859 ap = host_set->ports[i];
4860 if (ap &&
4861 !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
4862 struct ata_queued_cmd *qc;
4863
4864 qc = ata_qc_from_tag(ap, ap->active_tag);
4865 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4866 (qc->flags & ATA_QCFLAG_ACTIVE))
4867 handled |= ata_host_intr(ap, qc);
4868 }
4869 }
4870
4871 spin_unlock_irqrestore(&host_set->lock, flags);
4872
4873 return IRQ_RETVAL(handled);
4874 }
4875
4876 /*
4877 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4878 * without filling any other registers
4879 */
4880 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4881 u8 cmd)
4882 {
4883 struct ata_taskfile tf;
4884 int err;
4885
4886 ata_tf_init(ap, &tf, dev->devno);
4887
4888 tf.command = cmd;
4889 tf.flags |= ATA_TFLAG_DEVICE;
4890 tf.protocol = ATA_PROT_NODATA;
4891
4892 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4893 if (err)
4894 printk(KERN_ERR "%s: ata command failed: %d\n",
4895 __FUNCTION__, err);
4896
4897 return err;
4898 }
4899
4900 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4901 {
4902 u8 cmd;
4903
4904 if (!ata_try_flush_cache(dev))
4905 return 0;
4906
4907 if (ata_id_has_flush_ext(dev->id))
4908 cmd = ATA_CMD_FLUSH_EXT;
4909 else
4910 cmd = ATA_CMD_FLUSH;
4911
4912 return ata_do_simple_cmd(ap, dev, cmd);
4913 }
4914
4915 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4916 {
4917 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4918 }
4919
4920 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4921 {
4922 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4923 }
4924
4925 /**
4926 * ata_device_resume - wake up a previously suspended device
4927 * @ap: port the device is connected to
4928 * @dev: the device to resume
4929 *
4930 * Kick the drive back into action by sending it an idle immediate
4931 * command and making sure the transfer mode agrees between drive
4932 * and host.
4933 *
4934 */
4935 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4936 {
4937 if (ap->flags & ATA_FLAG_SUSPENDED) {
4938 ap->flags &= ~ATA_FLAG_SUSPENDED;
4939 ata_set_mode(ap);
4940 }
4941 if (!ata_dev_present(dev))
4942 return 0;
4943 if (dev->class == ATA_DEV_ATA)
4944 ata_start_drive(ap, dev);
4945
4946 return 0;
4947 }
4948
4949 /**
4950 * ata_device_suspend - prepare a device for suspend
4951 * @ap: port the device is connected to
4952 * @dev: the device to suspend
4953 *
4954 * Flush the cache on the drive, if appropriate, then issue a
4955 * standbynow command.
4956 */
4957 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4958 {
4959 if (!ata_dev_present(dev))
4960 return 0;
4961 if (dev->class == ATA_DEV_ATA)
4962 ata_flush_cache(ap, dev);
4963
4964 ata_standby_drive(ap, dev);
4965 ap->flags |= ATA_FLAG_SUSPENDED;
4966 return 0;
4967 }
4968
4969 /**
4970 * ata_port_start - Set port up for dma.
4971 * @ap: Port to initialize
4972 *
4973 * Called just after data structures for each port are
4974 * initialized. Allocates space for PRD table.
4975 *
4976 * May be used as the port_start() entry in ata_port_operations.
4977 *
4978 * LOCKING:
4979 * Inherited from caller.
4980 */
4981
4982 int ata_port_start (struct ata_port *ap)
4983 {
4984 struct device *dev = ap->host_set->dev;
4985 int rc;
4986
4987 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4988 if (!ap->prd)
4989 return -ENOMEM;
4990
4991 rc = ata_pad_alloc(ap, dev);
4992 if (rc) {
4993 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4994 return rc;
4995 }
4996
4997 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4998
4999 return 0;
5000 }
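
/*
 * Example (an illustrative sketch; my_port_priv is hypothetical):
 * a LLD with per-port private state can wrap ata_port_start() in
 * its own port_start() hook:
 *
 *	static int my_port_start(struct ata_port *ap)
 *	{
 *		int rc = ata_port_start(ap);	// PRD table + pad buffer
 *		if (rc)
 *			return rc;
 *		ap->private_data = kzalloc(sizeof(struct my_port_priv),
 *					   GFP_KERNEL);
 *		if (!ap->private_data) {
 *			ata_port_stop(ap);	// undo ata_port_start()
 *			return -ENOMEM;
 *		}
 *		return 0;
 *	}
 */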
5001
5002
5003 /**
5004 * ata_port_stop - Undo ata_port_start()
5005 * @ap: Port to shut down
5006 *
5007 * Frees the PRD table.
5008 *
5009 * May be used as the port_stop() entry in ata_port_operations.
5010 *
5011 * LOCKING:
5012 * Inherited from caller.
5013 */
5014
5015 void ata_port_stop (struct ata_port *ap)
5016 {
5017 struct device *dev = ap->host_set->dev;
5018
5019 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5020 ata_pad_free(ap, dev);
5021 }
5022
5023 void ata_host_stop (struct ata_host_set *host_set)
5024 {
5025 if (host_set->mmio_base)
5026 iounmap(host_set->mmio_base);
5027 }
5028
5029
5030 /**
5031 * ata_host_remove - Unregister SCSI host structure with upper layers
5032 * @ap: Port to unregister
5033 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
5034 *
5035 * LOCKING:
5036 * Inherited from caller.
5037 */
5038
5039 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
5040 {
5041 struct Scsi_Host *sh = ap->host;
5042
5043 DPRINTK("ENTER\n");
5044
5045 if (do_unregister)
5046 scsi_remove_host(sh);
5047
5048 ap->ops->port_stop(ap);
5049 }
5050
5051 /**
5052 * ata_host_init - Initialize an ata_port structure
5053 * @ap: Structure to initialize
5054 * @host: associated SCSI mid-layer structure
5055 * @host_set: Collection of hosts to which @ap belongs
5056 * @ent: Probe information provided by low-level driver
5057 * @port_no: Port number associated with this ata_port
5058 *
5059 * Initialize a new ata_port structure, and its associated
5060 * scsi_host.
5061 *
5062 * LOCKING:
5063 * Inherited from caller.
5064 */
5065
5066 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
5067 struct ata_host_set *host_set,
5068 const struct ata_probe_ent *ent, unsigned int port_no)
5069 {
5070 unsigned int i;
5071
5072 host->max_id = 16;
5073 host->max_lun = 1;
5074 host->max_channel = 1;
5075 host->unique_id = ata_unique_id++;
5076 host->max_cmd_len = 12;
5077
5078 ap->flags = ATA_FLAG_PORT_DISABLED;
5079 ap->id = host->unique_id;
5080 ap->host = host;
5081 ap->ctl = ATA_DEVCTL_OBS;
5082 ap->host_set = host_set;
5083 ap->port_no = port_no;
5084 ap->hard_port_no =
5085 ent->legacy_mode ? ent->hard_port_no : port_no;
5086 ap->pio_mask = ent->pio_mask;
5087 ap->mwdma_mask = ent->mwdma_mask;
5088 ap->udma_mask = ent->udma_mask;
5089 ap->flags |= ent->host_flags;
5090 ap->ops = ent->port_ops;
5091 ap->cbl = ATA_CBL_NONE;
5092 ap->active_tag = ATA_TAG_POISON;
5093 ap->last_ctl = 0xFF;
5094
5095 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
5096 INIT_LIST_HEAD(&ap->eh_done_q);
5097
5098 for (i = 0; i < ATA_MAX_DEVICES; i++)
5099 ap->device[i].devno = i;
5100
5101 #ifdef ATA_IRQ_TRAP
5102 ap->stats.unhandled_irq = 1;
5103 ap->stats.idle_irq = 1;
5104 #endif
5105
5106 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5107 }
5108
5109 /**
5110 * ata_host_add - Attach low-level ATA driver to system
5111 * @ent: Information provided by low-level driver
5112 * @host_set: Collections of ports to which we add
5113 * @port_no: Port number associated with this host
5114 *
5115 * Attach low-level ATA driver to system.
5116 *
5117 * LOCKING:
5118 * PCI/etc. bus probe sem.
5119 *
5120 * RETURNS:
5121 * New ata_port on success, NULL on error.
5122 */
5123
5124 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
5125 struct ata_host_set *host_set,
5126 unsigned int port_no)
5127 {
5128 struct Scsi_Host *host;
5129 struct ata_port *ap;
5130 int rc;
5131
5132 DPRINTK("ENTER\n");
5133 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5134 if (!host)
5135 return NULL;
5136
5137 ap = (struct ata_port *) &host->hostdata[0];
5138
5139 ata_host_init(ap, host, host_set, ent, port_no);
5140
5141 rc = ap->ops->port_start(ap);
5142 if (rc)
5143 goto err_out;
5144
5145 return ap;
5146
5147 err_out:
5148 scsi_host_put(host);
5149 return NULL;
5150 }
5151
5152 /**
5153 * ata_device_add - Register hardware device with ATA and SCSI layers
5154 * @ent: Probe information describing hardware device to be registered
5155 *
5156 * This function processes the information provided in the probe
5157 * information struct @ent, allocates the necessary ATA and SCSI
5158 * host information structures, initializes them, and registers
5159 * everything with requisite kernel subsystems.
5160 *
5161 * This function requests irqs, probes the ATA bus, and probes
5162 * the SCSI bus.
5163 *
5164 * LOCKING:
5165 * PCI/etc. bus probe sem.
5166 *
5167 * RETURNS:
5168 * Number of ports registered. Zero on error (no ports registered).
5169 */
5170
5171 int ata_device_add(const struct ata_probe_ent *ent)
5172 {
5173 unsigned int count = 0, i;
5174 struct device *dev = ent->dev;
5175 struct ata_host_set *host_set;
5176
5177 DPRINTK("ENTER\n");
5178 /* alloc a container for our list of ATA ports (buses) */
5179 host_set = kzalloc(sizeof(struct ata_host_set) +
5180 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5181 if (!host_set)
5182 return 0;
5183 spin_lock_init(&host_set->lock);
5184
5185 host_set->dev = dev;
5186 host_set->n_ports = ent->n_ports;
5187 host_set->irq = ent->irq;
5188 host_set->mmio_base = ent->mmio_base;
5189 host_set->private_data = ent->private_data;
5190 host_set->ops = ent->port_ops;
5191
5192 /* register each port bound to this device */
5193 for (i = 0; i < ent->n_ports; i++) {
5194 struct ata_port *ap;
5195 unsigned long xfer_mode_mask;
5196
5197 ap = ata_host_add(ent, host_set, i);
5198 if (!ap)
5199 goto err_out;
5200
5201 host_set->ports[i] = ap;
5202 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
5203 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5204 (ap->pio_mask << ATA_SHIFT_PIO);
5205
5206 /* print per-port info to dmesg */
5207 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
5208 "bmdma 0x%lX irq %lu\n",
5209 ap->id,
5210 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5211 ata_mode_string(xfer_mode_mask),
5212 ap->ioaddr.cmd_addr,
5213 ap->ioaddr.ctl_addr,
5214 ap->ioaddr.bmdma_addr,
5215 ent->irq);
5216
5217 ata_chk_status(ap);
5218 host_set->ops->irq_clear(ap);
5219 count++;
5220 }
5221
5222 if (!count)
5223 goto err_free_ret;
5224
5225 /* obtain irq, that is shared between channels */
5226 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
5227 DRV_NAME, host_set))
5228 goto err_out;
5229
5230 /* perform each probe synchronously */
5231 DPRINTK("probe begin\n");
5232 for (i = 0; i < count; i++) {
5233 struct ata_port *ap;
5234 int rc;
5235
5236 ap = host_set->ports[i];
5237
5238 DPRINTK("ata%u: bus probe begin\n", ap->id);
5239 rc = ata_bus_probe(ap);
5240 DPRINTK("ata%u: bus probe end\n", ap->id);
5241
5242 if (rc) {
5243 /* FIXME: do something useful here?
5244 * Current libata behavior will
5245 * tear down everything when
5246 * the module is removed
5247 * or the h/w is unplugged.
5248 */
5249 }
5250
5251 rc = scsi_add_host(ap->host, dev);
5252 if (rc) {
5253 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
5254 ap->id);
5255 /* FIXME: do something useful here */
5256 /* FIXME: handle unconditional calls to
5257 * scsi_scan_host and ata_host_remove, below,
5258 * at the very least
5259 */
5260 }
5261 }
5262
5263 /* probes are done, now scan each port's disk(s) */
5264 DPRINTK("host probe begin\n");
5265 for (i = 0; i < count; i++) {
5266 struct ata_port *ap = host_set->ports[i];
5267
5268 ata_scsi_scan_host(ap);
5269 }
5270
5271 dev_set_drvdata(dev, host_set);
5272
5273 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5274 return ent->n_ports; /* success */
5275
5276 err_out:
5277 for (i = 0; i < count; i++) {
5278 ata_host_remove(host_set->ports[i], 1);
5279 scsi_host_put(host_set->ports[i]->host);
5280 }
5281 err_free_ret:
5282 kfree(host_set);
5283 VPRINTK("EXIT, returning 0\n");
5284 return 0;
5285 }
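
/*
 * Example (an illustrative sketch; my_* names and addresses are
 * placeholders): a non-PCI driver can fill a probe ent by hand and
 * pass it to ata_device_add():
 *
 *	struct ata_probe_ent ent;
 *
 *	memset(&ent, 0, sizeof(ent));
 *	ent.dev = my_dev;			// the platform's struct device
 *	ent.sht = &my_sht;
 *	ent.port_ops = &my_ops;
 *	ent.n_ports = 1;
 *	ent.irq = MY_IRQ;
 *	ent.pio_mask = 0x1f;			// PIO modes 0-4
 *	ent.port[0].cmd_addr = MY_CMD_BASE;
 *	ent.port[0].altstatus_addr = ent.port[0].ctl_addr = MY_CTL_ADDR;
 *	ata_std_ports(&ent.port[0]);
 *	ata_device_add(&ent);
 */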
5286
5287 /**
5288 * ata_host_set_remove - PCI layer callback for device removal
5289 * @host_set: ATA host set that was removed
5290 *
5291 * Unregister all objects associated with this host set. Free those
5292 * objects.
5293 *
5294 * LOCKING:
5295 * Inherited from calling layer (may sleep).
5296 */
5297
5298 void ata_host_set_remove(struct ata_host_set *host_set)
5299 {
5300 struct ata_port *ap;
5301 unsigned int i;
5302
5303 for (i = 0; i < host_set->n_ports; i++) {
5304 ap = host_set->ports[i];
5305 scsi_remove_host(ap->host);
5306 }
5307
5308 free_irq(host_set->irq, host_set);
5309
5310 for (i = 0; i < host_set->n_ports; i++) {
5311 ap = host_set->ports[i];
5312
5313 ata_scsi_release(ap->host);
5314
5315 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5316 struct ata_ioports *ioaddr = &ap->ioaddr;
5317
5318 if (ioaddr->cmd_addr == 0x1f0)
5319 release_region(0x1f0, 8);
5320 else if (ioaddr->cmd_addr == 0x170)
5321 release_region(0x170, 8);
5322 }
5323
5324 scsi_host_put(ap->host);
5325 }
5326
5327 if (host_set->ops->host_stop)
5328 host_set->ops->host_stop(host_set);
5329
5330 kfree(host_set);
5331 }
5332
5333 /**
5334 * ata_scsi_release - SCSI layer callback hook for host unload
5335 * @host: libata host to be unloaded
5336 *
5337 * Performs all duties necessary to shut down a libata port...
5338 * Kill port kthread, disable port, and release resources.
5339 *
5340 * LOCKING:
5341 * Inherited from SCSI layer.
5342 *
5343 * RETURNS:
5344 * One.
5345 */
5346
5347 int ata_scsi_release(struct Scsi_Host *host)
5348 {
5349 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
5350
5351 DPRINTK("ENTER\n");
5352
5353 ap->ops->port_disable(ap);
5354 ata_host_remove(ap, 0);
5355
5356 DPRINTK("EXIT\n");
5357 return 1;
5358 }
5359
5360 /**
5361 * ata_std_ports - initialize ioaddr with standard port offsets.
5362 * @ioaddr: IO address structure to be initialized
5363 *
5364 * Utility function which initializes data_addr, error_addr,
5365 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5366 * device_addr, status_addr, and command_addr to standard offsets
5367 * relative to cmd_addr.
5368 *
5369 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5370 */
5371
5372 void ata_std_ports(struct ata_ioports *ioaddr)
5373 {
5374 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5375 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5376 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5377 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5378 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5379 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5380 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5381 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5382 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5383 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5384 }
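
/*
 * Worked example: with the standard register offsets from
 * <linux/ata.h>, a legacy primary channel with cmd_addr 0x1f0 gets
 *
 *	data   0x1f0	error/feature  0x1f1	nsect 0x1f2
 *	lbal   0x1f3	lbam           0x1f4	lbah  0x1f5
 *	device 0x1f6	status/command 0x1f7
 *
 * ctl_addr/altstatus_addr are set separately (conventionally 0x3f6).
 */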
5385
5386 static struct ata_probe_ent *
5387 ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5388 {
5389 struct ata_probe_ent *probe_ent;
5390
5391 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
5392 if (!probe_ent) {
5393 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5394 kobject_name(&(dev->kobj)));
5395 return NULL;
5396 }
5397
5398 INIT_LIST_HEAD(&probe_ent->node);
5399 probe_ent->dev = dev;
5400
5401 probe_ent->sht = port->sht;
5402 probe_ent->host_flags = port->host_flags;
5403 probe_ent->pio_mask = port->pio_mask;
5404 probe_ent->mwdma_mask = port->mwdma_mask;
5405 probe_ent->udma_mask = port->udma_mask;
5406 probe_ent->port_ops = port->port_ops;
5407
5408 return probe_ent;
5409 }
5410
5411
5412
5413 #ifdef CONFIG_PCI
5414
5415 void ata_pci_host_stop (struct ata_host_set *host_set)
5416 {
5417 struct pci_dev *pdev = to_pci_dev(host_set->dev);
5418
5419 pci_iounmap(pdev, host_set->mmio_base);
5420 }
5421
5422 /**
5423 * ata_pci_init_native_mode - Initialize native-mode driver
5424 * @pdev: pci device to be initialized
5425 * @port: array[2] of pointers to port info structures.
5426 * @ports: bitmap of ports present
5427 *
5428 * Utility function which allocates and initializes an
5429 * ata_probe_ent structure for a standard dual-port
5430 * PIO-based IDE controller. The returned ata_probe_ent
5431 * structure can be passed to ata_device_add(). The returned
5432 * ata_probe_ent structure should then be freed with kfree().
5433 *
5434 * The caller need only pass the address of the primary port; the
5435 * secondary will be deduced automatically. If the device has
5436 * non-standard secondary port mappings this function can be called twice,
5437 * once for each interface.
5438 */
5439
5440 struct ata_probe_ent *
5441 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
5442 {
5443 struct ata_probe_ent *probe_ent =
5444 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
5445 int p = 0;
5446
5447 if (!probe_ent)
5448 return NULL;
5449
5450 probe_ent->irq = pdev->irq;
5451 probe_ent->irq_flags = SA_SHIRQ;
5452 probe_ent->private_data = port[0]->private_data;
5453
5454 if (ports & ATA_PORT_PRIMARY) {
5455 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
5456 probe_ent->port[p].altstatus_addr =
5457 probe_ent->port[p].ctl_addr =
5458 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
5459 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
5460 ata_std_ports(&probe_ent->port[p]);
5461 p++;
5462 }
5463
5464 if (ports & ATA_PORT_SECONDARY) {
5465 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
5466 probe_ent->port[p].altstatus_addr =
5467 probe_ent->port[p].ctl_addr =
5468 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
5469 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
5470 ata_std_ports(&probe_ent->port[p]);
5471 p++;
5472 }
5473
5474 probe_ent->n_ports = p;
5475 return probe_ent;
5476 }
5477
5478 static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
5479 {
5480 struct ata_probe_ent *probe_ent;
5481
5482 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
5483 if (!probe_ent)
5484 return NULL;
5485
5486 probe_ent->legacy_mode = 1;
5487 probe_ent->n_ports = 1;
5488 probe_ent->hard_port_no = port_num;
5489 probe_ent->private_data = port->private_data;
5490
5491 switch(port_num)
5492 {
5493 case 0:
5494 probe_ent->irq = 14;
5495 probe_ent->port[0].cmd_addr = 0x1f0;
5496 probe_ent->port[0].altstatus_addr =
5497 probe_ent->port[0].ctl_addr = 0x3f6;
5498 break;
5499 case 1:
5500 probe_ent->irq = 15;
5501 probe_ent->port[0].cmd_addr = 0x170;
5502 probe_ent->port[0].altstatus_addr =
5503 probe_ent->port[0].ctl_addr = 0x376;
5504 break;
5505 }
5506 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
5507 ata_std_ports(&probe_ent->port[0]);
5508 return probe_ent;
5509 }
5510
5511 /**
5512 * ata_pci_init_one - Initialize/register PCI IDE host controller
5513 * @pdev: Controller to be initialized
5514 * @port_info: Information from low-level host driver
5515 * @n_ports: Number of ports attached to host controller
5516 *
5517 * This is a helper function which can be called from a driver's
5518 * xxx_init_one() probe function if the hardware uses traditional
5519 * IDE taskfile registers.
5520 *
5521 * This function calls pci_enable_device(), reserves its register
5522 * regions, sets the dma mask, enables bus master mode, and calls
5523 * ata_device_add()
5524 *
5525 * LOCKING:
5526 * Inherited from PCI layer (may sleep).
5527 *
5528 * RETURNS:
5529 * Zero on success, negative errno-based value on error.
5530 */
5531
5532 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
5533 unsigned int n_ports)
5534 {
5535 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
5536 struct ata_port_info *port[2];
5537 u8 tmp8, mask;
5538 unsigned int legacy_mode = 0;
5539 int disable_dev_on_err = 1;
5540 int rc;
5541
5542 DPRINTK("ENTER\n");
5543
5544 port[0] = port_info[0];
5545 if (n_ports > 1)
5546 port[1] = port_info[1];
5547 else
5548 port[1] = port[0];
5549
5550 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
5551 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
5552 /* TODO: What if one channel is in native mode ... */
5553 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
5554 mask = (1 << 2) | (1 << 0);
5555 if ((tmp8 & mask) != mask)
5556 legacy_mode = (1 << 3);
5557 }
5558
5559 /* FIXME... */
5560 if ((!legacy_mode) && (n_ports > 2)) {
5561 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
5562 n_ports = 2;
5563 /* For now */
5564 }
5565
5566 /* FIXME: Really, for ATA this isn't safe because the device may be
5567 multi-purpose and we want to leave it alone if it was already
5568 enabled. Secondly, for shared use, as Arjan says, we want refcounting.
5569
5570 Checking dev->is_enabled is insufficient as this is not set at
5571 boot for the primary video, which is BIOS-enabled.
5572 */
5573
5574 rc = pci_enable_device(pdev);
5575 if (rc)
5576 return rc;
5577
5578 rc = pci_request_regions(pdev, DRV_NAME);
5579 if (rc) {
5580 disable_dev_on_err = 0;
5581 goto err_out;
5582 }
5583
5584 /* FIXME: Should use platform specific mappers for legacy port ranges */
5585 if (legacy_mode) {
5586 if (!request_region(0x1f0, 8, "libata")) {
5587 struct resource *conflict, res;
5588 res.start = 0x1f0;
5589 res.end = 0x1f0 + 8 - 1;
5590 conflict = ____request_resource(&ioport_resource, &res);
5591 if (!strcmp(conflict->name, "libata"))
5592 legacy_mode |= (1 << 0);
5593 else {
5594 disable_dev_on_err = 0;
5595 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
5596 }
5597 } else
5598 legacy_mode |= (1 << 0);
5599
5600 if (!request_region(0x170, 8, "libata")) {
5601 struct resource *conflict, res;
5602 res.start = 0x170;
5603 res.end = 0x170 + 8 - 1;
5604 conflict = ____request_resource(&ioport_resource, &res);
5605 if (!strcmp(conflict->name, "libata"))
5606 legacy_mode |= (1 << 1);
5607 else {
5608 disable_dev_on_err = 0;
5609 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
5610 }
5611 } else
5612 legacy_mode |= (1 << 1);
5613 }
5614
5615 /* we have legacy mode, but all ports are unavailable */
5616 if (legacy_mode == (1 << 3)) {
5617 rc = -EBUSY;
5618 goto err_out_regions;
5619 }
5620
5621 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
5622 if (rc)
5623 goto err_out_regions;
5624 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
5625 if (rc)
5626 goto err_out_regions;
5627
5628 if (legacy_mode) {
5629 if (legacy_mode & (1 << 0))
5630 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
5631 if (legacy_mode & (1 << 1))
5632 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
5633 } else {
5634 if (n_ports == 2)
5635 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
5636 else
5637 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
5638 }
5639 if (!probe_ent && !probe_ent2) {
5640 rc = -ENOMEM;
5641 goto err_out_regions;
5642 }
5643
5644 pci_set_master(pdev);
5645
5646 /* FIXME: check ata_device_add return */
5647 if (legacy_mode) {
5648 if (legacy_mode & (1 << 0))
5649 ata_device_add(probe_ent);
5650 if (legacy_mode & (1 << 1))
5651 ata_device_add(probe_ent2);
5652 } else
5653 ata_device_add(probe_ent);
5654
5655 kfree(probe_ent);
5656 kfree(probe_ent2);
5657
5658 return 0;
5659
5660 err_out_regions:
5661 if (legacy_mode & (1 << 0))
5662 release_region(0x1f0, 8);
5663 if (legacy_mode & (1 << 1))
5664 release_region(0x170, 8);
5665 pci_release_regions(pdev);
5666 err_out:
5667 if (disable_dev_on_err)
5668 pci_disable_device(pdev);
5669 return rc;
5670 }
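
/*
 * Example (an illustrative sketch; my_* names are hypothetical):
 * a typical PCI LLD probe routine is little more than a wrapper
 * around ata_pci_init_one():
 *
 *	static int my_init_one(struct pci_dev *pdev,
 *			       const struct pci_device_id *id)
 *	{
 *		static struct ata_port_info info = {
 *			.sht		= &my_sht,
 *			.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
 *			.pio_mask	= 0x1f,		// PIO modes 0-4
 *			.port_ops	= &my_ops,
 *		};
 *		struct ata_port_info *port_info[2] = { &info, &info };
 *
 *		return ata_pci_init_one(pdev, port_info, 2);
 *	}
 */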
5671
5672 /**
5673 * ata_pci_remove_one - PCI layer callback for device removal
5674 * @pdev: PCI device that was removed
5675 *
5676 * PCI layer indicates to libata via this hook that
5677 * hot-unplug or module unload event has occurred.
5678 * Handle this by unregistering all objects associated
5679 * with this PCI device. Free those objects. Then finally
5680 * release PCI resources and disable device.
5681 *
5682 * LOCKING:
5683 * Inherited from PCI layer (may sleep).
5684 */
5685
5686 void ata_pci_remove_one (struct pci_dev *pdev)
5687 {
5688 struct device *dev = pci_dev_to_dev(pdev);
5689 struct ata_host_set *host_set = dev_get_drvdata(dev);
5690
5691 ata_host_set_remove(host_set);
5692 pci_release_regions(pdev);
5693 pci_disable_device(pdev);
5694 dev_set_drvdata(dev, NULL);
5695 }
5696
5697 /* move to PCI subsystem */
5698 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5699 {
5700 unsigned long tmp = 0;
5701
5702 switch (bits->width) {
5703 case 1: {
5704 u8 tmp8 = 0;
5705 pci_read_config_byte(pdev, bits->reg, &tmp8);
5706 tmp = tmp8;
5707 break;
5708 }
5709 case 2: {
5710 u16 tmp16 = 0;
5711 pci_read_config_word(pdev, bits->reg, &tmp16);
5712 tmp = tmp16;
5713 break;
5714 }
5715 case 4: {
5716 u32 tmp32 = 0;
5717 pci_read_config_dword(pdev, bits->reg, &tmp32);
5718 tmp = tmp32;
5719 break;
5720 }
5721
5722 default:
5723 return -EINVAL;
5724 }
5725
5726 tmp &= bits->mask;
5727
5728 return (tmp == bits->val) ? 1 : 0;
5729 }
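
/*
 * Example (an illustrative sketch; the 0x41/0x43 values are modeled
 * on PIIX-style channel-enable bits, not any particular chip): a LLD
 * can describe "is this channel enabled?" tests as data:
 *
 *	static const struct pci_bits my_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	// reg, width, mask, val
 *		{ 0x43, 1, 0x80, 0x80 },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits[channel]))
 *		return -ENOENT;			// channel disabled
 */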
5730
5731 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5732 {
5733 pci_save_state(pdev);
5734 pci_disable_device(pdev);
5735 pci_set_power_state(pdev, PCI_D3hot);
5736 return 0;
5737 }
5738
5739 int ata_pci_device_resume(struct pci_dev *pdev)
5740 {
5741 pci_set_power_state(pdev, PCI_D0);
5742 pci_restore_state(pdev);
5743 pci_enable_device(pdev);
5744 pci_set_master(pdev);
5745 return 0;
5746 }
5747 #endif /* CONFIG_PCI */
5748
5749
5750 static int __init ata_init(void)
5751 {
5752 ata_wq = create_workqueue("ata");
5753 if (!ata_wq)
5754 return -ENOMEM;
5755
5756 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5757 return 0;
5758 }
5759
5760 static void __exit ata_exit(void)
5761 {
5762 destroy_workqueue(ata_wq);
5763 }
5764
5765 module_init(ata_init);
5766 module_exit(ata_exit);
5767
5768 static unsigned long ratelimit_time;
5769 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5770
5771 int ata_ratelimit(void)
5772 {
5773 int rc;
5774 unsigned long flags;
5775
5776 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5777
5778 if (time_after(jiffies, ratelimit_time)) {
5779 rc = 1;
5780 ratelimit_time = jiffies + (HZ/5);
5781 } else
5782 rc = 0;
5783
5784 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5785
5786 return rc;
5787 }
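
/*
 * Example: ata_ratelimit() throttles messages that an interrupt storm
 * could otherwise emit thousands of times per second; with the HZ/5
 * window above, at most one caller wins every 200ms:
 *
 *	if (ata_ratelimit())
 *		printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
 */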
5788
5789 /*
5790 * libata is essentially a library of internal helper functions for
5791 * low-level ATA host controller drivers. As such, the API/ABI is
5792 * likely to change as new drivers are added and updated.
5793 * Do not depend on ABI/API stability.
5794 */
5795
5796 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5797 EXPORT_SYMBOL_GPL(ata_std_ports);
5798 EXPORT_SYMBOL_GPL(ata_device_add);
5799 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5800 EXPORT_SYMBOL_GPL(ata_sg_init);
5801 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5802 EXPORT_SYMBOL_GPL(ata_qc_complete);
5803 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5804 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5805 EXPORT_SYMBOL_GPL(ata_tf_load);
5806 EXPORT_SYMBOL_GPL(ata_tf_read);
5807 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5808 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5809 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5810 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5811 EXPORT_SYMBOL_GPL(ata_check_status);
5812 EXPORT_SYMBOL_GPL(ata_altstatus);
5813 EXPORT_SYMBOL_GPL(ata_exec_command);
5814 EXPORT_SYMBOL_GPL(ata_port_start);
5815 EXPORT_SYMBOL_GPL(ata_port_stop);
5816 EXPORT_SYMBOL_GPL(ata_host_stop);
5817 EXPORT_SYMBOL_GPL(ata_interrupt);
5818 EXPORT_SYMBOL_GPL(ata_qc_prep);
5819 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5820 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5821 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5822 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5823 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5824 EXPORT_SYMBOL_GPL(ata_port_probe);
5825 EXPORT_SYMBOL_GPL(sata_phy_reset);
5826 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5827 EXPORT_SYMBOL_GPL(ata_bus_reset);
5828 EXPORT_SYMBOL_GPL(ata_std_probeinit);
5829 EXPORT_SYMBOL_GPL(ata_std_softreset);
5830 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5831 EXPORT_SYMBOL_GPL(ata_std_postreset);
5832 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5833 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5834 EXPORT_SYMBOL_GPL(ata_port_disable);
5835 EXPORT_SYMBOL_GPL(ata_ratelimit);
5836 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5837 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5838 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5839 EXPORT_SYMBOL_GPL(ata_scsi_error);
5840 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5841 EXPORT_SYMBOL_GPL(ata_scsi_release);
5842 EXPORT_SYMBOL_GPL(ata_host_intr);
5843 EXPORT_SYMBOL_GPL(ata_dev_classify);
5844 EXPORT_SYMBOL_GPL(ata_dev_id_string);
5845 EXPORT_SYMBOL_GPL(ata_dev_config);
5846 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5847 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5848 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5849
5850 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5851 EXPORT_SYMBOL_GPL(ata_timing_compute);
5852 EXPORT_SYMBOL_GPL(ata_timing_merge);
5853
5854 #ifdef CONFIG_PCI
5855 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5856 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5857 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5858 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5859 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5860 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5861 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5862 #endif /* CONFIG_PCI */
5863
5864 EXPORT_SYMBOL_GPL(ata_device_suspend);
5865 EXPORT_SYMBOL_GPL(ata_device_resume);
5866 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5867 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);