/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                 Please ALWAYS copy linux-ide@vger.kernel.org
 *                 on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

static unsigned int ata_dev_init_params(struct ata_device *dev,
                                        u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
        fis[0] = 0x27;                  /* Register - Host to Device FIS */
        fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
                                            bit 7 indicates Command FIS */
        fis[2] = tf->command;
        fis[3] = tf->feature;

        fis[4] = tf->lbal;
        fis[5] = tf->lbam;
        fis[6] = tf->lbah;
        fis[7] = tf->device;

        fis[8] = tf->hob_lbal;
        fis[9] = tf->hob_lbam;
        fis[10] = tf->hob_lbah;
        fis[11] = tf->hob_feature;

        fis[12] = tf->nsect;
        fis[13] = tf->hob_nsect;
        fis[14] = 0;
        fis[15] = tf->ctl;

        fis[16] = 0;
        fis[17] = 0;
        fis[18] = 0;
        fis[19] = 0;
}
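
/*
 * Illustrative layout (derived purely from the assignments above, not
 * from vendor documentation): a READ DMA EXT taskfile (command 0x25)
 * addressing LBA 0x12345678 with nsect 16 and pmp 0 becomes:
 *
 *   fis[0]     = 0x27                  Register - Host to Device
 *   fis[1]     = 0x80                  pmp 0, bit 7 set (Command FIS)
 *   fis[2]     = 0x25                  tf->command
 *   fis[4..6]  = 0x78 0x56 0x34        lbal/lbam/lbah (LBA bits 0-23)
 *   fis[8..10] = 0x12 0x00 0x00        hob_lbal/lbam/lbah (LBA bits 24-47)
 *   fis[12]    = 0x10                  nsect = 16
 */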

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
        tf->command     = fis[2];       /* status */
        tf->feature     = fis[3];       /* error */

        tf->lbal        = fis[4];
        tf->lbam        = fis[5];
        tf->lbah        = fis[6];
        tf->device      = fis[7];

        tf->hob_lbal    = fis[8];
        tf->hob_lbam    = fis[9];
        tf->hob_lbah    = fis[10];

        tf->nsect       = fis[12];
        tf->hob_nsect   = fis[13];
}

static const u8 ata_rw_cmds[] = {
        /* pio multi */
        ATA_CMD_READ_MULTI,
        ATA_CMD_WRITE_MULTI,
        ATA_CMD_READ_MULTI_EXT,
        ATA_CMD_WRITE_MULTI_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_MULTI_FUA_EXT,
        /* pio */
        ATA_CMD_PIO_READ,
        ATA_CMD_PIO_WRITE,
        ATA_CMD_PIO_READ_EXT,
        ATA_CMD_PIO_WRITE_EXT,
        0,
        0,
        0,
        0,
        /* dma */
        ATA_CMD_READ,
        ATA_CMD_WRITE,
        ATA_CMD_READ_EXT,
        ATA_CMD_WRITE_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @qc: command to examine and configure
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
        struct ata_taskfile *tf = &qc->tf;
        struct ata_device *dev = qc->dev;
        u8 cmd;

        int index, fua, lba48, write;

        fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
        lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
        write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

        if (dev->flags & ATA_DFLAG_PIO) {
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
                /* Unable to use DMA due to host limitation */
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else {
                tf->protocol = ATA_PROT_DMA;
                index = 16;
        }

        cmd = ata_rw_cmds[index + fua + lba48 + write];
        if (cmd) {
                tf->command = cmd;
                return 0;
        }
        return -1;
}
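
/*
 * Index arithmetic for ata_rw_cmds[] above: a DMA write with LBA48 and
 * FUA set selects entry 16 + 4 + 2 + 1 == 23, i.e.
 * ATA_CMD_WRITE_FUA_EXT.  Combinations whose slot is 0 (for example
 * FUA without LBA48, entry 21) make ata_rwcmd_protocol() return -1.
 */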

/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
                                      unsigned int mwdma_mask,
                                      unsigned int udma_mask)
{
        return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
                ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
                ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
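
/*
 * Worked example (assuming the shift layout of contemporary
 * <linux/ata.h>: ATA_SHIFT_PIO == 0, ATA_SHIFT_MWDMA == 5,
 * ATA_SHIFT_UDMA == 8): packing pio_mask 0x1f, mwdma_mask 0x07 and
 * udma_mask 0x3f yields (0x1f << 0) | (0x07 << 5) | (0x3f << 8)
 * == 0x3fff.
 */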

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
                                unsigned int *pio_mask,
                                unsigned int *mwdma_mask,
                                unsigned int *udma_mask)
{
        if (pio_mask)
                *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
        if (mwdma_mask)
                *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
        if (udma_mask)
                *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
        int shift, bits;
        u8 base;
} ata_xfer_tbl[] = {
        { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
        { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
        { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
        { -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
        int highbit = fls(xfer_mask) - 1;
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
                        return ent->base + highbit - ent->shift;
        return 0;
}
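
/*
 * Continuing the example above: for xfer_mask 0x3fff, fls() - 1 == 13.
 * Bit 13 falls in the UDMA range (shift 8, 8 bits under the assumed
 * layout), so ata_xfer_mask2mode() returns XFER_UDMA_0 + (13 - 8),
 * i.e. XFER_UDMA_5.
 */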

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return 1 << (ent->shift + xfer_mode - ent->base);
        return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return ent->shift;
        return -1;
}

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
        static const char * const xfer_mode_str[] = {
                "PIO0",
                "PIO1",
                "PIO2",
                "PIO3",
                "PIO4",
                "MWDMA0",
                "MWDMA1",
                "MWDMA2",
                "UDMA/16",
                "UDMA/25",
                "UDMA/33",
                "UDMA/44",
                "UDMA/66",
                "UDMA/100",
                "UDMA/133",
                "UDMA7",
        };
        int highbit;

        highbit = fls(xfer_mask) - 1;
        if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
                return xfer_mode_str[highbit];
        return "<n/a>";
}
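
/*
 * Example: xfer_mask 0x3fff has its highest bit at position 13, which
 * indexes xfer_mode_str[13] == "UDMA/100" -- consistent with
 * XFER_UDMA_5 from ata_xfer_mask2mode() above.
 */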

static const char *sata_spd_string(unsigned int spd)
{
        static const char * const spd_str[] = {
                "1.5 Gbps",
                "3.0 Gbps",
        };

        if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
                return "<unknown>";
        return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
        if (ata_dev_enabled(dev)) {
                ata_dev_printk(dev, KERN_WARNING, "disabled\n");
                dev->class++;
        }
}

/**
 * ata_pio_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
                                   unsigned int device)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 nsect, lbal;

        ap->ops->dev_select(ap, device);

        outb(0x55, ioaddr->nsect_addr);
        outb(0xaa, ioaddr->lbal_addr);

        outb(0xaa, ioaddr->nsect_addr);
        outb(0x55, ioaddr->lbal_addr);

        outb(0x55, ioaddr->nsect_addr);
        outb(0xaa, ioaddr->lbal_addr);

        nsect = inb(ioaddr->nsect_addr);
        lbal = inb(ioaddr->lbal_addr);

        if ((nsect == 0x55) && (lbal == 0xaa))
                return 1;       /* we found a device */

        return 0;               /* nothing found */
}

/**
 * ata_mmio_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
                                    unsigned int device)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 nsect, lbal;

        ap->ops->dev_select(ap, device);

        writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
        writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

        writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
        writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

        writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
        writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

        nsect = readb((void __iomem *) ioaddr->nsect_addr);
        lbal = readb((void __iomem *) ioaddr->lbal_addr);

        if ((nsect == 0x55) && (lbal == 0xaa))
                return 1;       /* we found a device */

        return 0;               /* nothing found */
}

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * Dispatch ATA device presence detection, depending
 * on whether we are using PIO or MMIO to talk to the
 * ATA shadow registers.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_devchk(struct ata_port *ap,
                               unsigned int device)
{
        if (ap->flags & ATA_FLAG_MMIO)
                return ata_mmio_devchk(ap, device);
        return ata_pio_devchk(ap, device);
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
        /* Apple's open source Darwin code hints that some devices only
         * put a proper signature into the LBA mid/high registers,
         * so we only check those.  It's sufficient for uniqueness.
         */

        if (((tf->lbam == 0) && (tf->lbah == 0)) ||
            ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
                DPRINTK("found ATA device by sig\n");
                return ATA_DEV_ATA;
        }

        if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
            ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
                DPRINTK("found ATAPI device by sig\n");
                return ATA_DEV_ATAPI;
        }

        DPRINTK("unknown device\n");
        return ATA_DEV_UNKNOWN;
}
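
/*
 * Quick reference for the signatures tested above (LBA mid/high):
 *
 *   0x00/0x00 or 0x3c/0xc3  ->  ATA_DEV_ATA
 *   0x14/0xeb or 0x69/0x96  ->  ATA_DEV_ATAPI
 *   anything else           ->  ATA_DEV_UNKNOWN
 */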

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
        struct ata_taskfile tf;
        unsigned int class;
        u8 err;

        ap->ops->dev_select(ap, device);

        memset(&tf, 0, sizeof(tf));

        ap->ops->tf_read(ap, &tf);
        err = tf.feature;
        if (r_err)
                *r_err = err;

        /* see if device passed diags */
        if (err == 1)
                /* do nothing */ ;
        else if ((device == 0) && (err == 0x81))
                /* do nothing */ ;
        else
                return ATA_DEV_NONE;

        /* determine if device is ATA or ATAPI */
        class = ata_dev_classify(&tf);

        if (class == ATA_DEV_UNKNOWN)
                return ATA_DEV_NONE;
        if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
                return ATA_DEV_NONE;
        return class;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
                   unsigned int ofs, unsigned int len)
{
        unsigned int c;

        while (len > 0) {
                c = id[ofs] >> 8;
                *s = c;
                s++;

                c = id[ofs] & 0xff;
                *s = c;
                s++;

                ofs++;
                len -= 2;
        }
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
                     unsigned int ofs, unsigned int len)
{
        unsigned char *p;

        WARN_ON(!(len & 1));

        ata_id_string(id, s, ofs, len - 1);

        p = s + strnlen(s, len - 1);
        while (p > s && p[-1] == ' ')
                p--;
        *p = '\0';
}
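
/*
 * Usage sketch: extracting the 40-character model string, whose word
 * offset is ATA_ID_PROD_OFS in <linux/ata.h> (name assumed from the
 * headers of this era):
 *
 *   unsigned char model[41];
 *   ata_id_c_string(dev->id, model, ATA_ID_PROD_OFS, sizeof(model));
 *
 * sizeof(model) is 41, an odd "maximum length + 1", satisfying the
 * WARN_ON() above.
 */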

static u64 ata_id_n_sectors(const u16 *id)
{
        if (ata_id_has_lba(id)) {
                if (ata_id_has_lba48(id))
                        return ata_id_u64(id, 100);
                else
                        return ata_id_u32(id, 60);
        } else {
                if (ata_id_current_chs_valid(id))
                        return ata_id_u32(id, 57);
                else
                        return id[1] * id[3] * id[6];
        }
}
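
/*
 * Example: an old CHS-only drive reporting 16383 cylinders, 16 heads
 * and 63 sectors in words 1/3/6 yields 16383 * 16 * 63 == 16514064
 * sectors, i.e. the classic ~8.4 GB pre-LBA capacity ceiling.
 */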

/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual operation.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}


/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */

void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
        u8 tmp;

        if (device == 0)
                tmp = ATA_DEVICE_OBS;
        else
                tmp = ATA_DEVICE_OBS | ATA_DEV1;

        if (ap->flags & ATA_FLAG_MMIO) {
                writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
        } else {
                outb(tmp, ap->ioaddr.device_addr);
        }
        ata_pause(ap);          /* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
                    unsigned int wait, unsigned int can_sleep)
{
        VPRINTK("ENTER, ata%u: device %u, wait %u\n",
                ap->id, device, wait);

        if (wait)
                ata_wait_idle(ap);

        ap->ops->dev_select(ap, device);

        if (wait) {
                if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
                        msleep(150);
                ata_wait_idle(ap);
        }
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
        DPRINTK("49==0x%04x "
                "53==0x%04x "
                "63==0x%04x "
                "64==0x%04x "
                "75==0x%04x \n",
                id[49],
                id[53],
                id[63],
                id[64],
                id[75]);
        DPRINTK("80==0x%04x "
                "81==0x%04x "
                "82==0x%04x "
                "83==0x%04x "
                "84==0x%04x \n",
                id[80],
                id[81],
                id[82],
                id[83],
                id[84]);
        DPRINTK("88==0x%04x "
                "93==0x%04x\n",
                id[88],
                id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device.  This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
        unsigned int pio_mask, mwdma_mask, udma_mask;

        /* Usual case.  Word 53 indicates word 64 is valid */
        if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
                pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
                pio_mask <<= 3;
                pio_mask |= 0x7;
        } else {
                /* If word 64 isn't valid then Word 51 high byte holds
                 * the PIO timing number for the maximum.  Turn it into
                 * a mask.
                 */
                pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;

                /* But wait.. there's more.  Design your standards by
                 * committee and you too can get a free iordy field to
                 * process.  However it's the speeds not the modes that
                 * are supported...  Note drivers using the timing API
                 * will get this right anyway.
                 */
        }

        mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

        udma_mask = 0;
        if (id[ATA_ID_FIELD_VALID] & (1 << 2))
                udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

        return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
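
/*
 * Example: a drive with word 53 bit 1 set, word 64 == 0x03 (PIO3+PIO4),
 * word 63 == 0x07 (MWDMA0-2) and UDMA field 0x3f (UDMA0-5) produces
 * pio_mask (0x03 << 3) | 0x7 == 0x1f, mwdma_mask 0x07 and udma_mask
 * 0x3f -- the packed 0x3fff mask used in the earlier examples.
 */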

/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data value to pass to workqueue function
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user's (the low level driver's) responsibility to make sure
 * that only one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
                         unsigned long delay)
{
        int rc;

        if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
                return;

        PREPARE_WORK(&ap->port_task, fn, data);

        if (!delay)
                rc = queue_work(ata_wq, &ap->port_task);
        else
                rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

        /* rc == 0 means that another user is using port task */
        WARN_ON(rc == 0);
}

/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
        unsigned long flags;

        DPRINTK("ENTER\n");

        spin_lock_irqsave(&ap->host_set->lock, flags);
        ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(&ap->host_set->lock, flags);

        DPRINTK("flush #1\n");
        flush_workqueue(ata_wq);

        /*
         * At this point, if a task is running, it's guaranteed to see
         * the FLUSH flag; thus, it will never queue pio tasks again.
         * Cancel and flush.
         */
        if (!cancel_delayed_work(&ap->port_task)) {
                DPRINTK("flush #2\n");
                flush_workqueue(ata_wq);
        }

        spin_lock_irqsave(&ap->host_set->lock, flags);
        ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(&ap->host_set->lock, flags);

        DPRINTK("EXIT\n");
}

void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
        struct completion *waiting = qc->private_data;

        complete(waiting);
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It is the caller's duty
 * to clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 */

unsigned ata_exec_internal(struct ata_device *dev,
                           struct ata_taskfile *tf, const u8 *cdb,
                           int dma_dir, void *buf, unsigned int buflen)
{
        struct ata_port *ap = dev->ap;
        u8 command = tf->command;
        struct ata_queued_cmd *qc;
        unsigned int tag, preempted_tag;
        u32 preempted_sactive, preempted_qc_active;
        DECLARE_COMPLETION(wait);
        unsigned long flags;
        unsigned int err_mask;
        int rc;

        spin_lock_irqsave(&ap->host_set->lock, flags);

        /* no internal command while frozen */
        if (ap->flags & ATA_FLAG_FROZEN) {
                spin_unlock_irqrestore(&ap->host_set->lock, flags);
                return AC_ERR_SYSTEM;
        }

        /* initialize internal qc */

        /* XXX: Tag 0 is used for drivers with legacy EH as some
         * drivers choke if any other tag is given.  This breaks
         * ata_tag_internal() test for those drivers.  Don't use new
         * EH stuff without converting to it.
         */
        if (ap->ops->error_handler)
                tag = ATA_TAG_INTERNAL;
        else
                tag = 0;

        if (test_and_set_bit(tag, &ap->qc_allocated))
                BUG();
        qc = __ata_qc_from_tag(ap, tag);

        qc->tag = tag;
        qc->scsicmd = NULL;
        qc->ap = ap;
        qc->dev = dev;
        ata_qc_reinit(qc);

        preempted_tag = ap->active_tag;
        preempted_sactive = ap->sactive;
        preempted_qc_active = ap->qc_active;
        ap->active_tag = ATA_TAG_POISON;
        ap->sactive = 0;
        ap->qc_active = 0;

        /* prepare & issue qc */
        qc->tf = *tf;
        if (cdb)
                memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
        qc->flags |= ATA_QCFLAG_RESULT_TF;
        qc->dma_dir = dma_dir;
        if (dma_dir != DMA_NONE) {
                ata_sg_init_one(qc, buf, buflen);
                qc->nsect = buflen / ATA_SECT_SIZE;
        }

        qc->private_data = &wait;
        qc->complete_fn = ata_qc_complete_internal;

        ata_qc_issue(qc);

        spin_unlock_irqrestore(&ap->host_set->lock, flags);

        rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);

        ata_port_flush_task(ap);

        if (!rc) {
                spin_lock_irqsave(&ap->host_set->lock, flags);

                /* We're racing with irq here.  If we lose, the
                 * following test prevents us from completing the qc
                 * twice.  If we win, the port is frozen and will be
                 * cleaned up by ->post_internal_cmd().
                 */
                if (qc->flags & ATA_QCFLAG_ACTIVE) {
                        qc->err_mask |= AC_ERR_TIMEOUT;

                        if (ap->ops->error_handler)
                                ata_port_freeze(ap);
                        else
                                ata_qc_complete(qc);

                        ata_dev_printk(dev, KERN_WARNING,
                                       "qc timeout (cmd 0x%x)\n", command);
                }

                spin_unlock_irqrestore(&ap->host_set->lock, flags);
        }

        /* do post_internal_cmd */
        if (ap->ops->post_internal_cmd)
                ap->ops->post_internal_cmd(qc);

        if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
                ata_dev_printk(dev, KERN_WARNING, "zero err_mask for failed "
                               "internal command, assuming AC_ERR_OTHER\n");
                qc->err_mask |= AC_ERR_OTHER;
        }

        /* finish up */
        spin_lock_irqsave(&ap->host_set->lock, flags);

        *tf = qc->result_tf;
        err_mask = qc->err_mask;

        ata_qc_free(qc);
        ap->active_tag = preempted_tag;
        ap->sactive = preempted_sactive;
        ap->qc_active = preempted_qc_active;

        /* XXX - Some LLDDs (sata_mv) disable port on command failure.
         * Until those drivers are fixed, we detect the condition
         * here, fail the command with AC_ERR_SYSTEM and reenable the
         * port.
         *
         * Note that this doesn't change any behavior as internal
         * command failure results in disabling the device in the
         * higher layer for LLDDs without new reset/EH callbacks.
         *
         * Kill the following code as soon as those drivers are fixed.
         */
        if (ap->flags & ATA_FLAG_DISABLED) {
                err_mask |= AC_ERR_SYSTEM;
                ata_port_probe(ap);
        }

        spin_unlock_irqrestore(&ap->host_set->lock, flags);

        return err_mask;
}
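
/*
 * Usage sketch, mirroring ata_dev_read_id() below: issue IDENTIFY
 * DEVICE and read the 256-word page into @id:
 *
 *   struct ata_taskfile tf;
 *   unsigned int err_mask;
 *
 *   ata_tf_init(dev, &tf);
 *   tf.command = ATA_CMD_ID_ATA;
 *   tf.protocol = ATA_PROT_PIO;
 *   err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 *                                id, sizeof(u16) * ATA_ID_WORDS);
 *
 * A non-zero err_mask signals failure; tf then holds the result
 * taskfile.
 */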

/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY.  Used
 * by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
        int pio;
        int speed = adev->pio_mode - XFER_PIO_0;

        if (speed < 2)
                return 0;
        if (speed > 2)
                return 1;

        /* If we have no drive specific rule, then PIO 2 is non-IORDY */

        if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
                pio = adev->id[ATA_ID_EIDE_PIO];
                /* Is the speed faster than the drive allows non-IORDY? */
                if (pio) {
                        /* This is cycle times not frequency - watch the logic! */
                        if (pio > 240)  /* PIO2 is 240ns per cycle */
                                return 1;
                        return 0;
                }
        }
        return 0;
}
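
/*
 * Example: at PIO2 (speed == 2), a drive advertising a minimum
 * IORDY-less cycle time of 383 ns in id[ATA_ID_EIDE_PIO] exceeds the
 * 240 ns PIO2 cycle, so the function returns 1 (IORDY required); a
 * reported value of 240 or less returns 0.
 */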

/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @post_reset: is this read ID post-reset?
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
                           int post_reset, u16 *id)
{
        struct ata_port *ap = dev->ap;
        unsigned int class = *p_class;
        struct ata_taskfile tf;
        unsigned int err_mask = 0;
        const char *reason;
        int rc;

        DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

        ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
        ata_tf_init(dev, &tf);

        switch (class) {
        case ATA_DEV_ATA:
                tf.command = ATA_CMD_ID_ATA;
                break;
        case ATA_DEV_ATAPI:
                tf.command = ATA_CMD_ID_ATAPI;
                break;
        default:
                rc = -ENODEV;
                reason = "unsupported class";
                goto err_out;
        }

        tf.protocol = ATA_PROT_PIO;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                     id, sizeof(id[0]) * ATA_ID_WORDS);
        if (err_mask) {
                rc = -EIO;
                reason = "I/O error";
                goto err_out;
        }

        swap_buf_le16(id, ATA_ID_WORDS);

        /* sanity check */
        if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
                rc = -EINVAL;
                reason = "device reports illegal type";
                goto err_out;
        }

        if (post_reset && class == ATA_DEV_ATA) {
                /*
                 * The exact sequence expected by certain pre-ATA4 drives is:
                 * SRST RESET
                 * IDENTIFY
                 * INITIALIZE DEVICE PARAMETERS
                 * anything else..
                 * Some drives were very specific about that exact sequence.
                 */
                if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
                        err_mask = ata_dev_init_params(dev, id[3], id[6]);
                        if (err_mask) {
                                rc = -EIO;
                                reason = "INIT_DEV_PARAMS failed";
                                goto err_out;
                        }

                        /* current CHS translation info (id[53-58]) might be
                         * changed.  Reread the identify device info.
                         */
                        post_reset = 0;
                        goto retry;
                }
        }

        *p_class = class;

        return 0;

 err_out:
        ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
                       "(%s, err_mask=0x%x)\n", reason, err_mask);
        return rc;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
        return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
                               char *desc, size_t desc_sz)
{
        struct ata_port *ap = dev->ap;
        int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

        if (!ata_id_has_ncq(dev->id)) {
                desc[0] = '\0';
                return;
        }

        if (ap->flags & ATA_FLAG_NCQ) {
                hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
                dev->flags |= ATA_DFLAG_NCQ;
        }

        if (hdepth >= ddepth)
                snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
        else
                snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

/**
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @dev: Target device to configure
 * @print_info: Enable device info printout
 *
 * Configure @dev according to @dev->id.  Generic and low-level
 * driver specific fixups are also applied.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise
 */
static int ata_dev_configure(struct ata_device *dev, int print_info)
{
        struct ata_port *ap = dev->ap;
        const u16 *id = dev->id;
        unsigned int xfer_mask;
        int i, rc;

        if (!ata_dev_enabled(dev)) {
                DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
                        ap->id, dev->devno);
                return 0;
        }

        DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

        /* print device capabilities */
        if (print_info)
                ata_dev_printk(dev, KERN_DEBUG, "cfg 49:%04x 82:%04x 83:%04x "
                               "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
                               id[49], id[82], id[83], id[84],
                               id[85], id[86], id[87], id[88]);

        /* initialize to-be-configured parameters */
        dev->flags &= ~ATA_DFLAG_CFG_MASK;
        dev->max_sectors = 0;
        dev->cdb_len = 0;
        dev->n_sectors = 0;
        dev->cylinders = 0;
        dev->heads = 0;
        dev->sectors = 0;

        /*
         * common ATA, ATAPI feature tests
         */

        /* find max transfer mode; for printk only */
        xfer_mask = ata_id_xfermask(id);

        ata_dump_id(id);

        /* ATA-specific feature tests */
        if (dev->class == ATA_DEV_ATA) {
                dev->n_sectors = ata_id_n_sectors(id);

                if (ata_id_has_lba(id)) {
                        const char *lba_desc;
                        char ncq_desc[20];

                        lba_desc = "LBA";
                        dev->flags |= ATA_DFLAG_LBA;
                        if (ata_id_has_lba48(id)) {
                                dev->flags |= ATA_DFLAG_LBA48;
                                lba_desc = "LBA48";
                        }

                        /* config NCQ */
                        ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

                        /* print device info to dmesg */
                        if (print_info)
                                ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
                                               "max %s, %Lu sectors: %s %s\n",
                                               ata_id_major_version(id),
                                               ata_mode_string(xfer_mask),
                                               (unsigned long long)dev->n_sectors,
                                               lba_desc, ncq_desc);
                } else {
                        /* CHS */

                        /* Default translation */
                        dev->cylinders  = id[1];
                        dev->heads      = id[3];
                        dev->sectors    = id[6];

                        if (ata_id_current_chs_valid(id)) {
                                /* Current CHS translation is valid. */
                                dev->cylinders = id[54];
                                dev->heads     = id[55];
                                dev->sectors   = id[56];
                        }

                        /* print device info to dmesg */
                        if (print_info)
                                ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
                                               "max %s, %Lu sectors: CHS %u/%u/%u\n",
                                               ata_id_major_version(id),
                                               ata_mode_string(xfer_mask),
                                               (unsigned long long)dev->n_sectors,
                                               dev->cylinders, dev->heads, dev->sectors);
                }

                if (dev->id[59] & 0x100) {
                        dev->multi_count = dev->id[59] & 0xff;
                        DPRINTK("ata%u: dev %u multi count %u\n",
                                ap->id, dev->devno, dev->multi_count);
                }

                dev->cdb_len = 16;
        }

        /* ATAPI-specific feature tests */
        else if (dev->class == ATA_DEV_ATAPI) {
                char *cdb_intr_string = "";

                rc = atapi_cdb_len(id);
                if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
                        ata_dev_printk(dev, KERN_WARNING,
                                       "unsupported CDB len\n");
                        rc = -EINVAL;
                        goto err_out_nosup;
                }
                dev->cdb_len = (unsigned int) rc;

                if (ata_id_cdb_intr(dev->id)) {
                        dev->flags |= ATA_DFLAG_CDB_INTR;
                        cdb_intr_string = ", CDB intr";
                }

                /* print device info to dmesg */
                if (print_info)
                        ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
                                       ata_mode_string(xfer_mask),
                                       cdb_intr_string);
        }

        ap->host->max_cmd_len = 0;
        for (i = 0; i < ATA_MAX_DEVICES; i++)
                ap->host->max_cmd_len = max_t(unsigned int,
                                              ap->host->max_cmd_len,
                                              ap->device[i].cdb_len);

        /* limit bridge transfers to udma5, 200 sectors */
        if (ata_dev_knobble(dev)) {
                if (print_info)
                        ata_dev_printk(dev, KERN_INFO,
                                       "applying bridge limits\n");
                dev->udma_mask &= ATA_UDMA5;
                dev->max_sectors = ATA_MAX_SECTORS;
        }

        if (ap->ops->dev_config)
                ap->ops->dev_config(ap, dev);

        DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
        return 0;

err_out_nosup:
        DPRINTK("EXIT, err\n");
        return rc;
}

/**
 * ata_bus_probe - Reset and probe ATA bus
 * @ap: Bus to probe
 *
 * Master ATA bus probing function.  Initiates a hardware-dependent
 * bus reset, then attempts to identify any devices found on
 * the bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Zero on success, negative errno otherwise.
 */

static int ata_bus_probe(struct ata_port *ap)
{
        unsigned int classes[ATA_MAX_DEVICES];
        int tries[ATA_MAX_DEVICES];
        int i, rc, down_xfermask;
        struct ata_device *dev;

        ata_port_probe(ap);

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
        down_xfermask = 0;

        /* reset and determine device classes */
        for (i = 0; i < ATA_MAX_DEVICES; i++)
                classes[i] = ATA_DEV_UNKNOWN;

        if (ap->ops->probe_reset) {
                rc = ap->ops->probe_reset(ap, classes);
                if (rc) {
                        ata_port_printk(ap, KERN_ERR,
                                        "reset failed (errno=%d)\n", rc);
                        return rc;
                }
        } else {
                ap->ops->phy_reset(ap);

                for (i = 0; i < ATA_MAX_DEVICES; i++) {
                        if (!(ap->flags & ATA_FLAG_DISABLED))
                                classes[i] = ap->device[i].class;
                        ap->device[i].class = ATA_DEV_UNKNOWN;
                }

                ata_port_probe(ap);
        }

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                if (classes[i] == ATA_DEV_UNKNOWN)
                        classes[i] = ATA_DEV_NONE;

        /* read IDENTIFY page and configure devices */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                dev = &ap->device[i];

                if (tries[i])
                        dev->class = classes[i];

                if (!ata_dev_enabled(dev))
                        continue;

                rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
                if (rc)
                        goto fail;

                rc = ata_dev_configure(dev, 1);
                if (rc)
                        goto fail;
        }

        /* configure transfer mode */
        rc = ata_set_mode(ap, &dev);
        if (rc) {
                down_xfermask = 1;
                goto fail;
        }

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                if (ata_dev_enabled(&ap->device[i]))
                        return 0;

        /* no device present, disable port */
        ata_port_disable(ap);
        ap->ops->port_disable(ap);
        return -ENODEV;

 fail:
        switch (rc) {
        case -EINVAL:
        case -ENODEV:
                tries[dev->devno] = 0;
                break;
        case -EIO:
                sata_down_spd_limit(ap);
                /* fall through */
        default:
                tries[dev->devno]--;
                if (down_xfermask &&
                    ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
                        tries[dev->devno] = 0;
        }

        if (!tries[dev->devno]) {
                ata_down_xfermask_limit(dev, 1);
                ata_dev_disable(dev);
        }

        goto retry;
}
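
/*
 * Failure handling above in short: -EINVAL and -ENODEV give up on the
 * device immediately; -EIO first steps the SATA link speed down; any
 * other error just burns one of the ATA_PROBE_MAX_TRIES attempts,
 * lowering the transfer mode limit when the failure came from
 * ata_set_mode().  A device that runs out of tries is forced to PIO0
 * and disabled.
 */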

/**
 * ata_port_probe - Mark port as enabled
 * @ap: Port for which we indicate enablement
 *
 * Modify @ap data structure such that the system
 * thinks that the entire port is enabled.
 *
 * LOCKING: host_set lock, or some other form of
 * serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
        ap->flags &= ~ATA_FLAG_DISABLED;
}

/**
 * sata_print_link_status - Print SATA link status
 * @ap: SATA port to printk link status about
 *
 * This function prints link speed and status of a SATA link.
 *
 * LOCKING:
 * None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
        u32 sstatus, scontrol, tmp;

        if (sata_scr_read(ap, SCR_STATUS, &sstatus))
                return;
        sata_scr_read(ap, SCR_CONTROL, &scontrol);

        if (ata_port_online(ap)) {
                tmp = (sstatus >> 4) & 0xf;
                ata_port_printk(ap, KERN_INFO,
                                "SATA link up %s (SStatus %X SControl %X)\n",
                                sata_spd_string(tmp), sstatus, scontrol);
        } else {
                ata_port_printk(ap, KERN_INFO,
                                "SATA link down (SStatus %X SControl %X)\n",
                                sstatus, scontrol);
        }
}

/**
 * __sata_phy_reset - Wake/reset a low-level SATA PHY
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function issues commands to standard SATA Sxxx
 * PHY registers, to wake up the phy (and device), and
 * clear any reset condition.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
        u32 sstatus;
        unsigned long timeout = jiffies + (HZ * 5);

        if (ap->flags & ATA_FLAG_SATA_RESET) {
                /* issue phy wake/reset */
                sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
                /* Couldn't find anything in SATA I/II specs, but
                 * AHCI-1.1 10.4.2 says at least 1 ms. */
                mdelay(1);
        }
        /* phy wake/clear reset */
        sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

        /* wait for phy to become ready, if necessary */
        do {
                msleep(200);
                sata_scr_read(ap, SCR_STATUS, &sstatus);
                if ((sstatus & 0xf) != 1)
                        break;
        } while (time_before(jiffies, timeout));

        /* print link status */
        sata_print_link_status(ap);

        /* TODO: phy layer with polling, timeouts, etc. */
        if (!ata_port_offline(ap))
                ata_port_probe(ap);
        else
                ata_port_disable(ap);

        if (ap->flags & ATA_FLAG_DISABLED)
                return;

        if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
                ata_port_disable(ap);
                return;
        }

        ap->cbl = ATA_CBL_SATA;
}

/**
 * sata_phy_reset - Reset SATA bus.
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function resets the SATA bus, and then probes
 * the bus for devices.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
        __sata_phy_reset(ap);
        if (ap->flags & ATA_FLAG_DISABLED)
                return;
        ata_bus_reset(ap);
}

/**
 * ata_dev_pair - return other device on cable
 * @adev: device
 *
 * Obtain the other device on the same cable, or if none is
 * present, NULL is returned.
 */

struct ata_device *ata_dev_pair(struct ata_device *adev)
{
        struct ata_port *ap = adev->ap;
        struct ata_device *pair = &ap->device[1 - adev->devno];
        if (!ata_dev_enabled(pair))
                return NULL;
        return pair;
}

/**
 * ata_port_disable - Disable port.
 * @ap: Port to be disabled.
 *
 * Modify @ap data structure such that the system
 * thinks that the entire port is disabled, and should
 * never attempt to probe or communicate with devices
 * on this port.
 *
 * LOCKING: host_set lock, or some other form of
 * serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
        ap->device[0].class = ATA_DEV_NONE;
        ap->device[1].class = ATA_DEV_NONE;
        ap->flags |= ATA_FLAG_DISABLED;
}

/**
 * sata_down_spd_limit - adjust SATA spd limit downward
 * @ap: Port to adjust SATA spd limit for
 *
 * Adjust SATA spd limit of @ap downward.  Note that this
 * function only adjusts the limit.  The change must be applied
 * using sata_set_spd().
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_port *ap)
{
        u32 sstatus, spd, mask;
        int rc, highbit;

        rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
        if (rc)
                return rc;

        mask = ap->sata_spd_limit;
        if (mask <= 1)
                return -EINVAL;
        highbit = fls(mask) - 1;
        mask &= ~(1 << highbit);

        spd = (sstatus >> 4) & 0xf;
        if (spd <= 1)
                return -EINVAL;
        spd--;
        mask &= (1 << spd) - 1;
        if (!mask)
                return -EINVAL;

        ap->sata_spd_limit = mask;

        ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
                        sata_spd_string(fls(mask)));

        return 0;
}
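
/*
 * Example: with sata_spd_limit 0x3 (1.5 and 3.0 Gbps allowed) and a
 * link currently at 3.0 Gbps (spd == 2), the highest bit is stripped
 * and the mask clipped below the current speed, leaving 0x1 -- the
 * next hardreset may only bring the link up at 1.5 Gbps.
 */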

static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
{
        u32 spd, limit;

        if (ap->sata_spd_limit == UINT_MAX)
                limit = 0;
        else
                limit = fls(ap->sata_spd_limit);

        spd = (*scontrol >> 4) & 0xf;
        *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);

        return spd != limit;
}

/**
 * sata_set_spd_needed - is SATA spd configuration needed
 * @ap: Port in question
 *
 * Test whether the spd limit in SControl matches
 * @ap->sata_spd_limit.  This function is used to determine
 * whether hardreset is necessary to apply SATA spd
 * configuration.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_port *ap)
{
        u32 scontrol;

        if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
                return 0;

        return __sata_set_spd_needed(ap, &scontrol);
}

/**
 * sata_set_spd - set SATA spd according to spd limit
 * @ap: Port to set SATA spd for
 *
 * Set SATA spd of @ap according to sata_spd_limit.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 0 if spd doesn't need to be changed, 1 if spd has been
 * changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_port *ap)
{
        u32 scontrol;
        int rc;

        if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
                return rc;

        if (!__sata_set_spd_needed(ap, &scontrol))
                return 0;

        if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
                return rc;

        return 1;
}

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 */

static const struct ata_timing ata_timing[] = {

        { XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
        { XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
        { XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
        { XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

        { XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
        { XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
        { XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*      { XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

        { XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
        { XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
        { XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

        { XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
        { XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
        { XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*      { XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
        { XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
        { XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

        { XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
        { XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
        { XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*      { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

        { 0xFF }
};

#define ENOUGH(v,unit)  (((v)-1)/(unit)+1)
#define EZ(v,unit)      ((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
        q->setup   = EZ(t->setup   * 1000,  T);
        q->act8b   = EZ(t->act8b   * 1000,  T);
        q->rec8b   = EZ(t->rec8b   * 1000,  T);
        q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
        q->active  = EZ(t->active  * 1000,  T);
        q->recover = EZ(t->recover * 1000,  T);
        q->cycle   = EZ(t->cycle   * 1000,  T);
        q->udma    = EZ(t->udma    * 1000, UT);
}
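
/*
 * ENOUGH() is integer ceiling division: a 70 ns parameter on a 30 ns
 * clock needs ENOUGH(70, 30) == (70 - 1) / 30 + 1 == 3 cycles.  EZ()
 * additionally maps 0 ("not specified") to 0 so unset fields survive
 * quantization.  The * 1000 scaling above lets callers pass T and UT
 * with sub-nanosecond resolution (e.g. picosecond clock periods).
 */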

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
                      struct ata_timing *m, unsigned int what)
{
        if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
        if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
        if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
        if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
        if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
        if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
        if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
        if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
{
        const struct ata_timing *t;

        for (t = ata_timing; t->mode != speed; t++)
                if (t->mode == 0xFF)
                        return NULL;
        return t;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
                       struct ata_timing *t, int T, int UT)
{
        const struct ata_timing *s;
        struct ata_timing p;

        /*
         * Find the mode.
         */

        if (!(s = ata_timing_find_mode(speed)))
                return -EINVAL;

        memcpy(t, s, sizeof(*s));

        /*
         * If the drive is an EIDE drive, it can tell us it needs extended
         * PIO/MW_DMA cycle timing.
         */

        if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
                memset(&p, 0, sizeof(p));
                if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
                        if (speed <= XFER_PIO_2)
                                p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
                        else
                                p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
                } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
                        p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
                }
                ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
        }

        /*
         * Convert the timing to bus clock counts.
         */

        ata_timing_quantize(t, t, T, UT);

        /*
         * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
         * S.M.A.R.T. and some other commands.  We have to ensure that the
         * DMA cycle timing is slower than or equal to the fastest PIO timing.
         */

        if (speed > XFER_PIO_4) {
                ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
                ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
        }

        /*
         * Lengthen active & recovery time so that cycle time is correct.
         */

        if (t->act8b + t->rec8b < t->cyc8b) {
                t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
                t->rec8b = t->cyc8b - t->act8b;
        }

        if (t->active + t->recover < t->cycle) {
                t->active += (t->cycle - (t->active + t->recover)) / 2;
                t->recover = t->cycle - t->active;
        }

        return 0;
}

/**
 * ata_down_xfermask_limit - adjust dev xfer masks downward
 * @dev: Device to adjust xfer masks
 * @force_pio0: Force PIO0
 *
 * Adjust xfer masks of @dev downward.  Note that this function
 * does not apply the change.  Invoking ata_set_mode() afterwards
 * will apply the limit.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
{
        unsigned long xfer_mask;
        int highbit;

        xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
                                      dev->udma_mask);

        if (!xfer_mask)
                goto fail;
        /* don't gear down to MWDMA from UDMA, go directly to PIO */
        if (xfer_mask & ATA_MASK_UDMA)
                xfer_mask &= ~ATA_MASK_MWDMA;

        highbit = fls(xfer_mask) - 1;
        xfer_mask &= ~(1 << highbit);
        if (force_pio0)
                xfer_mask &= 1 << ATA_SHIFT_PIO;
        if (!xfer_mask)
                goto fail;

        ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
                            &dev->udma_mask);

        ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
                       ata_mode_string(xfer_mask));

        return 0;

 fail:
        return -EINVAL;
}
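
/*
 * Example (shift layout as assumed earlier): pio_mask 0x1f with
 * udma_mask 0x3f packs to 0x3f1f; the UDMA part survives (MWDMA is
 * cleared anyway when UDMA bits exist), the top bit is stripped, and
 * unpacking leaves udma_mask 0x1f -- one UDMA step down, e.g.
 * UDMA/100 -> UDMA/66.  With @force_pio0 the mask is clipped to PIO0
 * only.
 */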
2023
2024 static int ata_dev_set_mode(struct ata_device *dev)
2025 {
2026 unsigned int err_mask;
2027 int rc;
2028
2029 dev->flags &= ~ATA_DFLAG_PIO;
2030 if (dev->xfer_shift == ATA_SHIFT_PIO)
2031 dev->flags |= ATA_DFLAG_PIO;
2032
2033 err_mask = ata_dev_set_xfermode(dev);
2034 if (err_mask) {
2035 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2036 "(err_mask=0x%x)\n", err_mask);
2037 return -EIO;
2038 }
2039
2040 rc = ata_dev_revalidate(dev, 0);
2041 if (rc)
2042 return rc;
2043
2044 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2045 dev->xfer_shift, (int)dev->xfer_mode);
2046
2047 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2048 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2049 return 0;
2050 }
2051
2052 /**
2053 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2054 * @ap: port on which timings will be programmed
2055 * @r_failed_dev: out paramter for failed device
2056 *
2057 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2058 * ata_set_mode() fails, pointer to the failing device is
2059 * returned in @r_failed_dev.
2060 *
2061 * LOCKING:
2062 * PCI/etc. bus probe sem.
2063 *
2064 * RETURNS:
2065 * 0 on success, negative errno otherwise
2066 */
2067 int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2068 {
2069 struct ata_device *dev;
2070 int i, rc = 0, used_dma = 0, found = 0;
2071
2072 /* has private set_mode? */
2073 if (ap->ops->set_mode) {
2074 /* FIXME: make ->set_mode handle no device case and
2075 * return error code and failing device on failure.
2076 */
2077 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2078 if (ata_dev_enabled(&ap->device[i])) {
2079 ap->ops->set_mode(ap);
2080 break;
2081 }
2082 }
2083 return 0;
2084 }
2085
2086 /* step 1: calculate xfer_mask */
2087 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2088 unsigned int pio_mask, dma_mask;
2089
2090 dev = &ap->device[i];
2091
2092 if (!ata_dev_enabled(dev))
2093 continue;
2094
2095 ata_dev_xfermask(dev);
2096
2097 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2098 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2099 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2100 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2101
2102 found = 1;
2103 if (dev->dma_mode)
2104 used_dma = 1;
2105 }
2106 if (!found)
2107 goto out;
2108
2109 /* step 2: always set host PIO timings */
2110 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2111 dev = &ap->device[i];
2112 if (!ata_dev_enabled(dev))
2113 continue;
2114
2115 if (!dev->pio_mode) {
2116 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2117 rc = -EINVAL;
2118 goto out;
2119 }
2120
2121 dev->xfer_mode = dev->pio_mode;
2122 dev->xfer_shift = ATA_SHIFT_PIO;
2123 if (ap->ops->set_piomode)
2124 ap->ops->set_piomode(ap, dev);
2125 }
2126
2127 /* step 3: set host DMA timings */
2128 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2129 dev = &ap->device[i];
2130
2131 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2132 continue;
2133
2134 dev->xfer_mode = dev->dma_mode;
2135 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2136 if (ap->ops->set_dmamode)
2137 ap->ops->set_dmamode(ap, dev);
2138 }
2139
2140 /* step 4: update devices' xfer mode */
2141 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2142 dev = &ap->device[i];
2143
2144 if (!ata_dev_enabled(dev))
2145 continue;
2146
2147 rc = ata_dev_set_mode(dev);
2148 if (rc)
2149 goto out;
2150 }
2151
2152 /* Record simplex status. If we selected DMA then the other
2153 * host channels are not permitted to do so.
2154 */
2155 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
2156 ap->host_set->simplex_claimed = 1;
2157
2158 	/* step 5: chip-specific finalisation */
2159 if (ap->ops->post_set_mode)
2160 ap->ops->post_set_mode(ap);
2161
2162 out:
2163 if (rc)
2164 *r_failed_dev = dev;
2165 return rc;
2166 }
2167
2168 /**
2169 * ata_tf_to_host - issue ATA taskfile to host controller
2170 * @ap: port to which command is being issued
2171 * @tf: ATA taskfile register set
2172 *
2173 * Issues ATA taskfile register set to ATA host controller,
2174 * with proper synchronization with interrupt handler and
2175 * other threads.
2176 *
2177 * LOCKING:
2178 * spin_lock_irqsave(host_set lock)
2179 */
2180
2181 static inline void ata_tf_to_host(struct ata_port *ap,
2182 const struct ata_taskfile *tf)
2183 {
2184 ap->ops->tf_load(ap, tf);
2185 ap->ops->exec_command(ap, tf);
2186 }
2187
2188 /**
2189 * ata_busy_sleep - sleep until BSY clears, or timeout
2190 * @ap: port containing status register to be polled
2191 * @tmout_pat: impatience timeout
2192 * @tmout: overall timeout
2193 *
2194 * Sleep until ATA Status register bit BSY clears,
2195 * or a timeout occurs.
2196 *
2197 * LOCKING: None.
2198 */
2199
2200 unsigned int ata_busy_sleep (struct ata_port *ap,
2201 unsigned long tmout_pat, unsigned long tmout)
2202 {
2203 unsigned long timer_start, timeout;
2204 u8 status;
2205
2206 status = ata_busy_wait(ap, ATA_BUSY, 300);
2207 timer_start = jiffies;
2208 timeout = timer_start + tmout_pat;
2209 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2210 msleep(50);
2211 status = ata_busy_wait(ap, ATA_BUSY, 3);
2212 }
2213
2214 if (status & ATA_BUSY)
2215 ata_port_printk(ap, KERN_WARNING,
2216 "port is slow to respond, please be patient\n");
2217
2218 timeout = timer_start + tmout;
2219 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2220 msleep(50);
2221 status = ata_chk_status(ap);
2222 }
2223
2224 if (status & ATA_BUSY) {
2225 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2226 "(%lu secs)\n", tmout / HZ);
2227 return 1;
2228 }
2229
2230 return 0;
2231 }
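
/* Example, mirroring the reset paths below: callers poll with a short
 * "impatience" window and a longer hard limit, and treat a nonzero
 * return as a failed reset:
 *
 *	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
 *		return -EIO;	-- port never came out of BSY
 */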
2232
2233 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2234 {
2235 struct ata_ioports *ioaddr = &ap->ioaddr;
2236 unsigned int dev0 = devmask & (1 << 0);
2237 unsigned int dev1 = devmask & (1 << 1);
2238 unsigned long timeout;
2239
2240 /* if device 0 was found in ata_devchk, wait for its
2241 * BSY bit to clear
2242 */
2243 if (dev0)
2244 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2245
2246 /* if device 1 was found in ata_devchk, wait for
2247 * register access, then wait for BSY to clear
2248 */
2249 timeout = jiffies + ATA_TMOUT_BOOT;
2250 while (dev1) {
2251 u8 nsect, lbal;
2252
2253 ap->ops->dev_select(ap, 1);
2254 if (ap->flags & ATA_FLAG_MMIO) {
2255 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2256 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2257 } else {
2258 nsect = inb(ioaddr->nsect_addr);
2259 lbal = inb(ioaddr->lbal_addr);
2260 }
2261 if ((nsect == 1) && (lbal == 1))
2262 break;
2263 if (time_after(jiffies, timeout)) {
2264 dev1 = 0;
2265 break;
2266 }
2267 msleep(50); /* give drive a breather */
2268 }
2269 if (dev1)
2270 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2271
2272 /* is all this really necessary? */
2273 ap->ops->dev_select(ap, 0);
2274 if (dev1)
2275 ap->ops->dev_select(ap, 1);
2276 if (dev0)
2277 ap->ops->dev_select(ap, 0);
2278 }
2279
2280 static unsigned int ata_bus_softreset(struct ata_port *ap,
2281 unsigned int devmask)
2282 {
2283 struct ata_ioports *ioaddr = &ap->ioaddr;
2284
2285 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2286
2287 /* software reset. causes dev0 to be selected */
2288 if (ap->flags & ATA_FLAG_MMIO) {
2289 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2290 udelay(20); /* FIXME: flush */
2291 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2292 udelay(20); /* FIXME: flush */
2293 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2294 } else {
2295 outb(ap->ctl, ioaddr->ctl_addr);
2296 udelay(10);
2297 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2298 udelay(10);
2299 outb(ap->ctl, ioaddr->ctl_addr);
2300 }
2301
2302 /* spec mandates ">= 2ms" before checking status.
2303 * We wait 150ms, because that was the magic delay used for
2304 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2305 * between when the ATA command register is written, and then
2306 * status is checked. Because waiting for "a while" before
2307 * checking status is fine, post SRST, we perform this magic
2308 * delay here as well.
2309 *
2310 	 * The old drivers/ide code uses the 2ms rule and then waits for ready.
2311 */
2312 msleep(150);
2313
2314 /* Before we perform post reset processing we want to see if
2315 * the bus shows 0xFF because the odd clown forgets the D7
2316 * pulldown resistor.
2317 */
2318 if (ata_check_status(ap) == 0xFF) {
2319 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2320 return AC_ERR_OTHER;
2321 }
2322
2323 ata_bus_post_reset(ap, devmask);
2324
2325 return 0;
2326 }
2327
2328 /**
2329 * ata_bus_reset - reset host port and associated ATA channel
2330 * @ap: port to reset
2331 *
2332 * This is typically the first time we actually start issuing
2333 * commands to the ATA channel. We wait for BSY to clear, then
2334 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2335 * result. Determine what devices, if any, are on the channel
2336 * by looking at the device 0/1 error register. Look at the signature
2337 * stored in each device's taskfile registers, to determine if
2338 * the device is ATA or ATAPI.
2339 *
2340 * LOCKING:
2341 * PCI/etc. bus probe sem.
2342 * Obtains host_set lock.
2343 *
2344 * SIDE EFFECTS:
2345 * Sets ATA_FLAG_DISABLED if bus reset fails.
2346 */
2347
2348 void ata_bus_reset(struct ata_port *ap)
2349 {
2350 struct ata_ioports *ioaddr = &ap->ioaddr;
2351 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2352 u8 err;
2353 unsigned int dev0, dev1 = 0, devmask = 0;
2354
2355 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2356
2357 /* determine if device 0/1 are present */
2358 if (ap->flags & ATA_FLAG_SATA_RESET)
2359 dev0 = 1;
2360 else {
2361 dev0 = ata_devchk(ap, 0);
2362 if (slave_possible)
2363 dev1 = ata_devchk(ap, 1);
2364 }
2365
2366 if (dev0)
2367 devmask |= (1 << 0);
2368 if (dev1)
2369 devmask |= (1 << 1);
2370
2371 /* select device 0 again */
2372 ap->ops->dev_select(ap, 0);
2373
2374 /* issue bus reset */
2375 if (ap->flags & ATA_FLAG_SRST)
2376 if (ata_bus_softreset(ap, devmask))
2377 goto err_out;
2378
2379 /*
2380 * determine by signature whether we have ATA or ATAPI devices
2381 */
2382 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2383 if ((slave_possible) && (err != 0x81))
2384 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2385
2386 /* re-enable interrupts */
2387 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2388 ata_irq_on(ap);
2389
2390 /* is double-select really necessary? */
2391 if (ap->device[1].class != ATA_DEV_NONE)
2392 ap->ops->dev_select(ap, 1);
2393 if (ap->device[0].class != ATA_DEV_NONE)
2394 ap->ops->dev_select(ap, 0);
2395
2396 /* if no devices were detected, disable this port */
2397 if ((ap->device[0].class == ATA_DEV_NONE) &&
2398 (ap->device[1].class == ATA_DEV_NONE))
2399 goto err_out;
2400
2401 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2402 /* set up device control for ATA_FLAG_SATA_RESET */
2403 if (ap->flags & ATA_FLAG_MMIO)
2404 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2405 else
2406 outb(ap->ctl, ioaddr->ctl_addr);
2407 }
2408
2409 DPRINTK("EXIT\n");
2410 return;
2411
2412 err_out:
2413 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2414 ap->ops->port_disable(ap);
2415
2416 DPRINTK("EXIT\n");
2417 }
2418
2419 static int sata_phy_resume(struct ata_port *ap)
2420 {
2421 unsigned long timeout = jiffies + (HZ * 5);
2422 u32 scontrol, sstatus;
2423 int rc;
2424
2425 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2426 return rc;
2427
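	/* SControl layout (SATA spec): bits 3:0 DET, 7:4 SPD, 11:8 IPM.
	 * Keeping the 0x0f0 SPD field and writing 0x300 sets IPM=3
	 * (disallow transitions to partial/slumber power states) with
	 * DET=0 (no reset action), i.e. wake the phy without
	 * re-initializing the link.
	 */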
2428 scontrol = (scontrol & 0x0f0) | 0x300;
2429
2430 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2431 return rc;
2432
2433 /* Wait for phy to become ready, if necessary. */
2434 do {
2435 msleep(200);
2436 if ((rc = sata_scr_read(ap, SCR_STATUS, &sstatus)))
2437 return rc;
2438 if ((sstatus & 0xf) != 1)
2439 return 0;
2440 } while (time_before(jiffies, timeout));
2441
2442 return -EBUSY;
2443 }
2444
2445 /**
2446 * ata_std_probeinit - initialize probing
2447 * @ap: port to be probed
2448 *
2449 * @ap is about to be probed. Initialize it. This function is
2450 * to be used as standard callback for ata_drive_probe_reset().
2451 *
2452 * NOTE!!! Do not use this function as probeinit if a low level
2453 * driver implements only hardreset. Just pass NULL as probeinit
2454 * in that case. Using this function is probably okay but doing
2455 * so makes reset sequence different from the original
2456 * ->phy_reset implementation and Jeff nervous. :-P
2457 */
2458 void ata_std_probeinit(struct ata_port *ap)
2459 {
2460 u32 scontrol;
2461
2462 /* resume link */
2463 sata_phy_resume(ap);
2464
2465 /* init sata_spd_limit to the current value */
2466 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
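		/* The SControl SPD field reads back any BIOS/firmware
		 * speed limit: 0 means no limit; a nonzero value N caps
		 * the port at generation N, e.g. SPD == 1 leaves only
		 * the 1.5 Gbps bit in the mask below.
		 */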
2467 		int spd = (scontrol >> 4) & 0xf;
2468 		if (spd) ap->sata_spd_limit &= (1 << spd) - 1;
2469 }
2470
2471 /* wait for device */
2472 if (ata_port_online(ap))
2473 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2474 }
2475
2476 /**
2477 * ata_std_softreset - reset host port via ATA SRST
2478 * @ap: port to reset
2479 * @classes: resulting classes of attached devices
2480 *
2481 * Reset host port using ATA SRST. This function is to be used
2482 * as standard callback for ata_drive_*_reset() functions.
2483 *
2484 * LOCKING:
2485 * Kernel thread context (may sleep)
2486 *
2487 * RETURNS:
2488 * 0 on success, -errno otherwise.
2489 */
2490 int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2491 {
2492 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2493 unsigned int devmask = 0, err_mask;
2494 u8 err;
2495
2496 DPRINTK("ENTER\n");
2497
2498 if (ata_port_offline(ap)) {
2499 classes[0] = ATA_DEV_NONE;
2500 goto out;
2501 }
2502
2503 /* determine if device 0/1 are present */
2504 if (ata_devchk(ap, 0))
2505 devmask |= (1 << 0);
2506 if (slave_possible && ata_devchk(ap, 1))
2507 devmask |= (1 << 1);
2508
2509 /* select device 0 again */
2510 ap->ops->dev_select(ap, 0);
2511
2512 /* issue bus reset */
2513 DPRINTK("about to softreset, devmask=%x\n", devmask);
2514 err_mask = ata_bus_softreset(ap, devmask);
2515 if (err_mask) {
2516 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2517 err_mask);
2518 return -EIO;
2519 }
2520
2521 /* determine by signature whether we have ATA or ATAPI devices */
2522 classes[0] = ata_dev_try_classify(ap, 0, &err);
2523 if (slave_possible && err != 0x81)
2524 classes[1] = ata_dev_try_classify(ap, 1, &err);
2525
2526 out:
2527 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2528 return 0;
2529 }
2530
2531 /**
2532 * sata_std_hardreset - reset host port via SATA phy reset
2533 * @ap: port to reset
2534 * @class: resulting class of attached device
2535 *
2536 * SATA phy-reset host port using DET bits of SControl register.
2537 * This function is to be used as standard callback for
2538 * ata_drive_*_reset().
2539 *
2540 * LOCKING:
2541 * Kernel thread context (may sleep)
2542 *
2543 * RETURNS:
2544 * 0 on success, -errno otherwise.
2545 */
2546 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2547 {
2548 u32 scontrol;
2549 int rc;
2550
2551 DPRINTK("ENTER\n");
2552
2553 if (sata_set_spd_needed(ap)) {
2554 /* SATA spec says nothing about how to reconfigure
2555 * spd. To be on the safe side, turn off phy during
2556 * reconfiguration. This works for at least ICH7 AHCI
2557 * and Sil3124.
2558 */
2559 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2560 return rc;
2561
2562 scontrol = (scontrol & 0x0f0) | 0x302;
2563
2564 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2565 return rc;
2566
2567 sata_set_spd(ap);
2568 }
2569
2570 /* issue phy wake/reset */
2571 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2572 return rc;
2573
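	/* Writing DET=1 to SControl requests interface re-initialization:
	 * the host phy keeps transmitting COMRESET until DET is written
	 * back to 0, which sata_phy_resume() below takes care of.
	 */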
2574 scontrol = (scontrol & 0x0f0) | 0x301;
2575
2576 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2577 return rc;
2578
2579 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2580 * 10.4.2 says at least 1 ms.
2581 */
2582 msleep(1);
2583
2584 /* bring phy back */
2585 sata_phy_resume(ap);
2586
2587 /* TODO: phy layer with polling, timeouts, etc. */
2588 if (ata_port_offline(ap)) {
2589 *class = ATA_DEV_NONE;
2590 DPRINTK("EXIT, link offline\n");
2591 return 0;
2592 }
2593
2594 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2595 ata_port_printk(ap, KERN_ERR,
2596 "COMRESET failed (device not ready)\n");
2597 return -EIO;
2598 }
2599
2600 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2601
2602 *class = ata_dev_try_classify(ap, 0, NULL);
2603
2604 DPRINTK("EXIT, class=%u\n", *class);
2605 return 0;
2606 }
2607
2608 /**
2609 * ata_std_postreset - standard postreset callback
2610 * @ap: the target ata_port
2611 * @classes: classes of attached devices
2612 *
2613 * This function is invoked after a successful reset. Note that
2614 * the device might have been reset more than once using
2615 * different reset methods before postreset is invoked.
2616 *
2617 * This function is to be used as standard callback for
2618 * ata_drive_*_reset().
2619 *
2620 * LOCKING:
2621 * Kernel thread context (may sleep)
2622 */
2623 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2624 {
2625 u32 serror;
2626
2627 DPRINTK("ENTER\n");
2628
2629 /* print link status */
2630 sata_print_link_status(ap);
2631
2632 /* clear SError */
2633 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2634 sata_scr_write(ap, SCR_ERROR, serror);
2635
2636 /* re-enable interrupts */
2637 if (!ap->ops->error_handler) {
2638 /* FIXME: hack. create a hook instead */
2639 if (ap->ioaddr.ctl_addr)
2640 ata_irq_on(ap);
2641 }
2642
2643 /* is double-select really necessary? */
2644 if (classes[0] != ATA_DEV_NONE)
2645 ap->ops->dev_select(ap, 1);
2646 if (classes[1] != ATA_DEV_NONE)
2647 ap->ops->dev_select(ap, 0);
2648
2649 /* bail out if no device is present */
2650 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2651 DPRINTK("EXIT, no device\n");
2652 return;
2653 }
2654
2655 /* set up device control */
2656 if (ap->ioaddr.ctl_addr) {
2657 if (ap->flags & ATA_FLAG_MMIO)
2658 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2659 else
2660 outb(ap->ctl, ap->ioaddr.ctl_addr);
2661 }
2662
2663 DPRINTK("EXIT\n");
2664 }
2665
2666 /**
2667 * ata_std_probe_reset - standard probe reset method
2668  *	@ap: port on which to perform probe-reset
2669 * @classes: resulting classes of attached devices
2670 *
2671 * The stock off-the-shelf ->probe_reset method.
2672 *
2673 * LOCKING:
2674 * Kernel thread context (may sleep)
2675 *
2676 * RETURNS:
2677 * 0 on success, -errno otherwise.
2678 */
2679 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2680 {
2681 ata_reset_fn_t hardreset;
2682
2683 hardreset = NULL;
2684 if (sata_scr_valid(ap))
2685 hardreset = sata_std_hardreset;
2686
2687 return ata_drive_probe_reset(ap, ata_std_probeinit,
2688 ata_std_softreset, hardreset,
2689 ata_std_postreset, classes);
2690 }
2691
2692 int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
2693 unsigned int *classes)
2694 {
2695 int i, rc;
2696
2697 for (i = 0; i < ATA_MAX_DEVICES; i++)
2698 classes[i] = ATA_DEV_UNKNOWN;
2699
2700 rc = reset(ap, classes);
2701 if (rc)
2702 return rc;
2703
2704 	/* If any class isn't ATA_DEV_UNKNOWN, consider classification
2705 	 * complete and convert all remaining ATA_DEV_UNKNOWN entries to
2706 	 * ATA_DEV_NONE.
2707 */
2708 for (i = 0; i < ATA_MAX_DEVICES; i++)
2709 if (classes[i] != ATA_DEV_UNKNOWN)
2710 break;
2711
2712 if (i < ATA_MAX_DEVICES)
2713 for (i = 0; i < ATA_MAX_DEVICES; i++)
2714 if (classes[i] == ATA_DEV_UNKNOWN)
2715 classes[i] = ATA_DEV_NONE;
2716
2717 return 0;
2718 }
2719
2720 /**
2721 * ata_drive_probe_reset - Perform probe reset with given methods
2722 * @ap: port to reset
2723 * @probeinit: probeinit method (can be NULL)
2724 * @softreset: softreset method (can be NULL)
2725 * @hardreset: hardreset method (can be NULL)
2726 * @postreset: postreset method (can be NULL)
2727 * @classes: resulting classes of attached devices
2728 *
2729 * Reset the specified port and classify attached devices using
2730 * given methods. This function prefers softreset but tries all
2731  *	possible reset sequences to reset and classify devices.  This
2732  *	function is intended to be used by low-level drivers for
2733  *	constructing their ->probe_reset callback.
2734 *
2735 * Reset methods should follow the following rules.
2736 *
2737  *	- Return 0 on success, -errno on failure.
2738 * - If classification is supported, fill classes[] with
2739 * recognized class codes.
2740 * - If classification is not supported, leave classes[] alone.
2741 *
2742 * LOCKING:
2743 * Kernel thread context (may sleep)
2744 *
2745 * RETURNS:
2746  *	0 on success, -EINVAL if no reset method is available, -ENODEV
2747  *	if classification fails, or any error code from the reset
2748 * methods.
2749 */
2750 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2751 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2752 ata_postreset_fn_t postreset, unsigned int *classes)
2753 {
2754 int rc = -EINVAL;
2755
2756 ata_eh_freeze_port(ap);
2757
2758 if (probeinit)
2759 probeinit(ap);
2760
2761 if (softreset && !sata_set_spd_needed(ap)) {
2762 rc = ata_do_reset(ap, softreset, classes);
2763 if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN)
2764 goto done;
2765 ata_port_printk(ap, KERN_INFO, "softreset failed, "
2766 "will try hardreset in 5 secs\n");
2767 ssleep(5);
2768 }
2769
2770 if (!hardreset)
2771 goto done;
2772
2773 while (1) {
2774 rc = ata_do_reset(ap, hardreset, classes);
2775 if (rc == 0) {
2776 if (classes[0] != ATA_DEV_UNKNOWN)
2777 goto done;
2778 break;
2779 }
2780
2781 if (sata_down_spd_limit(ap))
2782 goto done;
2783
2784 ata_port_printk(ap, KERN_INFO, "hardreset failed, "
2785 "will retry in 5 secs\n");
2786 ssleep(5);
2787 }
2788
2789 if (softreset) {
2790 ata_port_printk(ap, KERN_INFO,
2791 "hardreset succeeded without classification, "
2792 "will retry softreset in 5 secs\n");
2793 ssleep(5);
2794
2795 rc = ata_do_reset(ap, softreset, classes);
2796 }
2797
2798 done:
2799 if (rc == 0) {
2800 if (postreset)
2801 postreset(ap, classes);
2802
2803 ata_eh_thaw_port(ap);
2804
2805 if (classes[0] == ATA_DEV_UNKNOWN)
2806 rc = -ENODEV;
2807 }
2808 return rc;
2809 }
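
/* Illustrative sketch (driver names hypothetical): an LLDD that
 * implements only hardreset would build its ->probe_reset on top of
 * this helper, passing NULL both for probeinit (see the note above
 * ata_std_probeinit()) and for softreset:
 *
 *	static int foo_probe_reset(struct ata_port *ap, unsigned int *classes)
 *	{
 *		return ata_drive_probe_reset(ap, NULL, NULL, foo_hardreset,
 *					     ata_std_postreset, classes);
 *	}
 */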
2810
2811 /**
2812 * ata_dev_same_device - Determine whether new ID matches configured device
2813 * @dev: device to compare against
2814 * @new_class: class of the new device
2815 * @new_id: IDENTIFY page of the new device
2816 *
2817 * Compare @new_class and @new_id against @dev and determine
2818 * whether @dev is the device indicated by @new_class and
2819 * @new_id.
2820 *
2821 * LOCKING:
2822 * None.
2823 *
2824 * RETURNS:
2825 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2826 */
2827 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2828 const u16 *new_id)
2829 {
2830 const u16 *old_id = dev->id;
2831 unsigned char model[2][41], serial[2][21];
2832 u64 new_n_sectors;
2833
2834 if (dev->class != new_class) {
2835 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2836 dev->class, new_class);
2837 return 0;
2838 }
2839
2840 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2841 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2842 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2843 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2844 new_n_sectors = ata_id_n_sectors(new_id);
2845
2846 if (strcmp(model[0], model[1])) {
2847 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2848 "'%s' != '%s'\n", model[0], model[1]);
2849 return 0;
2850 }
2851
2852 if (strcmp(serial[0], serial[1])) {
2853 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2854 "'%s' != '%s'\n", serial[0], serial[1]);
2855 return 0;
2856 }
2857
2858 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2859 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2860 "%llu != %llu\n",
2861 (unsigned long long)dev->n_sectors,
2862 (unsigned long long)new_n_sectors);
2863 return 0;
2864 }
2865
2866 return 1;
2867 }
2868
2869 /**
2870 * ata_dev_revalidate - Revalidate ATA device
2871 * @dev: device to revalidate
2872 * @post_reset: is this revalidation after reset?
2873 *
2874 * Re-read IDENTIFY page and make sure @dev is still attached to
2875 * the port.
2876 *
2877 * LOCKING:
2878 * Kernel thread context (may sleep)
2879 *
2880 * RETURNS:
2881 * 0 on success, negative errno otherwise
2882 */
2883 int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2884 {
2885 unsigned int class = dev->class;
2886 u16 *id = (void *)dev->ap->sector_buf;
2887 int rc;
2888
2889 if (!ata_dev_enabled(dev)) {
2890 rc = -ENODEV;
2891 goto fail;
2892 }
2893
2894 /* read ID data */
2895 rc = ata_dev_read_id(dev, &class, post_reset, id);
2896 if (rc)
2897 goto fail;
2898
2899 /* is the device still there? */
2900 if (!ata_dev_same_device(dev, class, id)) {
2901 rc = -ENODEV;
2902 goto fail;
2903 }
2904
2905 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2906
2907 /* configure device according to the new ID */
2908 rc = ata_dev_configure(dev, 0);
2909 if (rc == 0)
2910 return 0;
2911
2912 fail:
2913 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2914 return rc;
2915 }
2916
2917 static const char * const ata_dma_blacklist [] = {
2918 "WDC AC11000H", NULL,
2919 "WDC AC22100H", NULL,
2920 "WDC AC32500H", NULL,
2921 "WDC AC33100H", NULL,
2922 "WDC AC31600H", NULL,
2923 "WDC AC32100H", "24.09P07",
2924 "WDC AC23200L", "21.10N21",
2925 "Compaq CRD-8241B", NULL,
2926 "CRD-8400B", NULL,
2927 "CRD-8480B", NULL,
2928 "CRD-8482B", NULL,
2929 "CRD-84", NULL,
2930 "SanDisk SDP3B", NULL,
2931 "SanDisk SDP3B-64", NULL,
2932 "SANYO CD-ROM CRD", NULL,
2933 "HITACHI CDR-8", NULL,
2934 "HITACHI CDR-8335", NULL,
2935 "HITACHI CDR-8435", NULL,
2936 "Toshiba CD-ROM XM-6202B", NULL,
2937 "TOSHIBA CD-ROM XM-1702BC", NULL,
2938 "CD-532E-A", NULL,
2939 "E-IDE CD-ROM CR-840", NULL,
2940 "CD-ROM Drive/F5A", NULL,
2941 "WPI CDD-820", NULL,
2942 "SAMSUNG CD-ROM SC-148C", NULL,
2943 "SAMSUNG CD-ROM SC", NULL,
2944 "SanDisk SDP3B-64", NULL,
2945 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2946 "_NEC DV5800A", NULL,
2947 "SAMSUNG CD-ROM SN-124", "N001"
2948 };
2949
2950 static int ata_strim(char *s, size_t len)
2951 {
2952 len = strnlen(s, len);
2953
2954 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2955 while ((len > 0) && (s[len - 1] == ' ')) {
2956 len--;
2957 s[len] = 0;
2958 }
2959 return len;
2960 }
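
/* For example, the IDENTIFY string "WDC AC32100H" arrives blank-filled
 * to its full field width; ata_strim() chops the padding in place and
 * returns the trimmed length (12 here).
 */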
2961
2962 static int ata_dma_blacklisted(const struct ata_device *dev)
2963 {
2964 unsigned char model_num[40];
2965 unsigned char model_rev[16];
2966 unsigned int nlen, rlen;
2967 int i;
2968
2969 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2970 sizeof(model_num));
2971 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2972 sizeof(model_rev));
2973 nlen = ata_strim(model_num, sizeof(model_num));
2974 rlen = ata_strim(model_rev, sizeof(model_rev));
2975
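	/* ata_dma_blacklist[] above is laid out as (model, revision)
	 * pairs; a NULL revision blacklists every firmware revision of
	 * that model, otherwise only the named revision is affected.
	 */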
2976 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2977 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2978 if (ata_dma_blacklist[i+1] == NULL)
2979 return 1;
2980 			if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2981 return 1;
2982 }
2983 }
2984 return 0;
2985 }
2986
2987 /**
2988 * ata_dev_xfermask - Compute supported xfermask of the given device
2989 * @dev: Device to compute xfermask for
2990 *
2991 * Compute supported xfermask of @dev and store it in
2992 * dev->*_mask. This function is responsible for applying all
2993 * known limits including host controller limits, device
2994 * blacklist, etc...
2995 *
2996 * FIXME: The current implementation limits all transfer modes to
2997  *	the fastest mode of the slowest device on the port.  This is not
2998 * required on most controllers.
2999 *
3000 * LOCKING:
3001 * None.
3002 */
3003 static void ata_dev_xfermask(struct ata_device *dev)
3004 {
3005 struct ata_port *ap = dev->ap;
3006 struct ata_host_set *hs = ap->host_set;
3007 unsigned long xfer_mask;
3008 int i;
3009
3010 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3011 ap->mwdma_mask, ap->udma_mask);
3012
3013 /* Apply cable rule here. Don't apply it early because when
3014 * we handle hot plug the cable type can itself change.
3015 */
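	/* Bits 3-7 of the UDMA field (0xF8) are UDMA3-UDMA7, i.e.
	 * UDMA/44 and above -- the modes that require an 80-wire cable.
	 */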
3016 if (ap->cbl == ATA_CBL_PATA40)
3017 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3018
3019 /* FIXME: Use port-wide xfermask for now */
3020 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3021 struct ata_device *d = &ap->device[i];
3022
3023 if (ata_dev_absent(d))
3024 continue;
3025
3026 if (ata_dev_disabled(d)) {
3027 /* to avoid violating device selection timing */
3028 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3029 UINT_MAX, UINT_MAX);
3030 continue;
3031 }
3032
3033 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3034 d->mwdma_mask, d->udma_mask);
3035 xfer_mask &= ata_id_xfermask(d->id);
3036 if (ata_dma_blacklisted(d))
3037 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3038 }
3039
3040 if (ata_dma_blacklisted(dev))
3041 ata_dev_printk(dev, KERN_WARNING,
3042 "device is on DMA blacklist, disabling DMA\n");
3043
3044 if (hs->flags & ATA_HOST_SIMPLEX) {
3045 if (hs->simplex_claimed)
3046 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3047 }
3048
3049 if (ap->ops->mode_filter)
3050 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3051
3052 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3053 &dev->mwdma_mask, &dev->udma_mask);
3054 }
3055
3056 /**
3057 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3058 * @dev: Device to which command will be sent
3059 *
3060 * Issue SET FEATURES - XFER MODE command to device @dev
3061 * on port @ap.
3062 *
3063 * LOCKING:
3064 * PCI/etc. bus probe sem.
3065 *
3066 * RETURNS:
3067 * 0 on success, AC_ERR_* mask otherwise.
3068 */
3069
3070 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3071 {
3072 struct ata_taskfile tf;
3073 unsigned int err_mask;
3074
3075 /* set up set-features taskfile */
3076 DPRINTK("set features - xfer mode\n");
3077
3078 ata_tf_init(dev, &tf);
3079 tf.command = ATA_CMD_SET_FEATURES;
3080 tf.feature = SETFEATURES_XFER;
3081 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3082 tf.protocol = ATA_PROT_NODATA;
3083 tf.nsect = dev->xfer_mode;
3084
3085 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3086
3087 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3088 return err_mask;
3089 }
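
/* For example, programming UDMA/100 issues SET FEATURES with feature
 * 0x03 (SETFEATURES_XFER) and sector count XFER_UDMA_5 (0x45) in the
 * taskfile built above.
 */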
3090
3091 /**
3092 * ata_dev_init_params - Issue INIT DEV PARAMS command
3093 * @dev: Device to which command will be sent
3094 * @heads: Number of heads
3095 * @sectors: Number of sectors
3096 *
3097 * LOCKING:
3098 * Kernel thread context (may sleep)
3099 *
3100 * RETURNS:
3101 * 0 on success, AC_ERR_* mask otherwise.
3102 */
3103 static unsigned int ata_dev_init_params(struct ata_device *dev,
3104 u16 heads, u16 sectors)
3105 {
3106 struct ata_taskfile tf;
3107 unsigned int err_mask;
3108
3109 /* Number of sectors per track 1-255. Number of heads 1-16 */
3110 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3111 return AC_ERR_INVALID;
3112
3113 /* set up init dev params taskfile */
3114 DPRINTK("init dev params \n");
3115
3116 ata_tf_init(dev, &tf);
3117 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3118 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3119 tf.protocol = ATA_PROT_NODATA;
3120 tf.nsect = sectors;
3121 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3122
3123 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3124
3125 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3126 return err_mask;
3127 }
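
/* For example, heads == 16 and sectors == 63 yield tf.nsect == 63 and
 * a taskfile device register low nibble of 0x0f (max head 15).
 */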
3128
3129 /**
3130 * ata_sg_clean - Unmap DMA memory associated with command
3131 * @qc: Command containing DMA memory to be released
3132 *
3133 * Unmap all mapped DMA memory associated with this command.
3134 *
3135 * LOCKING:
3136 * spin_lock_irqsave(host_set lock)
3137 */
3138
3139 static void ata_sg_clean(struct ata_queued_cmd *qc)
3140 {
3141 struct ata_port *ap = qc->ap;
3142 struct scatterlist *sg = qc->__sg;
3143 int dir = qc->dma_dir;
3144 void *pad_buf = NULL;
3145
3146 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3147 WARN_ON(sg == NULL);
3148
3149 if (qc->flags & ATA_QCFLAG_SINGLE)
3150 WARN_ON(qc->n_elem > 1);
3151
3152 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3153
3154 	/* if we padded the buffer out to a 32-bit boundary, and data
3155 * xfer direction is from-device, we must copy from the
3156 * pad buffer back into the supplied buffer
3157 */
3158 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3159 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3160
3161 if (qc->flags & ATA_QCFLAG_SG) {
3162 if (qc->n_elem)
3163 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3164 /* restore last sg */
3165 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3166 if (pad_buf) {
3167 struct scatterlist *psg = &qc->pad_sgent;
3168 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3169 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3170 kunmap_atomic(addr, KM_IRQ0);
3171 }
3172 } else {
3173 if (qc->n_elem)
3174 dma_unmap_single(ap->dev,
3175 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3176 dir);
3177 /* restore sg */
3178 sg->length += qc->pad_len;
3179 if (pad_buf)
3180 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3181 pad_buf, qc->pad_len);
3182 }
3183
3184 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3185 qc->__sg = NULL;
3186 }
3187
3188 /**
3189 * ata_fill_sg - Fill PCI IDE PRD table
3190 * @qc: Metadata associated with taskfile to be transferred
3191 *
3192 * Fill PCI IDE PRD (scatter-gather) table with segments
3193 * associated with the current disk command.
3194 *
3195 * LOCKING:
3196 * spin_lock_irqsave(host_set lock)
3197 *
3198 */
3199 static void ata_fill_sg(struct ata_queued_cmd *qc)
3200 {
3201 struct ata_port *ap = qc->ap;
3202 struct scatterlist *sg;
3203 unsigned int idx;
3204
3205 WARN_ON(qc->__sg == NULL);
3206 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3207
3208 idx = 0;
3209 ata_for_each_sg(sg, qc) {
3210 u32 addr, offset;
3211 u32 sg_len, len;
3212
3213 /* determine if physical DMA addr spans 64K boundary.
3214 * Note h/w doesn't support 64-bit, so we unconditionally
3215 * truncate dma_addr_t to u32.
3216 */
3217 addr = (u32) sg_dma_address(sg);
3218 sg_len = sg_dma_len(sg);
3219
3220 while (sg_len) {
3221 offset = addr & 0xffff;
3222 len = sg_len;
3223 if ((offset + sg_len) > 0x10000)
3224 len = 0x10000 - offset;
3225
3226 ap->prd[idx].addr = cpu_to_le32(addr);
3227 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3228 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3229
3230 idx++;
3231 sg_len -= len;
3232 addr += len;
3233 }
3234 }
3235
3236 if (idx)
3237 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3238 }
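
/* Worked example: a 12 KiB segment at bus address 0x0000f000 crosses a
 * 64 KiB boundary, so the loop above emits two PRD entries,
 * (0x0000f000, len 0x1000) and (0x00010000, len 0x2000), and the final
 * entry gets ATA_PRD_EOT set.
 */
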
3239 /**
3240 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3241 * @qc: Metadata associated with taskfile to check
3242 *
3243 * Allow low-level driver to filter ATA PACKET commands, returning
3244 * a status indicating whether or not it is OK to use DMA for the
3245 * supplied PACKET command.
3246 *
3247 * LOCKING:
3248 * spin_lock_irqsave(host_set lock)
3249 *
3250 * RETURNS: 0 when ATAPI DMA can be used
3251 * nonzero otherwise
3252 */
3253 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3254 {
3255 struct ata_port *ap = qc->ap;
3256 int rc = 0; /* Assume ATAPI DMA is OK by default */
3257
3258 if (ap->ops->check_atapi_dma)
3259 rc = ap->ops->check_atapi_dma(qc);
3260
3261 /* We don't support polling DMA.
3262 * Use PIO if the LLDD handles only interrupts in
3263 * the HSM_ST_LAST state and the ATAPI device
3264 * generates CDB interrupts.
3265 */
3266 if ((ap->flags & ATA_FLAG_PIO_POLLING) &&
3267 (qc->dev->flags & ATA_DFLAG_CDB_INTR))
3268 rc = 1;
3269
3270 return rc;
3271 }
3272 /**
3273 * ata_qc_prep - Prepare taskfile for submission
3274 * @qc: Metadata associated with taskfile to be prepared
3275 *
3276 * Prepare ATA taskfile for submission.
3277 *
3278 * LOCKING:
3279 * spin_lock_irqsave(host_set lock)
3280 */
3281 void ata_qc_prep(struct ata_queued_cmd *qc)
3282 {
3283 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3284 return;
3285
3286 ata_fill_sg(qc);
3287 }
3288
3289 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3290
3291 /**
3292 * ata_sg_init_one - Associate command with memory buffer
3293 * @qc: Command to be associated
3294 * @buf: Memory buffer
3295 * @buflen: Length of memory buffer, in bytes.
3296 *
3297 * Initialize the data-related elements of queued_cmd @qc
3298 * to point to a single memory buffer, @buf of byte length @buflen.
3299 *
3300 * LOCKING:
3301 * spin_lock_irqsave(host_set lock)
3302 */
3303
3304 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3305 {
3306 struct scatterlist *sg;
3307
3308 qc->flags |= ATA_QCFLAG_SINGLE;
3309
3310 memset(&qc->sgent, 0, sizeof(qc->sgent));
3311 qc->__sg = &qc->sgent;
3312 qc->n_elem = 1;
3313 qc->orig_n_elem = 1;
3314 qc->buf_virt = buf;
3315
3316 sg = qc->__sg;
3317 sg_init_one(sg, buf, buflen);
3318 }
3319
3320 /**
3321 * ata_sg_init - Associate command with scatter-gather table.
3322 * @qc: Command to be associated
3323 * @sg: Scatter-gather table.
3324 * @n_elem: Number of elements in s/g table.
3325 *
3326 * Initialize the data-related elements of queued_cmd @qc
3327 * to point to a scatter-gather table @sg, containing @n_elem
3328 * elements.
3329 *
3330 * LOCKING:
3331 * spin_lock_irqsave(host_set lock)
3332 */
3333
3334 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3335 unsigned int n_elem)
3336 {
3337 qc->flags |= ATA_QCFLAG_SG;
3338 qc->__sg = sg;
3339 qc->n_elem = n_elem;
3340 qc->orig_n_elem = n_elem;
3341 }
3342
3343 /**
3344 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3345 * @qc: Command with memory buffer to be mapped.
3346 *
3347 * DMA-map the memory buffer associated with queued_cmd @qc.
3348 *
3349 * LOCKING:
3350 * spin_lock_irqsave(host_set lock)
3351 *
3352 * RETURNS:
3353 * Zero on success, negative on error.
3354 */
3355
3356 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3357 {
3358 struct ata_port *ap = qc->ap;
3359 int dir = qc->dma_dir;
3360 struct scatterlist *sg = qc->__sg;
3361 dma_addr_t dma_address;
3362 int trim_sg = 0;
3363
3364 /* we must lengthen transfers to end on a 32-bit boundary */
3365 qc->pad_len = sg->length & 3;
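	/* e.g. a 1022-byte buffer gives pad_len == 2: the sg entry is
	 * trimmed to 1020 bytes below and the 4-byte pad buffer carries
	 * the last two bytes, so the transfer ends on a 32-bit boundary.
	 */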
3366 if (qc->pad_len) {
3367 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3368 struct scatterlist *psg = &qc->pad_sgent;
3369
3370 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3371
3372 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3373
3374 if (qc->tf.flags & ATA_TFLAG_WRITE)
3375 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3376 qc->pad_len);
3377
3378 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3379 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3380 /* trim sg */
3381 sg->length -= qc->pad_len;
3382 if (sg->length == 0)
3383 trim_sg = 1;
3384
3385 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3386 sg->length, qc->pad_len);
3387 }
3388
3389 if (trim_sg) {
3390 qc->n_elem--;
3391 goto skip_map;
3392 }
3393
3394 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3395 sg->length, dir);
3396 if (dma_mapping_error(dma_address)) {
3397 /* restore sg */
3398 sg->length += qc->pad_len;
3399 return -1;
3400 }
3401
3402 sg_dma_address(sg) = dma_address;
3403 sg_dma_len(sg) = sg->length;
3404
3405 skip_map:
3406 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3407 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3408
3409 return 0;
3410 }
3411
3412 /**
3413 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3414 * @qc: Command with scatter-gather table to be mapped.
3415 *
3416 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3417 *
3418 * LOCKING:
3419 * spin_lock_irqsave(host_set lock)
3420 *
3421 * RETURNS:
3422 * Zero on success, negative on error.
3423 *
3424 */
3425
3426 static int ata_sg_setup(struct ata_queued_cmd *qc)
3427 {
3428 struct ata_port *ap = qc->ap;
3429 struct scatterlist *sg = qc->__sg;
3430 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3431 int n_elem, pre_n_elem, dir, trim_sg = 0;
3432
3433 VPRINTK("ENTER, ata%u\n", ap->id);
3434 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3435
3436 /* we must lengthen transfers to end on a 32-bit boundary */
3437 qc->pad_len = lsg->length & 3;
3438 if (qc->pad_len) {
3439 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3440 struct scatterlist *psg = &qc->pad_sgent;
3441 unsigned int offset;
3442
3443 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3444
3445 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3446
3447 /*
3448 * psg->page/offset are used to copy to-be-written
3449 * data in this function or read data in ata_sg_clean.
3450 */
3451 offset = lsg->offset + lsg->length - qc->pad_len;
3452 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3453 psg->offset = offset_in_page(offset);
3454
3455 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3456 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3457 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3458 kunmap_atomic(addr, KM_IRQ0);
3459 }
3460
3461 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3462 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3463 /* trim last sg */
3464 lsg->length -= qc->pad_len;
3465 if (lsg->length == 0)
3466 trim_sg = 1;
3467
3468 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3469 qc->n_elem - 1, lsg->length, qc->pad_len);
3470 }
3471
3472 pre_n_elem = qc->n_elem;
3473 if (trim_sg && pre_n_elem)
3474 pre_n_elem--;
3475
3476 if (!pre_n_elem) {
3477 n_elem = 0;
3478 goto skip_map;
3479 }
3480
3481 dir = qc->dma_dir;
3482 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3483 if (n_elem < 1) {
3484 /* restore last sg */
3485 lsg->length += qc->pad_len;
3486 return -1;
3487 }
3488
3489 DPRINTK("%d sg elements mapped\n", n_elem);
3490
3491 skip_map:
3492 qc->n_elem = n_elem;
3493
3494 return 0;
3495 }
3496
3497 /**
3498 * swap_buf_le16 - swap halves of 16-bit words in place
3499 * @buf: Buffer to swap
3500 * @buf_words: Number of 16-bit words in buffer.
3501 *
3502 * Swap halves of 16-bit words if needed to convert from
3503 * little-endian byte order to native cpu byte order, or
3504 * vice-versa.
3505 *
3506 * LOCKING:
3507 * Inherited from caller.
3508 */
3509 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3510 {
3511 #ifdef __BIG_ENDIAN
3512 unsigned int i;
3513
3514 for (i = 0; i < buf_words; i++)
3515 buf[i] = le16_to_cpu(buf[i]);
3516 #endif /* __BIG_ENDIAN */
3517 }
3518
3519 /**
3520 * ata_mmio_data_xfer - Transfer data by MMIO
3521 * @ap: port to read/write
3522 * @buf: data buffer
3523 * @buflen: buffer length
3524 * @write_data: read/write
3525 *
3526 * Transfer data from/to the device data register by MMIO.
3527 *
3528 * LOCKING:
3529 * Inherited from caller.
3530 */
3531
3532 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3533 unsigned int buflen, int write_data)
3534 {
3535 unsigned int i;
3536 unsigned int words = buflen >> 1;
3537 u16 *buf16 = (u16 *) buf;
3538 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3539
3540 /* Transfer multiple of 2 bytes */
3541 if (write_data) {
3542 for (i = 0; i < words; i++)
3543 writew(le16_to_cpu(buf16[i]), mmio);
3544 } else {
3545 for (i = 0; i < words; i++)
3546 buf16[i] = cpu_to_le16(readw(mmio));
3547 }
3548
3549 /* Transfer trailing 1 byte, if any. */
3550 if (unlikely(buflen & 0x01)) {
3551 u16 align_buf[1] = { 0 };
3552 unsigned char *trailing_buf = buf + buflen - 1;
3553
3554 if (write_data) {
3555 memcpy(align_buf, trailing_buf, 1);
3556 writew(le16_to_cpu(align_buf[0]), mmio);
3557 } else {
3558 align_buf[0] = cpu_to_le16(readw(mmio));
3559 memcpy(trailing_buf, align_buf, 1);
3560 }
3561 }
3562 }
3563
3564 /**
3565 * ata_pio_data_xfer - Transfer data by PIO
3566 * @ap: port to read/write
3567 * @buf: data buffer
3568 * @buflen: buffer length
3569 * @write_data: read/write
3570 *
3571 * Transfer data from/to the device data register by PIO.
3572 *
3573 * LOCKING:
3574 * Inherited from caller.
3575 */
3576
3577 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3578 unsigned int buflen, int write_data)
3579 {
3580 unsigned int words = buflen >> 1;
3581
3582 /* Transfer multiple of 2 bytes */
3583 if (write_data)
3584 outsw(ap->ioaddr.data_addr, buf, words);
3585 else
3586 insw(ap->ioaddr.data_addr, buf, words);
3587
3588 /* Transfer trailing 1 byte, if any. */
3589 if (unlikely(buflen & 0x01)) {
3590 u16 align_buf[1] = { 0 };
3591 unsigned char *trailing_buf = buf + buflen - 1;
3592
3593 if (write_data) {
3594 memcpy(align_buf, trailing_buf, 1);
3595 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3596 } else {
3597 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3598 memcpy(trailing_buf, align_buf, 1);
3599 }
3600 }
3601 }
3602
3603 /**
3604 * ata_data_xfer - Transfer data from/to the data register.
3605 * @ap: port to read/write
3606 * @buf: data buffer
3607 * @buflen: buffer length
3608 * @do_write: read/write
3609 *
3610 * Transfer data from/to the device data register.
3611 *
3612 * LOCKING:
3613 * Inherited from caller.
3614 */
3615
3616 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3617 unsigned int buflen, int do_write)
3618 {
3619 	/* Make the crap hardware pay the costs, not the good stuff */
3620 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3621 unsigned long flags;
3622 local_irq_save(flags);
3623 if (ap->flags & ATA_FLAG_MMIO)
3624 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3625 else
3626 ata_pio_data_xfer(ap, buf, buflen, do_write);
3627 local_irq_restore(flags);
3628 } else {
3629 if (ap->flags & ATA_FLAG_MMIO)
3630 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3631 else
3632 ata_pio_data_xfer(ap, buf, buflen, do_write);
3633 }
3634 }
3635
3636 /**
3637 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3638 * @qc: Command on going
3639 *
3640 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3641 *
3642 * LOCKING:
3643 * Inherited from caller.
3644 */
3645
3646 static void ata_pio_sector(struct ata_queued_cmd *qc)
3647 {
3648 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3649 struct scatterlist *sg = qc->__sg;
3650 struct ata_port *ap = qc->ap;
3651 struct page *page;
3652 unsigned int offset;
3653 unsigned char *buf;
3654
3655 if (qc->cursect == (qc->nsect - 1))
3656 ap->hsm_task_state = HSM_ST_LAST;
3657
3658 page = sg[qc->cursg].page;
3659 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3660
3661 /* get the current page and offset */
3662 page = nth_page(page, (offset >> PAGE_SHIFT));
3663 offset %= PAGE_SIZE;
3664
3665 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3666
3667 if (PageHighMem(page)) {
3668 unsigned long flags;
3669
3670 local_irq_save(flags);
3671 buf = kmap_atomic(page, KM_IRQ0);
3672
3673 /* do the actual data transfer */
3674 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3675
3676 kunmap_atomic(buf, KM_IRQ0);
3677 local_irq_restore(flags);
3678 } else {
3679 buf = page_address(page);
3680 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3681 }
3682
3683 qc->cursect++;
3684 qc->cursg_ofs++;
3685
3686 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3687 qc->cursg++;
3688 qc->cursg_ofs = 0;
3689 }
3690 }
3691
3692 /**
3693 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3694 * @qc: Command on going
3695 *
3696 * Transfer one or many ATA_SECT_SIZE of data from/to the
3697 * ATA device for the DRQ request.
3698 *
3699 * LOCKING:
3700 * Inherited from caller.
3701 */
3702
3703 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3704 {
3705 if (is_multi_taskfile(&qc->tf)) {
3706 /* READ/WRITE MULTIPLE */
3707 unsigned int nsect;
3708
3709 WARN_ON(qc->dev->multi_count == 0);
3710
3711 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3712 while (nsect--)
3713 ata_pio_sector(qc);
3714 } else
3715 ata_pio_sector(qc);
3716 }
3717
3718 /**
3719 * atapi_send_cdb - Write CDB bytes to hardware
3720 * @ap: Port to which ATAPI device is attached.
3721 * @qc: Taskfile currently active
3722 *
3723 * When device has indicated its readiness to accept
3724 * a CDB, this function is called. Send the CDB.
3725 *
3726 * LOCKING:
3727 * caller.
3728 */
3729
3730 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3731 {
3732 /* send SCSI cdb */
3733 DPRINTK("send cdb\n");
3734 WARN_ON(qc->dev->cdb_len < 12);
3735
3736 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3737 ata_altstatus(ap); /* flush */
3738
3739 switch (qc->tf.protocol) {
3740 case ATA_PROT_ATAPI:
3741 ap->hsm_task_state = HSM_ST;
3742 break;
3743 case ATA_PROT_ATAPI_NODATA:
3744 ap->hsm_task_state = HSM_ST_LAST;
3745 break;
3746 case ATA_PROT_ATAPI_DMA:
3747 ap->hsm_task_state = HSM_ST_LAST;
3748 /* initiate bmdma */
3749 ap->ops->bmdma_start(qc);
3750 break;
3751 }
3752 }
3753
3754 /**
3755 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3756 * @qc: Command on going
3757 * @bytes: number of bytes
3758 *
3759  *	Transfer data from/to the ATAPI device.
3760 *
3761 * LOCKING:
3762 * Inherited from caller.
3763 *
3764 */
3765
3766 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3767 {
3768 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3769 struct scatterlist *sg = qc->__sg;
3770 struct ata_port *ap = qc->ap;
3771 struct page *page;
3772 unsigned char *buf;
3773 unsigned int offset, count;
3774
3775 if (qc->curbytes + bytes >= qc->nbytes)
3776 ap->hsm_task_state = HSM_ST_LAST;
3777
3778 next_sg:
3779 if (unlikely(qc->cursg >= qc->n_elem)) {
3780 /*
3781 * The end of qc->sg is reached and the device expects
3782 		 * more data to transfer. In order not to overrun qc->sg
3783 		 * while still honoring the length in the byte count register:
3784 		 * - for the read case, discard trailing data from the device
3785 		 * - for the write case, pad the device with zero data
3786 */
3787 u16 pad_buf[1] = { 0 };
3788 unsigned int words = bytes >> 1;
3789 unsigned int i;
3790
3791 if (words) /* warning if bytes > 1 */
3792 ata_dev_printk(qc->dev, KERN_WARNING,
3793 "%u bytes trailing data\n", bytes);
3794
3795 for (i = 0; i < words; i++)
3796 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3797
3798 ap->hsm_task_state = HSM_ST_LAST;
3799 return;
3800 }
3801
3802 sg = &qc->__sg[qc->cursg];
3803
3804 page = sg->page;
3805 offset = sg->offset + qc->cursg_ofs;
3806
3807 /* get the current page and offset */
3808 page = nth_page(page, (offset >> PAGE_SHIFT));
3809 offset %= PAGE_SIZE;
3810
3811 /* don't overrun current sg */
3812 count = min(sg->length - qc->cursg_ofs, bytes);
3813
3814 /* don't cross page boundaries */
3815 count = min(count, (unsigned int)PAGE_SIZE - offset);
3816
3817 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3818
3819 if (PageHighMem(page)) {
3820 unsigned long flags;
3821
3822 local_irq_save(flags);
3823 buf = kmap_atomic(page, KM_IRQ0);
3824
3825 /* do the actual data transfer */
3826 ata_data_xfer(ap, buf + offset, count, do_write);
3827
3828 kunmap_atomic(buf, KM_IRQ0);
3829 local_irq_restore(flags);
3830 } else {
3831 buf = page_address(page);
3832 ata_data_xfer(ap, buf + offset, count, do_write);
3833 }
3834
3835 bytes -= count;
3836 qc->curbytes += count;
3837 qc->cursg_ofs += count;
3838
3839 if (qc->cursg_ofs == sg->length) {
3840 qc->cursg++;
3841 qc->cursg_ofs = 0;
3842 }
3843
3844 if (bytes)
3845 goto next_sg;
3846 }
3847
3848 /**
3849 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3850 * @qc: Command on going
3851 *
3852  *	Transfer data from/to the ATAPI device.
3853 *
3854 * LOCKING:
3855 * Inherited from caller.
3856 */
3857
3858 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3859 {
3860 struct ata_port *ap = qc->ap;
3861 struct ata_device *dev = qc->dev;
3862 unsigned int ireason, bc_lo, bc_hi, bytes;
3863 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3864
3865 ap->ops->tf_read(ap, &qc->tf);
3866 ireason = qc->tf.nsect;
3867 bc_lo = qc->tf.lbam;
3868 bc_hi = qc->tf.lbah;
3869 bytes = (bc_hi << 8) | bc_lo;
3870
3871 /* shall be cleared to zero, indicating xfer of data */
3872 if (ireason & (1 << 0))
3873 goto err_out;
3874
3875 /* make sure transfer direction matches expected */
3876 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3877 if (do_write != i_write)
3878 goto err_out;
3879
3880 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3881
3882 __atapi_pio_bytes(qc, bytes);
3883
3884 return;
3885
3886 err_out:
3887 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3888 qc->err_mask |= AC_ERR_HSM;
3889 ap->hsm_task_state = HSM_ST_ERR;
3890 }
3891
3892 /**
3893 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3894 * @ap: the target ata_port
3895 * @qc: qc on going
3896 *
3897 * RETURNS:
3898 * 1 if ok in workqueue, 0 otherwise.
3899 */
3900
3901 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3902 {
3903 if (qc->tf.flags & ATA_TFLAG_POLLING)
3904 return 1;
3905
3906 if (ap->hsm_task_state == HSM_ST_FIRST) {
3907 if (qc->tf.protocol == ATA_PROT_PIO &&
3908 (qc->tf.flags & ATA_TFLAG_WRITE))
3909 return 1;
3910
3911 if (is_atapi_taskfile(&qc->tf) &&
3912 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3913 return 1;
3914 }
3915
3916 return 0;
3917 }
3918
3919 /**
3920 * ata_hsm_qc_complete - finish a qc running on standard HSM
3921 * @qc: Command to complete
3922 * @in_wq: 1 if called from workqueue, 0 otherwise
3923 *
3924 * Finish @qc which is running on standard HSM.
3925 *
3926 * LOCKING:
3927 * If @in_wq is zero, spin_lock_irqsave(host_set lock).
3928 * Otherwise, none on entry and grabs host lock.
3929 */
3930 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
3931 {
3932 struct ata_port *ap = qc->ap;
3933 unsigned long flags;
3934
3935 if (ap->ops->error_handler) {
3936 if (in_wq) {
3937 spin_lock_irqsave(&ap->host_set->lock, flags);
3938
3939 /* EH might have kicked in while host_set lock
3940 * is released.
3941 */
3942 qc = ata_qc_from_tag(ap, qc->tag);
3943 if (qc) {
3944 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
3945 ata_irq_on(ap);
3946 ata_qc_complete(qc);
3947 } else
3948 ata_port_freeze(ap);
3949 }
3950
3951 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3952 } else {
3953 if (likely(!(qc->err_mask & AC_ERR_HSM)))
3954 ata_qc_complete(qc);
3955 else
3956 ata_port_freeze(ap);
3957 }
3958 } else {
3959 if (in_wq) {
3960 spin_lock_irqsave(&ap->host_set->lock, flags);
3961 ata_irq_on(ap);
3962 ata_qc_complete(qc);
3963 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3964 } else
3965 ata_qc_complete(qc);
3966 }
3967 }
3968
3969 /**
3970 * ata_hsm_move - move the HSM to the next state.
3971 * @ap: the target ata_port
3972 * @qc: qc on going
3973 * @status: current device status
3974 * @in_wq: 1 if called from workqueue, 0 otherwise
3975 *
3976 * RETURNS:
3977 * 1 when poll next status needed, 0 otherwise.
3978 */
3979
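/* Illustrative summary of the state flow handled by the switch below:
 *
 *	HSM_ST_FIRST -> HSM_ST        first data block / ATAPI PIO CDB sent
 *	HSM_ST_FIRST -> HSM_ST_LAST   ATAPI NODATA/DMA CDB sent
 *	HSM_ST       -> HSM_ST_LAST   no more DRQ data blocks expected
 *	HSM_ST_LAST  -> HSM_ST_IDLE   status OK, qc completed
 *	any state    -> HSM_ST_ERR    device/protocol error noticed
 *	HSM_ST_ERR   -> HSM_ST_IDLE   qc completed with err_mask set
 */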
3980 static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
3981 u8 status, int in_wq)
3982 {
3983 unsigned long flags = 0;
3984 int poll_next;
3985
3986 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
3987
3988 /* Make sure ata_qc_issue_prot() does not throw things
3989 * like DMA polling into the workqueue. Notice that
3990 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
3991 */
3992 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
3993
3994 fsm_start:
3995 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
3996 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
3997
3998 switch (ap->hsm_task_state) {
3999 case HSM_ST_FIRST:
4000 /* Send first data block or PACKET CDB */
4001
4002 /* If polling, we will stay in the work queue after
4003 * sending the data. Otherwise, interrupt handler
4004 * takes over after sending the data.
4005 */
4006 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4007
4008 /* check device status */
4009 if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
4010 /* Wrong status. Let EH handle this */
4011 qc->err_mask |= AC_ERR_HSM;
4012 ap->hsm_task_state = HSM_ST_ERR;
4013 goto fsm_start;
4014 }
4015
4016 /* Device should not ask for data transfer (DRQ=1)
4017 * when it finds something wrong.
4018 * We ignore DRQ here and stop the HSM by
4019 * changing hsm_task_state to HSM_ST_ERR and
4020 * let the EH abort the command or reset the device.
4021 */
4022 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4023 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4024 ap->id, status);
4025 qc->err_mask |= AC_ERR_DEV;
4026 ap->hsm_task_state = HSM_ST_ERR;
4027 goto fsm_start;
4028 }
4029
4030 /* Send the CDB (atapi) or the first data block (ata pio out).
4031 * During the state transition, interrupt handler shouldn't
4032 * be invoked before the data transfer is complete and
4033 * hsm_task_state is changed. Hence, the following locking.
4034 */
4035 if (in_wq)
4036 spin_lock_irqsave(&ap->host_set->lock, flags);
4037
4038 if (qc->tf.protocol == ATA_PROT_PIO) {
4039 /* PIO data out protocol.
4040 * send first data block.
4041 */
4042
4043 /* ata_pio_sectors() might change the state
4044 * to HSM_ST_LAST. so, the state is changed here
4045 * before ata_pio_sectors().
4046 */
4047 ap->hsm_task_state = HSM_ST;
4048 ata_pio_sectors(qc);
4049 ata_altstatus(ap); /* flush */
4050 } else
4051 /* send CDB */
4052 atapi_send_cdb(ap, qc);
4053
4054 if (in_wq)
4055 spin_unlock_irqrestore(&ap->host_set->lock, flags);
4056
4057 /* if polling, ata_pio_task() handles the rest.
4058 * otherwise, interrupt handler takes over from here.
4059 */
4060 break;
4061
4062 case HSM_ST:
4063 /* complete command or read/write the data register */
4064 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4065 /* ATAPI PIO protocol */
4066 if ((status & ATA_DRQ) == 0) {
4067 /* no more data to transfer */
4068 ap->hsm_task_state = HSM_ST_LAST;
4069 goto fsm_start;
4070 }
4071
4072 /* Device should not ask for data transfer (DRQ=1)
4073 * when it finds something wrong.
4074 * We ignore DRQ here and stop the HSM by
4075 * changing hsm_task_state to HSM_ST_ERR and
4076 * let the EH abort the command or reset the device.
4077 */
4078 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4079 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4080 ap->id, status);
4081 qc->err_mask |= AC_ERR_DEV;
4082 ap->hsm_task_state = HSM_ST_ERR;
4083 goto fsm_start;
4084 }
4085
4086 atapi_pio_bytes(qc);
4087
4088 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4089 /* bad ireason reported by device */
4090 goto fsm_start;
4091
4092 } else {
4093 /* ATA PIO protocol */
4094 if (unlikely((status & ATA_DRQ) == 0)) {
4095 /* handle BSY=0, DRQ=0 as error */
4096 qc->err_mask |= AC_ERR_HSM;
4097 ap->hsm_task_state = HSM_ST_ERR;
4098 goto fsm_start;
4099 }
4100
4101 /* For PIO reads, some devices may ask for
4102 * data transfer (DRQ=1) along with ERR=1.
4103 * We respect DRQ here and transfer one
4104 * block of junk data before changing the
4105 * hsm_task_state to HSM_ST_ERR.
4106 *
4107 * For PIO writes, ERR=1 DRQ=1 doesn't make
4108 * sense since the data block has been
4109 * transferred to the device.
4110 */
4111 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4112 /* data might be corrupted */
4113 qc->err_mask |= AC_ERR_DEV;
4114
4115 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4116 ata_pio_sectors(qc);
4117 ata_altstatus(ap);
4118 status = ata_wait_idle(ap);
4119 }
4120
4121 /* ata_pio_sectors() might change the
4122 * state to HSM_ST_LAST, so the state
4123 * is changed after ata_pio_sectors().
4124 */
4125 ap->hsm_task_state = HSM_ST_ERR;
4126 goto fsm_start;
4127 }
4128
4129 ata_pio_sectors(qc);
4130
4131 if (ap->hsm_task_state == HSM_ST_LAST &&
4132 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4133 /* all data read */
4134 ata_altstatus(ap);
4135 status = ata_wait_idle(ap);
4136 goto fsm_start;
4137 }
4138 }
4139
4140 ata_altstatus(ap); /* flush */
4141 poll_next = 1;
4142 break;
4143
4144 case HSM_ST_LAST:
4145 if (unlikely(!ata_ok(status))) {
4146 qc->err_mask |= __ac_err_mask(status);
4147 ap->hsm_task_state = HSM_ST_ERR;
4148 goto fsm_start;
4149 }
4150
4151 /* no more data to transfer */
4152 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4153 ap->id, qc->dev->devno, status);
4154
4155 WARN_ON(qc->err_mask);
4156
4157 ap->hsm_task_state = HSM_ST_IDLE;
4158
4159 /* complete taskfile transaction */
4160 ata_hsm_qc_complete(qc, in_wq);
4161
4162 poll_next = 0;
4163 break;
4164
4165 case HSM_ST_ERR:
4166 /* make sure qc->err_mask is available to
4167 * know what's wrong and recover
4168 */
4169 WARN_ON(qc->err_mask == 0);
4170
4171 ap->hsm_task_state = HSM_ST_IDLE;
4172
4173 /* complete taskfile transaction */
4174 ata_hsm_qc_complete(qc, in_wq);
4175
4176 poll_next = 0;
4177 break;
4178 default:
4179 poll_next = 0;
4180 BUG();
4181 }
4182
4183 return poll_next;
4184 }
4185
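/*
 * Illustrative walk-through (editorial, not in the original source):
 * for a polled PIO data-out command, the HSM above typically moves
 *
 *	HSM_ST_FIRST -> HSM_ST       (first data block sent)
 *	HSM_ST       -> HSM_ST_LAST  (remaining blocks transferred)
 *	HSM_ST_LAST  -> HSM_ST_IDLE  (final status checked, qc completed)
 *
 * with HSM_ST_ERR entered from any state on a bad status value, after
 * which the qc is completed with a non-zero err_mask for EH to act on.
 */
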
4186 static void ata_pio_task(void *_data)
4187 {
4188 struct ata_queued_cmd *qc = _data;
4189 struct ata_port *ap = qc->ap;
4190 u8 status;
4191 int poll_next;
4192
4193 fsm_start:
4194 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4195
4196 /*
4197 * This is purely heuristic. This is a fast path.
4198 * Sometimes when we enter, BSY will be cleared in
4199 * a chk-status or two. If not, the drive is probably seeking
4200 * or something. Snooze for a couple msecs, then
4201 * chk-status again. If still busy, queue delayed work.
4202 */
4203 status = ata_busy_wait(ap, ATA_BUSY, 5);
4204 if (status & ATA_BUSY) {
4205 msleep(2);
4206 status = ata_busy_wait(ap, ATA_BUSY, 10);
4207 if (status & ATA_BUSY) {
4208 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4209 return;
4210 }
4211 }
4212
4213 /* move the HSM */
4214 poll_next = ata_hsm_move(ap, qc, status, 1);
4215
4216 /* another command or interrupt handler
4217 * may be running at this point.
4218 */
4219 if (poll_next)
4220 goto fsm_start;
4221 }
4222
4223 /**
4224 * ata_qc_new - Request an available ATA command, for queueing
4225 * @ap: Port associated with device @dev
4226 * @dev: Device from whom we request an available command structure
4227 *
4228 * LOCKING:
4229 * None.
4230 */
4231
4232 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4233 {
4234 struct ata_queued_cmd *qc = NULL;
4235 unsigned int i;
4236
4237 /* no command while frozen */
4238 if (unlikely(ap->flags & ATA_FLAG_FROZEN))
4239 return NULL;
4240
4241 /* the last tag is reserved for the internal command. */
4242 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4243 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4244 qc = __ata_qc_from_tag(ap, i);
4245 break;
4246 }
4247
4248 if (qc)
4249 qc->tag = i;
4250
4251 return qc;
4252 }
4253
4254 /**
4255 * ata_qc_new_init - Request an available ATA command, and initialize it
4256 * @dev: Device from whom we request an available command structure
4257 *
4258 * LOCKING:
4259 * None.
4260 */
4261
4262 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4263 {
4264 struct ata_port *ap = dev->ap;
4265 struct ata_queued_cmd *qc;
4266
4267 qc = ata_qc_new(ap);
4268 if (qc) {
4269 qc->scsicmd = NULL;
4270 qc->ap = ap;
4271 qc->dev = dev;
4272
4273 ata_qc_reinit(qc);
4274 }
4275
4276 return qc;
4277 }
4278
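/*
 * Editorial sketch of the usual allocate-fill-issue sequence (compare
 * ata_exec_internal(); "my_qc_complete" is a hypothetical callback and
 * error handling is trimmed for brevity):
 *
 *	struct ata_queued_cmd *qc = ata_qc_new_init(dev);
 *
 *	if (!qc)
 *		return -ENOMEM;		(or defer and retry)
 *	ata_sg_init_one(qc, buf, buflen);	(sets ATA_QCFLAG_SINGLE)
 *	qc->complete_fn = my_qc_complete;
 *	(fill qc->tf and qc->flags as required by the command)
 *	ata_qc_issue(qc);
 */
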
4279 /**
4280 * ata_qc_free - free unused ata_queued_cmd
4281 * @qc: Command to complete
4282 *
4283 * Designed to free unused ata_queued_cmd object
4284 * in case something prevents using it.
4285 *
4286 * LOCKING:
4287 * spin_lock_irqsave(host_set lock)
4288 */
4289 void ata_qc_free(struct ata_queued_cmd *qc)
4290 {
4291 struct ata_port *ap;
4292 unsigned int tag;
4293
4294 WARN_ON(qc == NULL); /* check before dereferencing: ata_qc_from_tag _might_ return NULL */
4295 ap = qc->ap;
4296 qc->flags = 0;
4297 tag = qc->tag;
4298 if (likely(ata_tag_valid(tag))) {
4299 qc->tag = ATA_TAG_POISON;
4300 clear_bit(tag, &ap->qc_allocated);
4301 }
4302 }
4303
4304 void __ata_qc_complete(struct ata_queued_cmd *qc)
4305 {
4306 struct ata_port *ap;
4307 WARN_ON(qc == NULL); /* check before dereferencing: ata_qc_from_tag _might_ return NULL */
4308 ap = qc->ap;
4309 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4310
4311 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4312 ata_sg_clean(qc);
4313
4314 /* command should be marked inactive atomically with qc completion */
4315 if (qc->tf.protocol == ATA_PROT_NCQ)
4316 ap->sactive &= ~(1 << qc->tag);
4317 else
4318 ap->active_tag = ATA_TAG_POISON;
4319
4320 /* atapi: mark qc as inactive to prevent the interrupt handler
4321 * from completing the command twice later, before the error handler
4322 * is called. (when rc != 0 and atapi request sense is needed)
4323 */
4324 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4325 ap->qc_active &= ~(1 << qc->tag);
4326
4327 /* call completion callback */
4328 qc->complete_fn(qc);
4329 }
4330
4331 /**
4332 * ata_qc_complete - Complete an active ATA command
4333 * @qc: Command to complete
4334 *
4335 * Indicate to the mid and upper layers that an ATA command has
4336 * completed, with either an ok or not-ok status; error state,
4337 * if any, is carried in @qc->err_mask.
4338 *
4339 * LOCKING:
4340 * spin_lock_irqsave(host_set lock)
4341 */
4342 void ata_qc_complete(struct ata_queued_cmd *qc)
4343 {
4344 struct ata_port *ap = qc->ap;
4345
4346 /* XXX: New EH and old EH use different mechanisms to
4347 * synchronize EH with regular execution path.
4348 *
4349 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4350 * Normal execution path is responsible for not accessing a
4351 * failed qc. libata core enforces the rule by returning NULL
4352 * from ata_qc_from_tag() for failed qcs.
4353 *
4354 * Old EH depends on ata_qc_complete() nullifying completion
4355 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4356 * not synchronize with interrupt handler. Only PIO task is
4357 * taken care of.
4358 */
4359 if (ap->ops->error_handler) {
4360 WARN_ON(ap->flags & ATA_FLAG_FROZEN);
4361
4362 if (unlikely(qc->err_mask))
4363 qc->flags |= ATA_QCFLAG_FAILED;
4364
4365 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4366 if (!ata_tag_internal(qc->tag)) {
4367 /* always fill result TF for failed qc */
4368 ap->ops->tf_read(ap, &qc->result_tf);
4369 ata_qc_schedule_eh(qc);
4370 return;
4371 }
4372 }
4373
4374 /* read result TF if requested */
4375 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4376 ap->ops->tf_read(ap, &qc->result_tf);
4377
4378 __ata_qc_complete(qc);
4379 } else {
4380 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4381 return;
4382
4383 /* read result TF if failed or requested */
4384 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4385 ap->ops->tf_read(ap, &qc->result_tf);
4386
4387 __ata_qc_complete(qc);
4388 }
4389 }
4390
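/*
 * Editorial sketch: a typical LLD completion path records an error mask
 * derived from the taskfile status byte and then calls ata_qc_complete()
 * ("status" here is whatever the driver read from the Status register):
 *
 *	qc->err_mask |= ac_err_mask(status);
 *	ata_qc_complete(qc);
 */
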
4391 /**
4392 * ata_qc_complete_multiple - Complete multiple qcs successfully
4393 * @ap: port in question
4394 * @qc_active: new qc_active mask
4395 * @finish_qc: LLDD callback invoked before completing a qc
4396 *
4397 * Complete in-flight commands. This function is meant to be
4398 * called from the low-level driver's interrupt routine to complete
4399 * requests normally. ap->qc_active and @qc_active are compared
4400 * and commands are completed accordingly.
4401 *
4402 * LOCKING:
4403 * spin_lock_irqsave(host_set lock)
4404 *
4405 * RETURNS:
4406 * Number of completed commands on success, -errno otherwise.
4407 */
4408 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4409 void (*finish_qc)(struct ata_queued_cmd *))
4410 {
4411 int nr_done = 0;
4412 u32 done_mask;
4413 int i;
4414
4415 done_mask = ap->qc_active ^ qc_active;
4416
4417 if (unlikely(done_mask & qc_active)) {
4418 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4419 "(%08x->%08x)\n", ap->qc_active, qc_active);
4420 return -EINVAL;
4421 }
4422
4423 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4424 struct ata_queued_cmd *qc;
4425
4426 if (!(done_mask & (1 << i)))
4427 continue;
4428
4429 if ((qc = ata_qc_from_tag(ap, i))) {
4430 if (finish_qc)
4431 finish_qc(qc);
4432 ata_qc_complete(qc);
4433 nr_done++;
4434 }
4435 }
4436
4437 return nr_done;
4438 }
4439
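/*
 * Editorial sketch for an NCQ-capable LLD's interrupt handler; the
 * register read is hypothetical, but the idea is to feed the hardware's
 * current active-tag mask straight in and let the helper diff it
 * against ap->qc_active:
 *
 *	u32 qc_active = readl(port_mmio + MY_SACTIVE_REG);  (hypothetical)
 *	int nr_done = ata_qc_complete_multiple(ap, qc_active, NULL);
 *
 *	if (nr_done < 0)
 *		(illegal transition: freeze the port and let EH recover)
 */
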
4440 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4441 {
4442 struct ata_port *ap = qc->ap;
4443
4444 switch (qc->tf.protocol) {
4445 case ATA_PROT_NCQ:
4446 case ATA_PROT_DMA:
4447 case ATA_PROT_ATAPI_DMA:
4448 return 1;
4449
4450 case ATA_PROT_ATAPI:
4451 case ATA_PROT_PIO:
4452 if (ap->flags & ATA_FLAG_PIO_DMA)
4453 return 1;
4454
4455 /* fall through */
4456
4457 default:
4458 return 0;
4459 }
4460
4461 /* never reached */
4462 }
4463
4464 /**
4465 * ata_qc_issue - issue taskfile to device
4466 * @qc: command to issue to device
4467 *
4468 * Prepare an ATA command for submission to the device.
4469 * This includes mapping the data into a DMA-able
4470 * area, filling in the S/G table, and finally
4471 * writing the taskfile to hardware, starting the command.
4472 *
4473 * LOCKING:
4474 * spin_lock_irqsave(host_set lock)
4475 */
4476 void ata_qc_issue(struct ata_queued_cmd *qc)
4477 {
4478 struct ata_port *ap = qc->ap;
4479
4480 /* Make sure only one non-NCQ command is outstanding. The
4481 * check is skipped for old EH because it reuses active qc to
4482 * request ATAPI sense.
4483 */
4484 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4485
4486 if (qc->tf.protocol == ATA_PROT_NCQ) {
4487 WARN_ON(ap->sactive & (1 << qc->tag));
4488 ap->sactive |= 1 << qc->tag;
4489 } else {
4490 WARN_ON(ap->sactive);
4491 ap->active_tag = qc->tag;
4492 }
4493
4494 qc->flags |= ATA_QCFLAG_ACTIVE;
4495 ap->qc_active |= 1 << qc->tag;
4496
4497 if (ata_should_dma_map(qc)) {
4498 if (qc->flags & ATA_QCFLAG_SG) {
4499 if (ata_sg_setup(qc))
4500 goto sg_err;
4501 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4502 if (ata_sg_setup_one(qc))
4503 goto sg_err;
4504 }
4505 } else {
4506 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4507 }
4508
4509 ap->ops->qc_prep(qc);
4510
4511 qc->err_mask |= ap->ops->qc_issue(qc);
4512 if (unlikely(qc->err_mask))
4513 goto err;
4514 return;
4515
4516 sg_err:
4517 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4518 qc->err_mask |= AC_ERR_SYSTEM;
4519 err:
4520 ata_qc_complete(qc);
4521 }
4522
4523 /**
4524 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4525 * @qc: command to issue to device
4526 *
4527 * Using various libata functions and hooks, this function
4528 * starts an ATA command. ATA commands are grouped into
4529 * classes called "protocols", and issuing each type of protocol
4530 * is slightly different.
4531 *
4532 * May be used as the qc_issue() entry in ata_port_operations.
4533 *
4534 * LOCKING:
4535 * spin_lock_irqsave(host_set lock)
4536 *
4537 * RETURNS:
4538 * Zero on success, AC_ERR_* mask on failure
4539 */
4540
4541 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4542 {
4543 struct ata_port *ap = qc->ap;
4544
4545 /* Use polling PIO if the LLD doesn't handle
4546 * interrupt-driven PIO and ATAPI CDB interrupts.
4547 */
4548 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4549 switch (qc->tf.protocol) {
4550 case ATA_PROT_PIO:
4551 case ATA_PROT_ATAPI:
4552 case ATA_PROT_ATAPI_NODATA:
4553 qc->tf.flags |= ATA_TFLAG_POLLING;
4554 break;
4555 case ATA_PROT_ATAPI_DMA:
4556 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4557 /* see ata_check_atapi_dma() */
4558 BUG();
4559 break;
4560 default:
4561 break;
4562 }
4563 }
4564
4565 /* select the device */
4566 ata_dev_select(ap, qc->dev->devno, 1, 0);
4567
4568 /* start the command */
4569 switch (qc->tf.protocol) {
4570 case ATA_PROT_NODATA:
4571 if (qc->tf.flags & ATA_TFLAG_POLLING)
4572 ata_qc_set_polling(qc);
4573
4574 ata_tf_to_host(ap, &qc->tf);
4575 ap->hsm_task_state = HSM_ST_LAST;
4576
4577 if (qc->tf.flags & ATA_TFLAG_POLLING)
4578 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4579
4580 break;
4581
4582 case ATA_PROT_DMA:
4583 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4584
4585 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4586 ap->ops->bmdma_setup(qc); /* set up bmdma */
4587 ap->ops->bmdma_start(qc); /* initiate bmdma */
4588 ap->hsm_task_state = HSM_ST_LAST;
4589 break;
4590
4591 case ATA_PROT_PIO:
4592 if (qc->tf.flags & ATA_TFLAG_POLLING)
4593 ata_qc_set_polling(qc);
4594
4595 ata_tf_to_host(ap, &qc->tf);
4596
4597 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4598 /* PIO data out protocol */
4599 ap->hsm_task_state = HSM_ST_FIRST;
4600 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4601
4602 /* always send first data block using
4603 * the ata_pio_task() codepath.
4604 */
4605 } else {
4606 /* PIO data in protocol */
4607 ap->hsm_task_state = HSM_ST;
4608
4609 if (qc->tf.flags & ATA_TFLAG_POLLING)
4610 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4611
4612 /* if polling, ata_pio_task() handles the rest.
4613 * otherwise, interrupt handler takes over from here.
4614 */
4615 }
4616
4617 break;
4618
4619 case ATA_PROT_ATAPI:
4620 case ATA_PROT_ATAPI_NODATA:
4621 if (qc->tf.flags & ATA_TFLAG_POLLING)
4622 ata_qc_set_polling(qc);
4623
4624 ata_tf_to_host(ap, &qc->tf);
4625
4626 ap->hsm_task_state = HSM_ST_FIRST;
4627
4628 /* send cdb by polling if no cdb interrupt */
4629 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4630 (qc->tf.flags & ATA_TFLAG_POLLING))
4631 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4632 break;
4633
4634 case ATA_PROT_ATAPI_DMA:
4635 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4636
4637 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4638 ap->ops->bmdma_setup(qc); /* set up bmdma */
4639 ap->hsm_task_state = HSM_ST_FIRST;
4640
4641 /* send cdb by polling if no cdb interrupt */
4642 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4643 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4644 break;
4645
4646 default:
4647 WARN_ON(1);
4648 return AC_ERR_SYSTEM;
4649 }
4650
4651 return 0;
4652 }
4653
4654 /**
4655 * ata_host_intr - Handle host interrupt for given (port, task)
4656 * @ap: Port on which interrupt arrived (possibly...)
4657 * @qc: Taskfile currently active in engine
4658 *
4659 * Handle host interrupt for given queued command. Interrupt-driven
4660 * PIO, ATAPI and DMA completions are handled here; polled commands
4661 * are driven by ata_pio_task() with interrupts disabled (nIEN bit).
4662 *
4663 * LOCKING:
4664 * spin_lock_irqsave(host_set lock)
4665 *
4666 * RETURNS:
4667 * One if interrupt was handled, zero if not (shared irq).
4668 */
4669
4670 inline unsigned int ata_host_intr (struct ata_port *ap,
4671 struct ata_queued_cmd *qc)
4672 {
4673 u8 status, host_stat = 0;
4674
4675 VPRINTK("ata%u: protocol %d task_state %d\n",
4676 ap->id, qc->tf.protocol, ap->hsm_task_state);
4677
4678 /* Check whether we are expecting interrupt in this state */
4679 switch (ap->hsm_task_state) {
4680 case HSM_ST_FIRST:
4681 /* Some pre-ATAPI-4 devices assert INTRQ
4682 * at this state when ready to receive CDB.
4683 */
4684
4685 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4686 * The flag is set only for ATAPI devices, so there is
4687 * no need to check is_atapi_taskfile(&qc->tf) again.
4688 */
4689 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4690 goto idle_irq;
4691 break;
4692 case HSM_ST_LAST:
4693 if (qc->tf.protocol == ATA_PROT_DMA ||
4694 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4695 /* check status of DMA engine */
4696 host_stat = ap->ops->bmdma_status(ap);
4697 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4698
4699 /* if it's not our irq... */
4700 if (!(host_stat & ATA_DMA_INTR))
4701 goto idle_irq;
4702
4703 /* before we do anything else, clear DMA-Start bit */
4704 ap->ops->bmdma_stop(qc);
4705
4706 if (unlikely(host_stat & ATA_DMA_ERR)) {
4707 /* error when transferring data to/from memory */
4708 qc->err_mask |= AC_ERR_HOST_BUS;
4709 ap->hsm_task_state = HSM_ST_ERR;
4710 }
4711 }
4712 break;
4713 case HSM_ST:
4714 break;
4715 default:
4716 goto idle_irq;
4717 }
4718
4719 /* check altstatus */
4720 status = ata_altstatus(ap);
4721 if (status & ATA_BUSY)
4722 goto idle_irq;
4723
4724 /* check main status, clearing INTRQ */
4725 status = ata_chk_status(ap);
4726 if (unlikely(status & ATA_BUSY))
4727 goto idle_irq;
4728
4729 /* ack bmdma irq events */
4730 ap->ops->irq_clear(ap);
4731
4732 ata_hsm_move(ap, qc, status, 0);
4733 return 1; /* irq handled */
4734
4735 idle_irq:
4736 ap->stats.idle_irq++;
4737
4738 #ifdef ATA_IRQ_TRAP
4739 if ((ap->stats.idle_irq % 1000) == 0) {
4740 ata_irq_ack(ap, 0); /* debug trap */
4741 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4742 return 1;
4743 }
4744 #endif
4745 return 0; /* irq not handled */
4746 }
4747
4748 /**
4749 * ata_interrupt - Default ATA host interrupt handler
4750 * @irq: irq line (unused)
4751 * @dev_instance: pointer to our ata_host_set information structure
4752 * @regs: unused
4753 *
4754 * Default interrupt handler for PCI IDE devices. Calls
4755 * ata_host_intr() for each port that is not disabled.
4756 *
4757 * LOCKING:
4758 * Obtains host_set lock during operation.
4759 *
4760 * RETURNS:
4761 * IRQ_NONE or IRQ_HANDLED.
4762 */
4763
4764 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4765 {
4766 struct ata_host_set *host_set = dev_instance;
4767 unsigned int i;
4768 unsigned int handled = 0;
4769 unsigned long flags;
4770
4771 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4772 spin_lock_irqsave(&host_set->lock, flags);
4773
4774 for (i = 0; i < host_set->n_ports; i++) {
4775 struct ata_port *ap;
4776
4777 ap = host_set->ports[i];
4778 if (ap &&
4779 !(ap->flags & ATA_FLAG_DISABLED)) {
4780 struct ata_queued_cmd *qc;
4781
4782 qc = ata_qc_from_tag(ap, ap->active_tag);
4783 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4784 (qc->flags & ATA_QCFLAG_ACTIVE))
4785 handled |= ata_host_intr(ap, qc);
4786 }
4787 }
4788
4789 spin_unlock_irqrestore(&host_set->lock, flags);
4790
4791 return IRQ_RETVAL(handled);
4792 }
4793
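/*
 * Editorial sketch: LLDs usually install this handler via their port
 * operations; ata_device_add() below passes ent->port_ops->irq_handler
 * to request_irq(). "my_ops" is hypothetical, the fields are as they
 * exist in this era:
 *
 *	static const struct ata_port_operations my_ops = {
 *		...
 *		.irq_handler	= ata_interrupt,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *		...
 *	};
 */
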
4794 /**
4795 * sata_scr_valid - test whether SCRs are accessible
4796 * @ap: ATA port to test SCR accessibility for
4797 *
4798 * Test whether SCRs are accessible for @ap.
4799 *
4800 * LOCKING:
4801 * None.
4802 *
4803 * RETURNS:
4804 * 1 if SCRs are accessible, 0 otherwise.
4805 */
4806 int sata_scr_valid(struct ata_port *ap)
4807 {
4808 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4809 }
4810
4811 /**
4812 * sata_scr_read - read SCR register of the specified port
4813 * @ap: ATA port to read SCR for
4814 * @reg: SCR to read
4815 * @val: Place to store read value
4816 *
4817 * Read SCR register @reg of @ap into *@val. This function is
4818 * guaranteed to succeed if the cable type of the port is SATA
4819 * and the port implements ->scr_read.
4820 *
4821 * LOCKING:
4822 * None.
4823 *
4824 * RETURNS:
4825 * 0 on success, negative errno on failure.
4826 */
4827 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4828 {
4829 if (sata_scr_valid(ap)) {
4830 *val = ap->ops->scr_read(ap, reg);
4831 return 0;
4832 }
4833 return -EOPNOTSUPP;
4834 }
4835
4836 /**
4837 * sata_scr_write - write SCR register of the specified port
4838 * @ap: ATA port to write SCR for
4839 * @reg: SCR to write
4840 * @val: value to write
4841 *
4842 * Write @val to SCR register @reg of @ap. This function is
4843 * guaranteed to succeed if the cable type of the port is SATA
4844 * and the port implements ->scr_read.
4845 *
4846 * LOCKING:
4847 * None.
4848 *
4849 * RETURNS:
4850 * 0 on success, negative errno on failure.
4851 */
4852 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4853 {
4854 if (sata_scr_valid(ap)) {
4855 ap->ops->scr_write(ap, reg, val);
4856 return 0;
4857 }
4858 return -EOPNOTSUPP;
4859 }
4860
4861 /**
4862 * sata_scr_write_flush - write SCR register of the specified port and flush
4863 * @ap: ATA port to write SCR for
4864 * @reg: SCR to write
4865 * @val: value to write
4866 *
4867 * This function is identical to sata_scr_write() except that this
4868 * function performs flush after writing to the register.
4869 *
4870 * LOCKING:
4871 * None.
4872 *
4873 * RETURNS:
4874 * 0 on success, negative errno on failure.
4875 */
4876 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4877 {
4878 if (sata_scr_valid(ap)) {
4879 ap->ops->scr_write(ap, reg, val);
4880 ap->ops->scr_read(ap, reg);
4881 return 0;
4882 }
4883 return -EOPNOTSUPP;
4884 }
4885
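/*
 * Editorial sketch: a common SCR sequence reads SError and writes the
 * value back to clear the set bits (SError is write-1-to-clear);
 * error handling is omitted for brevity:
 *
 *	u32 serror;
 *
 *	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
 *		sata_scr_write(ap, SCR_ERROR, serror);
 */
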
4886 /**
4887 * ata_port_online - test whether the given port is online
4888 * @ap: ATA port to test
4889 *
4890 * Test whether @ap is online. Note that this function returns 0
4891 * if online status of @ap cannot be obtained, so
4892 * ata_port_online(ap) != !ata_port_offline(ap).
4893 *
4894 * LOCKING:
4895 * None.
4896 *
4897 * RETURNS:
4898 * 1 if the port online status is available and online.
4899 */
4900 int ata_port_online(struct ata_port *ap)
4901 {
4902 u32 sstatus;
4903
4904 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4905 return 1;
4906 return 0;
4907 }
4908
4909 /**
4910 * ata_port_offline - test whether the given port is offline
4911 * @ap: ATA port to test
4912 *
4913 * Test whether @ap is offline. Note that this function returns
4914 * 0 if offline status of @ap cannot be obtained, so
4915 * ata_port_online(ap) != !ata_port_offline(ap).
4916 *
4917 * LOCKING:
4918 * None.
4919 *
4920 * RETURNS:
4921 * 1 if the port offline status is available and offline.
4922 */
4923 int ata_port_offline(struct ata_port *ap)
4924 {
4925 u32 sstatus;
4926
4927 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
4928 return 1;
4929 return 0;
4930 }
4931
4932 /*
4933 * Execute a 'simple' command that consists only of the opcode 'cmd'
4934 * itself, without filling in any other registers.
4935 */
4936 static int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
4937 {
4938 struct ata_taskfile tf;
4939 int err;
4940
4941 ata_tf_init(dev, &tf);
4942
4943 tf.command = cmd;
4944 tf.flags |= ATA_TFLAG_DEVICE;
4945 tf.protocol = ATA_PROT_NODATA;
4946
4947 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4948 if (err)
4949 ata_dev_printk(dev, KERN_ERR, "%s: ata command failed: %d\n",
4950 __FUNCTION__, err);
4951
4952 return err;
4953 }
4954
4955 static int ata_flush_cache(struct ata_device *dev)
4956 {
4957 u8 cmd;
4958
4959 if (!ata_try_flush_cache(dev))
4960 return 0;
4961
4962 if (ata_id_has_flush_ext(dev->id))
4963 cmd = ATA_CMD_FLUSH_EXT;
4964 else
4965 cmd = ATA_CMD_FLUSH;
4966
4967 return ata_do_simple_cmd(dev, cmd);
4968 }
4969
4970 static int ata_standby_drive(struct ata_device *dev)
4971 {
4972 return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
4973 }
4974
4975 static int ata_start_drive(struct ata_device *dev)
4976 {
4977 return ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
4978 }
4979
4980 /**
4981 * ata_device_resume - wake up a previously suspended device
4982 * @dev: the device to resume
4983 *
4984 * Kick the drive back into action, by sending it an idle immediate
4985 * command and making sure its transfer mode matches between drive
4986 * and host.
4987 *
4988 */
4989 int ata_device_resume(struct ata_device *dev)
4990 {
4991 struct ata_port *ap = dev->ap;
4992
4993 if (ap->flags & ATA_FLAG_SUSPENDED) {
4994 struct ata_device *failed_dev;
4995 ap->flags &= ~ATA_FLAG_SUSPENDED;
4996 while (ata_set_mode(ap, &failed_dev))
4997 ata_dev_disable(failed_dev);
4998 }
4999 if (!ata_dev_enabled(dev))
5000 return 0;
5001 if (dev->class == ATA_DEV_ATA)
5002 ata_start_drive(dev);
5003
5004 return 0;
5005 }
5006
5007 /**
5008 * ata_device_suspend - prepare a device for suspend
5009 * @dev: the device to suspend
5010 *
5011 * Flush the cache on the drive, if appropriate, then issue a
5012 * standbynow command.
5013 */
5014 int ata_device_suspend(struct ata_device *dev, pm_message_t state)
5015 {
5016 struct ata_port *ap = dev->ap;
5017
5018 if (!ata_dev_enabled(dev))
5019 return 0;
5020 if (dev->class == ATA_DEV_ATA)
5021 ata_flush_cache(dev);
5022
5023 if (state.event != PM_EVENT_FREEZE)
5024 ata_standby_drive(dev);
5025 ap->flags |= ATA_FLAG_SUSPENDED;
5026 return 0;
5027 }
5028
5029 /**
5030 * ata_port_start - Set port up for DMA.
5031 * @ap: Port to initialize
5032 *
5033 * Called just after data structures for each port are
5034 * initialized. Allocates space for PRD table.
5035 *
5036 * May be used as the port_start() entry in ata_port_operations.
5037 *
5038 * LOCKING:
5039 * Inherited from caller.
5040 */
5041
5042 int ata_port_start (struct ata_port *ap)
5043 {
5044 struct device *dev = ap->dev;
5045 int rc;
5046
5047 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
5048 if (!ap->prd)
5049 return -ENOMEM;
5050
5051 rc = ata_pad_alloc(ap, dev);
5052 if (rc) {
5053 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5054 return rc;
5055 }
5056
5057 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
5058
5059 return 0;
5060 }
5061
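/*
 * Editorial sketch: an LLD with private per-port state typically wraps
 * this helper ("my_port_priv" and "my_port_start" are hypothetical):
 *
 *	static int my_port_start(struct ata_port *ap)
 *	{
 *		struct my_port_priv *pp;
 *		int rc = ata_port_start(ap);
 *
 *		if (rc)
 *			return rc;
 *		pp = kzalloc(sizeof(*pp), GFP_KERNEL);
 *		if (!pp)
 *			return -ENOMEM;	(a real driver would also undo
 *					 ata_port_start() here)
 *		ap->private_data = pp;
 *		return 0;
 *	}
 */
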
5062
5063 /**
5064 * ata_port_stop - Undo ata_port_start()
5065 * @ap: Port to shut down
5066 *
5067 * Frees the PRD table.
5068 *
5069 * May be used as the port_stop() entry in ata_port_operations.
5070 *
5071 * LOCKING:
5072 * Inherited from caller.
5073 */
5074
5075 void ata_port_stop (struct ata_port *ap)
5076 {
5077 struct device *dev = ap->dev;
5078
5079 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5080 ata_pad_free(ap, dev);
5081 }
5082
5083 void ata_host_stop (struct ata_host_set *host_set)
5084 {
5085 if (host_set->mmio_base)
5086 iounmap(host_set->mmio_base);
5087 }
5088
5089
5090 /**
5091 * ata_host_remove - Unregister SCSI host structure with upper layers
5092 * @ap: Port to unregister
5093 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
5094 *
5095 * LOCKING:
5096 * Inherited from caller.
5097 */
5098
5099 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
5100 {
5101 struct Scsi_Host *sh = ap->host;
5102
5103 DPRINTK("ENTER\n");
5104
5105 if (do_unregister)
5106 scsi_remove_host(sh);
5107
5108 ap->ops->port_stop(ap);
5109 }
5110
5111 /**
5112 * ata_host_init - Initialize an ata_port structure
5113 * @ap: Structure to initialize
5114 * @host: associated SCSI mid-layer structure
5115 * @host_set: Collection of hosts to which @ap belongs
5116 * @ent: Probe information provided by low-level driver
5117 * @port_no: Port number associated with this ata_port
5118 *
5119 * Initialize a new ata_port structure, and its associated
5120 * scsi_host.
5121 *
5122 * LOCKING:
5123 * Inherited from caller.
5124 */
5125
5126 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
5127 struct ata_host_set *host_set,
5128 const struct ata_probe_ent *ent, unsigned int port_no)
5129 {
5130 unsigned int i;
5131
5132 host->max_id = 16;
5133 host->max_lun = 1;
5134 host->max_channel = 1;
5135 host->unique_id = ata_unique_id++;
5136 host->max_cmd_len = 12;
5137
5138 ap->flags = ATA_FLAG_DISABLED;
5139 ap->id = host->unique_id;
5140 ap->host = host;
5141 ap->ctl = ATA_DEVCTL_OBS;
5142 ap->host_set = host_set;
5143 ap->dev = ent->dev;
5144 ap->port_no = port_no;
5145 ap->hard_port_no =
5146 ent->legacy_mode ? ent->hard_port_no : port_no;
5147 ap->pio_mask = ent->pio_mask;
5148 ap->mwdma_mask = ent->mwdma_mask;
5149 ap->udma_mask = ent->udma_mask;
5150 ap->flags |= ent->host_flags;
5151 ap->ops = ent->port_ops;
5152 ap->sata_spd_limit = UINT_MAX;
5153 ap->active_tag = ATA_TAG_POISON;
5154 ap->last_ctl = 0xFF;
5155
5156 INIT_WORK(&ap->port_task, NULL, NULL);
5157 INIT_LIST_HEAD(&ap->eh_done_q);
5158
5159 /* set cable type */
5160 ap->cbl = ATA_CBL_NONE;
5161 if (ap->flags & ATA_FLAG_SATA)
5162 ap->cbl = ATA_CBL_SATA;
5163
5164 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5165 struct ata_device *dev = &ap->device[i];
5166 dev->ap = ap;
5167 dev->devno = i;
5168 dev->pio_mask = UINT_MAX;
5169 dev->mwdma_mask = UINT_MAX;
5170 dev->udma_mask = UINT_MAX;
5171 }
5172
5173 #ifdef ATA_IRQ_TRAP
5174 ap->stats.unhandled_irq = 1;
5175 ap->stats.idle_irq = 1;
5176 #endif
5177
5178 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5179 }
5180
5181 /**
5182 * ata_host_add - Attach low-level ATA driver to system
5183 * @ent: Information provided by low-level driver
5184 * @host_set: Collections of ports to which we add
5185 * @port_no: Port number associated with this host
5186 *
5187 * Attach low-level ATA driver to system.
5188 *
5189 * LOCKING:
5190 * PCI/etc. bus probe sem.
5191 *
5192 * RETURNS:
5193 * New ata_port on success, NULL on error.
5194 */
5195
5196 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
5197 struct ata_host_set *host_set,
5198 unsigned int port_no)
5199 {
5200 struct Scsi_Host *host;
5201 struct ata_port *ap;
5202 int rc;
5203
5204 DPRINTK("ENTER\n");
5205
5206 if (!ent->port_ops->probe_reset &&
5207 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
5208 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5209 port_no);
5210 return NULL;
5211 }
5212
5213 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5214 if (!host)
5215 return NULL;
5216
5217 host->transportt = &ata_scsi_transport_template;
5218
5219 ap = ata_shost_to_port(host);
5220
5221 ata_host_init(ap, host, host_set, ent, port_no);
5222
5223 rc = ap->ops->port_start(ap);
5224 if (rc)
5225 goto err_out;
5226
5227 return ap;
5228
5229 err_out:
5230 scsi_host_put(host);
5231 return NULL;
5232 }
5233
5234 /**
5235 * ata_device_add - Register hardware device with ATA and SCSI layers
5236 * @ent: Probe information describing hardware device to be registered
5237 *
5238 * This function processes the information provided in the probe
5239 * information struct @ent, allocates the necessary ATA and SCSI
5240 * host information structures, initializes them, and registers
5241 * everything with requisite kernel subsystems.
5242 *
5243 * This function requests irqs, probes the ATA bus, and probes
5244 * the SCSI bus.
5245 *
5246 * LOCKING:
5247 * PCI/etc. bus probe sem.
5248 *
5249 * RETURNS:
5250 * Number of ports registered. Zero on error (no ports registered).
5251 */
5252
5253 int ata_device_add(const struct ata_probe_ent *ent)
5254 {
5255 unsigned int count = 0, i;
5256 struct device *dev = ent->dev;
5257 struct ata_host_set *host_set;
5258
5259 DPRINTK("ENTER\n");
5260 /* alloc a container for our list of ATA ports (buses) */
5261 host_set = kzalloc(sizeof(struct ata_host_set) +
5262 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5263 if (!host_set)
5264 return 0;
5265 spin_lock_init(&host_set->lock);
5266
5267 host_set->dev = dev;
5268 host_set->n_ports = ent->n_ports;
5269 host_set->irq = ent->irq;
5270 host_set->mmio_base = ent->mmio_base;
5271 host_set->private_data = ent->private_data;
5272 host_set->ops = ent->port_ops;
5273 host_set->flags = ent->host_set_flags;
5274
5275 /* register each port bound to this device */
5276 for (i = 0; i < ent->n_ports; i++) {
5277 struct ata_port *ap;
5278 unsigned long xfer_mode_mask;
5279
5280 ap = ata_host_add(ent, host_set, i);
5281 if (!ap)
5282 goto err_out;
5283
5284 host_set->ports[i] = ap;
5285 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
5286 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5287 (ap->pio_mask << ATA_SHIFT_PIO);
5288
5289 /* print per-port info to dmesg */
5290 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
5291 "ctl 0x%lX bmdma 0x%lX irq %lu\n",
5292 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5293 ata_mode_string(xfer_mode_mask),
5294 ap->ioaddr.cmd_addr,
5295 ap->ioaddr.ctl_addr,
5296 ap->ioaddr.bmdma_addr,
5297 ent->irq);
5298
5299 ata_chk_status(ap);
5300 host_set->ops->irq_clear(ap);
5301 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
5302 count++;
5303 }
5304
5305 if (!count)
5306 goto err_free_ret;
5307
5308 /* obtain irq, that is shared between channels */
5309 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
5310 DRV_NAME, host_set))
5311 goto err_out;
5312
5313 /* perform each probe synchronously */
5314 DPRINTK("probe begin\n");
5315 for (i = 0; i < count; i++) {
5316 struct ata_port *ap;
5317 int rc;
5318
5319 ap = host_set->ports[i];
5320
5321 DPRINTK("ata%u: bus probe begin\n", ap->id);
5322 rc = ata_bus_probe(ap);
5323 DPRINTK("ata%u: bus probe end\n", ap->id);
5324
5325 if (rc) {
5326 /* FIXME: do something useful here?
5327 * Current libata behavior will
5328 * tear down everything when
5329 * the module is removed
5330 * or the h/w is unplugged.
5331 */
5332 }
5333
5334 rc = scsi_add_host(ap->host, dev);
5335 if (rc) {
5336 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
5337 /* FIXME: do something useful here */
5338 /* FIXME: handle unconditional calls to
5339 * scsi_scan_host and ata_host_remove, below,
5340 * at the very least
5341 */
5342 }
5343 }
5344
5345 /* probes are done, now scan each port's disk(s) */
5346 DPRINTK("host probe begin\n");
5347 for (i = 0; i < count; i++) {
5348 struct ata_port *ap = host_set->ports[i];
5349
5350 ata_scsi_scan_host(ap);
5351 }
5352
5353 dev_set_drvdata(dev, host_set);
5354
5355 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5356 return ent->n_ports; /* success */
5357
5358 err_out:
5359 for (i = 0; i < count; i++) {
5360 ata_host_remove(host_set->ports[i], 1);
5361 scsi_host_put(host_set->ports[i]->host);
5362 }
5363 err_free_ret:
5364 kfree(host_set);
5365 VPRINTK("EXIT, returning 0\n");
5366 return 0;
5367 }
5368
5369 /**
5370 * ata_host_set_remove - PCI layer callback for device removal
5371 * @host_set: ATA host set that was removed
5372 *
5373 * Unregister all objects associated with this host set. Free those
5374 * objects.
5375 *
5376 * LOCKING:
5377 * Inherited from calling layer (may sleep).
5378 */
5379
5380 void ata_host_set_remove(struct ata_host_set *host_set)
5381 {
5382 struct ata_port *ap;
5383 unsigned int i;
5384
5385 for (i = 0; i < host_set->n_ports; i++) {
5386 ap = host_set->ports[i];
5387 scsi_remove_host(ap->host);
5388 }
5389
5390 free_irq(host_set->irq, host_set);
5391
5392 for (i = 0; i < host_set->n_ports; i++) {
5393 ap = host_set->ports[i];
5394
5395 ata_scsi_release(ap->host);
5396
5397 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5398 struct ata_ioports *ioaddr = &ap->ioaddr;
5399
5400 if (ioaddr->cmd_addr == 0x1f0)
5401 release_region(0x1f0, 8);
5402 else if (ioaddr->cmd_addr == 0x170)
5403 release_region(0x170, 8);
5404 }
5405
5406 scsi_host_put(ap->host);
5407 }
5408
5409 if (host_set->ops->host_stop)
5410 host_set->ops->host_stop(host_set);
5411
5412 kfree(host_set);
5413 }
5414
5415 /**
5416 * ata_scsi_release - SCSI layer callback hook for host unload
5417 * @host: libata host to be unloaded
5418 *
5419 * Performs all duties necessary to shut down a libata port...
5420 * Kill port kthread, disable port, and release resources.
5421 *
5422 * LOCKING:
5423 * Inherited from SCSI layer.
5424 *
5425 * RETURNS:
5426 * One.
5427 */
5428
5429 int ata_scsi_release(struct Scsi_Host *host)
5430 {
5431 struct ata_port *ap = ata_shost_to_port(host);
5432
5433 DPRINTK("ENTER\n");
5434
5435 ap->ops->port_disable(ap);
5436 ata_host_remove(ap, 0);
5437
5438 DPRINTK("EXIT\n");
5439 return 1;
5440 }
5441
5442 /**
5443 * ata_std_ports - initialize ioaddr with standard port offsets.
5444 * @ioaddr: IO address structure to be initialized
5445 *
5446 * Utility function which initializes data_addr, error_addr,
5447 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5448 * device_addr, status_addr, and command_addr to standard offsets
5449 * relative to cmd_addr.
5450 *
5451 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5452 */
5453
5454 void ata_std_ports(struct ata_ioports *ioaddr)
5455 {
5456 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5457 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5458 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5459 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5460 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5461 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5462 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5463 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5464 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5465 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5466 }
5467
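/*
 * Editorial sketch: a legacy-mode primary channel uses the standard ISA
 * addresses before calling this helper:
 *
 *	ioaddr->cmd_addr = 0x1f0;
 *	ioaddr->altstatus_addr = ioaddr->ctl_addr = 0x3f6;
 *	ata_std_ports(ioaddr);
 */
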
5468
5469 #ifdef CONFIG_PCI
5470
5471 void ata_pci_host_stop (struct ata_host_set *host_set)
5472 {
5473 struct pci_dev *pdev = to_pci_dev(host_set->dev);
5474
5475 pci_iounmap(pdev, host_set->mmio_base);
5476 }
5477
5478 /**
5479 * ata_pci_remove_one - PCI layer callback for device removal
5480 * @pdev: PCI device that was removed
5481 *
5482 * PCI layer indicates to libata via this hook that
5483 * hot-unplug or module unload event has occurred.
5484 * Handle this by unregistering all objects associated
5485 * with this PCI device. Free those objects. Then finally
5486 * release PCI resources and disable device.
5487 *
5488 * LOCKING:
5489 * Inherited from PCI layer (may sleep).
5490 */
5491
5492 void ata_pci_remove_one (struct pci_dev *pdev)
5493 {
5494 struct device *dev = pci_dev_to_dev(pdev);
5495 struct ata_host_set *host_set = dev_get_drvdata(dev);
5496
5497 ata_host_set_remove(host_set);
5498 pci_release_regions(pdev);
5499 pci_disable_device(pdev);
5500 dev_set_drvdata(dev, NULL);
5501 }
5502
5503 /* move to PCI subsystem */
5504 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5505 {
5506 unsigned long tmp = 0;
5507
5508 switch (bits->width) {
5509 case 1: {
5510 u8 tmp8 = 0;
5511 pci_read_config_byte(pdev, bits->reg, &tmp8);
5512 tmp = tmp8;
5513 break;
5514 }
5515 case 2: {
5516 u16 tmp16 = 0;
5517 pci_read_config_word(pdev, bits->reg, &tmp16);
5518 tmp = tmp16;
5519 break;
5520 }
5521 case 4: {
5522 u32 tmp32 = 0;
5523 pci_read_config_dword(pdev, bits->reg, &tmp32);
5524 tmp = tmp32;
5525 break;
5526 }
5527
5528 default:
5529 return -EINVAL;
5530 }
5531
5532 tmp &= bits->mask;
5533
5534 return (tmp == bits->val) ? 1 : 0;
5535 }
5536
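/*
 * Editorial sketch: LLDs use this to test "channel enabled" bits in PCI
 * config space. The register offset, mask and value below are
 * hypothetical; see an Intel PIIX-style driver for a real case:
 *
 *	static const struct pci_bits my_enable_bits = {
 *		0x41, 1, 0x80, 0x80,	(reg, width, mask, val)
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits))
 *		(channel is disabled, skip it)
 */
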
5537 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5538 {
5539 pci_save_state(pdev);
5540 pci_disable_device(pdev);
5541 pci_set_power_state(pdev, PCI_D3hot);
5542 return 0;
5543 }
5544
5545 int ata_pci_device_resume(struct pci_dev *pdev)
5546 {
5547 pci_set_power_state(pdev, PCI_D0);
5548 pci_restore_state(pdev);
5549 pci_enable_device(pdev);
5550 pci_set_master(pdev);
5551 return 0;
5552 }
5553 #endif /* CONFIG_PCI */
5554
5555
5556 static int __init ata_init(void)
5557 {
5558 ata_wq = create_workqueue("ata");
5559 if (!ata_wq)
5560 return -ENOMEM;
5561
5562 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5563 return 0;
5564 }
5565
5566 static void __exit ata_exit(void)
5567 {
5568 destroy_workqueue(ata_wq);
5569 }
5570
5571 module_init(ata_init);
5572 module_exit(ata_exit);
5573
5574 static unsigned long ratelimit_time;
5575 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5576
5577 int ata_ratelimit(void)
5578 {
5579 int rc;
5580 unsigned long flags;
5581
5582 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5583
5584 if (time_after(jiffies, ratelimit_time)) {
5585 rc = 1;
5586 ratelimit_time = jiffies + (HZ/5);
5587 } else
5588 rc = 0;
5589
5590 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5591
5592 return rc;
5593 }
5594
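/*
 * Editorial sketch: typical use is throttling chatty interrupt-path
 * messages to at most a few per second:
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt (status 0x%x)\n", status);
 */
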
5595 /**
5596 * ata_wait_register - wait until register value changes
5597 * @reg: IO-mapped register
5598 * @mask: Mask to apply to read register value
5599 * @val: Wait condition
5600 * @interval_msec: polling interval in milliseconds
5601 * @timeout_msec: timeout in milliseconds
5602 *
5603 * Waiting for some bits of register to change is a common
5604 * operation for ATA controllers. This function reads a 32-bit LE
5605 * IO-mapped register @reg and tests for the following condition.
5606 *
5607 * (*@reg & @mask) != @val
5608 *
5609 * If the condition is met, it returns; otherwise, the process is
5610 * repeated after @interval_msec until timeout.
5611 *
5612 * LOCKING:
5613 * Kernel thread context (may sleep)
5614 *
5615 * RETURNS:
5616 * The final register value.
5617 */
5618 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5619 unsigned long interval_msec,
5620 unsigned long timeout_msec)
5621 {
5622 unsigned long timeout;
5623 u32 tmp;
5624
5625 tmp = ioread32(reg);
5626
5627 /* Calculate timeout _after_ the first read to make sure
5628 * preceding writes reach the controller before starting to
5629 * eat away the timeout.
5630 */
5631 timeout = jiffies + (timeout_msec * HZ) / 1000;
5632
5633 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
5634 msleep(interval_msec);
5635 tmp = ioread32(reg);
5636 }
5637
5638 return tmp;
5639 }
5640
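/*
 * Editorial sketch: wait up to one second for BSY to clear in a
 * memory-mapped status register ("mmio" and MY_STATUS are hypothetical).
 * Note the polarity: polling continues while (read & @mask) == @val.
 *
 *	u32 tmp = ata_wait_register(mmio + MY_STATUS, ATA_BUSY, ATA_BUSY,
 *				    10, 1000);
 *
 *	if (tmp & ATA_BUSY)
 *		(timed out, device still busy)
 */
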
5641 /*
5642 * libata is essentially a library of internal helper functions for
5643 * low-level ATA host controller drivers. As such, the API/ABI is
5644 * likely to change as new drivers are added and updated.
5645 * Do not depend on ABI/API stability.
5646 */
5647
5648 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5649 EXPORT_SYMBOL_GPL(ata_std_ports);
5650 EXPORT_SYMBOL_GPL(ata_device_add);
5651 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5652 EXPORT_SYMBOL_GPL(ata_sg_init);
5653 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5654 EXPORT_SYMBOL_GPL(ata_qc_complete);
5655 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
5656 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5657 EXPORT_SYMBOL_GPL(ata_tf_load);
5658 EXPORT_SYMBOL_GPL(ata_tf_read);
5659 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5660 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5661 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5662 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5663 EXPORT_SYMBOL_GPL(ata_check_status);
5664 EXPORT_SYMBOL_GPL(ata_altstatus);
5665 EXPORT_SYMBOL_GPL(ata_exec_command);
5666 EXPORT_SYMBOL_GPL(ata_port_start);
5667 EXPORT_SYMBOL_GPL(ata_port_stop);
5668 EXPORT_SYMBOL_GPL(ata_host_stop);
5669 EXPORT_SYMBOL_GPL(ata_interrupt);
5670 EXPORT_SYMBOL_GPL(ata_qc_prep);
5671 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5672 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5673 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5674 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5675 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5676 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5677 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
5678 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
5679 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
5680 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
5681 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
5682 EXPORT_SYMBOL_GPL(ata_port_probe);
5683 EXPORT_SYMBOL_GPL(sata_set_spd);
5684 EXPORT_SYMBOL_GPL(sata_phy_reset);
5685 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5686 EXPORT_SYMBOL_GPL(ata_bus_reset);
5687 EXPORT_SYMBOL_GPL(ata_std_probeinit);
5688 EXPORT_SYMBOL_GPL(ata_std_softreset);
5689 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5690 EXPORT_SYMBOL_GPL(ata_std_postreset);
5691 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5692 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5693 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5694 EXPORT_SYMBOL_GPL(ata_dev_classify);
5695 EXPORT_SYMBOL_GPL(ata_dev_pair);
5696 EXPORT_SYMBOL_GPL(ata_port_disable);
5697 EXPORT_SYMBOL_GPL(ata_ratelimit);
5698 EXPORT_SYMBOL_GPL(ata_wait_register);
5699 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5700 EXPORT_SYMBOL_GPL(ata_port_queue_task);
5701 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5702 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5703 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5704 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
5705 EXPORT_SYMBOL_GPL(ata_scsi_release);
5706 EXPORT_SYMBOL_GPL(ata_host_intr);
5707 EXPORT_SYMBOL_GPL(sata_scr_valid);
5708 EXPORT_SYMBOL_GPL(sata_scr_read);
5709 EXPORT_SYMBOL_GPL(sata_scr_write);
5710 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5711 EXPORT_SYMBOL_GPL(ata_port_online);
5712 EXPORT_SYMBOL_GPL(ata_port_offline);
5713 EXPORT_SYMBOL_GPL(ata_id_string);
5714 EXPORT_SYMBOL_GPL(ata_id_c_string);
5715 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5716
5717 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5718 EXPORT_SYMBOL_GPL(ata_timing_compute);
5719 EXPORT_SYMBOL_GPL(ata_timing_merge);
5720
5721 #ifdef CONFIG_PCI
5722 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5723 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5724 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5725 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5726 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5727 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5728 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5729 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
5730 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
5731 #endif /* CONFIG_PCI */
5732
5733 EXPORT_SYMBOL_GPL(ata_device_suspend);
5734 EXPORT_SYMBOL_GPL(ata_device_resume);
5735 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5736 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
5737
5738 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5739 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
5740 EXPORT_SYMBOL_GPL(ata_port_abort);
5741 EXPORT_SYMBOL_GPL(ata_port_freeze);
5742 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
5743 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
5744 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5745 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5746 EXPORT_SYMBOL_GPL(ata_do_eh);