[PATCH] lockdep: annotate enable_in_hardirq()
[deliverable/linux.git] / drivers / scsi / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
1da177e4 53#include "scsi_priv.h"
193515d5 54#include <scsi/scsi_cmnd.h>
1da177e4
LT
55#include <scsi/scsi_host.h>
56#include <linux/libata.h>
57#include <asm/io.h>
58#include <asm/semaphore.h>
59#include <asm/byteorder.h>
60
61#include "libata.h"
62
d7bb4cc7
TH
63/* debounce timing parameters in msecs { interval, duration, timeout } */
64const unsigned long sata_deb_timing_boot[] = { 5, 100, 2000 };
65const unsigned long sata_deb_timing_eh[] = { 25, 500, 2000 };
66const unsigned long sata_deb_timing_before_fsrst[] = { 100, 2000, 5000 };
67
3373efd8
TH
68static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
71static void ata_dev_xfermask(struct ata_device *dev);
1da177e4
LT
72
73static unsigned int ata_unique_id = 1;
74static struct workqueue_struct *ata_wq;
75
453b07ac
TH
76struct workqueue_struct *ata_aux_wq;
77
418dc1f5 78int atapi_enabled = 1;
1623c81e
JG
79module_param(atapi_enabled, int, 0444);
80MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
81
95de719a
AL
82int atapi_dmadir = 0;
83module_param(atapi_dmadir, int, 0444);
84MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
85
c3c013a2
JG
86int libata_fua = 0;
87module_param_named(fua, libata_fua, int, 0444);
88MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
89
a8601e5f
AM
90static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
91module_param(ata_probe_timeout, int, 0444);
92MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
93
1da177e4
LT
94MODULE_AUTHOR("Jeff Garzik");
95MODULE_DESCRIPTION("Library module for ATA devices");
96MODULE_LICENSE("GPL");
97MODULE_VERSION(DRV_VERSION);
98
0baab86b 99
1da177e4
LT
100/**
101 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
102 * @tf: Taskfile to convert
103 * @fis: Buffer into which data will output
104 * @pmp: Port multiplier port
105 *
106 * Converts a standard ATA taskfile to a Serial ATA
107 * FIS structure (Register - Host to Device).
108 *
109 * LOCKING:
110 * Inherited from caller.
111 */
112
057ace5e 113void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
1da177e4
LT
114{
115 fis[0] = 0x27; /* Register - Host to Device FIS */
116 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
117 bit 7 indicates Command FIS */
118 fis[2] = tf->command;
119 fis[3] = tf->feature;
120
121 fis[4] = tf->lbal;
122 fis[5] = tf->lbam;
123 fis[6] = tf->lbah;
124 fis[7] = tf->device;
125
126 fis[8] = tf->hob_lbal;
127 fis[9] = tf->hob_lbam;
128 fis[10] = tf->hob_lbah;
129 fis[11] = tf->hob_feature;
130
131 fis[12] = tf->nsect;
132 fis[13] = tf->hob_nsect;
133 fis[14] = 0;
134 fis[15] = tf->ctl;
135
136 fis[16] = 0;
137 fis[17] = 0;
138 fis[18] = 0;
139 fis[19] = 0;
140}
141
142/**
143 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
144 * @fis: Buffer from which data will be input
145 * @tf: Taskfile to output
146 *
e12a1be6 147 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
148 *
149 * LOCKING:
150 * Inherited from caller.
151 */
152
057ace5e 153void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
154{
155 tf->command = fis[2]; /* status */
156 tf->feature = fis[3]; /* error */
157
158 tf->lbal = fis[4];
159 tf->lbam = fis[5];
160 tf->lbah = fis[6];
161 tf->device = fis[7];
162
163 tf->hob_lbal = fis[8];
164 tf->hob_lbam = fis[9];
165 tf->hob_lbah = fis[10];
166
167 tf->nsect = fis[12];
168 tf->hob_nsect = fis[13];
169}
170
/*
 * Read/write opcode lookup table used by ata_rwcmd_protocol().
 *
 * Three groups of eight entries each: pio multi (base 0), pio
 * (base 8) and dma (base 16).  Within a group the entry is picked
 * by fua*4 + lba48*2 + write*1.  A zero entry means the
 * combination is unsupported (e.g. FUA without LBA48).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
200
201/**
8cbd6df1
AL
202 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
203 * @qc: command to examine and configure
1da177e4 204 *
2e9edbf8 205 * Examine the device configuration and tf->flags to calculate
8cbd6df1 206 * the proper read/write commands and protocol to use.
1da177e4
LT
207 *
208 * LOCKING:
209 * caller.
210 */
9a3dccc4 211int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
1da177e4 212{
8cbd6df1
AL
213 struct ata_taskfile *tf = &qc->tf;
214 struct ata_device *dev = qc->dev;
9a3dccc4 215 u8 cmd;
1da177e4 216
9a3dccc4 217 int index, fua, lba48, write;
2e9edbf8 218
9a3dccc4 219 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
220 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
221 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 222
8cbd6df1
AL
223 if (dev->flags & ATA_DFLAG_PIO) {
224 tf->protocol = ATA_PROT_PIO;
9a3dccc4 225 index = dev->multi_count ? 0 : 8;
8d238e01
AC
226 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
227 /* Unable to use DMA due to host limitation */
228 tf->protocol = ATA_PROT_PIO;
0565c26d 229 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
230 } else {
231 tf->protocol = ATA_PROT_DMA;
9a3dccc4 232 index = 16;
8cbd6df1 233 }
1da177e4 234
9a3dccc4
TH
235 cmd = ata_rw_cmds[index + fua + lba48 + write];
236 if (cmd) {
237 tf->command = cmd;
238 return 0;
239 }
240 return -1;
1da177e4
LT
241}
242
cb95d562
TH
243/**
244 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
245 * @pio_mask: pio_mask
246 * @mwdma_mask: mwdma_mask
247 * @udma_mask: udma_mask
248 *
249 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
250 * unsigned int xfer_mask.
251 *
252 * LOCKING:
253 * None.
254 *
255 * RETURNS:
256 * Packed xfer_mask.
257 */
258static unsigned int ata_pack_xfermask(unsigned int pio_mask,
259 unsigned int mwdma_mask,
260 unsigned int udma_mask)
261{
262 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
263 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
264 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
265}
266
c0489e4e
TH
267/**
268 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
269 * @xfer_mask: xfer_mask to unpack
270 * @pio_mask: resulting pio_mask
271 * @mwdma_mask: resulting mwdma_mask
272 * @udma_mask: resulting udma_mask
273 *
274 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
275 * Any NULL distination masks will be ignored.
276 */
277static void ata_unpack_xfermask(unsigned int xfer_mask,
278 unsigned int *pio_mask,
279 unsigned int *mwdma_mask,
280 unsigned int *udma_mask)
281{
282 if (pio_mask)
283 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
284 if (mwdma_mask)
285 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
286 if (udma_mask)
287 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
288}
289
/*
 * Map between the bit ranges of an xfer_mask and the corresponding
 * XFER_* base values.  Used by the ata_xfer_mask2mode() family of
 * lookups below.
 */
static const struct ata_xfer_ent {
	int shift, bits;	/* position and width of the bit range */
	u8 base;		/* first XFER_* value of that range */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },	/* sentinel: shift < 0 terminates the loops */
};
299
300/**
301 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
302 * @xfer_mask: xfer_mask of interest
303 *
304 * Return matching XFER_* value for @xfer_mask. Only the highest
305 * bit of @xfer_mask is considered.
306 *
307 * LOCKING:
308 * None.
309 *
310 * RETURNS:
311 * Matching XFER_* value, 0 if no match found.
312 */
313static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
314{
315 int highbit = fls(xfer_mask) - 1;
316 const struct ata_xfer_ent *ent;
317
318 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
319 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
320 return ent->base + highbit - ent->shift;
321 return 0;
322}
323
324/**
325 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
326 * @xfer_mode: XFER_* of interest
327 *
328 * Return matching xfer_mask for @xfer_mode.
329 *
330 * LOCKING:
331 * None.
332 *
333 * RETURNS:
334 * Matching xfer_mask, 0 if no match found.
335 */
336static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
337{
338 const struct ata_xfer_ent *ent;
339
340 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
341 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
342 return 1 << (ent->shift + xfer_mode - ent->base);
343 return 0;
344}
345
346/**
347 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
348 * @xfer_mode: XFER_* of interest
349 *
350 * Return matching xfer_shift for @xfer_mode.
351 *
352 * LOCKING:
353 * None.
354 *
355 * RETURNS:
356 * Matching xfer_shift, -1 if no match found.
357 */
358static int ata_xfer_mode2shift(unsigned int xfer_mode)
359{
360 const struct ata_xfer_ent *ent;
361
362 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
363 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
364 return ent->shift;
365 return -1;
366}
367
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4",
		"MWDMA0", "MWDMA1", "MWDMA2",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
		"UDMA/66", "UDMA/100", "UDMA/133", "UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	/* empty mask or a bit beyond the table means "unknown" */
	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[highbit];
}
409
/*
 * sata_spd_string - human-readable name for a SATA link speed value.
 * @spd is 1-based (1 = 1.5 Gbps, 2 = 3.0 Gbps); anything else maps
 * to "<unknown>".
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd < 1 || spd > ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[spd - 1];
}
421
3373efd8 422void ata_dev_disable(struct ata_device *dev)
0b8efb0a 423{
0dd4b21f 424 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
f15a1daf 425 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
0b8efb0a
TH
426 dev->class++;
427 }
428}
429
1da177e4
LT
430/**
431 * ata_pio_devchk - PATA device presence detection
432 * @ap: ATA channel to examine
433 * @device: Device to examine (starting at zero)
434 *
435 * This technique was originally described in
436 * Hale Landis's ATADRVR (www.ata-atapi.com), and
437 * later found its way into the ATA/ATAPI spec.
438 *
439 * Write a pattern to the ATA shadow registers,
440 * and if a device is present, it will respond by
441 * correctly storing and echoing back the
442 * ATA shadow register contents.
443 *
444 * LOCKING:
445 * caller.
446 */
447
448static unsigned int ata_pio_devchk(struct ata_port *ap,
449 unsigned int device)
450{
451 struct ata_ioports *ioaddr = &ap->ioaddr;
452 u8 nsect, lbal;
453
454 ap->ops->dev_select(ap, device);
455
456 outb(0x55, ioaddr->nsect_addr);
457 outb(0xaa, ioaddr->lbal_addr);
458
459 outb(0xaa, ioaddr->nsect_addr);
460 outb(0x55, ioaddr->lbal_addr);
461
462 outb(0x55, ioaddr->nsect_addr);
463 outb(0xaa, ioaddr->lbal_addr);
464
465 nsect = inb(ioaddr->nsect_addr);
466 lbal = inb(ioaddr->lbal_addr);
467
468 if ((nsect == 0x55) && (lbal == 0xaa))
469 return 1; /* we found a device */
470
471 return 0; /* nothing found */
472}
473
474/**
475 * ata_mmio_devchk - PATA device presence detection
476 * @ap: ATA channel to examine
477 * @device: Device to examine (starting at zero)
478 *
479 * This technique was originally described in
480 * Hale Landis's ATADRVR (www.ata-atapi.com), and
481 * later found its way into the ATA/ATAPI spec.
482 *
483 * Write a pattern to the ATA shadow registers,
484 * and if a device is present, it will respond by
485 * correctly storing and echoing back the
486 * ATA shadow register contents.
487 *
488 * LOCKING:
489 * caller.
490 */
491
492static unsigned int ata_mmio_devchk(struct ata_port *ap,
493 unsigned int device)
494{
495 struct ata_ioports *ioaddr = &ap->ioaddr;
496 u8 nsect, lbal;
497
498 ap->ops->dev_select(ap, device);
499
500 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
501 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
502
503 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
504 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
505
506 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
507 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
508
509 nsect = readb((void __iomem *) ioaddr->nsect_addr);
510 lbal = readb((void __iomem *) ioaddr->lbal_addr);
511
512 if ((nsect == 0x55) && (lbal == 0xaa))
513 return 1; /* we found a device */
514
515 return 0; /* nothing found */
516}
517
518/**
519 * ata_devchk - PATA device presence detection
520 * @ap: ATA channel to examine
521 * @device: Device to examine (starting at zero)
522 *
523 * Dispatch ATA device presence detection, depending
524 * on whether we are using PIO or MMIO to talk to the
525 * ATA shadow registers.
526 *
527 * LOCKING:
528 * caller.
529 */
530
531static unsigned int ata_devchk(struct ata_port *ap,
532 unsigned int device)
533{
534 if (ap->flags & ATA_FLAG_MMIO)
535 return ata_mmio_devchk(ap, device);
536 return ata_pio_devchk(ap, device);
537}
538
539/**
540 * ata_dev_classify - determine device type based on ATA-spec signature
541 * @tf: ATA taskfile register set for device to be identified
542 *
543 * Determine from taskfile register contents whether a device is
544 * ATA or ATAPI, as per "Signature and persistence" section
545 * of ATA/PI spec (volume 1, sect 5.14).
546 *
547 * LOCKING:
548 * None.
549 *
550 * RETURNS:
551 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
552 * the event of failure.
553 */
554
057ace5e 555unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
556{
557 /* Apple's open source Darwin code hints that some devices only
558 * put a proper signature into the LBA mid/high registers,
559 * So, we only check those. It's sufficient for uniqueness.
560 */
561
562 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
563 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
564 DPRINTK("found ATA device by sig\n");
565 return ATA_DEV_ATA;
566 }
567
568 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
569 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
570 DPRINTK("found ATAPI device by sig\n");
571 return ATA_DEV_ATAPI;
572 }
573
574 DPRINTK("unknown device\n");
575 return ATA_DEV_UNKNOWN;
576}
577
578/**
579 * ata_dev_try_classify - Parse returned ATA device signature
580 * @ap: ATA channel to examine
581 * @device: Device to examine (starting at zero)
b4dc7623 582 * @r_err: Value of error register on completion
1da177e4
LT
583 *
584 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
585 * an ATA/ATAPI-defined set of values is placed in the ATA
586 * shadow registers, indicating the results of device detection
587 * and diagnostics.
588 *
589 * Select the ATA device, and read the values from the ATA shadow
590 * registers. Then parse according to the Error register value,
591 * and the spec-defined values examined by ata_dev_classify().
592 *
593 * LOCKING:
594 * caller.
b4dc7623
TH
595 *
596 * RETURNS:
597 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4
LT
598 */
599
b4dc7623
TH
600static unsigned int
601ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
1da177e4 602{
1da177e4
LT
603 struct ata_taskfile tf;
604 unsigned int class;
605 u8 err;
606
607 ap->ops->dev_select(ap, device);
608
609 memset(&tf, 0, sizeof(tf));
610
1da177e4 611 ap->ops->tf_read(ap, &tf);
0169e284 612 err = tf.feature;
b4dc7623
TH
613 if (r_err)
614 *r_err = err;
1da177e4
LT
615
616 /* see if device passed diags */
617 if (err == 1)
618 /* do nothing */ ;
619 else if ((device == 0) && (err == 0x81))
620 /* do nothing */ ;
621 else
b4dc7623 622 return ATA_DEV_NONE;
1da177e4 623
b4dc7623 624 /* determine if device is ATA or ATAPI */
1da177e4 625 class = ata_dev_classify(&tf);
b4dc7623 626
1da177e4 627 if (class == ATA_DEV_UNKNOWN)
b4dc7623 628 return ATA_DEV_NONE;
1da177e4 629 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
b4dc7623
TH
630 return ATA_DEV_NONE;
631 return class;
1da177e4
LT
632}
633
634/**
6a62a04d 635 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
636 * @id: IDENTIFY DEVICE results we will examine
637 * @s: string into which data is output
638 * @ofs: offset into identify device page
639 * @len: length of string to return. must be an even number.
640 *
641 * The strings in the IDENTIFY DEVICE page are broken up into
642 * 16-bit chunks. Run through the string, and output each
643 * 8-bit chunk linearly, regardless of platform.
644 *
645 * LOCKING:
646 * caller.
647 */
648
6a62a04d
TH
649void ata_id_string(const u16 *id, unsigned char *s,
650 unsigned int ofs, unsigned int len)
1da177e4
LT
651{
652 unsigned int c;
653
654 while (len > 0) {
655 c = id[ofs] >> 8;
656 *s = c;
657 s++;
658
659 c = id[ofs] & 0xff;
660 *s = c;
661 s++;
662
663 ofs++;
664 len -= 2;
665 }
666}
667
0e949ff3 668/**
6a62a04d 669 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
670 * @id: IDENTIFY DEVICE results we will examine
671 * @s: string into which data is output
672 * @ofs: offset into identify device page
673 * @len: length of string to return. must be an odd number.
674 *
6a62a04d 675 * This function is identical to ata_id_string except that it
0e949ff3
TH
676 * trims trailing spaces and terminates the resulting string with
677 * null. @len must be actual maximum length (even number) + 1.
678 *
679 * LOCKING:
680 * caller.
681 */
6a62a04d
TH
682void ata_id_c_string(const u16 *id, unsigned char *s,
683 unsigned int ofs, unsigned int len)
0e949ff3
TH
684{
685 unsigned char *p;
686
687 WARN_ON(!(len & 1));
688
6a62a04d 689 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
690
691 p = s + strnlen(s, len - 1);
692 while (p > s && p[-1] == ' ')
693 p--;
694 *p = '\0';
695}
0baab86b 696
2940740b
TH
697static u64 ata_id_n_sectors(const u16 *id)
698{
699 if (ata_id_has_lba(id)) {
700 if (ata_id_has_lba48(id))
701 return ata_id_u64(id, 100);
702 else
703 return ata_id_u32(id, 60);
704 } else {
705 if (ata_id_current_chs_valid(id))
706 return ata_id_u32(id, 57);
707 else
708 return id[1] * id[3] * id[6];
709 }
710}
711
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty */
}
727
0baab86b 728
1da177e4
LT
729/**
730 * ata_std_dev_select - Select device 0/1 on ATA bus
731 * @ap: ATA channel to manipulate
732 * @device: ATA device (numbered from zero) to select
733 *
734 * Use the method defined in the ATA specification to
735 * make either device 0, or device 1, active on the
0baab86b
EF
736 * ATA channel. Works with both PIO and MMIO.
737 *
738 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
739 *
740 * LOCKING:
741 * caller.
742 */
743
744void ata_std_dev_select (struct ata_port *ap, unsigned int device)
745{
746 u8 tmp;
747
748 if (device == 0)
749 tmp = ATA_DEVICE_OBS;
750 else
751 tmp = ATA_DEVICE_OBS | ATA_DEV1;
752
753 if (ap->flags & ATA_FLAG_MMIO) {
754 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
755 } else {
756 outb(tmp, ap->ioaddr.device_addr);
757 }
758 ata_pause(ap); /* needed; also flushes, for mmio */
759}
760
761/**
762 * ata_dev_select - Select device 0/1 on ATA bus
763 * @ap: ATA channel to manipulate
764 * @device: ATA device (numbered from zero) to select
765 * @wait: non-zero to wait for Status register BSY bit to clear
766 * @can_sleep: non-zero if context allows sleeping
767 *
768 * Use the method defined in the ATA specification to
769 * make either device 0, or device 1, active on the
770 * ATA channel.
771 *
772 * This is a high-level version of ata_std_dev_select(),
773 * which additionally provides the services of inserting
774 * the proper pauses and status polling, where needed.
775 *
776 * LOCKING:
777 * caller.
778 */
779
780void ata_dev_select(struct ata_port *ap, unsigned int device,
781 unsigned int wait, unsigned int can_sleep)
782{
88574551 783 if (ata_msg_probe(ap))
0dd4b21f 784 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
88574551 785 "device %u, wait %u\n", ap->id, device, wait);
1da177e4
LT
786
787 if (wait)
788 ata_wait_idle(ap);
789
790 ap->ops->dev_select(ap, device);
791
792 if (wait) {
793 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
794 msleep(150);
795 ata_wait_idle(ap);
796 }
797}
798
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.  Debug-build only (DPRINTK); no effect otherwise.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
837
cb95d562
TH
838/**
839 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
840 * @id: IDENTIFY data to compute xfer mask from
841 *
842 * Compute the xfermask for this device. This is not as trivial
843 * as it seems if we must consider early devices correctly.
844 *
845 * FIXME: pre IDE drive timing (do we care ?).
846 *
847 * LOCKING:
848 * None.
849 *
850 * RETURNS:
851 * Computed xfermask
852 */
853static unsigned int ata_id_xfermask(const u16 *id)
854{
855 unsigned int pio_mask, mwdma_mask, udma_mask;
856
857 /* Usual case. Word 53 indicates word 64 is valid */
858 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
859 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
860 pio_mask <<= 3;
861 pio_mask |= 0x7;
862 } else {
863 /* If word 64 isn't valid then Word 51 high byte holds
864 * the PIO timing number for the maximum. Turn it into
865 * a mask.
866 */
867 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
868
869 /* But wait.. there's more. Design your standards by
870 * committee and you too can get a free iordy field to
871 * process. However its the speeds not the modes that
872 * are supported... Note drivers using the timing API
873 * will get this right anyway
874 */
875 }
876
877 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0
TH
878
879 udma_mask = 0;
880 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
881 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
882
883 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
884}
885
86e45b6b
TH
886/**
887 * ata_port_queue_task - Queue port_task
888 * @ap: The ata_port to queue port_task for
e2a7f77a
RD
889 * @fn: workqueue function to be scheduled
890 * @data: data value to pass to workqueue function
891 * @delay: delay time for workqueue function
86e45b6b
TH
892 *
893 * Schedule @fn(@data) for execution after @delay jiffies using
894 * port_task. There is one port_task per port and it's the
895 * user(low level driver)'s responsibility to make sure that only
896 * one task is active at any given time.
897 *
898 * libata core layer takes care of synchronization between
899 * port_task and EH. ata_port_queue_task() may be ignored for EH
900 * synchronization.
901 *
902 * LOCKING:
903 * Inherited from caller.
904 */
905void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
906 unsigned long delay)
907{
908 int rc;
909
2e755f68 910 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
86e45b6b
TH
911 return;
912
913 PREPARE_WORK(&ap->port_task, fn, data);
914
915 if (!delay)
916 rc = queue_work(ata_wq, &ap->port_task);
917 else
918 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
919
920 /* rc == 0 means that another user is using port task */
921 WARN_ON(rc == 0);
922}
923
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* set the flag under lock so a running task observes it */
	spin_lock_irqsave(ap->lock, flags);
	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		/* cancel failed -- the work may already be running;
		 * flush again to wait for it to finish */
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	/* allow port_task to be queued again */
	spin_lock_irqsave(ap->lock, flags);
	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
966
77853bf2 967void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 968{
77853bf2 969 struct completion *waiting = qc->private_data;
a2a7a662 970
a2a7a662 971 complete(waiting);
a2a7a662
TH
972}
973
/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data tranfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->flags & ATA_FLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the internal tag must be free here; anything else is a bug */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the in-flight command state so the internal
	 * command runs alone; restored after completion below */
	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	/* rc == 0 means the wait timed out */
	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				"zero err_mask for failed "
				"internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	/* restore the command state saved before issue */
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1135
977e6b9f
TH
1136/**
1137 * ata_do_simple_cmd - execute simple internal command
1138 * @dev: Device to which the command is sent
1139 * @cmd: Opcode to execute
1140 *
1141 * Execute a 'simple' command, that only consists of the opcode
1142 * 'cmd' itself, without filling any other registers
1143 *
1144 * LOCKING:
1145 * Kernel thread context (may sleep).
1146 *
1147 * RETURNS:
1148 * Zero on success, AC_ERR_* mask on failure
e58eb583 1149 */
77b08fb5 1150unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1151{
1152 struct ata_taskfile tf;
e58eb583
TH
1153
1154 ata_tf_init(dev, &tf);
1155
1156 tf.command = cmd;
1157 tf.flags |= ATA_TFLAG_DEVICE;
1158 tf.protocol = ATA_PROT_NODATA;
1159
977e6b9f 1160 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1161}
1162
1bc4ccff
AC
1163/**
1164 * ata_pio_need_iordy - check if iordy needed
1165 * @adev: ATA device
1166 *
1167 * Check if the current speed of the device requires IORDY. Used
1168 * by various controllers for chip configuration.
1169 */
1170
1171unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1172{
1173 int pio;
1174 int speed = adev->pio_mode - XFER_PIO_0;
1175
1176 if (speed < 2)
1177 return 0;
1178 if (speed > 2)
1179 return 1;
2e9edbf8 1180
1bc4ccff
AC
1181 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1182
1183 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1184 pio = adev->id[ATA_ID_EIDE_PIO];
1185 /* Is the speed faster than the drive allows non IORDY ? */
1186 if (pio) {
1187 /* This is cycle times not frequency - watch the logic! */
1188 if (pio > 240) /* PIO2 is 240nS per cycle */
1189 return 1;
1190 return 0;
1191 }
1192 }
1193 return 0;
1194}
1195
1da177e4 1196/**
49016aca 1197 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1198 * @dev: target device
1199 * @p_class: pointer to class of the target device (may be changed)
1200 * @post_reset: is this read ID post-reset?
fe635c7e 1201 * @id: buffer to read IDENTIFY data into
1da177e4 1202 *
49016aca
TH
1203 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1204 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1205 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1206 * for pre-ATA4 drives.
1da177e4
LT
1207 *
1208 * LOCKING:
49016aca
TH
1209 * Kernel thread context (may sleep)
1210 *
1211 * RETURNS:
1212 * 0 on success, -errno otherwise.
1da177e4 1213 */
a9beec95
TH
1214int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1215 int post_reset, u16 *id)
1da177e4 1216{
3373efd8 1217 struct ata_port *ap = dev->ap;
49016aca 1218 unsigned int class = *p_class;
a0123703 1219 struct ata_taskfile tf;
49016aca
TH
1220 unsigned int err_mask = 0;
1221 const char *reason;
1222 int rc;
1da177e4 1223
0dd4b21f 1224 if (ata_msg_ctl(ap))
88574551
TH
1225 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1226 __FUNCTION__, ap->id, dev->devno);
1da177e4 1227
49016aca 1228 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1da177e4 1229
49016aca 1230 retry:
3373efd8 1231 ata_tf_init(dev, &tf);
a0123703 1232
49016aca
TH
1233 switch (class) {
1234 case ATA_DEV_ATA:
a0123703 1235 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1236 break;
1237 case ATA_DEV_ATAPI:
a0123703 1238 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1239 break;
1240 default:
1241 rc = -ENODEV;
1242 reason = "unsupported class";
1243 goto err_out;
1da177e4
LT
1244 }
1245
a0123703 1246 tf.protocol = ATA_PROT_PIO;
1da177e4 1247
3373efd8 1248 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
49016aca 1249 id, sizeof(id[0]) * ATA_ID_WORDS);
a0123703 1250 if (err_mask) {
49016aca
TH
1251 rc = -EIO;
1252 reason = "I/O error";
1da177e4
LT
1253 goto err_out;
1254 }
1255
49016aca 1256 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1257
49016aca 1258 /* sanity check */
692785e7 1259 if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
49016aca
TH
1260 rc = -EINVAL;
1261 reason = "device reports illegal type";
1262 goto err_out;
1263 }
1264
1265 if (post_reset && class == ATA_DEV_ATA) {
1266 /*
1267 * The exact sequence expected by certain pre-ATA4 drives is:
1268 * SRST RESET
1269 * IDENTIFY
1270 * INITIALIZE DEVICE PARAMETERS
1271 * anything else..
1272 * Some drives were very specific about that exact sequence.
1273 */
1274 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 1275 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
1276 if (err_mask) {
1277 rc = -EIO;
1278 reason = "INIT_DEV_PARAMS failed";
1279 goto err_out;
1280 }
1281
1282 /* current CHS translation info (id[53-58]) might be
1283 * changed. reread the identify device info.
1284 */
1285 post_reset = 0;
1286 goto retry;
1287 }
1288 }
1289
1290 *p_class = class;
fe635c7e 1291
49016aca
TH
1292 return 0;
1293
1294 err_out:
88574551 1295 if (ata_msg_warn(ap))
0dd4b21f 1296 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 1297 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
1298 return rc;
1299}
1300
3373efd8 1301static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1302{
3373efd8 1303 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1304}
1305
a6e6ce8e
TH
1306static void ata_dev_config_ncq(struct ata_device *dev,
1307 char *desc, size_t desc_sz)
1308{
1309 struct ata_port *ap = dev->ap;
1310 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1311
1312 if (!ata_id_has_ncq(dev->id)) {
1313 desc[0] = '\0';
1314 return;
1315 }
1316
1317 if (ap->flags & ATA_FLAG_NCQ) {
1318 hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
1319 dev->flags |= ATA_DFLAG_NCQ;
1320 }
1321
1322 if (hdepth >= ddepth)
1323 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1324 else
1325 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1326}
1327
49016aca 1328/**
ffeae418 1329 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418 1330 * @dev: Target device to configure
4c2d721a 1331 * @print_info: Enable device info printout
ffeae418
TH
1332 *
1333 * Configure @dev according to @dev->id. Generic and low-level
1334 * driver specific fixups are also applied.
49016aca
TH
1335 *
1336 * LOCKING:
ffeae418
TH
1337 * Kernel thread context (may sleep)
1338 *
1339 * RETURNS:
1340 * 0 on success, -errno otherwise
49016aca 1341 */
a9beec95 1342int ata_dev_configure(struct ata_device *dev, int print_info)
49016aca 1343{
3373efd8 1344 struct ata_port *ap = dev->ap;
1148c3a7 1345 const u16 *id = dev->id;
ff8854b2 1346 unsigned int xfer_mask;
49016aca
TH
1347 int i, rc;
1348
0dd4b21f 1349 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
88574551
TH
1350 ata_dev_printk(dev, KERN_INFO,
1351 "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1352 __FUNCTION__, ap->id, dev->devno);
ffeae418 1353 return 0;
49016aca
TH
1354 }
1355
0dd4b21f 1356 if (ata_msg_probe(ap))
88574551
TH
1357 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1358 __FUNCTION__, ap->id, dev->devno);
1da177e4 1359
c39f5ebe 1360 /* print device capabilities */
0dd4b21f 1361 if (ata_msg_probe(ap))
88574551
TH
1362 ata_dev_printk(dev, KERN_DEBUG,
1363 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1364 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1365 __FUNCTION__,
f15a1daf
TH
1366 id[49], id[82], id[83], id[84],
1367 id[85], id[86], id[87], id[88]);
c39f5ebe 1368
208a9933 1369 /* initialize to-be-configured parameters */
ea1dd4e1 1370 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1371 dev->max_sectors = 0;
1372 dev->cdb_len = 0;
1373 dev->n_sectors = 0;
1374 dev->cylinders = 0;
1375 dev->heads = 0;
1376 dev->sectors = 0;
1377
1da177e4
LT
1378 /*
1379 * common ATA, ATAPI feature tests
1380 */
1381
ff8854b2 1382 /* find max transfer mode; for printk only */
1148c3a7 1383 xfer_mask = ata_id_xfermask(id);
1da177e4 1384
0dd4b21f
BP
1385 if (ata_msg_probe(ap))
1386 ata_dump_id(id);
1da177e4
LT
1387
1388 /* ATA-specific feature tests */
1389 if (dev->class == ATA_DEV_ATA) {
1148c3a7 1390 dev->n_sectors = ata_id_n_sectors(id);
2940740b 1391
1148c3a7 1392 if (ata_id_has_lba(id)) {
4c2d721a 1393 const char *lba_desc;
a6e6ce8e 1394 char ncq_desc[20];
8bf62ece 1395
4c2d721a
TH
1396 lba_desc = "LBA";
1397 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1398 if (ata_id_has_lba48(id)) {
8bf62ece 1399 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a
TH
1400 lba_desc = "LBA48";
1401 }
8bf62ece 1402
a6e6ce8e
TH
1403 /* config NCQ */
1404 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1405
8bf62ece 1406 /* print device info to dmesg */
0dd4b21f 1407 if (ata_msg_info(ap))
f15a1daf 1408 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
a6e6ce8e 1409 "max %s, %Lu sectors: %s %s\n",
f15a1daf
TH
1410 ata_id_major_version(id),
1411 ata_mode_string(xfer_mask),
1412 (unsigned long long)dev->n_sectors,
a6e6ce8e 1413 lba_desc, ncq_desc);
ffeae418 1414 } else {
8bf62ece
AL
1415 /* CHS */
1416
1417 /* Default translation */
1148c3a7
TH
1418 dev->cylinders = id[1];
1419 dev->heads = id[3];
1420 dev->sectors = id[6];
8bf62ece 1421
1148c3a7 1422 if (ata_id_current_chs_valid(id)) {
8bf62ece 1423 /* Current CHS translation is valid. */
1148c3a7
TH
1424 dev->cylinders = id[54];
1425 dev->heads = id[55];
1426 dev->sectors = id[56];
8bf62ece
AL
1427 }
1428
1429 /* print device info to dmesg */
0dd4b21f 1430 if (ata_msg_info(ap))
f15a1daf
TH
1431 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1432 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1433 ata_id_major_version(id),
1434 ata_mode_string(xfer_mask),
1435 (unsigned long long)dev->n_sectors,
88574551
TH
1436 dev->cylinders, dev->heads,
1437 dev->sectors);
1da177e4
LT
1438 }
1439
07f6f7d0
AL
1440 if (dev->id[59] & 0x100) {
1441 dev->multi_count = dev->id[59] & 0xff;
0dd4b21f 1442 if (ata_msg_info(ap))
88574551
TH
1443 ata_dev_printk(dev, KERN_INFO,
1444 "ata%u: dev %u multi count %u\n",
1445 ap->id, dev->devno, dev->multi_count);
07f6f7d0
AL
1446 }
1447
6e7846e9 1448 dev->cdb_len = 16;
1da177e4
LT
1449 }
1450
1451 /* ATAPI-specific feature tests */
2c13b7ce 1452 else if (dev->class == ATA_DEV_ATAPI) {
08a556db
AL
1453 char *cdb_intr_string = "";
1454
1148c3a7 1455 rc = atapi_cdb_len(id);
1da177e4 1456 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 1457 if (ata_msg_warn(ap))
88574551
TH
1458 ata_dev_printk(dev, KERN_WARNING,
1459 "unsupported CDB len\n");
ffeae418 1460 rc = -EINVAL;
1da177e4
LT
1461 goto err_out_nosup;
1462 }
6e7846e9 1463 dev->cdb_len = (unsigned int) rc;
1da177e4 1464
08a556db 1465 if (ata_id_cdb_intr(dev->id)) {
312f7da2 1466 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
1467 cdb_intr_string = ", CDB intr";
1468 }
312f7da2 1469
1da177e4 1470 /* print device info to dmesg */
0dd4b21f 1471 if (ata_msg_info(ap))
12436c30
TH
1472 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1473 ata_mode_string(xfer_mask),
1474 cdb_intr_string);
1da177e4
LT
1475 }
1476
6e7846e9
TH
1477 ap->host->max_cmd_len = 0;
1478 for (i = 0; i < ATA_MAX_DEVICES; i++)
1479 ap->host->max_cmd_len = max_t(unsigned int,
1480 ap->host->max_cmd_len,
1481 ap->device[i].cdb_len);
1482
4b2f3ede 1483 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 1484 if (ata_dev_knobble(dev)) {
0dd4b21f 1485 if (ata_msg_info(ap))
f15a1daf
TH
1486 ata_dev_printk(dev, KERN_INFO,
1487 "applying bridge limits\n");
5a529139 1488 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
1489 dev->max_sectors = ATA_MAX_SECTORS;
1490 }
1491
1492 if (ap->ops->dev_config)
1493 ap->ops->dev_config(ap, dev);
1494
0dd4b21f
BP
1495 if (ata_msg_probe(ap))
1496 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1497 __FUNCTION__, ata_chk_status(ap));
ffeae418 1498 return 0;
1da177e4
LT
1499
1500err_out_nosup:
0dd4b21f 1501 if (ata_msg_probe(ap))
88574551
TH
1502 ata_dev_printk(dev, KERN_DEBUG,
1503 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 1504 return rc;
1da177e4
LT
1505}
1506
1507/**
1508 * ata_bus_probe - Reset and probe ATA bus
1509 * @ap: Bus to probe
1510 *
0cba632b
JG
1511 * Master ATA bus probing function. Initiates a hardware-dependent
1512 * bus reset, then attempts to identify any devices found on
1513 * the bus.
1514 *
1da177e4 1515 * LOCKING:
0cba632b 1516 * PCI/etc. bus probe sem.
1da177e4
LT
1517 *
1518 * RETURNS:
96072e69 1519 * Zero on success, negative errno otherwise.
1da177e4
LT
1520 */
1521
1522static int ata_bus_probe(struct ata_port *ap)
1523{
28ca5c57 1524 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1
TH
1525 int tries[ATA_MAX_DEVICES];
1526 int i, rc, down_xfermask;
e82cbdb9 1527 struct ata_device *dev;
1da177e4 1528
28ca5c57 1529 ata_port_probe(ap);
c19ba8af 1530
14d2bac1
TH
1531 for (i = 0; i < ATA_MAX_DEVICES; i++)
1532 tries[i] = ATA_PROBE_MAX_TRIES;
1533
1534 retry:
1535 down_xfermask = 0;
1536
2044470c 1537 /* reset and determine device classes */
52783c5d 1538 ap->ops->phy_reset(ap);
2061a47a 1539
52783c5d
TH
1540 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1541 dev = &ap->device[i];
c19ba8af 1542
52783c5d
TH
1543 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1544 dev->class != ATA_DEV_UNKNOWN)
1545 classes[dev->devno] = dev->class;
1546 else
1547 classes[dev->devno] = ATA_DEV_NONE;
2044470c 1548
52783c5d 1549 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 1550 }
1da177e4 1551
52783c5d 1552 ata_port_probe(ap);
2044470c 1553
b6079ca4
AC
1554 /* after the reset the device state is PIO 0 and the controller
1555 state is undefined. Record the mode */
1556
1557 for (i = 0; i < ATA_MAX_DEVICES; i++)
1558 ap->device[i].pio_mode = XFER_PIO_0;
1559
28ca5c57 1560 /* read IDENTIFY page and configure devices */
1da177e4 1561 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e82cbdb9 1562 dev = &ap->device[i];
28ca5c57 1563
ec573755
TH
1564 if (tries[i])
1565 dev->class = classes[i];
ffeae418 1566
14d2bac1 1567 if (!ata_dev_enabled(dev))
ffeae418 1568 continue;
ffeae418 1569
3373efd8 1570 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
14d2bac1
TH
1571 if (rc)
1572 goto fail;
1573
3373efd8 1574 rc = ata_dev_configure(dev, 1);
14d2bac1
TH
1575 if (rc)
1576 goto fail;
1da177e4
LT
1577 }
1578
e82cbdb9 1579 /* configure transfer mode */
3adcebb2 1580 rc = ata_set_mode(ap, &dev);
51713d35
TH
1581 if (rc) {
1582 down_xfermask = 1;
1583 goto fail;
e82cbdb9 1584 }
1da177e4 1585
e82cbdb9
TH
1586 for (i = 0; i < ATA_MAX_DEVICES; i++)
1587 if (ata_dev_enabled(&ap->device[i]))
1588 return 0;
1da177e4 1589
e82cbdb9
TH
1590 /* no device present, disable port */
1591 ata_port_disable(ap);
1da177e4 1592 ap->ops->port_disable(ap);
96072e69 1593 return -ENODEV;
14d2bac1
TH
1594
1595 fail:
1596 switch (rc) {
1597 case -EINVAL:
1598 case -ENODEV:
1599 tries[dev->devno] = 0;
1600 break;
1601 case -EIO:
3c567b7d 1602 sata_down_spd_limit(ap);
14d2bac1
TH
1603 /* fall through */
1604 default:
1605 tries[dev->devno]--;
1606 if (down_xfermask &&
3373efd8 1607 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
14d2bac1
TH
1608 tries[dev->devno] = 0;
1609 }
1610
ec573755 1611 if (!tries[dev->devno]) {
3373efd8
TH
1612 ata_down_xfermask_limit(dev, 1);
1613 ata_dev_disable(dev);
ec573755
TH
1614 }
1615
14d2bac1 1616 goto retry;
1da177e4
LT
1617}
1618
1619/**
0cba632b
JG
1620 * ata_port_probe - Mark port as enabled
1621 * @ap: Port for which we indicate enablement
1da177e4 1622 *
0cba632b
JG
1623 * Modify @ap data structure such that the system
1624 * thinks that the entire port is enabled.
1625 *
1626 * LOCKING: host_set lock, or some other form of
1627 * serialization.
1da177e4
LT
1628 */
1629
1630void ata_port_probe(struct ata_port *ap)
1631{
198e0fed 1632 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
1633}
1634
3be680b7
TH
1635/**
1636 * sata_print_link_status - Print SATA link status
1637 * @ap: SATA port to printk link status about
1638 *
1639 * This function prints link speed and status of a SATA link.
1640 *
1641 * LOCKING:
1642 * None.
1643 */
1644static void sata_print_link_status(struct ata_port *ap)
1645{
6d5f9732 1646 u32 sstatus, scontrol, tmp;
3be680b7 1647
81952c54 1648 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1649 return;
81952c54 1650 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1651
81952c54 1652 if (ata_port_online(ap)) {
3be680b7 1653 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1654 ata_port_printk(ap, KERN_INFO,
1655 "SATA link up %s (SStatus %X SControl %X)\n",
1656 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1657 } else {
f15a1daf
TH
1658 ata_port_printk(ap, KERN_INFO,
1659 "SATA link down (SStatus %X SControl %X)\n",
1660 sstatus, scontrol);
3be680b7
TH
1661 }
1662}
1663
1da177e4 1664/**
780a87f7
JG
1665 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1666 * @ap: SATA port associated with target SATA PHY.
1da177e4 1667 *
780a87f7
JG
1668 * This function issues commands to standard SATA Sxxx
1669 * PHY registers, to wake up the phy (and device), and
1670 * clear any reset condition.
1da177e4
LT
1671 *
1672 * LOCKING:
0cba632b 1673 * PCI/etc. bus probe sem.
1da177e4
LT
1674 *
1675 */
1676void __sata_phy_reset(struct ata_port *ap)
1677{
1678 u32 sstatus;
1679 unsigned long timeout = jiffies + (HZ * 5);
1680
1681 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 1682 /* issue phy wake/reset */
81952c54 1683 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
62ba2841
TH
1684 /* Couldn't find anything in SATA I/II specs, but
1685 * AHCI-1.1 10.4.2 says at least 1 ms. */
1686 mdelay(1);
1da177e4 1687 }
81952c54
TH
1688 /* phy wake/clear reset */
1689 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1da177e4
LT
1690
1691 /* wait for phy to become ready, if necessary */
1692 do {
1693 msleep(200);
81952c54 1694 sata_scr_read(ap, SCR_STATUS, &sstatus);
1da177e4
LT
1695 if ((sstatus & 0xf) != 1)
1696 break;
1697 } while (time_before(jiffies, timeout));
1698
3be680b7
TH
1699 /* print link status */
1700 sata_print_link_status(ap);
656563e3 1701
3be680b7 1702 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 1703 if (!ata_port_offline(ap))
1da177e4 1704 ata_port_probe(ap);
3be680b7 1705 else
1da177e4 1706 ata_port_disable(ap);
1da177e4 1707
198e0fed 1708 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1709 return;
1710
1711 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1712 ata_port_disable(ap);
1713 return;
1714 }
1715
1716 ap->cbl = ATA_CBL_SATA;
1717}
1718
1719/**
780a87f7
JG
1720 * sata_phy_reset - Reset SATA bus.
1721 * @ap: SATA port associated with target SATA PHY.
1da177e4 1722 *
780a87f7
JG
1723 * This function resets the SATA bus, and then probes
1724 * the bus for devices.
1da177e4
LT
1725 *
1726 * LOCKING:
0cba632b 1727 * PCI/etc. bus probe sem.
1da177e4
LT
1728 *
1729 */
1730void sata_phy_reset(struct ata_port *ap)
1731{
1732 __sata_phy_reset(ap);
198e0fed 1733 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1734 return;
1735 ata_bus_reset(ap);
1736}
1737
ebdfca6e
AC
1738/**
1739 * ata_dev_pair - return other device on cable
ebdfca6e
AC
1740 * @adev: device
1741 *
1742 * Obtain the other device on the same cable, or if none is
1743 * present NULL is returned
1744 */
2e9edbf8 1745
3373efd8 1746struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 1747{
3373efd8 1748 struct ata_port *ap = adev->ap;
ebdfca6e 1749 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 1750 if (!ata_dev_enabled(pair))
ebdfca6e
AC
1751 return NULL;
1752 return pair;
1753}
1754
1da177e4 1755/**
780a87f7
JG
1756 * ata_port_disable - Disable port.
1757 * @ap: Port to be disabled.
1da177e4 1758 *
780a87f7
JG
1759 * Modify @ap data structure such that the system
1760 * thinks that the entire port is disabled, and should
1761 * never attempt to probe or communicate with devices
1762 * on this port.
1763 *
1764 * LOCKING: host_set lock, or some other form of
1765 * serialization.
1da177e4
LT
1766 */
1767
1768void ata_port_disable(struct ata_port *ap)
1769{
1770 ap->device[0].class = ATA_DEV_NONE;
1771 ap->device[1].class = ATA_DEV_NONE;
198e0fed 1772 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
1773}
1774
1c3fae4d 1775/**
3c567b7d 1776 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
1777 * @ap: Port to adjust SATA spd limit for
1778 *
1779 * Adjust SATA spd limit of @ap downward. Note that this
1780 * function only adjusts the limit. The change must be applied
3c567b7d 1781 * using sata_set_spd().
1c3fae4d
TH
1782 *
1783 * LOCKING:
1784 * Inherited from caller.
1785 *
1786 * RETURNS:
1787 * 0 on success, negative errno on failure
1788 */
3c567b7d 1789int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 1790{
81952c54
TH
1791 u32 sstatus, spd, mask;
1792 int rc, highbit;
1c3fae4d 1793
81952c54
TH
1794 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1795 if (rc)
1796 return rc;
1c3fae4d
TH
1797
1798 mask = ap->sata_spd_limit;
1799 if (mask <= 1)
1800 return -EINVAL;
1801 highbit = fls(mask) - 1;
1802 mask &= ~(1 << highbit);
1803
81952c54 1804 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
1805 if (spd <= 1)
1806 return -EINVAL;
1807 spd--;
1808 mask &= (1 << spd) - 1;
1809 if (!mask)
1810 return -EINVAL;
1811
1812 ap->sata_spd_limit = mask;
1813
f15a1daf
TH
1814 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1815 sata_spd_string(fls(mask)));
1c3fae4d
TH
1816
1817 return 0;
1818}
1819
3c567b7d 1820static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
1821{
1822 u32 spd, limit;
1823
1824 if (ap->sata_spd_limit == UINT_MAX)
1825 limit = 0;
1826 else
1827 limit = fls(ap->sata_spd_limit);
1828
1829 spd = (*scontrol >> 4) & 0xf;
1830 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1831
1832 return spd != limit;
1833}
1834
1835/**
3c567b7d 1836 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
1837 * @ap: Port in question
1838 *
1839 * Test whether the spd limit in SControl matches
1840 * @ap->sata_spd_limit. This function is used to determine
1841 * whether hardreset is necessary to apply SATA spd
1842 * configuration.
1843 *
1844 * LOCKING:
1845 * Inherited from caller.
1846 *
1847 * RETURNS:
1848 * 1 if SATA spd configuration is needed, 0 otherwise.
1849 */
3c567b7d 1850int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
1851{
1852 u32 scontrol;
1853
81952c54 1854 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
1855 return 0;
1856
3c567b7d 1857 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
1858}
1859
1860/**
3c567b7d 1861 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
1862 * @ap: Port to set SATA spd for
1863 *
1864 * Set SATA spd of @ap according to sata_spd_limit.
1865 *
1866 * LOCKING:
1867 * Inherited from caller.
1868 *
1869 * RETURNS:
1870 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 1871 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 1872 */
3c567b7d 1873int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
1874{
1875 u32 scontrol;
81952c54 1876 int rc;
1c3fae4d 1877
81952c54
TH
1878 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1879 return rc;
1c3fae4d 1880
3c567b7d 1881 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
1882 return 0;
1883
81952c54
TH
1884 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1885 return rc;
1886
1c3fae4d
TH
1887 return 1;
1888}
1889
452503f9
AC
1890/*
1891 * This mode timing computation functionality is ported over from
1892 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1893 */
1894/*
1895 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1896 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1897 * for PIO 5, which is a nonstandard extension and UDMA6, which
2e9edbf8 1898 * is currently supported only by Maxtor drives.
452503f9
AC
1899 */
1900
1901static const struct ata_timing ata_timing[] = {
1902
1903 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1904 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1905 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1906 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1907
1908 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1909 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1910 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1911
1912/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 1913
452503f9
AC
1914 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1915 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1916 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 1917
452503f9
AC
1918 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1919 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1920 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1921
1922/* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1923 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1924 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1925
1926 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1927 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1928 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1929
1930/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1931
1932 { 0xFF }
1933};
1934
1935#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1936#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1937
1938static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1939{
1940 q->setup = EZ(t->setup * 1000, T);
1941 q->act8b = EZ(t->act8b * 1000, T);
1942 q->rec8b = EZ(t->rec8b * 1000, T);
1943 q->cyc8b = EZ(t->cyc8b * 1000, T);
1944 q->active = EZ(t->active * 1000, T);
1945 q->recover = EZ(t->recover * 1000, T);
1946 q->cycle = EZ(t->cycle * 1000, T);
1947 q->udma = EZ(t->udma * 1000, UT);
1948}
1949
1950void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1951 struct ata_timing *m, unsigned int what)
1952{
1953 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1954 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1955 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1956 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1957 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1958 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1959 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1960 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1961}
1962
1963static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1964{
1965 const struct ata_timing *t;
1966
1967 for (t = ata_timing; t->mode != speed; t++)
91190758 1968 if (t->mode == 0xFF)
452503f9 1969 return NULL;
2e9edbf8 1970 return t;
452503f9
AC
1971}
1972
1973int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1974 struct ata_timing *t, int T, int UT)
1975{
1976 const struct ata_timing *s;
1977 struct ata_timing p;
1978
1979 /*
2e9edbf8 1980 * Find the mode.
75b1f2f8 1981 */
452503f9
AC
1982
1983 if (!(s = ata_timing_find_mode(speed)))
1984 return -EINVAL;
1985
75b1f2f8
AL
1986 memcpy(t, s, sizeof(*s));
1987
452503f9
AC
1988 /*
1989 * If the drive is an EIDE drive, it can tell us it needs extended
1990 * PIO/MW_DMA cycle timing.
1991 */
1992
1993 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1994 memset(&p, 0, sizeof(p));
1995 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1996 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1997 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1998 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1999 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2000 }
2001 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2002 }
2003
2004 /*
2005 * Convert the timing to bus clock counts.
2006 */
2007
75b1f2f8 2008 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2009
2010 /*
c893a3ae
RD
2011 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2012 * S.M.A.R.T * and some other commands. We have to ensure that the
2013 * DMA cycle timing is slower/equal than the fastest PIO timing.
452503f9
AC
2014 */
2015
2016 if (speed > XFER_PIO_4) {
2017 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2018 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2019 }
2020
2021 /*
c893a3ae 2022 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2023 */
2024
2025 if (t->act8b + t->rec8b < t->cyc8b) {
2026 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2027 t->rec8b = t->cyc8b - t->act8b;
2028 }
2029
2030 if (t->active + t->recover < t->cycle) {
2031 t->active += (t->cycle - (t->active + t->recover)) / 2;
2032 t->recover = t->cycle - t->active;
2033 }
2034
2035 return 0;
2036}
2037
cf176e1a
TH
2038/**
2039 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a
TH
2040 * @dev: Device to adjust xfer masks
2041 * @force_pio0: Force PIO0
2042 *
2043 * Adjust xfer masks of @dev downward. Note that this function
2044 * does not apply the change. Invoking ata_set_mode() afterwards
2045 * will apply the limit.
2046 *
2047 * LOCKING:
2048 * Inherited from caller.
2049 *
2050 * RETURNS:
2051 * 0 on success, negative errno on failure
2052 */
3373efd8 2053int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
cf176e1a
TH
2054{
2055 unsigned long xfer_mask;
2056 int highbit;
2057
2058 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2059 dev->udma_mask);
2060
2061 if (!xfer_mask)
2062 goto fail;
2063 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2064 if (xfer_mask & ATA_MASK_UDMA)
2065 xfer_mask &= ~ATA_MASK_MWDMA;
2066
2067 highbit = fls(xfer_mask) - 1;
2068 xfer_mask &= ~(1 << highbit);
2069 if (force_pio0)
2070 xfer_mask &= 1 << ATA_SHIFT_PIO;
2071 if (!xfer_mask)
2072 goto fail;
2073
2074 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2075 &dev->udma_mask);
2076
f15a1daf
TH
2077 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2078 ata_mode_string(xfer_mask));
cf176e1a
TH
2079
2080 return 0;
2081
2082 fail:
2083 return -EINVAL;
2084}
2085
/**
 *	ata_dev_set_mode - program device for the chosen transfer mode
 *	@dev: target device
 *
 *	Issue SET FEATURES - XFER MODE for the mode already selected in
 *	dev->xfer_mode / dev->xfer_shift, then revalidate the device to
 *	confirm it is still present and responding.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	unsigned int err_mask;
	int rc;

	/* keep ATA_DFLAG_PIO in sync with the selected transfer shift */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* re-read IDENTIFY data and make sure the device is still there */
	rc = ata_dev_revalidate(dev, 0);
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
2113
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* has private set_mode? */
	if (ap->ops->set_mode) {
		/* FIXME: make ->set_mode handle no device case and
		 * return error code and failing device on failure.
		 */
		for (i = 0; i < ATA_MAX_DEVICES; i++) {
			if (ata_dev_enabled(&ap->device[i])) {
				ap->ops->set_mode(ap);
				break;
			}
		}
		return 0;
	}

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		/* pick the best PIO and DMA modes the masks allow */
		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode (SET FEATURES + revalidate) */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
		ap->host_set->simplex_claimed = 1;

	/* step5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
2229
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load taskfile registers first, then write the command register */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
2249
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (jiffies)
 *	@tmout: overall timeout (jiffies)
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING: None.
 *
 *	RETURNS:
 *	0 when BSY cleared within @tmout, 1 on timeout.
 */

unsigned int ata_busy_sleep (struct ata_port *ap,
			     unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* phase 1: poll quietly until the impatience timeout expires */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient\n");

	/* phase 2: warned once, keep polling until the overall timeout */
	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs)\n", tmout / HZ);
		return 1;
	}

	return 0;
}
2294
/*
 * ata_bus_post_reset - wait for devices to settle after a bus reset
 * @ap: port that was just reset
 * @devmask: devices found by ata_devchk() (bit 0 = dev 0, bit 1 = dev 1)
 *
 * Waits for BSY to clear on each present device.  Device 1 is first
 * polled via its nsect/lbal registers until they read back as 1,
 * i.e. until the device responds to register access at all.
 */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;	/* give up on device 1 */
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
2341
/*
 * ata_bus_softreset - issue an ATA software reset (SRST) on @ap
 * @ap: port to reset
 * @devmask: devices present (passed on to ata_bus_post_reset())
 *
 * Pulses SRST in the device control register, waits, and sanity
 * checks the status register.  Returns 0 on success or AC_ERR_OTHER
 * when the bus reads back 0xFF (nothing driving the bus).
 */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
		return AC_ERR_OTHER;
	}

	ata_bus_post_reset(ap, devmask);

	return 0;
}
2389
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 present */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
2480
/**
 *	sata_phy_debounce - debounce SATA phy status
 *	@ap: ATA port to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Make sure SStatus of @ap reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because, after hot unplugging,
 *	DET gets stuck at 1 on some controllers, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = params[1] * HZ / 1000;
	unsigned long timeout = jiffies + params[2] * HZ / 1000;
	unsigned long last_jiffies;
	u32 last, cur;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field matters here */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 only counts as stable once timeout passed */
			if (cur == 1 && time_before(jiffies, timeout))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* check timeout */
		if (time_after(jiffies, timeout))
			return -EBUSY;
	}
}
2539
/**
 *	sata_phy_resume - resume SATA phy
 *	@ap: ATA port to resume SATA phy for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Resume SATA phy of @ap and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	/* preserve the SPD field (bits 4-7), set the rest to 0x300 */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_phy_debounce(ap, params);
}
2573
/*
 * ata_wait_spinup - give a (hot)plugged device time to spin up
 * @ap: port the device is attached to
 *
 * Debounce the phy if SATA, then sleep until ATA_SPINUP_WAIT msecs
 * have elapsed since the hotplug timestamp recorded in the EH context.
 */
static void ata_wait_spinup(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	unsigned long end, secs;
	int rc;

	/* first, debounce phy if SATA */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_debounce(ap, sata_deb_timing_eh);

		/* if debounced successfully and offline, no need to wait */
		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
			return;
	}

	/* okay, let's give the drive time to spin up */
	end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
	secs = ((end - jiffies) + HZ - 1) / HZ;	/* round up to whole seconds */

	if (time_after(jiffies, end))
		return;

	if (secs > 5)
		ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
				"(%lu secs)\n", secs);

	schedule_timeout_uninterruptible(end - jiffies);
}
2602
/**
 *	ata_std_prereset - prepare for reset
 *	@ap: ATA port to be reset
 *
 *	@ap is about to be reset.  Initialize it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing;
	int rc;

	/* hotplug? */
	if (ehc->i.flags & ATA_EHI_HOTPLUGGED) {
		if (ap->flags & ATA_FLAG_HRST_TO_RESUME)
			ehc->i.action |= ATA_EH_HARDRESET;
		if (ap->flags & ATA_FLAG_SKIP_D2H_BSY)
			ata_wait_spinup(ap);
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume phy */
	if (ap->cbl == ATA_CBL_SATA) {
		/* presumably boot-time probing gets the more tolerant
		 * debounce timings -- confirm against the table defs */
		if (ap->flags & ATA_FLAG_LOADING)
			timing = sata_deb_timing_boot;
		else
			timing = sata_deb_timing_eh;

		rc = sata_phy_resume(ap, timing);
		if (rc && rc != -EOPNOTSUPP) {
			/* phy resume failed */
			ata_port_printk(ap, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
			return rc;
		}
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	return 0;
}
2657
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@ap: port to reset
 *	@classes: resulting classes of attached devices
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	DPRINTK("ENTER\n");

	/* link down -- nothing to reset */
	if (ata_port_offline(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
	if (err_mask) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
2711
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@ap: port to reset
 *	@class: resulting class of attached device
 *
 *	SATA phy-reset host port using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(ap)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
			return rc;

		scontrol = (scontrol & 0x0f0) | 0x302;	/* DET = disable */

		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
			return rc;

		sata_set_spd(ap);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	scontrol = (scontrol & 0x0f0) | 0x301;	/* DET = COMRESET */

	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring phy back */
	sata_phy_resume(ap, sata_deb_timing_eh);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (device not ready)\n");
		return -EIO;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
2786
/**
 *	ata_std_postreset - standard postreset callback
 *	@ap: the target ata_port
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError */
	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);

	/* re-enable interrupts */
	if (!ap->ops->error_handler) {
		/* FIXME: hack. create a hook instead */
		if (ap->ioaddr.ctl_addr)
			ata_irq_on(ap);
	}

	/* is double-select really necessary?
	 * NOTE(review): the class-index / device-number pairing here is
	 * the opposite of ata_bus_reset() (classes[0] triggers a select
	 * of device 1).  Looks suspicious -- confirm before changing.
	 */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr) {
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		else
			outb(ap->ctl, ap->ioaddr.ctl_addr);
	}

	DPRINTK("EXIT\n");
}
2841
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.  Class, model string, serial string and (for ATA
 *	disks) sector count must all match.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][41], serial[2][21];	/* [0]=old, [1]=new */
	u64 new_n_sectors;

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
	new_n_sectors = ata_id_n_sectors(new_id);

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)dev->n_sectors,
			       (unsigned long long)new_n_sectors);
		return 0;
	}

	return 1;
}
2899
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@post_reset: is this revalidation after reset?
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, int post_reset)
{
	unsigned int class = dev->class;
	u16 *id = (void *)dev->ap->sector_buf;	/* scratch buffer for new ID */
	int rc;

	if (!ata_dev_enabled(dev)) {
		rc = -ENODEV;
		goto fail;
	}

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, post_reset, id);
	if (rc)
		goto fail;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id)) {
		rc = -ENODEV;
		goto fail;
	}

	/* commit the new ID only after the identity check passed */
	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev, 0);
	if (rc == 0)
		return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
2947
/* Devices that must not use DMA.  The table holds { model, revision }
 * pairs; a NULL revision blacklists every firmware revision of that
 * model.  Consumed by ata_dma_blacklisted().
 */
static const char * const ata_dma_blacklist [] = {
	"WDC AC11000H", NULL,
	"WDC AC22100H", NULL,
	"WDC AC32500H", NULL,
	"WDC AC33100H", NULL,
	"WDC AC31600H", NULL,
	"WDC AC32100H", "24.09P07",
	"WDC AC23200L", "21.10N21",
	"Compaq CRD-8241B",  NULL,
	"CRD-8400B", NULL,
	"CRD-8480B", NULL,
	"CRD-8482B", NULL,
	"CRD-84", NULL,
	"SanDisk SDP3B", NULL,
	"SanDisk SDP3B-64", NULL,
	"SANYO CD-ROM CRD", NULL,
	"HITACHI CDR-8", NULL,
	"HITACHI CDR-8335", NULL,
	"HITACHI CDR-8435", NULL,
	"Toshiba CD-ROM XM-6202B", NULL,
	"TOSHIBA CD-ROM XM-1702BC", NULL,
	"CD-532E-A", NULL,
	"E-IDE CD-ROM CR-840", NULL,
	"CD-ROM Drive/F5A", NULL,
	"WPI CDD-820", NULL,
	"SAMSUNG CD-ROM SC-148C", NULL,
	"SAMSUNG CD-ROM SC", NULL,
	"SanDisk SDP3B-64", NULL,
	"ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
	"_NEC DV5800A", NULL,
	"SAMSUNG CD-ROM SN-124", "N001"
};
2e9edbf8 2980
/*
 * ata_strim - trim trailing blanks from an IDENTIFY string in place
 * @s: buffer to trim (need not be NUL-terminated; at most @len bytes read)
 * @len: buffer capacity
 *
 * ATAPI specifies that empty space is blank-filled; strip the blanks
 * and NUL-terminate.  Returns the trimmed length.
 */
static int ata_strim(char *s, size_t len)
{
	size_t n = strnlen(s, len);

	while (n && s[n - 1] == ' ')
		s[--n] = '\0';

	return n;
}
1da177e4 2992
057ace5e 2993static int ata_dma_blacklisted(const struct ata_device *dev)
1da177e4 2994{
f4b15fef
AC
2995 unsigned char model_num[40];
2996 unsigned char model_rev[16];
2997 unsigned int nlen, rlen;
1da177e4
LT
2998 int i;
2999
3a778275
AL
3000 /* We don't support polling DMA.
3001 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3002 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3003 */
3004 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3005 (dev->flags & ATA_DFLAG_CDB_INTR))
3006 return 1;
3007
f4b15fef
AC
3008 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3009 sizeof(model_num));
3010 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3011 sizeof(model_rev));
3012 nlen = ata_strim(model_num, sizeof(model_num));
3013 rlen = ata_strim(model_rev, sizeof(model_rev));
1da177e4 3014
f4b15fef
AC
3015 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
3016 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
3017 if (ata_dma_blacklist[i+1] == NULL)
3018 return 1;
3019 if (!strncmp(ata_dma_blacklist[i], model_rev, rlen))
3020 return 1;
3021 }
3022 }
1da177e4
LT
3023 return 0;
3024}
3025
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	FIXME: The current implementation limits all transfer modes to
 *	the fastest of the lowest device on the port.  This is not
 *	required on most controllers.
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_host_set *hs = ap->host_set;
	unsigned long xfer_mask;
	int i;

	/* start from what the host controller can do */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 */
	if (ap->cbl == ATA_CBL_PATA40)
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);	/* cap at UDMA/33 */

	/* FIXME: Use port-wide xfermask for now */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *d = &ap->device[i];

		if (ata_dev_absent(d))
			continue;

		if (ata_dev_disabled(d)) {
			/* to avoid violating device selection timing */
			xfer_mask &= ata_pack_xfermask(d->pio_mask,
						       UINT_MAX, UINT_MAX);
			continue;
		}

		xfer_mask &= ata_pack_xfermask(d->pio_mask,
					       d->mwdma_mask, d->udma_mask);
		xfer_mask &= ata_id_xfermask(d->id);
		if (ata_dma_blacklisted(d))
			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
	}

	if (ata_dma_blacklisted(dev))
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");

	/* simplex hosts: only one channel may do DMA at a time */
	if (hs->flags & ATA_HOST_SIMPLEX) {
		if (hs->simplex_claimed)
			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
	}

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
3094
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = dev->xfer_mode;	/* mode value goes in sector count */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
3129
8bf62ece
AL
3130/**
3131 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3132 * @dev: Device to which command will be sent
e2a7f77a
RD
3133 * @heads: Number of heads (taskfile parameter)
3134 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3135 *
3136 * LOCKING:
6aff8f1f
TH
3137 * Kernel thread context (may sleep)
3138 *
3139 * RETURNS:
3140 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3141 */
3373efd8
TH
3142static unsigned int ata_dev_init_params(struct ata_device *dev,
3143 u16 heads, u16 sectors)
8bf62ece 3144{
a0123703 3145 struct ata_taskfile tf;
6aff8f1f 3146 unsigned int err_mask;
8bf62ece
AL
3147
3148 /* Number of sectors per track 1-255. Number of heads 1-16 */
3149 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3150 return AC_ERR_INVALID;
8bf62ece
AL
3151
3152 /* set up init dev params taskfile */
3153 DPRINTK("init dev params \n");
3154
3373efd8 3155 ata_tf_init(dev, &tf);
a0123703
TH
3156 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3157 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3158 tf.protocol = ATA_PROT_NODATA;
3159 tf.nsect = sectors;
3160 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3161
3373efd8 3162 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3163
6aff8f1f
TH
3164 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3165 return err_mask;
8bf62ece
AL
3166}
3167
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *	If the transfer was padded to a 32-bit boundary and data
 *	came from the device, the pad-buffer contents are copied
 *	back into the caller's buffer, and the trimmed sg entry
 *	length is restored.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_sg_clean(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct scatterlist *sg = qc->__sg;
        int dir = qc->dma_dir;
        void *pad_buf = NULL;

        /* only DMA-mapped commands with a valid sg table reach here */
        WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
        WARN_ON(sg == NULL);

        if (qc->flags & ATA_QCFLAG_SINGLE)
                WARN_ON(qc->n_elem > 1);

        VPRINTK("unmapping %u sg elements\n", qc->n_elem);

        /* if we padded the buffer out to 32-bit bound, and data
         * xfer direction is from-device, we must copy from the
         * pad buffer back into the supplied buffer
         */
        if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
                pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

        if (qc->flags & ATA_QCFLAG_SG) {
                if (qc->n_elem)
                        dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
                /* restore last sg (trimmed during ata_sg_setup) */
                sg[qc->orig_n_elem - 1].length += qc->pad_len;
                if (pad_buf) {
                        struct scatterlist *psg = &qc->pad_sgent;
                        void *addr = kmap_atomic(psg->page, KM_IRQ0);
                        memcpy(addr + psg->offset, pad_buf, qc->pad_len);
                        kunmap_atomic(addr, KM_IRQ0);
                }
        } else {
                if (qc->n_elem)
                        dma_unmap_single(ap->dev,
                                sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
                                dir);
                /* restore sg (trimmed during ata_sg_setup_one) */
                sg->length += qc->pad_len;
                if (pad_buf)
                        memcpy(qc->buf_virt + sg->length - qc->pad_len,
                               pad_buf, qc->pad_len);
        }

        qc->flags &= ~ATA_QCFLAG_DMAMAP;
        qc->__sg = NULL;
}
3226
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.  Segments whose
 *	physical address range would cross a 64KB boundary are split
 *	into multiple PRD entries, since the hardware cannot DMA
 *	across one.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct scatterlist *sg;
        unsigned int idx;

        WARN_ON(qc->__sg == NULL);
        WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

        idx = 0;
        ata_for_each_sg(sg, qc) {
                u32 addr, offset;
                u32 sg_len, len;

                /* determine if physical DMA addr spans 64K boundary.
                 * Note h/w doesn't support 64-bit, so we unconditionally
                 * truncate dma_addr_t to u32.
                 */
                addr = (u32) sg_dma_address(sg);
                sg_len = sg_dma_len(sg);

                while (sg_len) {
                        offset = addr & 0xffff;
                        len = sg_len;
                        if ((offset + sg_len) > 0x10000)
                                len = 0x10000 - offset;

                        ap->prd[idx].addr = cpu_to_le32(addr);
                        ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
                        VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

                        idx++;
                        sg_len -= len;
                        addr += len;
                }
        }

        /* mark the final PRD entry as end-of-table */
        if (idx)
                ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
3278/**
3279 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3280 * @qc: Metadata associated with taskfile to check
3281 *
780a87f7
JG
3282 * Allow low-level driver to filter ATA PACKET commands, returning
3283 * a status indicating whether or not it is OK to use DMA for the
3284 * supplied PACKET command.
3285 *
1da177e4 3286 * LOCKING:
0cba632b
JG
3287 * spin_lock_irqsave(host_set lock)
3288 *
1da177e4
LT
3289 * RETURNS: 0 when ATAPI DMA can be used
3290 * nonzero otherwise
3291 */
3292int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3293{
3294 struct ata_port *ap = qc->ap;
3295 int rc = 0; /* Assume ATAPI DMA is OK by default */
3296
3297 if (ap->ops->check_atapi_dma)
3298 rc = ap->ops->check_atapi_dma(qc);
3299
3300 return rc;
3301}
3302/**
3303 * ata_qc_prep - Prepare taskfile for submission
3304 * @qc: Metadata associated with taskfile to be prepared
3305 *
780a87f7
JG
3306 * Prepare ATA taskfile for submission.
3307 *
1da177e4
LT
3308 * LOCKING:
3309 * spin_lock_irqsave(host_set lock)
3310 */
3311void ata_qc_prep(struct ata_queued_cmd *qc)
3312{
3313 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3314 return;
3315
3316 ata_fill_sg(qc);
3317}
3318
/* No-op ->qc_prep for controllers that need no PRD/setup work. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
}
3320
/**
 *	ata_sg_init_one - Associate command with memory buffer
 *	@qc: Command to be associated
 *	@buf: Memory buffer
 *	@buflen: Length of memory buffer, in bytes.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a single memory buffer, @buf of byte length @buflen.
 *	The command's embedded single-entry sg table (qc->sgent) is
 *	used, so no external scatterlist is needed.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
{
        struct scatterlist *sg;

        qc->flags |= ATA_QCFLAG_SINGLE;

        /* point __sg at the embedded one-entry table */
        memset(&qc->sgent, 0, sizeof(qc->sgent));
        qc->__sg = &qc->sgent;
        qc->n_elem = 1;
        qc->orig_n_elem = 1;
        qc->buf_virt = buf;     /* kept for pad-buffer copy in ata_sg_clean */
        qc->nbytes = buflen;

        sg = qc->__sg;
        sg_init_one(sg, buf, buflen);
}
3350
0cba632b
JG
3351/**
3352 * ata_sg_init - Associate command with scatter-gather table.
3353 * @qc: Command to be associated
3354 * @sg: Scatter-gather table.
3355 * @n_elem: Number of elements in s/g table.
3356 *
3357 * Initialize the data-related elements of queued_cmd @qc
3358 * to point to a scatter-gather table @sg, containing @n_elem
3359 * elements.
3360 *
3361 * LOCKING:
3362 * spin_lock_irqsave(host_set lock)
3363 */
3364
1da177e4
LT
3365void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3366 unsigned int n_elem)
3367{
3368 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3369 qc->__sg = sg;
1da177e4 3370 qc->n_elem = n_elem;
cedc9a47 3371 qc->orig_n_elem = n_elem;
1da177e4
LT
3372}
3373
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *	If the buffer length is not a multiple of 4, the transfer is
 *	lengthened to a 32-bit boundary via the per-tag pad buffer
 *	(ATAPI devices only); the original sg entry is trimmed and
 *	restored later by ata_sg_clean().
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        int dir = qc->dma_dir;
        struct scatterlist *sg = qc->__sg;
        dma_addr_t dma_address;
        int trim_sg = 0;

        /* we must lengthen transfers to end on a 32-bit boundary */
        qc->pad_len = sg->length & 3;
        if (qc->pad_len) {
                void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
                struct scatterlist *psg = &qc->pad_sgent;

                /* only ATAPI transfers may need padding */
                WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

                memset(pad_buf, 0, ATA_DMA_PAD_SZ);

                /* for writes, preload the pad buffer with the tail bytes */
                if (qc->tf.flags & ATA_TFLAG_WRITE)
                        memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
                               qc->pad_len);

                sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
                sg_dma_len(psg) = ATA_DMA_PAD_SZ;
                /* trim sg */
                sg->length -= qc->pad_len;
                if (sg->length == 0)
                        trim_sg = 1;

                DPRINTK("padding done, sg->length=%u pad_len=%u\n",
                        sg->length, qc->pad_len);
        }

        /* entire transfer fit in the pad buffer; nothing left to map */
        if (trim_sg) {
                qc->n_elem--;
                goto skip_map;
        }

        dma_address = dma_map_single(ap->dev, qc->buf_virt,
                                     sg->length, dir);
        if (dma_mapping_error(dma_address)) {
                /* restore sg so ata_sg_clean() sees the original length */
                sg->length += qc->pad_len;
                return -1;
        }

        sg_dma_address(sg) = dma_address;
        sg_dma_len(sg) = sg->length;

skip_map:
        DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
                qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

        return 0;
}
3442
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *	If the total length is not a multiple of 4, the last sg entry
 *	is trimmed and the tail bytes are routed through the per-tag
 *	pad buffer (ATAPI devices only); ata_sg_clean() undoes the trim.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct scatterlist *sg = qc->__sg;
        struct scatterlist *lsg = &sg[qc->n_elem - 1];
        int n_elem, pre_n_elem, dir, trim_sg = 0;

        VPRINTK("ENTER, ata%u\n", ap->id);
        WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

        /* we must lengthen transfers to end on a 32-bit boundary */
        qc->pad_len = lsg->length & 3;
        if (qc->pad_len) {
                void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
                struct scatterlist *psg = &qc->pad_sgent;
                unsigned int offset;

                /* only ATAPI transfers may need padding */
                WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

                memset(pad_buf, 0, ATA_DMA_PAD_SZ);

                /*
                 * psg->page/offset are used to copy to-be-written
                 * data in this function or read data in ata_sg_clean.
                 */
                offset = lsg->offset + lsg->length - qc->pad_len;
                psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
                psg->offset = offset_in_page(offset);

                /* for writes, preload the pad buffer with the tail bytes */
                if (qc->tf.flags & ATA_TFLAG_WRITE) {
                        void *addr = kmap_atomic(psg->page, KM_IRQ0);
                        memcpy(pad_buf, addr + psg->offset, qc->pad_len);
                        kunmap_atomic(addr, KM_IRQ0);
                }

                sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
                sg_dma_len(psg) = ATA_DMA_PAD_SZ;
                /* trim last sg */
                lsg->length -= qc->pad_len;
                if (lsg->length == 0)
                        trim_sg = 1;

                DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
                        qc->n_elem - 1, lsg->length, qc->pad_len);
        }

        /* drop the fully-trimmed last entry from the mapping */
        pre_n_elem = qc->n_elem;
        if (trim_sg && pre_n_elem)
                pre_n_elem--;

        if (!pre_n_elem) {
                n_elem = 0;
                goto skip_map;
        }

        dir = qc->dma_dir;
        n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
        if (n_elem < 1) {
                /* restore last sg so ata_sg_clean() sees original length */
                lsg->length += qc->pad_len;
                return -1;
        }

        DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
        qc->n_elem = n_elem;

        return 0;
}
3527
0baab86b 3528/**
c893a3ae 3529 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
3530 * @buf: Buffer to swap
3531 * @buf_words: Number of 16-bit words in buffer.
3532 *
3533 * Swap halves of 16-bit words if needed to convert from
3534 * little-endian byte order to native cpu byte order, or
3535 * vice-versa.
3536 *
3537 * LOCKING:
6f0ef4fa 3538 * Inherited from caller.
0baab86b 3539 */
1da177e4
LT
3540void swap_buf_le16(u16 *buf, unsigned int buf_words)
3541{
3542#ifdef __BIG_ENDIAN
3543 unsigned int i;
3544
3545 for (i = 0; i < buf_words; i++)
3546 buf[i] = le16_to_cpu(buf[i]);
3547#endif /* __BIG_ENDIAN */
3548}
3549
/**
 *	ata_mmio_data_xfer - Transfer data by MMIO
 *	@adev: device for this I/O
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by MMIO,
 *	16 bits at a time; an odd trailing byte is handled via a
 *	zero-padded bounce word.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
			unsigned int buflen, int write_data)
{
        struct ata_port *ap = adev->ap;
        unsigned int i;
        unsigned int words = buflen >> 1;
        u16 *buf16 = (u16 *) buf;
        void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;

        /* Transfer multiple of 2 bytes */
        if (write_data) {
                for (i = 0; i < words; i++)
                        writew(le16_to_cpu(buf16[i]), mmio);
        } else {
                for (i = 0; i < words; i++)
                        buf16[i] = cpu_to_le16(readw(mmio));
        }

        /* Transfer trailing 1 byte, if any. */
        if (unlikely(buflen & 0x01)) {
                u16 align_buf[1] = { 0 };
                unsigned char *trailing_buf = buf + buflen - 1;

                if (write_data) {
                        /* pad the last byte out to a full 16-bit word */
                        memcpy(align_buf, trailing_buf, 1);
                        writew(le16_to_cpu(align_buf[0]), mmio);
                } else {
                        /* read a full word, keep only the low byte */
                        align_buf[0] = cpu_to_le16(readw(mmio));
                        memcpy(trailing_buf, align_buf, 1);
                }
        }
}
3595
/**
 *	ata_pio_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO
 *	(port I/O), 16 bits at a time; an odd trailing byte is
 *	handled via a zero-padded bounce word.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
		       unsigned int buflen, int write_data)
{
        struct ata_port *ap = adev->ap;
        unsigned int words = buflen >> 1;

        /* Transfer multiple of 2 bytes */
        if (write_data)
                outsw(ap->ioaddr.data_addr, buf, words);
        else
                insw(ap->ioaddr.data_addr, buf, words);

        /* Transfer trailing 1 byte, if any. */
        if (unlikely(buflen & 0x01)) {
                u16 align_buf[1] = { 0 };
                unsigned char *trailing_buf = buf + buflen - 1;

                if (write_data) {
                        /* pad the last byte out to a full 16-bit word */
                        memcpy(align_buf, trailing_buf, 1);
                        outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
                } else {
                        /* read a full word, keep only the low byte */
                        align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
                        memcpy(trailing_buf, align_buf, 1);
                }
        }
}
3635
/**
 *	ata_pio_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.  Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			     unsigned int buflen, int write_data)
{
        unsigned long flags;
        /* keep local interrupts off for the duration of the transfer */
        local_irq_save(flags);
        ata_pio_data_xfer(adev, buf, buflen, write_data);
        local_irq_restore(flags);
}
3658
3659
/**
 *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
 *	@qc: Command on going
 *
 *	Transfer ATA_SECT_SIZE of data from/to the ATA device,
 *	advancing the command's sg cursor.  Highmem pages are
 *	kmapped with interrupts disabled for the duration of the
 *	transfer.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
        int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
        struct scatterlist *sg = qc->__sg;
        struct ata_port *ap = qc->ap;
        struct page *page;
        unsigned int offset;
        unsigned char *buf;

        /* last sector of the command -> move HSM to final state */
        if (qc->cursect == (qc->nsect - 1))
                ap->hsm_task_state = HSM_ST_LAST;

        page = sg[qc->cursg].page;
        offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;

        /* get the current page and offset */
        page = nth_page(page, (offset >> PAGE_SHIFT));
        offset %= PAGE_SIZE;

        DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

        if (PageHighMem(page)) {
                unsigned long flags;

                /* FIXME: use a bounce buffer */
                local_irq_save(flags);
                buf = kmap_atomic(page, KM_IRQ0);

                /* do the actual data transfer */
                ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);

                kunmap_atomic(buf, KM_IRQ0);
                local_irq_restore(flags);
        } else {
                buf = page_address(page);
                ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
        }

        qc->cursect++;
        qc->cursg_ofs++;

        /* advance to the next sg entry once this one is consumed */
        if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
                qc->cursg++;
                qc->cursg_ofs = 0;
        }
}
1da177e4 3716
07f6f7d0
AL
3717/**
3718 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3719 * @qc: Command on going
3720 *
c81e29b4 3721 * Transfer one or many ATA_SECT_SIZE of data from/to the
07f6f7d0
AL
3722 * ATA device for the DRQ request.
3723 *
3724 * LOCKING:
3725 * Inherited from caller.
3726 */
1da177e4 3727
07f6f7d0
AL
3728static void ata_pio_sectors(struct ata_queued_cmd *qc)
3729{
3730 if (is_multi_taskfile(&qc->tf)) {
3731 /* READ/WRITE MULTIPLE */
3732 unsigned int nsect;
3733
587005de 3734 WARN_ON(qc->dev->multi_count == 0);
1da177e4 3735
07f6f7d0
AL
3736 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3737 while (nsect--)
3738 ata_pio_sector(qc);
3739 } else
3740 ata_pio_sector(qc);
3741}
3742
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB, then set
 *	the next HSM state according to the command protocol
 *	(starting bmdma for ATAPI DMA commands).
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
        /* send SCSI cdb */
        DPRINTK("send cdb\n");
        WARN_ON(qc->dev->cdb_len < 12);

        ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
        ata_altstatus(ap); /* flush */

        switch (qc->tf.protocol) {
        case ATA_PROT_ATAPI:
                /* PIO data phase follows */
                ap->hsm_task_state = HSM_ST;
                break;
        case ATA_PROT_ATAPI_NODATA:
                /* no data phase; wait for completion */
                ap->hsm_task_state = HSM_ST_LAST;
                break;
        case ATA_PROT_ATAPI_DMA:
                ap->hsm_task_state = HSM_ST_LAST;
                /* initiate bmdma */
                ap->ops->bmdma_start(qc);
                break;
        }
}
3778
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device, walking the sg table
 *	page by page.  If the device requests more data than the sg
 *	table holds, the excess is discarded (reads) or zero-padded
 *	(writes) to honor the byte count register.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
        int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
        struct scatterlist *sg = qc->__sg;
        struct ata_port *ap = qc->ap;
        struct page *page;
        unsigned char *buf;
        unsigned int offset, count;

        /* this chunk completes the command -> final HSM state */
        if (qc->curbytes + bytes >= qc->nbytes)
                ap->hsm_task_state = HSM_ST_LAST;

next_sg:
        if (unlikely(qc->cursg >= qc->n_elem)) {
                /*
                 * The end of qc->sg is reached and the device expects
                 * more data to transfer. In order not to overrun qc->sg
                 * and fulfill length specified in the byte count register,
                 * - for read case, discard trailing data from the device
                 * - for write case, padding zero data to the device
                 */
                u16 pad_buf[1] = { 0 };
                unsigned int words = bytes >> 1;
                unsigned int i;

                if (words) /* warning if bytes > 1 */
                        ata_dev_printk(qc->dev, KERN_WARNING,
                                       "%u bytes trailing data\n", bytes);

                for (i = 0; i < words; i++)
                        ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

                ap->hsm_task_state = HSM_ST_LAST;
                return;
        }

        sg = &qc->__sg[qc->cursg];

        page = sg->page;
        offset = sg->offset + qc->cursg_ofs;

        /* get the current page and offset */
        page = nth_page(page, (offset >> PAGE_SHIFT));
        offset %= PAGE_SIZE;

        /* don't overrun current sg */
        count = min(sg->length - qc->cursg_ofs, bytes);

        /* don't cross page boundaries */
        count = min(count, (unsigned int)PAGE_SIZE - offset);

        DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

        if (PageHighMem(page)) {
                unsigned long flags;

                /* FIXME: use bounce buffer */
                local_irq_save(flags);
                buf = kmap_atomic(page, KM_IRQ0);

                /* do the actual data transfer */
                ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

                kunmap_atomic(buf, KM_IRQ0);
                local_irq_restore(flags);
        } else {
                buf = page_address(page);
                ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
        }

        bytes -= count;
        qc->curbytes += count;
        qc->cursg_ofs += count;

        /* advance to the next sg entry once this one is consumed */
        if (qc->cursg_ofs == sg->length) {
                qc->cursg++;
                qc->cursg_ofs = 0;
        }

        if (bytes)
                goto next_sg;
}
3873
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Read the byte count and interrupt reason registers from the
 *	device, validate the transfer direction, then transfer that
 *	many bytes from/to the ATAPI device.  A direction mismatch
 *	or a non-data interrupt reason is flagged as an HSM error.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct ata_device *dev = qc->dev;
        unsigned int ireason, bc_lo, bc_hi, bytes;
        int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

        /* Abuse qc->result_tf for temp storage of intermediate TF
         * here to save some kernel stack usage.
         * For normal completion, qc->result_tf is not relevant. For
         * error, qc->result_tf is later overwritten by ata_qc_complete().
         * So, the correctness of qc->result_tf is not affected.
         */
        ap->ops->tf_read(ap, &qc->result_tf);
        ireason = qc->result_tf.nsect;
        bc_lo = qc->result_tf.lbam;
        bc_hi = qc->result_tf.lbah;
        bytes = (bc_hi << 8) | bc_lo;

        /* shall be cleared to zero, indicating xfer of data */
        if (ireason & (1 << 0))
                goto err_out;

        /* make sure transfer direction matches expected */
        i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
        if (do_write != i_write)
                goto err_out;

        VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);

        __atapi_pio_bytes(qc, bytes);

        return;

err_out:
        ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
        qc->err_mask |= AC_ERR_HSM;
        ap->hsm_task_state = HSM_ST_ERR;
}
3923
3924/**
c234fb00
AL
3925 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3926 * @ap: the target ata_port
3927 * @qc: qc on going
1da177e4 3928 *
c234fb00
AL
3929 * RETURNS:
3930 * 1 if ok in workqueue, 0 otherwise.
1da177e4 3931 */
c234fb00
AL
3932
3933static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 3934{
c234fb00
AL
3935 if (qc->tf.flags & ATA_TFLAG_POLLING)
3936 return 1;
1da177e4 3937
c234fb00
AL
3938 if (ap->hsm_task_state == HSM_ST_FIRST) {
3939 if (qc->tf.protocol == ATA_PROT_PIO &&
3940 (qc->tf.flags & ATA_TFLAG_WRITE))
3941 return 1;
1da177e4 3942
c234fb00
AL
3943 if (is_atapi_taskfile(&qc->tf) &&
3944 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3945 return 1;
fe79e683
AL
3946 }
3947
c234fb00
AL
3948 return 0;
3949}
1da177e4 3950
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.  When EH is in
 *	use, an HSM-violation error freezes the port instead of
 *	completing the command.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host_set lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
        struct ata_port *ap = qc->ap;
        unsigned long flags;

        if (ap->ops->error_handler) {
                if (in_wq) {
                        spin_lock_irqsave(ap->lock, flags);

                        /* EH might have kicked in while host_set lock
                         * is released.
                         */
                        qc = ata_qc_from_tag(ap, qc->tag);
                        if (qc) {
                                if (likely(!(qc->err_mask & AC_ERR_HSM))) {
                                        /* re-enable interrupts before completing */
                                        ata_irq_on(ap);
                                        ata_qc_complete(qc);
                                } else
                                        /* HSM violation: let EH take over */
                                        ata_port_freeze(ap);
                        }

                        spin_unlock_irqrestore(ap->lock, flags);
                } else {
                        if (likely(!(qc->err_mask & AC_ERR_HSM)))
                                ata_qc_complete(qc);
                        else
                                ata_port_freeze(ap);
                }
        } else {
                /* no EH: always complete, re-enabling irqs if from wq */
                if (in_wq) {
                        spin_lock_irqsave(ap->lock, flags);
                        ata_irq_on(ap);
                        ata_qc_complete(qc);
                        spin_unlock_irqrestore(ap->lock, flags);
                } else
                        ata_qc_complete(qc);
        }

        ata_altstatus(ap); /* flush */
}
4002
bb5cb290
AL
4003/**
4004 * ata_hsm_move - move the HSM to the next state.
4005 * @ap: the target ata_port
4006 * @qc: qc on going
4007 * @status: current device status
4008 * @in_wq: 1 if called from workqueue, 0 otherwise
4009 *
4010 * RETURNS:
4011 * 1 when poll next status needed, 0 otherwise.
4012 */
9a1004d0
TH
4013int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4014 u8 status, int in_wq)
e2cec771 4015{
bb5cb290
AL
4016 unsigned long flags = 0;
4017 int poll_next;
4018
6912ccd5
AL
4019 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4020
bb5cb290
AL
4021 /* Make sure ata_qc_issue_prot() does not throw things
4022 * like DMA polling into the workqueue. Notice that
4023 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4024 */
c234fb00 4025 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4026
e2cec771 4027fsm_start:
999bb6f4
AL
4028 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4029 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4030
e2cec771
AL
4031 switch (ap->hsm_task_state) {
4032 case HSM_ST_FIRST:
bb5cb290
AL
4033 /* Send first data block or PACKET CDB */
4034
4035 /* If polling, we will stay in the work queue after
4036 * sending the data. Otherwise, interrupt handler
4037 * takes over after sending the data.
4038 */
4039 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4040
e2cec771 4041 /* check device status */
3655d1d3
AL
4042 if (unlikely((status & ATA_DRQ) == 0)) {
4043 /* handle BSY=0, DRQ=0 as error */
4044 if (likely(status & (ATA_ERR | ATA_DF)))
4045 /* device stops HSM for abort/error */
4046 qc->err_mask |= AC_ERR_DEV;
4047 else
4048 /* HSM violation. Let EH handle this */
4049 qc->err_mask |= AC_ERR_HSM;
4050
14be71f4 4051 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4052 goto fsm_start;
1da177e4
LT
4053 }
4054
71601958
AL
4055 /* Device should not ask for data transfer (DRQ=1)
4056 * when it finds something wrong.
eee6c32f
AL
4057 * We ignore DRQ here and stop the HSM by
4058 * changing hsm_task_state to HSM_ST_ERR and
4059 * let the EH abort the command or reset the device.
71601958
AL
4060 */
4061 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4062 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4063 ap->id, status);
3655d1d3 4064 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4065 ap->hsm_task_state = HSM_ST_ERR;
4066 goto fsm_start;
71601958 4067 }
1da177e4 4068
bb5cb290
AL
4069 /* Send the CDB (atapi) or the first data block (ata pio out).
4070 * During the state transition, interrupt handler shouldn't
4071 * be invoked before the data transfer is complete and
4072 * hsm_task_state is changed. Hence, the following locking.
4073 */
4074 if (in_wq)
ba6a1308 4075 spin_lock_irqsave(ap->lock, flags);
1da177e4 4076
bb5cb290
AL
4077 if (qc->tf.protocol == ATA_PROT_PIO) {
4078 /* PIO data out protocol.
4079 * send first data block.
4080 */
0565c26d 4081
bb5cb290
AL
4082 /* ata_pio_sectors() might change the state
4083 * to HSM_ST_LAST. so, the state is changed here
4084 * before ata_pio_sectors().
4085 */
4086 ap->hsm_task_state = HSM_ST;
4087 ata_pio_sectors(qc);
4088 ata_altstatus(ap); /* flush */
4089 } else
4090 /* send CDB */
4091 atapi_send_cdb(ap, qc);
4092
4093 if (in_wq)
ba6a1308 4094 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4095
4096 /* if polling, ata_pio_task() handles the rest.
4097 * otherwise, interrupt handler takes over from here.
4098 */
e2cec771 4099 break;
1c848984 4100
e2cec771
AL
4101 case HSM_ST:
4102 /* complete command or read/write the data register */
4103 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4104 /* ATAPI PIO protocol */
4105 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4106 /* No more data to transfer or device error.
4107 * Device error will be tagged in HSM_ST_LAST.
4108 */
e2cec771
AL
4109 ap->hsm_task_state = HSM_ST_LAST;
4110 goto fsm_start;
4111 }
1da177e4 4112
71601958
AL
4113 /* Device should not ask for data transfer (DRQ=1)
4114 * when it finds something wrong.
eee6c32f
AL
4115 * We ignore DRQ here and stop the HSM by
4116 * changing hsm_task_state to HSM_ST_ERR and
4117 * let the EH abort the command or reset the device.
71601958
AL
4118 */
4119 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4120 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4121 ap->id, status);
3655d1d3 4122 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4123 ap->hsm_task_state = HSM_ST_ERR;
4124 goto fsm_start;
71601958 4125 }
1da177e4 4126
e2cec771 4127 atapi_pio_bytes(qc);
7fb6ec28 4128
e2cec771
AL
4129 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4130 /* bad ireason reported by device */
4131 goto fsm_start;
1da177e4 4132
e2cec771
AL
4133 } else {
4134 /* ATA PIO protocol */
4135 if (unlikely((status & ATA_DRQ) == 0)) {
4136 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4137 if (likely(status & (ATA_ERR | ATA_DF)))
4138 /* device stops HSM for abort/error */
4139 qc->err_mask |= AC_ERR_DEV;
4140 else
4141 /* HSM violation. Let EH handle this */
4142 qc->err_mask |= AC_ERR_HSM;
4143
e2cec771
AL
4144 ap->hsm_task_state = HSM_ST_ERR;
4145 goto fsm_start;
4146 }
1da177e4 4147
eee6c32f
AL
4148 /* For PIO reads, some devices may ask for
4149 * data transfer (DRQ=1) alone with ERR=1.
4150 * We respect DRQ here and transfer one
4151 * block of junk data before changing the
4152 * hsm_task_state to HSM_ST_ERR.
4153 *
4154 * For PIO writes, ERR=1 DRQ=1 doesn't make
4155 * sense since the data block has been
4156 * transferred to the device.
71601958
AL
4157 */
4158 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4159 /* data might be corrputed */
4160 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4161
4162 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4163 ata_pio_sectors(qc);
4164 ata_altstatus(ap);
4165 status = ata_wait_idle(ap);
4166 }
4167
3655d1d3
AL
4168 if (status & (ATA_BUSY | ATA_DRQ))
4169 qc->err_mask |= AC_ERR_HSM;
4170
eee6c32f
AL
4171 /* ata_pio_sectors() might change the
4172 * state to HSM_ST_LAST. so, the state
4173 * is changed after ata_pio_sectors().
4174 */
4175 ap->hsm_task_state = HSM_ST_ERR;
4176 goto fsm_start;
71601958
AL
4177 }
4178
e2cec771
AL
4179 ata_pio_sectors(qc);
4180
4181 if (ap->hsm_task_state == HSM_ST_LAST &&
4182 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4183 /* all data read */
4184 ata_altstatus(ap);
52a32205 4185 status = ata_wait_idle(ap);
e2cec771
AL
4186 goto fsm_start;
4187 }
4188 }
4189
4190 ata_altstatus(ap); /* flush */
bb5cb290 4191 poll_next = 1;
1da177e4
LT
4192 break;
4193
14be71f4 4194 case HSM_ST_LAST:
6912ccd5
AL
4195 if (unlikely(!ata_ok(status))) {
4196 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4197 ap->hsm_task_state = HSM_ST_ERR;
4198 goto fsm_start;
4199 }
4200
4201 /* no more data to transfer */
4332a771
AL
4202 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4203 ap->id, qc->dev->devno, status);
e2cec771 4204
6912ccd5
AL
4205 WARN_ON(qc->err_mask);
4206
e2cec771 4207 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 4208
e2cec771 4209 /* complete taskfile transaction */
c17ea20d 4210 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4211
4212 poll_next = 0;
1da177e4
LT
4213 break;
4214
14be71f4 4215 case HSM_ST_ERR:
e2cec771
AL
4216 /* make sure qc->err_mask is available to
4217 * know what's wrong and recover
4218 */
4219 WARN_ON(qc->err_mask == 0);
4220
4221 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 4222
999bb6f4 4223 /* complete taskfile transaction */
c17ea20d 4224 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4225
4226 poll_next = 0;
e2cec771
AL
4227 break;
4228 default:
bb5cb290 4229 poll_next = 0;
6912ccd5 4230 BUG();
1da177e4
LT
4231 }
4232
bb5cb290 4233 return poll_next;
1da177e4
LT
4234}
4235
1da177e4 4236static void ata_pio_task(void *_data)
8061f5f0 4237{
c91af2c8
TH
4238 struct ata_queued_cmd *qc = _data;
4239 struct ata_port *ap = qc->ap;
8061f5f0 4240 u8 status;
a1af3734 4241 int poll_next;
8061f5f0 4242
7fb6ec28 4243fsm_start:
a1af3734 4244 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 4245
a1af3734
AL
4246 /*
4247 * This is purely heuristic. This is a fast path.
4248 * Sometimes when we enter, BSY will be cleared in
4249 * a chk-status or two. If not, the drive is probably seeking
4250 * or something. Snooze for a couple msecs, then
4251 * chk-status again. If still busy, queue delayed work.
4252 */
4253 status = ata_busy_wait(ap, ATA_BUSY, 5);
4254 if (status & ATA_BUSY) {
4255 msleep(2);
4256 status = ata_busy_wait(ap, ATA_BUSY, 10);
4257 if (status & ATA_BUSY) {
31ce6dae 4258 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
4259 return;
4260 }
8061f5f0
TH
4261 }
4262
a1af3734
AL
4263 /* move the HSM */
4264 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 4265
a1af3734
AL
4266 /* another command or interrupt handler
4267 * may be running at this point.
4268 */
4269 if (poll_next)
7fb6ec28 4270 goto fsm_start;
8061f5f0
TH
4271}
4272
1da177e4
LT
4273/**
4274 * ata_qc_new - Request an available ATA command, for queueing
4275 * @ap: Port associated with device @dev
4276 * @dev: Device from whom we request an available command structure
4277 *
4278 * LOCKING:
0cba632b 4279 * None.
1da177e4
LT
4280 */
4281
4282static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4283{
4284 struct ata_queued_cmd *qc = NULL;
4285 unsigned int i;
4286
e3180499
TH
4287 /* no command while frozen */
4288 if (unlikely(ap->flags & ATA_FLAG_FROZEN))
4289 return NULL;
4290
2ab7db1f
TH
4291 /* the last tag is reserved for internal command. */
4292 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4293 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4294 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4295 break;
4296 }
4297
4298 if (qc)
4299 qc->tag = i;
4300
4301 return qc;
4302}
4303
4304/**
4305 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4306 * @dev: Device from whom we request an available command structure
4307 *
4308 * LOCKING:
0cba632b 4309 * None.
1da177e4
LT
4310 */
4311
3373efd8 4312struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4313{
3373efd8 4314 struct ata_port *ap = dev->ap;
1da177e4
LT
4315 struct ata_queued_cmd *qc;
4316
4317 qc = ata_qc_new(ap);
4318 if (qc) {
1da177e4
LT
4319 qc->scsicmd = NULL;
4320 qc->ap = ap;
4321 qc->dev = dev;
1da177e4 4322
2c13b7ce 4323 ata_qc_reinit(qc);
1da177e4
LT
4324 }
4325
4326 return qc;
4327}
4328
1da177e4
LT
4329/**
4330 * ata_qc_free - free unused ata_queued_cmd
4331 * @qc: Command to complete
4332 *
4333 * Designed to free unused ata_queued_cmd object
4334 * in case something prevents using it.
4335 *
4336 * LOCKING:
0cba632b 4337 * spin_lock_irqsave(host_set lock)
1da177e4
LT
4338 */
4339void ata_qc_free(struct ata_queued_cmd *qc)
4340{
4ba946e9
TH
4341 struct ata_port *ap = qc->ap;
4342 unsigned int tag;
4343
a4631474 4344 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4345
4ba946e9
TH
4346 qc->flags = 0;
4347 tag = qc->tag;
4348 if (likely(ata_tag_valid(tag))) {
4ba946e9 4349 qc->tag = ATA_TAG_POISON;
6cec4a39 4350 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4351 }
1da177e4
LT
4352}
4353
/**
 *	__ata_qc_complete - internal qc completion
 *	@qc: Command to complete
 *
 *	Tear down a finished qc: unmap DMA, clear active-tag/qc_active
 *	bookkeeping and invoke the completion callback.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ap->sactive &= ~(1 << qc->tag);
	else
		ap->active_tag = ATA_TAG_POISON;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4380
f686bcb8
TH
4381/**
4382 * ata_qc_complete - Complete an active ATA command
4383 * @qc: Command to complete
4384 * @err_mask: ATA Status register contents
4385 *
4386 * Indicate to the mid and upper layers that an ATA
4387 * command has completed, with either an ok or not-ok status.
4388 *
4389 * LOCKING:
4390 * spin_lock_irqsave(host_set lock)
4391 */
4392void ata_qc_complete(struct ata_queued_cmd *qc)
4393{
4394 struct ata_port *ap = qc->ap;
4395
4396 /* XXX: New EH and old EH use different mechanisms to
4397 * synchronize EH with regular execution path.
4398 *
4399 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4400 * Normal execution path is responsible for not accessing a
4401 * failed qc. libata core enforces the rule by returning NULL
4402 * from ata_qc_from_tag() for failed qcs.
4403 *
4404 * Old EH depends on ata_qc_complete() nullifying completion
4405 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4406 * not synchronize with interrupt handler. Only PIO task is
4407 * taken care of.
4408 */
4409 if (ap->ops->error_handler) {
4410 WARN_ON(ap->flags & ATA_FLAG_FROZEN);
4411
4412 if (unlikely(qc->err_mask))
4413 qc->flags |= ATA_QCFLAG_FAILED;
4414
4415 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4416 if (!ata_tag_internal(qc->tag)) {
4417 /* always fill result TF for failed qc */
4418 ap->ops->tf_read(ap, &qc->result_tf);
4419 ata_qc_schedule_eh(qc);
4420 return;
4421 }
4422 }
4423
4424 /* read result TF if requested */
4425 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4426 ap->ops->tf_read(ap, &qc->result_tf);
4427
4428 __ata_qc_complete(qc);
4429 } else {
4430 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4431 return;
4432
4433 /* read result TF if failed or requested */
4434 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4435 ap->ops->tf_read(ap, &qc->result_tf);
4436
4437 __ata_qc_complete(qc);
4438 }
4439}
4440
dedaf2b0
TH
4441/**
4442 * ata_qc_complete_multiple - Complete multiple qcs successfully
4443 * @ap: port in question
4444 * @qc_active: new qc_active mask
4445 * @finish_qc: LLDD callback invoked before completing a qc
4446 *
4447 * Complete in-flight commands. This functions is meant to be
4448 * called from low-level driver's interrupt routine to complete
4449 * requests normally. ap->qc_active and @qc_active is compared
4450 * and commands are completed accordingly.
4451 *
4452 * LOCKING:
4453 * spin_lock_irqsave(host_set lock)
4454 *
4455 * RETURNS:
4456 * Number of completed commands on success, -errno otherwise.
4457 */
4458int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4459 void (*finish_qc)(struct ata_queued_cmd *))
4460{
4461 int nr_done = 0;
4462 u32 done_mask;
4463 int i;
4464
4465 done_mask = ap->qc_active ^ qc_active;
4466
4467 if (unlikely(done_mask & qc_active)) {
4468 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4469 "(%08x->%08x)\n", ap->qc_active, qc_active);
4470 return -EINVAL;
4471 }
4472
4473 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4474 struct ata_queued_cmd *qc;
4475
4476 if (!(done_mask & (1 << i)))
4477 continue;
4478
4479 if ((qc = ata_qc_from_tag(ap, i))) {
4480 if (finish_qc)
4481 finish_qc(qc);
4482 ata_qc_complete(qc);
4483 nr_done++;
4484 }
4485 }
4486
4487 return nr_done;
4488}
4489
1da177e4
LT
4490static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4491{
4492 struct ata_port *ap = qc->ap;
4493
4494 switch (qc->tf.protocol) {
3dc1d881 4495 case ATA_PROT_NCQ:
1da177e4
LT
4496 case ATA_PROT_DMA:
4497 case ATA_PROT_ATAPI_DMA:
4498 return 1;
4499
4500 case ATA_PROT_ATAPI:
4501 case ATA_PROT_PIO:
1da177e4
LT
4502 if (ap->flags & ATA_FLAG_PIO_DMA)
4503 return 1;
4504
4505 /* fall through */
4506
4507 default:
4508 return 0;
4509 }
4510
4511 /* never reached */
4512}
4513
4514/**
4515 * ata_qc_issue - issue taskfile to device
4516 * @qc: command to issue to device
4517 *
4518 * Prepare an ATA command to submission to device.
4519 * This includes mapping the data into a DMA-able
4520 * area, filling in the S/G table, and finally
4521 * writing the taskfile to hardware, starting the command.
4522 *
4523 * LOCKING:
4524 * spin_lock_irqsave(host_set lock)
1da177e4 4525 */
8e0e694a 4526void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
4527{
4528 struct ata_port *ap = qc->ap;
4529
dedaf2b0
TH
4530 /* Make sure only one non-NCQ command is outstanding. The
4531 * check is skipped for old EH because it reuses active qc to
4532 * request ATAPI sense.
4533 */
4534 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4535
4536 if (qc->tf.protocol == ATA_PROT_NCQ) {
4537 WARN_ON(ap->sactive & (1 << qc->tag));
4538 ap->sactive |= 1 << qc->tag;
4539 } else {
4540 WARN_ON(ap->sactive);
4541 ap->active_tag = qc->tag;
4542 }
4543
e4a70e76 4544 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 4545 ap->qc_active |= 1 << qc->tag;
e4a70e76 4546
1da177e4
LT
4547 if (ata_should_dma_map(qc)) {
4548 if (qc->flags & ATA_QCFLAG_SG) {
4549 if (ata_sg_setup(qc))
8e436af9 4550 goto sg_err;
1da177e4
LT
4551 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4552 if (ata_sg_setup_one(qc))
8e436af9 4553 goto sg_err;
1da177e4
LT
4554 }
4555 } else {
4556 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4557 }
4558
4559 ap->ops->qc_prep(qc);
4560
8e0e694a
TH
4561 qc->err_mask |= ap->ops->qc_issue(qc);
4562 if (unlikely(qc->err_mask))
4563 goto err;
4564 return;
1da177e4 4565
8e436af9
TH
4566sg_err:
4567 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
4568 qc->err_mask |= AC_ERR_SYSTEM;
4569err:
4570 ata_qc_complete(qc);
1da177e4
LT
4571}
4572
4573/**
4574 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4575 * @qc: command to issue to device
4576 *
4577 * Using various libata functions and hooks, this function
4578 * starts an ATA command. ATA commands are grouped into
4579 * classes called "protocols", and issuing each type of protocol
4580 * is slightly different.
4581 *
0baab86b
EF
4582 * May be used as the qc_issue() entry in ata_port_operations.
4583 *
1da177e4
LT
4584 * LOCKING:
4585 * spin_lock_irqsave(host_set lock)
4586 *
4587 * RETURNS:
9a3d9eb0 4588 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
4589 */
4590
9a3d9eb0 4591unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
4592{
4593 struct ata_port *ap = qc->ap;
4594
e50362ec
AL
4595 /* Use polling pio if the LLD doesn't handle
4596 * interrupt driven pio and atapi CDB interrupt.
4597 */
4598 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4599 switch (qc->tf.protocol) {
4600 case ATA_PROT_PIO:
4601 case ATA_PROT_ATAPI:
4602 case ATA_PROT_ATAPI_NODATA:
4603 qc->tf.flags |= ATA_TFLAG_POLLING;
4604 break;
4605 case ATA_PROT_ATAPI_DMA:
4606 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 4607 /* see ata_dma_blacklisted() */
e50362ec
AL
4608 BUG();
4609 break;
4610 default:
4611 break;
4612 }
4613 }
4614
312f7da2 4615 /* select the device */
1da177e4
LT
4616 ata_dev_select(ap, qc->dev->devno, 1, 0);
4617
312f7da2 4618 /* start the command */
1da177e4
LT
4619 switch (qc->tf.protocol) {
4620 case ATA_PROT_NODATA:
312f7da2
AL
4621 if (qc->tf.flags & ATA_TFLAG_POLLING)
4622 ata_qc_set_polling(qc);
4623
e5338254 4624 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
4625 ap->hsm_task_state = HSM_ST_LAST;
4626
4627 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4628 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 4629
1da177e4
LT
4630 break;
4631
4632 case ATA_PROT_DMA:
587005de 4633 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 4634
1da177e4
LT
4635 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4636 ap->ops->bmdma_setup(qc); /* set up bmdma */
4637 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 4638 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4639 break;
4640
312f7da2
AL
4641 case ATA_PROT_PIO:
4642 if (qc->tf.flags & ATA_TFLAG_POLLING)
4643 ata_qc_set_polling(qc);
1da177e4 4644
e5338254 4645 ata_tf_to_host(ap, &qc->tf);
312f7da2 4646
54f00389
AL
4647 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4648 /* PIO data out protocol */
4649 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 4650 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
4651
4652 /* always send first data block using
e27486db 4653 * the ata_pio_task() codepath.
54f00389 4654 */
312f7da2 4655 } else {
54f00389
AL
4656 /* PIO data in protocol */
4657 ap->hsm_task_state = HSM_ST;
4658
4659 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4660 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
4661
4662 /* if polling, ata_pio_task() handles the rest.
4663 * otherwise, interrupt handler takes over from here.
4664 */
312f7da2
AL
4665 }
4666
1da177e4
LT
4667 break;
4668
1da177e4 4669 case ATA_PROT_ATAPI:
1da177e4 4670 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
4671 if (qc->tf.flags & ATA_TFLAG_POLLING)
4672 ata_qc_set_polling(qc);
4673
e5338254 4674 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 4675
312f7da2
AL
4676 ap->hsm_task_state = HSM_ST_FIRST;
4677
4678 /* send cdb by polling if no cdb interrupt */
4679 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4680 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 4681 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
4682 break;
4683
4684 case ATA_PROT_ATAPI_DMA:
587005de 4685 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 4686
1da177e4
LT
4687 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4688 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
4689 ap->hsm_task_state = HSM_ST_FIRST;
4690
4691 /* send cdb by polling if no cdb interrupt */
4692 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 4693 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
4694 break;
4695
4696 default:
4697 WARN_ON(1);
9a3d9eb0 4698 return AC_ERR_SYSTEM;
1da177e4
LT
4699 }
4700
4701 return 0;
4702}
4703
1da177e4
LT
4704/**
4705 * ata_host_intr - Handle host interrupt for given (port, task)
4706 * @ap: Port on which interrupt arrived (possibly...)
4707 * @qc: Taskfile currently active in engine
4708 *
4709 * Handle host interrupt for given queued command. Currently,
4710 * only DMA interrupts are handled. All other commands are
4711 * handled via polling with interrupts disabled (nIEN bit).
4712 *
4713 * LOCKING:
4714 * spin_lock_irqsave(host_set lock)
4715 *
4716 * RETURNS:
4717 * One if interrupt was handled, zero if not (shared irq).
4718 */
4719
inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	/* device is ready - advance the state machine (in_wq == 0) */
	ata_hsm_move(ap, qc, status, 0);
	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_irq_ack(ap, 0); /* debug trap */
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
4797
4798/**
4799 * ata_interrupt - Default ATA host interrupt handler
0cba632b
JG
4800 * @irq: irq line (unused)
4801 * @dev_instance: pointer to our ata_host_set information structure
1da177e4
LT
4802 * @regs: unused
4803 *
0cba632b
JG
4804 * Default interrupt handler for PCI IDE devices. Calls
4805 * ata_host_intr() for each port that is not disabled.
4806 *
1da177e4 4807 * LOCKING:
0cba632b 4808 * Obtains host_set lock during operation.
1da177e4
LT
4809 *
4810 * RETURNS:
0cba632b 4811 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
4812 */
4813
irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host_set->lock, flags);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap;

		ap = host_set->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			/* dispatch only when the port has an active,
			 * interrupt-driven (non-polling) command
			 */
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	return IRQ_RETVAL(handled);
}
4843
34bf2170
TH
4844/**
4845 * sata_scr_valid - test whether SCRs are accessible
4846 * @ap: ATA port to test SCR accessibility for
4847 *
4848 * Test whether SCRs are accessible for @ap.
4849 *
4850 * LOCKING:
4851 * None.
4852 *
4853 * RETURNS:
4854 * 1 if SCRs are accessible, 0 otherwise.
4855 */
4856int sata_scr_valid(struct ata_port *ap)
4857{
4858 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4859}
4860
4861/**
4862 * sata_scr_read - read SCR register of the specified port
4863 * @ap: ATA port to read SCR for
4864 * @reg: SCR to read
4865 * @val: Place to store read value
4866 *
4867 * Read SCR register @reg of @ap into *@val. This function is
4868 * guaranteed to succeed if the cable type of the port is SATA
4869 * and the port implements ->scr_read.
4870 *
4871 * LOCKING:
4872 * None.
4873 *
4874 * RETURNS:
4875 * 0 on success, negative errno on failure.
4876 */
4877int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4878{
4879 if (sata_scr_valid(ap)) {
4880 *val = ap->ops->scr_read(ap, reg);
4881 return 0;
4882 }
4883 return -EOPNOTSUPP;
4884}
4885
4886/**
4887 * sata_scr_write - write SCR register of the specified port
4888 * @ap: ATA port to write SCR for
4889 * @reg: SCR to write
4890 * @val: value to write
4891 *
4892 * Write @val to SCR register @reg of @ap. This function is
4893 * guaranteed to succeed if the cable type of the port is SATA
4894 * and the port implements ->scr_read.
4895 *
4896 * LOCKING:
4897 * None.
4898 *
4899 * RETURNS:
4900 * 0 on success, negative errno on failure.
4901 */
4902int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4903{
4904 if (sata_scr_valid(ap)) {
4905 ap->ops->scr_write(ap, reg, val);
4906 return 0;
4907 }
4908 return -EOPNOTSUPP;
4909}
4910
4911/**
4912 * sata_scr_write_flush - write SCR register of the specified port and flush
4913 * @ap: ATA port to write SCR for
4914 * @reg: SCR to write
4915 * @val: value to write
4916 *
4917 * This function is identical to sata_scr_write() except that this
4918 * function performs flush after writing to the register.
4919 *
4920 * LOCKING:
4921 * None.
4922 *
4923 * RETURNS:
4924 * 0 on success, negative errno on failure.
4925 */
4926int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4927{
4928 if (sata_scr_valid(ap)) {
4929 ap->ops->scr_write(ap, reg, val);
4930 ap->ops->scr_read(ap, reg);
4931 return 0;
4932 }
4933 return -EOPNOTSUPP;
4934}
4935
4936/**
4937 * ata_port_online - test whether the given port is online
4938 * @ap: ATA port to test
4939 *
4940 * Test whether @ap is online. Note that this function returns 0
4941 * if online status of @ap cannot be obtained, so
4942 * ata_port_online(ap) != !ata_port_offline(ap).
4943 *
4944 * LOCKING:
4945 * None.
4946 *
4947 * RETURNS:
4948 * 1 if the port online status is available and online.
4949 */
4950int ata_port_online(struct ata_port *ap)
4951{
4952 u32 sstatus;
4953
4954 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4955 return 1;
4956 return 0;
4957}
4958
4959/**
4960 * ata_port_offline - test whether the given port is offline
4961 * @ap: ATA port to test
4962 *
4963 * Test whether @ap is offline. Note that this function returns
4964 * 0 if offline status of @ap cannot be obtained, so
4965 * ata_port_online(ap) != !ata_port_offline(ap).
4966 *
4967 * LOCKING:
4968 * None.
4969 *
4970 * RETURNS:
4971 * 1 if the port offline status is available and offline.
4972 */
4973int ata_port_offline(struct ata_port *ap)
4974{
4975 u32 sstatus;
4976
4977 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
4978 return 1;
4979 return 0;
4980}
0baab86b 4981
77b08fb5 4982int ata_flush_cache(struct ata_device *dev)
9b847548 4983{
977e6b9f 4984 unsigned int err_mask;
9b847548
JA
4985 u8 cmd;
4986
4987 if (!ata_try_flush_cache(dev))
4988 return 0;
4989
4990 if (ata_id_has_flush_ext(dev->id))
4991 cmd = ATA_CMD_FLUSH_EXT;
4992 else
4993 cmd = ATA_CMD_FLUSH;
4994
977e6b9f
TH
4995 err_mask = ata_do_simple_cmd(dev, cmd);
4996 if (err_mask) {
4997 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
4998 return -EIO;
4999 }
5000
5001 return 0;
9b847548
JA
5002}
5003
3373efd8 5004static int ata_standby_drive(struct ata_device *dev)
9b847548 5005{
977e6b9f
TH
5006 unsigned int err_mask;
5007
5008 err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
5009 if (err_mask) {
5010 ata_dev_printk(dev, KERN_ERR, "failed to standby drive "
5011 "(err_mask=0x%x)\n", err_mask);
5012 return -EIO;
5013 }
5014
5015 return 0;
9b847548
JA
5016}
5017
3373efd8 5018static int ata_start_drive(struct ata_device *dev)
9b847548 5019{
977e6b9f
TH
5020 unsigned int err_mask;
5021
5022 err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
5023 if (err_mask) {
5024 ata_dev_printk(dev, KERN_ERR, "failed to start drive "
5025 "(err_mask=0x%x)\n", err_mask);
5026 return -EIO;
5027 }
5028
5029 return 0;
9b847548
JA
5030}
5031
5032/**
5033 * ata_device_resume - wakeup a previously suspended devices
c893a3ae 5034 * @dev: the device to resume
9b847548
JA
5035 *
5036 * Kick the drive back into action, by sending it an idle immediate
5037 * command and making sure its transfer mode matches between drive
5038 * and host.
5039 *
5040 */
3373efd8 5041int ata_device_resume(struct ata_device *dev)
9b847548 5042{
3373efd8
TH
5043 struct ata_port *ap = dev->ap;
5044
9b847548 5045 if (ap->flags & ATA_FLAG_SUSPENDED) {
e82cbdb9 5046 struct ata_device *failed_dev;
e42d7be2 5047
1cca0ebb 5048 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
0737ac89 5049 ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000);
e42d7be2 5050
9b847548 5051 ap->flags &= ~ATA_FLAG_SUSPENDED;
e82cbdb9 5052 while (ata_set_mode(ap, &failed_dev))
3373efd8 5053 ata_dev_disable(failed_dev);
9b847548 5054 }
e1211e3f 5055 if (!ata_dev_enabled(dev))
9b847548
JA
5056 return 0;
5057 if (dev->class == ATA_DEV_ATA)
3373efd8 5058 ata_start_drive(dev);
9b847548
JA
5059
5060 return 0;
5061}
5062
5063/**
5064 * ata_device_suspend - prepare a device for suspend
c893a3ae 5065 * @dev: the device to suspend
e2a7f77a 5066 * @state: target power management state
9b847548
JA
5067 *
5068 * Flush the cache on the drive, if appropriate, then issue a
5069 * standbynow command.
9b847548 5070 */
3373efd8 5071int ata_device_suspend(struct ata_device *dev, pm_message_t state)
9b847548 5072{
3373efd8
TH
5073 struct ata_port *ap = dev->ap;
5074
e1211e3f 5075 if (!ata_dev_enabled(dev))
9b847548
JA
5076 return 0;
5077 if (dev->class == ATA_DEV_ATA)
3373efd8 5078 ata_flush_cache(dev);
9b847548 5079
082776e4 5080 if (state.event != PM_EVENT_FREEZE)
3373efd8 5081 ata_standby_drive(dev);
9b847548
JA
5082 ap->flags |= ATA_FLAG_SUSPENDED;
5083 return 0;
5084}
5085
c893a3ae
RD
5086/**
5087 * ata_port_start - Set port up for dma.
5088 * @ap: Port to initialize
5089 *
5090 * Called just after data structures for each port are
5091 * initialized. Allocates space for PRD table.
5092 *
5093 * May be used as the port_start() entry in ata_port_operations.
5094 *
5095 * LOCKING:
5096 * Inherited from caller.
5097 */
5098
1da177e4
LT
5099int ata_port_start (struct ata_port *ap)
5100{
2f1f610b 5101 struct device *dev = ap->dev;
6037d6bb 5102 int rc;
1da177e4
LT
5103
5104 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
5105 if (!ap->prd)
5106 return -ENOMEM;
5107
6037d6bb
JG
5108 rc = ata_pad_alloc(ap, dev);
5109 if (rc) {
cedc9a47 5110 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
6037d6bb 5111 return rc;
cedc9a47
JG
5112 }
5113
1da177e4
LT
5114 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
5115
5116 return 0;
5117}
5118
0baab86b
EF
5119
5120/**
5121 * ata_port_stop - Undo ata_port_start()
5122 * @ap: Port to shut down
5123 *
5124 * Frees the PRD table.
5125 *
5126 * May be used as the port_stop() entry in ata_port_operations.
5127 *
5128 * LOCKING:
6f0ef4fa 5129 * Inherited from caller.
0baab86b
EF
5130 */
5131
1da177e4
LT
void ata_port_stop (struct ata_port *ap)
{
	struct device *dev = ap->dev;

	/* release the PRD table and padding buffer allocated by
	 * ata_port_start()
	 */
	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}
5139
aa8f0dc6
JG
void ata_host_stop (struct ata_host_set *host_set)
{
	/* unmap the MMIO region, if the host set established one */
	if (host_set->mmio_base)
		iounmap(host_set->mmio_base);
}
5145
5146
1da177e4
LT
5147/**
5148 * ata_host_remove - Unregister SCSI host structure with upper layers
5149 * @ap: Port to unregister
5150 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
5151 *
5152 * LOCKING:
6f0ef4fa 5153 * Inherited from caller.
1da177e4
LT
5154 */
5155
static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
{
	struct Scsi_Host *sh = ap->host;

	DPRINTK("ENTER\n");

	/* fully unregister from the SCSI midlayer only when requested */
	if (do_unregister)
		scsi_remove_host(sh);

	/* in either case, shut the low-level port down */
	ap->ops->port_stop(ap);
}
5167
3ef3b43d
TH
5168/**
5169 * ata_dev_init - Initialize an ata_device structure
5170 * @dev: Device structure to initialize
5171 *
5172 * Initialize @dev in preparation for probing.
5173 *
5174 * LOCKING:
5175 * Inherited from caller.
5176 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host_set lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	/* zero everything past the persistent head of the structure,
	 * then open up all transfer mode masks for probing
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5199
1da177e4
LT
/**
 *	ata_host_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: associated SCSI mid-layer structure
 *	@host_set: Collection of hosts to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure, and its associated
 *	scsi_host.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
			  struct ata_host_set *host_set,
			  const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	/* SCSI mid-layer limits for this host */
	host->max_id = 16;
	host->max_lun = 1;
	host->max_channel = 1;
	host->unique_id = ata_unique_id++;
	host->max_cmd_len = 12;

	ap->lock = &host_set->lock;
	ap->flags = ATA_FLAG_DISABLED;	/* stays disabled until probing succeeds */
	ap->id = host->unique_id;
	ap->host = host;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host_set = host_set;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	ap->hard_port_no =
		ent->legacy_mode ? ent->hard_port_no : port_no;
	ap->pio_mask = ent->pio_mask;
	ap->mwdma_mask = ent->mwdma_mask;
	ap->udma_mask = ent->udma_mask;
	ap->flags |= ent->host_flags;
	ap->ops = ent->port_ops;
	/* no hardware speed limit known yet; narrowed later from SControl */
	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	/* port_task is armed later with the actual work fn */
	INIT_WORK(&ap->port_task, NULL, NULL);
	INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
5279
/**
 *	ata_host_add - Attach low-level ATA driver to system
 *	@ent: Information provided by low-level driver
 *	@host_set: Collections of ports to which we add
 *	@port_no: Port number associated with this host
 *
 *	Attach low-level ATA driver to system.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	New ata_port on success, NULL on error.
 */

static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
				      struct ata_host_set *host_set,
				      unsigned int port_no)
{
	struct Scsi_Host *host;
	struct ata_port *ap;
	int rc;

	DPRINTK("ENTER\n");

	/* Without new-style EH the driver must advertise at least one
	 * reset mechanism, or we have no way to recover the port. */
	if (!ent->port_ops->error_handler &&
	    !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	/* ata_port lives in the SCSI host's hostdata area */
	host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!host)
		return NULL;

	host->transportt = &ata_scsi_transport_template;

	ap = ata_shost_to_port(host);

	ata_host_init(ap, host, host_set, ent, port_no);

	rc = ap->ops->port_start(ap);
	if (rc)
		goto err_out;

	return ap;

err_out:
	scsi_host_put(host);
	return NULL;
}
5332
/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the SCSI bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int count = 0, i;
	struct device *dev = ent->dev;
	struct ata_host_set *host_set;
	int rc;

	DPRINTK("ENTER\n");
	/* alloc a container for our list of ATA ports (buses);
	 * the trailing void* array holds one slot per port */
	host_set = kzalloc(sizeof(struct ata_host_set) +
			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host_set)
		return 0;
	spin_lock_init(&host_set->lock);

	host_set->dev = dev;
	host_set->n_ports = ent->n_ports;
	host_set->irq = ent->irq;
	host_set->mmio_base = ent->mmio_base;
	host_set->private_data = ent->private_data;
	host_set->ops = ent->port_ops;
	host_set->flags = ent->host_set_flags;

	/* register each port bound to this device */
	for (i = 0; i < ent->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;

		ap = ata_host_add(ent, host_set, i);
		if (!ap)
			goto err_out;

		host_set->ports[i] = ap;
		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				(ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
				"ctl 0x%lX bmdma 0x%lX irq %lu\n",
				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
				ata_mode_string(xfer_mode_mask),
				ap->ioaddr.cmd_addr,
				ap->ioaddr.ctl_addr,
				ap->ioaddr.bmdma_addr,
				ent->irq);

		/* clear any stale status/interrupt state */
		ata_chk_status(ap);
		host_set->ops->irq_clear(ap);
		ata_eh_freeze_port(ap);	/* freeze port before requesting IRQ */
		count++;
	}

	if (!count)
		goto err_free_ret;

	/* obtain irq, that is shared between channels */
	rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			 DRV_NAME, host_set);
	if (rc) {
		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
			   ent->irq, rc);
		goto err_out;
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap;
		u32 scontrol;
		int rc;

		ap = host_set->ports[i];

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		rc = scsi_add_host(ap->host, dev);
		if (rc) {
			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}

		if (ap->ops->error_handler) {
			/* new-style EH: let the EH thread do boot probing */
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ap->eh_info.action |= ATA_EH_SOFTRESET;

			ap->flags |= ATA_FLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			/* old-style drivers: probe synchronously here */
			DPRINTK("ata%u: bus probe begin\n", ap->id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_scan_host(ap);
	}

	dev_set_drvdata(dev, host_set);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

err_out:
	/* tear down the ports that were successfully added */
	for (i = 0; i < count; i++) {
		ata_host_remove(host_set->ports[i], 1);
		scsi_host_put(host_set->ports[i]->host);
	}
err_free_ret:
	kfree(host_set);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}
5499
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	/* old-style drivers (no error_handler) are torn down via
	 * ata_host_set_remove()/ata_scsi_release() instead */
	if (!ap->ops->error_handler)
		return;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->flags |= ATA_FLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

	/* remove the associated SCSI host */
	scsi_remove_host(ap->host);
}
5556
/**
 *	ata_host_set_remove - PCI layer callback for device removal
 *	@host_set: ATA host set that was removed
 *
 *	Unregister all objects associated with this host set. Free those
 *	objects.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */

void ata_host_set_remove(struct ata_host_set *host_set)
{
	unsigned int i;

	/* detach ports first; no-op for old-style (no error_handler)
	 * drivers, which are handled by ata_scsi_release() below */
	for (i = 0; i < host_set->n_ports; i++)
		ata_port_detach(host_set->ports[i]);

	free_irq(host_set->irq, host_set);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_release(ap->host);

		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			/* release legacy ISA port regions if claimed */
			if (ioaddr->cmd_addr == 0x1f0)
				release_region(0x1f0, 8);
			else if (ioaddr->cmd_addr == 0x170)
				release_region(0x170, 8);
		}

		scsi_host_put(ap->host);
	}

	if (host_set->ops->host_stop)
		host_set->ops->host_stop(host_set);

	kfree(host_set);
}
5599
/**
 *	ata_scsi_release - SCSI layer callback hook for host unload
 *	@host: libata host to be unloaded
 *
 *	Performs all duties necessary to shut down a libata port...
 *	Kill port kthread, disable port, and release resources.
 *
 *	LOCKING:
 *	Inherited from SCSI layer.
 *
 *	RETURNS:
 *	One.
 */

int ata_scsi_release(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	/* do_unregister=0: stop the port, SCSI host removal is the
	 * caller's responsibility */
	ata_host_remove(ap, 0);

	DPRINTK("EXIT\n");
	return 1;
}
5626
5627/**
5628 * ata_std_ports - initialize ioaddr with standard port offsets.
5629 * @ioaddr: IO address structure to be initialized
0baab86b
EF
5630 *
5631 * Utility function which initializes data_addr, error_addr,
5632 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5633 * device_addr, status_addr, and command_addr to standard offsets
5634 * relative to cmd_addr.
5635 *
5636 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 5637 */
0baab86b 5638
1da177e4
LT
5639void ata_std_ports(struct ata_ioports *ioaddr)
5640{
5641 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5642 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5643 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5644 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5645 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5646 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5647 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5648 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5649 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5650 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5651}
5652
0baab86b 5653
374b1873
JG
5654#ifdef CONFIG_PCI
5655
5656void ata_pci_host_stop (struct ata_host_set *host_set)
5657{
5658 struct pci_dev *pdev = to_pci_dev(host_set->dev);
5659
5660 pci_iounmap(pdev, host_set->mmio_base);
5661}
5662
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that
 *	hot-unplug or module unload event has occurred.
 *	Handle this by unregistering all objects associated
 *	with this PCI device.  Free those objects.  Then finally
 *	release PCI resources and disable device.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */

void ata_pci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host_set *host_set = dev_get_drvdata(dev);
	/* legacy-mode setups chain a second host_set off the first;
	 * grab it before the first is freed */
	struct ata_host_set *host_set2 = host_set->next;

	ata_host_set_remove(host_set);
	if (host_set2)
		ata_host_set_remove(host_set2);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}
5691
5692/* move to PCI subsystem */
057ace5e 5693int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
5694{
5695 unsigned long tmp = 0;
5696
5697 switch (bits->width) {
5698 case 1: {
5699 u8 tmp8 = 0;
5700 pci_read_config_byte(pdev, bits->reg, &tmp8);
5701 tmp = tmp8;
5702 break;
5703 }
5704 case 2: {
5705 u16 tmp16 = 0;
5706 pci_read_config_word(pdev, bits->reg, &tmp16);
5707 tmp = tmp16;
5708 break;
5709 }
5710 case 4: {
5711 u32 tmp32 = 0;
5712 pci_read_config_dword(pdev, bits->reg, &tmp32);
5713 tmp = tmp32;
5714 break;
5715 }
5716
5717 default:
5718 return -EINVAL;
5719 }
5720
5721 tmp &= bits->mask;
5722
5723 return (tmp == bits->val) ? 1 : 0;
5724}
9b847548
JA
5725
/* Generic PCI suspend helper.  Call order matters: config space must
 * be saved while the device is still enabled, before dropping to D3hot.
 * Always returns 0. */
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
5733
/* Generic PCI resume helper: mirror of ata_pci_device_suspend().
 * Power up to D0 first, restore saved config space, then re-enable
 * the device and bus mastering.  Always returns 0. */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);
	return 0;
}
1da177e4
LT
5742#endif /* CONFIG_PCI */
5743
5744
1da177e4
LT
5745static int __init ata_init(void)
5746{
a8601e5f 5747 ata_probe_timeout *= HZ;
1da177e4
LT
5748 ata_wq = create_workqueue("ata");
5749 if (!ata_wq)
5750 return -ENOMEM;
5751
453b07ac
TH
5752 ata_aux_wq = create_singlethread_workqueue("ata_aux");
5753 if (!ata_aux_wq) {
5754 destroy_workqueue(ata_wq);
5755 return -ENOMEM;
5756 }
5757
1da177e4
LT
5758 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5759 return 0;
5760}
5761
/* Module exit: tear down both workqueues created by ata_init(). */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

module_init(ata_init);
module_exit(ata_exit);
5770
67846b30 5771static unsigned long ratelimit_time;
34af946a 5772static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
5773
5774int ata_ratelimit(void)
5775{
5776 int rc;
5777 unsigned long flags;
5778
5779 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5780
5781 if (time_after(jiffies, ratelimit_time)) {
5782 rc = 1;
5783 ratelimit_time = jiffies + (HZ/5);
5784 } else
5785 rc = 0;
5786
5787 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5788
5789 return rc;
5790}
5791
c22daff4
TH
5792/**
5793 * ata_wait_register - wait until register value changes
5794 * @reg: IO-mapped register
5795 * @mask: Mask to apply to read register value
5796 * @val: Wait condition
5797 * @interval_msec: polling interval in milliseconds
5798 * @timeout_msec: timeout in milliseconds
5799 *
5800 * Waiting for some bits of register to change is a common
5801 * operation for ATA controllers. This function reads 32bit LE
5802 * IO-mapped register @reg and tests for the following condition.
5803 *
5804 * (*@reg & mask) != val
5805 *
5806 * If the condition is met, it returns; otherwise, the process is
5807 * repeated after @interval_msec until timeout.
5808 *
5809 * LOCKING:
5810 * Kernel thread context (may sleep)
5811 *
5812 * RETURNS:
5813 * The final register value.
5814 */
5815u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5816 unsigned long interval_msec,
5817 unsigned long timeout_msec)
5818{
5819 unsigned long timeout;
5820 u32 tmp;
5821
5822 tmp = ioread32(reg);
5823
5824 /* Calculate timeout _after_ the first read to make sure
5825 * preceding writes reach the controller before starting to
5826 * eat away the timeout.
5827 */
5828 timeout = jiffies + (timeout_msec * HZ) / 1000;
5829
5830 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
5831 msleep(interval_msec);
5832 tmp = ioread32(reg);
5833 }
5834
5835 return tmp;
5836}
5837
1da177e4
LT
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

/* SATA PHY debounce timing tables */
EXPORT_SYMBOL_GPL(sata_deb_timing_boot);
EXPORT_SYMBOL_GPL(sata_deb_timing_eh);
EXPORT_SYMBOL_GPL(sata_deb_timing_before_fsrst);
/* registration / teardown */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_port_detach);
EXPORT_SYMBOL_GPL(ata_host_set_remove);
/* command setup / completion */
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
/* taskfile / register access helpers */
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
/* BMDMA helpers */
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
/* probing / reset */
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_revalidate);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
/* misc utilities */
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
/* SCSI layer glue */
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
/* SCR (SATA status/control register) access */
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

/* transfer-mode timing */
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

/* power management */
EXPORT_SYMBOL_GPL(ata_device_suspend);
EXPORT_SYMBOL_GPL(ata_device_resume);
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);

/* error handling */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
This page took 0.600141 seconds and 5 git commands to generate.