libata-core: auditing chk_status v check_status
[deliverable/linux.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
2dcb407e 52#include <linux/io.h>
1da177e4 53#include <scsi/scsi.h>
193515d5 54#include <scsi/scsi_cmnd.h>
1da177e4
LT
55#include <scsi/scsi_host.h>
56#include <linux/libata.h>
1da177e4
LT
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
fda0efc5 62
d7bb4cc7 63/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
64const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
65const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
66const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 67
3373efd8
TH
68static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
9f45cbd3 71static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
3373efd8 72static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 73static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 74
f3187195 75unsigned int ata_print_id = 1;
1da177e4
LT
76static struct workqueue_struct *ata_wq;
77
453b07ac
TH
78struct workqueue_struct *ata_aux_wq;
79
418dc1f5 80int atapi_enabled = 1;
1623c81e
JG
81module_param(atapi_enabled, int, 0444);
82MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83
95de719a
AL
84int atapi_dmadir = 0;
85module_param(atapi_dmadir, int, 0444);
86MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87
baf4fdfa
ML
88int atapi_passthru16 = 1;
89module_param(atapi_passthru16, int, 0444);
90MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
91
c3c013a2
JG
92int libata_fua = 0;
93module_param_named(fua, libata_fua, int, 0444);
94MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
95
2dcb407e 96static int ata_ignore_hpa;
1e999736
AC
97module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
98MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
99
b3a70601
AC
100static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
101module_param_named(dma, libata_dma_mask, int, 0444);
102MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
103
a8601e5f
AM
104static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
105module_param(ata_probe_timeout, int, 0444);
106MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
107
6ebe9d86 108int libata_noacpi = 0;
d7d0dad6 109module_param_named(noacpi, libata_noacpi, int, 0444);
6ebe9d86 110MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11ef697b 111
1da177e4
LT
112MODULE_AUTHOR("Jeff Garzik");
113MODULE_DESCRIPTION("Library module for ATA devices");
114MODULE_LICENSE("GPL");
115MODULE_VERSION(DRV_VERSION);
116
0baab86b 117
1da177e4
LT
118/**
119 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
120 * @tf: Taskfile to convert
1da177e4 121 * @pmp: Port multiplier port
9977126c
TH
122 * @is_cmd: This FIS is for command
123 * @fis: Buffer into which data will output
1da177e4
LT
124 *
125 * Converts a standard ATA taskfile to a Serial ATA
126 * FIS structure (Register - Host to Device).
127 *
128 * LOCKING:
129 * Inherited from caller.
130 */
9977126c 131void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 132{
9977126c
TH
133 fis[0] = 0x27; /* Register - Host to Device FIS */
134 fis[1] = pmp & 0xf; /* Port multiplier number*/
135 if (is_cmd)
136 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
137
1da177e4
LT
138 fis[2] = tf->command;
139 fis[3] = tf->feature;
140
141 fis[4] = tf->lbal;
142 fis[5] = tf->lbam;
143 fis[6] = tf->lbah;
144 fis[7] = tf->device;
145
146 fis[8] = tf->hob_lbal;
147 fis[9] = tf->hob_lbam;
148 fis[10] = tf->hob_lbah;
149 fis[11] = tf->hob_feature;
150
151 fis[12] = tf->nsect;
152 fis[13] = tf->hob_nsect;
153 fis[14] = 0;
154 fis[15] = tf->ctl;
155
156 fis[16] = 0;
157 fis[17] = 0;
158 fis[18] = 0;
159 fis[19] = 0;
160}
161
162/**
163 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
164 * @fis: Buffer from which data will be input
165 * @tf: Taskfile to output
166 *
e12a1be6 167 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
168 *
169 * LOCKING:
170 * Inherited from caller.
171 */
172
057ace5e 173void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
174{
175 tf->command = fis[2]; /* status */
176 tf->feature = fis[3]; /* error */
177
178 tf->lbal = fis[4];
179 tf->lbam = fis[5];
180 tf->lbah = fis[6];
181 tf->device = fis[7];
182
183 tf->hob_lbal = fis[8];
184 tf->hob_lbam = fis[9];
185 tf->hob_lbah = fis[10];
186
187 tf->nsect = fis[12];
188 tf->hob_nsect = fis[13];
189}
190
/* R/W command opcode lookup table used by ata_rwcmd_protocol().
 *
 * Indexed by (base + fua + lba48 + write) where base selects the
 * transfer class (0 = PIO multi, 8 = PIO, 16 = DMA), fua is 4 or 0,
 * lba48 is 2 or 0 and write is 1 or 0.  A zero entry means the
 * combination is not supported (e.g. FUA without LBA48).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
220
221/**
8cbd6df1 222 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
223 * @tf: command to examine and configure
224 * @dev: device tf belongs to
1da177e4 225 *
2e9edbf8 226 * Examine the device configuration and tf->flags to calculate
8cbd6df1 227 * the proper read/write commands and protocol to use.
1da177e4
LT
228 *
229 * LOCKING:
230 * caller.
231 */
bd056d7e 232static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 233{
9a3dccc4 234 u8 cmd;
1da177e4 235
9a3dccc4 236 int index, fua, lba48, write;
2e9edbf8 237
9a3dccc4 238 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
239 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
240 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 241
8cbd6df1
AL
242 if (dev->flags & ATA_DFLAG_PIO) {
243 tf->protocol = ATA_PROT_PIO;
9a3dccc4 244 index = dev->multi_count ? 0 : 8;
9af5c9c9 245 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
246 /* Unable to use DMA due to host limitation */
247 tf->protocol = ATA_PROT_PIO;
0565c26d 248 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
249 } else {
250 tf->protocol = ATA_PROT_DMA;
9a3dccc4 251 index = 16;
8cbd6df1 252 }
1da177e4 253
9a3dccc4
TH
254 cmd = ata_rw_cmds[index + fua + lba48 + write];
255 if (cmd) {
256 tf->command = cmd;
257 return 0;
258 }
259 return -1;
1da177e4
LT
260}
261
35b649fe
TH
262/**
263 * ata_tf_read_block - Read block address from ATA taskfile
264 * @tf: ATA taskfile of interest
265 * @dev: ATA device @tf belongs to
266 *
267 * LOCKING:
268 * None.
269 *
270 * Read block address from @tf. This function can handle all
271 * three address formats - LBA, LBA48 and CHS. tf->protocol and
272 * flags select the address format to use.
273 *
274 * RETURNS:
275 * Block address read from @tf.
276 */
277u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
278{
279 u64 block = 0;
280
281 if (tf->flags & ATA_TFLAG_LBA) {
282 if (tf->flags & ATA_TFLAG_LBA48) {
283 block |= (u64)tf->hob_lbah << 40;
284 block |= (u64)tf->hob_lbam << 32;
285 block |= tf->hob_lbal << 24;
286 } else
287 block |= (tf->device & 0xf) << 24;
288
289 block |= tf->lbah << 16;
290 block |= tf->lbam << 8;
291 block |= tf->lbal;
292 } else {
293 u32 cyl, head, sect;
294
295 cyl = tf->lbam | (tf->lbah << 8);
296 head = tf->device & 0xf;
297 sect = tf->lbal;
298
299 block = (cyl * dev->heads + head) * dev->sectors + sect;
300 }
301
302 return block;
303}
304
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.  Picks NCQ,
 *	LBA48, LBA28 or CHS addressing in that order of preference,
 *	subject to device/request capability.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* for NCQ the tag goes in nsect bits 7:3 and the
		 * sector count goes in the feature registers */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28; top 4 address bits live in the
			 * device register */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
427
cb95d562
TH
428/**
429 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
430 * @pio_mask: pio_mask
431 * @mwdma_mask: mwdma_mask
432 * @udma_mask: udma_mask
433 *
434 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
435 * unsigned int xfer_mask.
436 *
437 * LOCKING:
438 * None.
439 *
440 * RETURNS:
441 * Packed xfer_mask.
442 */
443static unsigned int ata_pack_xfermask(unsigned int pio_mask,
444 unsigned int mwdma_mask,
445 unsigned int udma_mask)
446{
447 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
448 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
449 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
450}
451
c0489e4e
TH
452/**
453 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
454 * @xfer_mask: xfer_mask to unpack
455 * @pio_mask: resulting pio_mask
456 * @mwdma_mask: resulting mwdma_mask
457 * @udma_mask: resulting udma_mask
458 *
459 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
460 * Any NULL distination masks will be ignored.
461 */
462static void ata_unpack_xfermask(unsigned int xfer_mask,
463 unsigned int *pio_mask,
464 unsigned int *mwdma_mask,
465 unsigned int *udma_mask)
466{
467 if (pio_mask)
468 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
469 if (mwdma_mask)
470 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
471 if (udma_mask)
472 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
473}
474
/* Maps each contiguous bit range of an xfer_mask (PIO, MWDMA, UDMA) to
 * the XFER_* base value of its lowest mode.  Terminated by a negative
 * shift; walked by the ata_xfer_* conversion helpers below.
 */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
484
485/**
486 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
487 * @xfer_mask: xfer_mask of interest
488 *
489 * Return matching XFER_* value for @xfer_mask. Only the highest
490 * bit of @xfer_mask is considered.
491 *
492 * LOCKING:
493 * None.
494 *
495 * RETURNS:
496 * Matching XFER_* value, 0 if no match found.
497 */
498static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
499{
500 int highbit = fls(xfer_mask) - 1;
501 const struct ata_xfer_ent *ent;
502
503 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
504 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
505 return ent->base + highbit - ent->shift;
506 return 0;
507}
508
509/**
510 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
511 * @xfer_mode: XFER_* of interest
512 *
513 * Return matching xfer_mask for @xfer_mode.
514 *
515 * LOCKING:
516 * None.
517 *
518 * RETURNS:
519 * Matching xfer_mask, 0 if no match found.
520 */
521static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
522{
523 const struct ata_xfer_ent *ent;
524
525 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
526 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
527 return 1 << (ent->shift + xfer_mode - ent->base);
528 return 0;
529}
530
531/**
532 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
533 * @xfer_mode: XFER_* of interest
534 *
535 * Return matching xfer_shift for @xfer_mode.
536 *
537 * LOCKING:
538 * None.
539 *
540 * RETURNS:
541 * Matching xfer_shift, -1 if no match found.
542 */
543static int ata_xfer_mode2shift(unsigned int xfer_mode)
544{
545 const struct ata_xfer_ent *ent;
546
547 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
548 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
549 return ent->shift;
550 return -1;
551}
552
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* indexed by bit position within the packed xfer_mask */
	static const char * const xfer_mode_str[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5", "PIO6",
		"MWDMA0", "MWDMA1", "MWDMA2", "MWDMA3", "MWDMA4",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44", "UDMA/66",
		"UDMA/100", "UDMA/133", "UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];

	return "<n/a>";
}
598
/* Map a SATA link speed number (1-based) to a human-readable string;
 * returns "<unknown>" for zero or out-of-range values.
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd < 1 || spd > ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[spd - 1];
}
610
3373efd8 611void ata_dev_disable(struct ata_device *dev)
0b8efb0a 612{
09d7f9b0 613 if (ata_dev_enabled(dev)) {
9af5c9c9 614 if (ata_msg_drv(dev->link->ap))
09d7f9b0 615 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
4ae72a1e
TH
616 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
617 ATA_DNXFER_QUIET);
0b8efb0a
TH
618 dev->class++;
619 }
620}
621
1da177e4 622/**
0d5ff566 623 * ata_devchk - PATA device presence detection
1da177e4
LT
624 * @ap: ATA channel to examine
625 * @device: Device to examine (starting at zero)
626 *
627 * This technique was originally described in
628 * Hale Landis's ATADRVR (www.ata-atapi.com), and
629 * later found its way into the ATA/ATAPI spec.
630 *
631 * Write a pattern to the ATA shadow registers,
632 * and if a device is present, it will respond by
633 * correctly storing and echoing back the
634 * ATA shadow register contents.
635 *
636 * LOCKING:
637 * caller.
638 */
639
0d5ff566 640static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
641{
642 struct ata_ioports *ioaddr = &ap->ioaddr;
643 u8 nsect, lbal;
644
645 ap->ops->dev_select(ap, device);
646
0d5ff566
TH
647 iowrite8(0x55, ioaddr->nsect_addr);
648 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 649
0d5ff566
TH
650 iowrite8(0xaa, ioaddr->nsect_addr);
651 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 652
0d5ff566
TH
653 iowrite8(0x55, ioaddr->nsect_addr);
654 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 655
0d5ff566
TH
656 nsect = ioread8(ioaddr->nsect_addr);
657 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
658
659 if ((nsect == 0x55) && (lbal == 0xaa))
660 return 1; /* we found a device */
661
662 return 0; /* nothing found */
663}
664
1da177e4
LT
665/**
666 * ata_dev_classify - determine device type based on ATA-spec signature
667 * @tf: ATA taskfile register set for device to be identified
668 *
669 * Determine from taskfile register contents whether a device is
670 * ATA or ATAPI, as per "Signature and persistence" section
671 * of ATA/PI spec (volume 1, sect 5.14).
672 *
673 * LOCKING:
674 * None.
675 *
676 * RETURNS:
633273a3
TH
677 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
678 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 679 */
057ace5e 680unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
681{
682 /* Apple's open source Darwin code hints that some devices only
683 * put a proper signature into the LBA mid/high registers,
684 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
685 *
686 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
687 * signatures for ATA and ATAPI devices attached on SerialATA,
688 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
689 * spec has never mentioned about using different signatures
690 * for ATA/ATAPI devices. Then, Serial ATA II: Port
691 * Multiplier specification began to use 0x69/0x96 to identify
692 * port multpliers and 0x3c/0xc3 to identify SEMB device.
693 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
694 * 0x69/0x96 shortly and described them as reserved for
695 * SerialATA.
696 *
697 * We follow the current spec and consider that 0x69/0x96
698 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 699 */
633273a3 700 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
701 DPRINTK("found ATA device by sig\n");
702 return ATA_DEV_ATA;
703 }
704
633273a3 705 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
706 DPRINTK("found ATAPI device by sig\n");
707 return ATA_DEV_ATAPI;
708 }
709
633273a3
TH
710 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
711 DPRINTK("found PMP device by sig\n");
712 return ATA_DEV_PMP;
713 }
714
715 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 716 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
717 return ATA_DEV_SEMB_UNSUP; /* not yet */
718 }
719
1da177e4
LT
720 DPRINTK("unknown device\n");
721 return ATA_DEV_UNKNOWN;
722}
723
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;		/* error register shares the feature field */
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && dev->devno == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		/* an all-zero status register means nothing is there */
		class = ATA_DEV_NONE;

	return class;
}
792
793/**
6a62a04d 794 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
795 * @id: IDENTIFY DEVICE results we will examine
796 * @s: string into which data is output
797 * @ofs: offset into identify device page
798 * @len: length of string to return. must be an even number.
799 *
800 * The strings in the IDENTIFY DEVICE page are broken up into
801 * 16-bit chunks. Run through the string, and output each
802 * 8-bit chunk linearly, regardless of platform.
803 *
804 * LOCKING:
805 * caller.
806 */
807
6a62a04d
TH
808void ata_id_string(const u16 *id, unsigned char *s,
809 unsigned int ofs, unsigned int len)
1da177e4
LT
810{
811 unsigned int c;
812
813 while (len > 0) {
814 c = id[ofs] >> 8;
815 *s = c;
816 s++;
817
818 c = id[ofs] & 0xff;
819 *s = c;
820 s++;
821
822 ofs++;
823 len -= 2;
824 }
825}
826
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	/* an even @len would leave no room for the terminator */
	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	/* trim trailing spaces, then NUL-terminate */
	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
0baab86b 855
db6f8759
TH
856static u64 ata_id_n_sectors(const u16 *id)
857{
858 if (ata_id_has_lba(id)) {
859 if (ata_id_has_lba48(id))
860 return ata_id_u64(id, 100);
861 else
862 return ata_id_u32(id, 60);
863 } else {
864 if (ata_id_current_chs_valid(id))
865 return ata_id_u32(id, 57);
866 else
867 return id[1] * id[3] * id[6];
868 }
869}
870
1e999736
AC
871static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
872{
873 u64 sectors = 0;
874
875 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
876 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
877 sectors |= (tf->hob_lbal & 0xff) << 24;
878 sectors |= (tf->lbah & 0xff) << 16;
879 sectors |= (tf->lbam & 0xff) << 8;
880 sectors |= (tf->lbal & 0xff);
881
882 return ++sectors;
883}
884
885static u64 ata_tf_to_lba(struct ata_taskfile *tf)
886{
887 u64 sectors = 0;
888
889 sectors |= (tf->device & 0x0f) << 24;
890 sectors |= (tf->lbah & 0xff) << 16;
891 sectors |= (tf->lbam & 0xff) << 8;
892 sectors |= (tf->lbal & 0xff);
893
894 return ++sectors;
895}
896
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* ATA_ABORTED from the drive maps to -EACCES so callers
		 * can distinguish "refused" from hard I/O failure */
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result comes back in the address registers */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
946
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* SET MAX takes the last addressable sector, not a count */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: top nibble goes into the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* aborted or address-rejected commands map to -EACCES */
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1003
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do?  (not unlocking, or sizes already agree) */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		/* informational only: report the HPA state */
		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data so dev->id reflects the new size */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1099
10305f0f
A
1100/**
1101 * ata_id_to_dma_mode - Identify DMA mode from id block
1102 * @dev: device to identify
cc261267 1103 * @unknown: mode to assume if we cannot tell
10305f0f
A
1104 *
1105 * Set up the timing values for the device based upon the identify
1106 * reported values for the DMA mode. This function is used by drivers
1107 * which rely upon firmware configured modes, but wish to report the
1108 * mode correctly when possible.
1109 *
1110 * In addition we emit similarly formatted messages to the default
1111 * ata_dev_set_mode handler, in order to provide consistency of
1112 * presentation.
1113 */
1114
1115void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1116{
1117 unsigned int mask;
1118 u8 mode;
1119
1120 /* Pack the DMA modes */
1121 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1122 if (dev->id[53] & 0x04)
1123 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1124
1125 /* Select the mode in use */
1126 mode = ata_xfer_mask2mode(mask);
1127
1128 if (mode != 0) {
1129 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1130 ata_mode_string(mask));
1131 } else {
1132 /* SWDMA perhaps ? */
1133 mode = unknown;
1134 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1135 }
1136
1137 /* Configure the device reporting */
1138 dev->xfer_mode = mode;
1139 dev->xfer_shift = ata_xfer_mode2shift(mode);
1140}
1141
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
	/* intentionally empty - no device selection performed */
}
1157
0baab86b 1158
1da177e4
LT
1159/**
1160 * ata_std_dev_select - Select device 0/1 on ATA bus
1161 * @ap: ATA channel to manipulate
1162 * @device: ATA device (numbered from zero) to select
1163 *
1164 * Use the method defined in the ATA specification to
1165 * make either device 0, or device 1, active on the
0baab86b
EF
1166 * ATA channel. Works with both PIO and MMIO.
1167 *
1168 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1169 *
1170 * LOCKING:
1171 * caller.
1172 */
1173
2dcb407e 1174void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1da177e4
LT
1175{
1176 u8 tmp;
1177
1178 if (device == 0)
1179 tmp = ATA_DEVICE_OBS;
1180 else
1181 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1182
0d5ff566 1183 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1184 ata_pause(ap); /* needed; also flushes, for mmio */
1185}
1186
1187/**
1188 * ata_dev_select - Select device 0/1 on ATA bus
1189 * @ap: ATA channel to manipulate
1190 * @device: ATA device (numbered from zero) to select
1191 * @wait: non-zero to wait for Status register BSY bit to clear
1192 * @can_sleep: non-zero if context allows sleeping
1193 *
1194 * Use the method defined in the ATA specification to
1195 * make either device 0, or device 1, active on the
1196 * ATA channel.
1197 *
1198 * This is a high-level version of ata_std_dev_select(),
1199 * which additionally provides the services of inserting
1200 * the proper pauses and status polling, where needed.
1201 *
1202 * LOCKING:
1203 * caller.
1204 */
1205
1206void ata_dev_select(struct ata_port *ap, unsigned int device,
1207 unsigned int wait, unsigned int can_sleep)
1208{
88574551 1209 if (ata_msg_probe(ap))
44877b4e
TH
1210 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1211 "device %u, wait %u\n", device, wait);
1da177e4
LT
1212
1213 if (wait)
1214 ata_wait_idle(ap);
1215
1216 ap->ops->dev_select(ap, device);
1217
1218 if (wait) {
9af5c9c9 1219 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1220 msleep(150);
1221 ata_wait_idle(ap);
1222 }
1223}
1224
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	/* first group of words dumped for debugging */
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	/* second group */
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	/* third group */
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1263
cb95d562
TH
1264/**
1265 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1266 * @id: IDENTIFY data to compute xfer mask from
1267 *
1268 * Compute the xfermask for this device. This is not as trivial
1269 * as it seems if we must consider early devices correctly.
1270 *
1271 * FIXME: pre IDE drive timing (do we care ?).
1272 *
1273 * LOCKING:
1274 * None.
1275 *
1276 * RETURNS:
1277 * Computed xfermask
1278 */
1279static unsigned int ata_id_xfermask(const u16 *id)
1280{
1281 unsigned int pio_mask, mwdma_mask, udma_mask;
1282
1283 /* Usual case. Word 53 indicates word 64 is valid */
1284 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1285 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1286 pio_mask <<= 3;
1287 pio_mask |= 0x7;
1288 } else {
1289 /* If word 64 isn't valid then Word 51 high byte holds
1290 * the PIO timing number for the maximum. Turn it into
1291 * a mask.
1292 */
7a0f1c8a 1293 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb 1294 if (mode < 5) /* Valid PIO range */
2dcb407e 1295 pio_mask = (2 << mode) - 1;
46767aeb
AC
1296 else
1297 pio_mask = 1;
cb95d562
TH
1298
1299 /* But wait.. there's more. Design your standards by
1300 * committee and you too can get a free iordy field to
1301 * process. However its the speeds not the modes that
1302 * are supported... Note drivers using the timing API
1303 * will get this right anyway
1304 */
1305 }
1306
1307 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1308
b352e57d
AC
1309 if (ata_id_is_cfa(id)) {
1310 /*
1311 * Process compact flash extended modes
1312 */
1313 int pio = id[163] & 0x7;
1314 int dma = (id[163] >> 3) & 7;
1315
1316 if (pio)
1317 pio_mask |= (1 << 5);
1318 if (pio > 1)
1319 pio_mask |= (1 << 6);
1320 if (dma)
1321 mwdma_mask |= (1 << 3);
1322 if (dma > 1)
1323 mwdma_mask |= (1 << 4);
1324 }
1325
fb21f0d0
TH
1326 udma_mask = 0;
1327 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1328 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1329
1330 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1331}
1332
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	/* rebind the per-port delayed work to @fn and stash its argument */
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
1361
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* cancels pending work and waits for a running instance to finish */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1381
7102d230 1382static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1383{
77853bf2 1384 struct completion *waiting = qc->private_data;
a2a7a662 1385
a2a7a662 1386 complete(waiting);
a2a7a662
TH
1387}
1388
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save active-command bookkeeping and clear it so the internal
	 * command owns the port exclusively; restored before returning.
	 */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		/* total byte count of the sg list */
		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit when a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1570
2432697b 1571/**
33480a0e 1572 * ata_exec_internal - execute libata internal command
2432697b
TH
1573 * @dev: Device to which the command is sent
1574 * @tf: Taskfile registers for the command and the result
1575 * @cdb: CDB for packet command
1576 * @dma_dir: Data tranfer direction of the command
1577 * @buf: Data buffer of the command
1578 * @buflen: Length of data buffer
2b789108 1579 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1580 *
1581 * Wrapper around ata_exec_internal_sg() which takes simple
1582 * buffer instead of sg list.
1583 *
1584 * LOCKING:
1585 * None. Should be called with kernel context, might sleep.
1586 *
1587 * RETURNS:
1588 * Zero on success, AC_ERR_* mask on failure
1589 */
1590unsigned ata_exec_internal(struct ata_device *dev,
1591 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1592 int dma_dir, void *buf, unsigned int buflen,
1593 unsigned long timeout)
2432697b 1594{
33480a0e
TH
1595 struct scatterlist *psg = NULL, sg;
1596 unsigned int n_elem = 0;
2432697b 1597
33480a0e
TH
1598 if (dma_dir != DMA_NONE) {
1599 WARN_ON(!buf);
1600 sg_init_one(&sg, buf, buflen);
1601 psg = &sg;
1602 n_elem++;
1603 }
2432697b 1604
2b789108
TH
1605 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1606 timeout);
2432697b
TH
1607}
1608
977e6b9f
TH
1609/**
1610 * ata_do_simple_cmd - execute simple internal command
1611 * @dev: Device to which the command is sent
1612 * @cmd: Opcode to execute
1613 *
1614 * Execute a 'simple' command, that only consists of the opcode
1615 * 'cmd' itself, without filling any other registers
1616 *
1617 * LOCKING:
1618 * Kernel thread context (may sleep).
1619 *
1620 * RETURNS:
1621 * Zero on success, AC_ERR_* mask on failure
e58eb583 1622 */
77b08fb5 1623unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1624{
1625 struct ata_taskfile tf;
e58eb583
TH
1626
1627 ata_tf_init(dev, &tf);
1628
1629 tf.command = cmd;
1630 tf.flags |= ATA_TFLAG_DEVICE;
1631 tf.protocol = ATA_PROT_NODATA;
1632
2b789108 1633 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1634}
1635
1bc4ccff
AC
1636/**
1637 * ata_pio_need_iordy - check if iordy needed
1638 * @adev: ATA device
1639 *
1640 * Check if the current speed of the device requires IORDY. Used
1641 * by various controllers for chip configuration.
1642 */
a617c09f 1643
1bc4ccff
AC
1644unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1645{
432729f0
AC
1646 /* Controller doesn't support IORDY. Probably a pointless check
1647 as the caller should know this */
9af5c9c9 1648 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1649 return 0;
432729f0
AC
1650 /* PIO3 and higher it is mandatory */
1651 if (adev->pio_mode > XFER_PIO_2)
1652 return 1;
1653 /* We turn it on when possible */
1654 if (ata_id_has_iordy(adev->id))
1bc4ccff 1655 return 1;
432729f0
AC
1656 return 0;
1657}
2e9edbf8 1658
432729f0
AC
1659/**
1660 * ata_pio_mask_no_iordy - Return the non IORDY mask
1661 * @adev: ATA device
1662 *
1663 * Compute the highest mode possible if we are not using iordy. Return
1664 * -1 if no iordy mode is available.
1665 */
a617c09f 1666
432729f0
AC
1667static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1668{
1bc4ccff 1669 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1670 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1671 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1672 /* Is the speed faster than the drive allows non IORDY ? */
1673 if (pio) {
1674 /* This is cycle times not frequency - watch the logic! */
1675 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1676 return 3 << ATA_SHIFT_PIO;
1677 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1678 }
1679 }
432729f0 1680 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1681}
1682
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;		/* human-readable failure cause */
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY opcode matching the (assumed) device class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: the ID page must agree with the class we used */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL,
					     DMA_NONE, NULL, 0, 0);
		/* 0x738c means IDENTIFY data was complete; a spin-up
		 * failure is only fatal in the incomplete (0x37c8) case.
		 */
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1860
3373efd8 1861static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1862{
9af5c9c9
TH
1863 struct ata_port *ap = dev->link->ap;
1864 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1865}
1866
a6e6ce8e
TH
1867static void ata_dev_config_ncq(struct ata_device *dev,
1868 char *desc, size_t desc_sz)
1869{
9af5c9c9 1870 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
1871 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1872
1873 if (!ata_id_has_ncq(dev->id)) {
1874 desc[0] = '\0';
1875 return;
1876 }
75683fe7 1877 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
1878 snprintf(desc, desc_sz, "NCQ (not used)");
1879 return;
1880 }
a6e6ce8e 1881 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1882 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1883 dev->flags |= ATA_DFLAG_NCQ;
1884 }
1885
1886 if (hdepth >= ddepth)
1887 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1888 else
1889 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1890}
1891
49016aca 1892/**
ffeae418 1893 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1894 * @dev: Target device to configure
1895 *
1896 * Configure @dev according to @dev->id. Generic and low-level
1897 * driver specific fixups are also applied.
49016aca
TH
1898 *
1899 * LOCKING:
ffeae418
TH
1900 * Kernel thread context (may sleep)
1901 *
1902 * RETURNS:
1903 * 0 on success, -errno otherwise
49016aca 1904 */
efdaedc4 1905int ata_dev_configure(struct ata_device *dev)
49016aca 1906{
9af5c9c9
TH
1907 struct ata_port *ap = dev->link->ap;
1908 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 1909 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 1910 const u16 *id = dev->id;
ff8854b2 1911 unsigned int xfer_mask;
b352e57d 1912 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
1913 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1914 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 1915 int rc;
49016aca 1916
0dd4b21f 1917 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
1918 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1919 __FUNCTION__);
ffeae418 1920 return 0;
49016aca
TH
1921 }
1922
0dd4b21f 1923 if (ata_msg_probe(ap))
44877b4e 1924 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1925
75683fe7
TH
1926 /* set horkage */
1927 dev->horkage |= ata_dev_blacklisted(dev);
1928
6746544c
TH
1929 /* let ACPI work its magic */
1930 rc = ata_acpi_on_devcfg(dev);
1931 if (rc)
1932 return rc;
08573a86 1933
05027adc
TH
1934 /* massage HPA, do it early as it might change IDENTIFY data */
1935 rc = ata_hpa_resize(dev);
1936 if (rc)
1937 return rc;
1938
c39f5ebe 1939 /* print device capabilities */
0dd4b21f 1940 if (ata_msg_probe(ap))
88574551
TH
1941 ata_dev_printk(dev, KERN_DEBUG,
1942 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1943 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1944 __FUNCTION__,
f15a1daf
TH
1945 id[49], id[82], id[83], id[84],
1946 id[85], id[86], id[87], id[88]);
c39f5ebe 1947
208a9933 1948 /* initialize to-be-configured parameters */
ea1dd4e1 1949 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1950 dev->max_sectors = 0;
1951 dev->cdb_len = 0;
1952 dev->n_sectors = 0;
1953 dev->cylinders = 0;
1954 dev->heads = 0;
1955 dev->sectors = 0;
1956
1da177e4
LT
1957 /*
1958 * common ATA, ATAPI feature tests
1959 */
1960
ff8854b2 1961 /* find max transfer mode; for printk only */
1148c3a7 1962 xfer_mask = ata_id_xfermask(id);
1da177e4 1963
0dd4b21f
BP
1964 if (ata_msg_probe(ap))
1965 ata_dump_id(id);
1da177e4 1966
ef143d57
AL
1967 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1968 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1969 sizeof(fwrevbuf));
1970
1971 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1972 sizeof(modelbuf));
1973
1da177e4
LT
1974 /* ATA-specific feature tests */
1975 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
1976 if (ata_id_is_cfa(id)) {
1977 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
1978 ata_dev_printk(dev, KERN_WARNING,
1979 "supports DRM functions and may "
1980 "not be fully accessable.\n");
b352e57d 1981 snprintf(revbuf, 7, "CFA");
2dcb407e
JG
1982 } else
1983 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
b352e57d 1984
1148c3a7 1985 dev->n_sectors = ata_id_n_sectors(id);
2940740b 1986
3f64f565
EM
1987 if (dev->id[59] & 0x100)
1988 dev->multi_count = dev->id[59] & 0xff;
1989
1148c3a7 1990 if (ata_id_has_lba(id)) {
4c2d721a 1991 const char *lba_desc;
a6e6ce8e 1992 char ncq_desc[20];
8bf62ece 1993
4c2d721a
TH
1994 lba_desc = "LBA";
1995 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1996 if (ata_id_has_lba48(id)) {
8bf62ece 1997 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 1998 lba_desc = "LBA48";
6fc49adb
TH
1999
2000 if (dev->n_sectors >= (1UL << 28) &&
2001 ata_id_has_flush_ext(id))
2002 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 2003 }
8bf62ece 2004
a6e6ce8e
TH
2005 /* config NCQ */
2006 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2007
8bf62ece 2008 /* print device info to dmesg */
3f64f565
EM
2009 if (ata_msg_drv(ap) && print_info) {
2010 ata_dev_printk(dev, KERN_INFO,
2011 "%s: %s, %s, max %s\n",
2012 revbuf, modelbuf, fwrevbuf,
2013 ata_mode_string(xfer_mask));
2014 ata_dev_printk(dev, KERN_INFO,
2015 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 2016 (unsigned long long)dev->n_sectors,
3f64f565
EM
2017 dev->multi_count, lba_desc, ncq_desc);
2018 }
ffeae418 2019 } else {
8bf62ece
AL
2020 /* CHS */
2021
2022 /* Default translation */
1148c3a7
TH
2023 dev->cylinders = id[1];
2024 dev->heads = id[3];
2025 dev->sectors = id[6];
8bf62ece 2026
1148c3a7 2027 if (ata_id_current_chs_valid(id)) {
8bf62ece 2028 /* Current CHS translation is valid. */
1148c3a7
TH
2029 dev->cylinders = id[54];
2030 dev->heads = id[55];
2031 dev->sectors = id[56];
8bf62ece
AL
2032 }
2033
2034 /* print device info to dmesg */
3f64f565 2035 if (ata_msg_drv(ap) && print_info) {
88574551 2036 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2037 "%s: %s, %s, max %s\n",
2038 revbuf, modelbuf, fwrevbuf,
2039 ata_mode_string(xfer_mask));
a84471fe 2040 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2041 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2042 (unsigned long long)dev->n_sectors,
2043 dev->multi_count, dev->cylinders,
2044 dev->heads, dev->sectors);
2045 }
07f6f7d0
AL
2046 }
2047
6e7846e9 2048 dev->cdb_len = 16;
1da177e4
LT
2049 }
2050
2051 /* ATAPI-specific feature tests */
2c13b7ce 2052 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2053 const char *cdb_intr_string = "";
2054 const char *atapi_an_string = "";
7d77b247 2055 u32 sntf;
08a556db 2056
1148c3a7 2057 rc = atapi_cdb_len(id);
1da177e4 2058 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2059 if (ata_msg_warn(ap))
88574551
TH
2060 ata_dev_printk(dev, KERN_WARNING,
2061 "unsupported CDB len\n");
ffeae418 2062 rc = -EINVAL;
1da177e4
LT
2063 goto err_out_nosup;
2064 }
6e7846e9 2065 dev->cdb_len = (unsigned int) rc;
1da177e4 2066
7d77b247
TH
2067 /* Enable ATAPI AN if both the host and device have
2068 * the support. If PMP is attached, SNTF is required
2069 * to enable ATAPI AN to discern between PHY status
2070 * changed notifications and ATAPI ANs.
9f45cbd3 2071 */
7d77b247
TH
2072 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2073 (!ap->nr_pmp_links ||
2074 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
854c73a2
TH
2075 unsigned int err_mask;
2076
9f45cbd3 2077 /* issue SET feature command to turn this on */
854c73a2
TH
2078 err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
2079 if (err_mask)
9f45cbd3 2080 ata_dev_printk(dev, KERN_ERR,
854c73a2
TH
2081 "failed to enable ATAPI AN "
2082 "(err_mask=0x%x)\n", err_mask);
2083 else {
9f45cbd3 2084 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2085 atapi_an_string = ", ATAPI AN";
2086 }
9f45cbd3
KCA
2087 }
2088
08a556db 2089 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2090 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2091 cdb_intr_string = ", CDB intr";
2092 }
312f7da2 2093
1da177e4 2094 /* print device info to dmesg */
5afc8142 2095 if (ata_msg_drv(ap) && print_info)
ef143d57 2096 ata_dev_printk(dev, KERN_INFO,
854c73a2 2097 "ATAPI: %s, %s, max %s%s%s\n",
ef143d57 2098 modelbuf, fwrevbuf,
12436c30 2099 ata_mode_string(xfer_mask),
854c73a2 2100 cdb_intr_string, atapi_an_string);
1da177e4
LT
2101 }
2102
914ed354
TH
2103 /* determine max_sectors */
2104 dev->max_sectors = ATA_MAX_SECTORS;
2105 if (dev->flags & ATA_DFLAG_LBA48)
2106 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2107
93590859
AC
2108 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2109 /* Let the user know. We don't want to disallow opens for
2110 rescue purposes, or in case the vendor is just a blithering
2111 idiot */
2dcb407e 2112 if (print_info) {
93590859
AC
2113 ata_dev_printk(dev, KERN_WARNING,
2114"Drive reports diagnostics failure. This may indicate a drive\n");
2115 ata_dev_printk(dev, KERN_WARNING,
2116"fault or invalid emulation. Contact drive vendor for information.\n");
2117 }
2118 }
2119
4b2f3ede 2120 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 2121 if (ata_dev_knobble(dev)) {
5afc8142 2122 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2123 ata_dev_printk(dev, KERN_INFO,
2124 "applying bridge limits\n");
5a529139 2125 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2126 dev->max_sectors = ATA_MAX_SECTORS;
2127 }
2128
75683fe7 2129 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2130 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2131 dev->max_sectors);
18d6e9d5 2132
4b2f3ede 2133 if (ap->ops->dev_config)
cd0d3bbc 2134 ap->ops->dev_config(dev);
4b2f3ede 2135
0dd4b21f
BP
2136 if (ata_msg_probe(ap))
2137 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2138 __FUNCTION__, ata_chk_status(ap));
ffeae418 2139 return 0;
1da177e4
LT
2140
2141err_out_nosup:
0dd4b21f 2142 if (ata_msg_probe(ap))
88574551
TH
2143 ata_dev_printk(dev, KERN_DEBUG,
2144 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2145 return rc;
1da177e4
LT
2146}
2147
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40 unconditionally.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2160
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80 unconditionally.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2173
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK unconditionally.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2185
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 *
 *	RETURNS:
 *	ATA_CBL_SATA unconditionally.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2197
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	/* each device gets a fixed retry budget; decremented on failure below */
	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* snapshot the classes reported by the reset; clear dev->class so
	 * that only devices which survive IDENTIFY get re-classified */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* phy_reset may have marked the port disabled; re-enable it */
	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	ata_link_for_each_dev(dev, &ap->link)
		dev->pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		/* ATA_EHI_PRINTINFO makes ata_dev_configure() log device info */
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through - also apply the -EIO slow-down below */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2333
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing ATA_FLAG_DISABLED is all it takes to (re)enable the port */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2349
3be680b7
TH
2350/**
2351 * sata_print_link_status - Print SATA link status
936fd732 2352 * @link: SATA link to printk link status about
3be680b7
TH
2353 *
2354 * This function prints link speed and status of a SATA link.
2355 *
2356 * LOCKING:
2357 * None.
2358 */
936fd732 2359void sata_print_link_status(struct ata_link *link)
3be680b7 2360{
6d5f9732 2361 u32 sstatus, scontrol, tmp;
3be680b7 2362
936fd732 2363 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2364 return;
936fd732 2365 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2366
936fd732 2367 if (ata_link_online(link)) {
3be680b7 2368 tmp = (sstatus >> 4) & 0xf;
936fd732 2369 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2370 "SATA link up %s (SStatus %X SControl %X)\n",
2371 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2372 } else {
936fd732 2373 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2374 "SATA link down (SStatus %X SControl %X)\n",
2375 sstatus, scontrol);
3be680b7
TH
2376 }
2377}
2378
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.  On success the port is
 *	marked probed and @ap->cbl is set to ATA_CBL_SATA;
 *	otherwise the port is disabled.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	struct ata_link *link = &ap->link;
	unsigned long timeout = jiffies + (HZ * 5);	/* 5 s PHY-ready budget */
	u32 sstatus;

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset (SControl DET = 1) */
		sata_scr_write_flush(link, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset (SControl DET = 0) */
	sata_scr_write_flush(link, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary; SStatus DET == 1
	 * means a device is present but communication isn't established yet */
	do {
		msleep(200);
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(link);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_link_offline(link))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device behind the link to go non-busy */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
2434
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.  If the PHY reset left the port
 *	disabled, the bus probe is skipped.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}
2453
ebdfca6e
AC
2454/**
2455 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2456 * @adev: device
2457 *
2458 * Obtain the other device on the same cable, or if none is
2459 * present NULL is returned
2460 */
2e9edbf8 2461
3373efd8 2462struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2463{
9af5c9c9
TH
2464 struct ata_link *link = adev->link;
2465 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2466 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2467 return NULL;
2468 return pair;
2469}
2470
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	/* mark both possible devices on the link as absent */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2490
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;	/* already at the lowest speed, can't go lower */

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2549
936fd732 2550static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d
TH
2551{
2552 u32 spd, limit;
2553
936fd732 2554 if (link->sata_spd_limit == UINT_MAX)
1c3fae4d
TH
2555 limit = 0;
2556 else
936fd732 2557 limit = fls(link->sata_spd_limit);
1c3fae4d
TH
2558
2559 spd = (*scontrol >> 4) & 0xf;
2560 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2561
2562 return spd != limit;
2563}
2564
2565/**
3c567b7d 2566 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2567 * @link: Link in question
1c3fae4d
TH
2568 *
2569 * Test whether the spd limit in SControl matches
936fd732 2570 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2571 * whether hardreset is necessary to apply SATA spd
2572 * configuration.
2573 *
2574 * LOCKING:
2575 * Inherited from caller.
2576 *
2577 * RETURNS:
2578 * 1 if SATA spd configuration is needed, 0 otherwise.
2579 */
936fd732 2580int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2581{
2582 u32 scontrol;
2583
936fd732 2584 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2585 return 0;
2586
936fd732 2587 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2588}
2589
2590/**
3c567b7d 2591 * sata_set_spd - set SATA spd according to spd limit
936fd732 2592 * @link: Link to set SATA spd for
1c3fae4d 2593 *
936fd732 2594 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2595 *
2596 * LOCKING:
2597 * Inherited from caller.
2598 *
2599 * RETURNS:
2600 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2601 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2602 */
936fd732 2603int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2604{
2605 u32 scontrol;
81952c54 2606 int rc;
1c3fae4d 2607
936fd732 2608 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2609 return rc;
1c3fae4d 2610
936fd732 2611 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2612 return 0;
2613
936fd732 2614 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2615 return rc;
2616
1c3fae4d
TH
2617 return 1;
2618}
2619
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

/* Columns: mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma.
 * Entries are ordered fastest-first within each transfer class; the
 * 0xFF mode terminates the table (see ata_timing_find_mode()).
 */
static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};
2668
/* ENOUGH: ceiling division for v > 0; EZ: same, but 0 stays 0
 * (a zero timing field means "not applicable", not "zero clocks") */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)

/* Convert nanosecond timings in @t into bus-clock counts in @q,
 * using cycle period T for PIO/MWDMA fields and UT for the UDMA field
 * (both in picoseconds, hence the *1000 on the nanosecond inputs). */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2683
/* Merge timings @a and @b into @m, taking the slower (larger) value for
 * each field selected by the @what bitmask.  @m may alias @a or @b. */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
2696
2dcb407e 2697static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
452503f9
AC
2698{
2699 const struct ata_timing *t;
2700
2701 for (t = ata_timing; t->mode != speed; t++)
91190758 2702 if (t->mode == 0xFF)
452503f9 2703 return NULL;
2e9edbf8 2704 return t;
452503f9
AC
2705}
2706
/* Compute quantized bus timings for @adev at transfer mode @speed.
 * @t receives the result; T and UT are the bus clock periods (ps) for
 * PIO/MWDMA and UDMA fields respectively.  Returns 0 on success,
 * -EINVAL if @speed has no entry in the timing table. */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2777
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET is a flag OR'd into the selector */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest remaining DMA mode, UDMA before MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to modes safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - FORCE_PIO0 also clears all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* must retain at least one PIO mode and actually change something */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
2866
/* Program @dev with its selected transfer mode (dev->xfer_mode /
 * dev->xfer_shift) via SET FEATURES - XFER MODE, then revalidate the
 * device.  Returns 0 on success, -EIO if the device rejects the
 * command, or the revalidation error. */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* ATA_EHI_POST_SETMODE tells revalidation that a mode change just
	 * happened, so certain ID changes are expected */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
2908
1da177e4 2909/**
04351821 2910 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2911 * @link: link on which timings will be programmed
e82cbdb9 2912 * @r_failed_dev: out paramter for failed device
1da177e4 2913 *
04351821
A
2914 * Standard implementation of the function used to tune and set
2915 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2916 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 2917 * returned in @r_failed_dev.
780a87f7 2918 *
1da177e4 2919 * LOCKING:
0cba632b 2920 * PCI/etc. bus probe sem.
e82cbdb9
TH
2921 *
2922 * RETURNS:
2923 * 0 on success, negative errno otherwise
1da177e4 2924 */
04351821 2925
0260731f 2926int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 2927{
0260731f 2928 struct ata_port *ap = link->ap;
e8e0619f 2929 struct ata_device *dev;
f58229f8 2930 int rc = 0, used_dma = 0, found = 0;
3adcebb2 2931
a6d5a51c 2932 /* step 1: calculate xfer_mask */
f58229f8 2933 ata_link_for_each_dev(dev, link) {
acf356b1 2934 unsigned int pio_mask, dma_mask;
b3a70601 2935 unsigned int mode_mask;
a6d5a51c 2936
e1211e3f 2937 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2938 continue;
2939
b3a70601
AC
2940 mode_mask = ATA_DMA_MASK_ATA;
2941 if (dev->class == ATA_DEV_ATAPI)
2942 mode_mask = ATA_DMA_MASK_ATAPI;
2943 else if (ata_id_is_cfa(dev->id))
2944 mode_mask = ATA_DMA_MASK_CFA;
2945
3373efd8 2946 ata_dev_xfermask(dev);
1da177e4 2947
acf356b1
TH
2948 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2949 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
2950
2951 if (libata_dma_mask & mode_mask)
2952 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2953 else
2954 dma_mask = 0;
2955
acf356b1
TH
2956 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2957 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2958
4f65977d 2959 found = 1;
5444a6f4
AC
2960 if (dev->dma_mode)
2961 used_dma = 1;
a6d5a51c 2962 }
4f65977d 2963 if (!found)
e82cbdb9 2964 goto out;
a6d5a51c
TH
2965
2966 /* step 2: always set host PIO timings */
f58229f8 2967 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2968 if (!ata_dev_enabled(dev))
2969 continue;
2970
2971 if (!dev->pio_mode) {
f15a1daf 2972 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2973 rc = -EINVAL;
e82cbdb9 2974 goto out;
e8e0619f
TH
2975 }
2976
2977 dev->xfer_mode = dev->pio_mode;
2978 dev->xfer_shift = ATA_SHIFT_PIO;
2979 if (ap->ops->set_piomode)
2980 ap->ops->set_piomode(ap, dev);
2981 }
1da177e4 2982
a6d5a51c 2983 /* step 3: set host DMA timings */
f58229f8 2984 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2985 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2986 continue;
2987
2988 dev->xfer_mode = dev->dma_mode;
2989 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2990 if (ap->ops->set_dmamode)
2991 ap->ops->set_dmamode(ap, dev);
2992 }
1da177e4
LT
2993
2994 /* step 4: update devices' xfer mode */
f58229f8 2995 ata_link_for_each_dev(dev, link) {
18d90deb 2996 /* don't update suspended devices' xfer mode */
9666f400 2997 if (!ata_dev_enabled(dev))
83206a29
TH
2998 continue;
2999
3373efd8 3000 rc = ata_dev_set_mode(dev);
5bbc53f4 3001 if (rc)
e82cbdb9 3002 goto out;
83206a29 3003 }
1da177e4 3004
e8e0619f
TH
3005 /* Record simplex status. If we selected DMA then the other
3006 * host channels are not permitted to do so.
5444a6f4 3007 */
cca3974e 3008 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3009 ap->host->simplex_claimed = ap;
5444a6f4 3010
e82cbdb9
TH
3011 out:
3012 if (rc)
3013 *r_failed_dev = dev;
3014 return rc;
1da177e4
LT
3015}
3016
04351821
A
3017/**
3018 * ata_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3019 * @link: link on which timings will be programmed
04351821
A
3020 * @r_failed_dev: out paramter for failed device
3021 *
3022 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3023 * ata_set_mode() fails, pointer to the failing device is
3024 * returned in @r_failed_dev.
3025 *
3026 * LOCKING:
3027 * PCI/etc. bus probe sem.
3028 *
3029 * RETURNS:
3030 * 0 on success, negative errno otherwise
3031 */
0260731f 3032int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
04351821 3033{
0260731f
TH
3034 struct ata_port *ap = link->ap;
3035
04351821
A
3036 /* has private set_mode? */
3037 if (ap->ops->set_mode)
0260731f
TH
3038 return ap->ops->set_mode(link, r_failed_dev);
3039 return ata_do_set_mode(link, r_failed_dev);
04351821
A
3040}
3041
1fdffbce
JG
3042/**
3043 * ata_tf_to_host - issue ATA taskfile to host controller
3044 * @ap: port to which command is being issued
3045 * @tf: ATA taskfile register set
3046 *
3047 * Issues ATA taskfile register set to ATA host controller,
3048 * with proper synchronization with interrupt handler and
3049 * other threads.
3050 *
3051 * LOCKING:
cca3974e 3052 * spin_lock_irqsave(host lock)
1fdffbce
JG
3053 */
3054
3055static inline void ata_tf_to_host(struct ata_port *ap,
3056 const struct ata_taskfile *tf)
3057{
3058 ap->ops->tf_load(ap, tf);
3059 ap->ops->exec_command(ap, tf);
3060}
3061
1da177e4
LT
3062/**
3063 * ata_busy_sleep - sleep until BSY clears, or timeout
3064 * @ap: port containing status register to be polled
3065 * @tmout_pat: impatience timeout
3066 * @tmout: overall timeout
3067 *
780a87f7
JG
3068 * Sleep until ATA Status register bit BSY clears,
3069 * or a timeout occurs.
3070 *
d1adc1bb
TH
3071 * LOCKING:
3072 * Kernel thread context (may sleep).
3073 *
3074 * RETURNS:
3075 * 0 on success, -errno otherwise.
1da177e4 3076 */
d1adc1bb
TH
3077int ata_busy_sleep(struct ata_port *ap,
3078 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
3079{
3080 unsigned long timer_start, timeout;
3081 u8 status;
3082
3083 status = ata_busy_wait(ap, ATA_BUSY, 300);
3084 timer_start = jiffies;
3085 timeout = timer_start + tmout_pat;
d1adc1bb
TH
3086 while (status != 0xff && (status & ATA_BUSY) &&
3087 time_before(jiffies, timeout)) {
1da177e4
LT
3088 msleep(50);
3089 status = ata_busy_wait(ap, ATA_BUSY, 3);
3090 }
3091
d1adc1bb 3092 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 3093 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
3094 "port is slow to respond, please be patient "
3095 "(Status 0x%x)\n", status);
1da177e4
LT
3096
3097 timeout = timer_start + tmout;
d1adc1bb
TH
3098 while (status != 0xff && (status & ATA_BUSY) &&
3099 time_before(jiffies, timeout)) {
1da177e4
LT
3100 msleep(50);
3101 status = ata_chk_status(ap);
3102 }
3103
d1adc1bb
TH
3104 if (status == 0xff)
3105 return -ENODEV;
3106
1da177e4 3107 if (status & ATA_BUSY) {
f15a1daf 3108 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
3109 "(%lu secs, Status 0x%x)\n",
3110 tmout / HZ, status);
d1adc1bb 3111 return -EBUSY;
1da177e4
LT
3112 }
3113
3114 return 0;
3115}
3116
d4b2bab4
TH
3117/**
3118 * ata_wait_ready - sleep until BSY clears, or timeout
3119 * @ap: port containing status register to be polled
3120 * @deadline: deadline jiffies for the operation
3121 *
3122 * Sleep until ATA Status register bit BSY clears, or timeout
3123 * occurs.
3124 *
3125 * LOCKING:
3126 * Kernel thread context (may sleep).
3127 *
3128 * RETURNS:
3129 * 0 on success, -errno otherwise.
3130 */
3131int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3132{
3133 unsigned long start = jiffies;
3134 int warned = 0;
3135
3136 while (1) {
3137 u8 status = ata_chk_status(ap);
3138 unsigned long now = jiffies;
3139
3140 if (!(status & ATA_BUSY))
3141 return 0;
936fd732 3142 if (!ata_link_online(&ap->link) && status == 0xff)
d4b2bab4
TH
3143 return -ENODEV;
3144 if (time_after(now, deadline))
3145 return -EBUSY;
3146
3147 if (!warned && time_after(now, start + 5 * HZ) &&
3148 (deadline - now > 3 * HZ)) {
3149 ata_port_printk(ap, KERN_WARNING,
3150 "port is slow to respond, please be patient "
3151 "(Status 0x%x)\n", status);
3152 warned = 1;
3153 }
3154
3155 msleep(50);
3156 }
3157}
3158
3159static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3160 unsigned long deadline)
1da177e4
LT
3161{
3162 struct ata_ioports *ioaddr = &ap->ioaddr;
3163 unsigned int dev0 = devmask & (1 << 0);
3164 unsigned int dev1 = devmask & (1 << 1);
9b89391c 3165 int rc, ret = 0;
1da177e4
LT
3166
3167 /* if device 0 was found in ata_devchk, wait for its
3168 * BSY bit to clear
3169 */
d4b2bab4
TH
3170 if (dev0) {
3171 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3172 if (rc) {
3173 if (rc != -ENODEV)
3174 return rc;
3175 ret = rc;
3176 }
d4b2bab4 3177 }
1da177e4 3178
e141d999
TH
3179 /* if device 1 was found in ata_devchk, wait for register
3180 * access briefly, then wait for BSY to clear.
1da177e4 3181 */
e141d999
TH
3182 if (dev1) {
3183 int i;
1da177e4
LT
3184
3185 ap->ops->dev_select(ap, 1);
e141d999
TH
3186
3187 /* Wait for register access. Some ATAPI devices fail
3188 * to set nsect/lbal after reset, so don't waste too
3189 * much time on it. We're gonna wait for !BSY anyway.
3190 */
3191 for (i = 0; i < 2; i++) {
3192 u8 nsect, lbal;
3193
3194 nsect = ioread8(ioaddr->nsect_addr);
3195 lbal = ioread8(ioaddr->lbal_addr);
3196 if ((nsect == 1) && (lbal == 1))
3197 break;
3198 msleep(50); /* give drive a breather */
3199 }
3200
d4b2bab4 3201 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3202 if (rc) {
3203 if (rc != -ENODEV)
3204 return rc;
3205 ret = rc;
3206 }
d4b2bab4 3207 }
1da177e4
LT
3208
3209 /* is all this really necessary? */
3210 ap->ops->dev_select(ap, 0);
3211 if (dev1)
3212 ap->ops->dev_select(ap, 1);
3213 if (dev0)
3214 ap->ops->dev_select(ap, 0);
d4b2bab4 3215
9b89391c 3216 return ret;
1da177e4
LT
3217}
3218
d4b2bab4
TH
3219static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3220 unsigned long deadline)
1da177e4
LT
3221{
3222 struct ata_ioports *ioaddr = &ap->ioaddr;
681c80b5
AC
3223 struct ata_device *dev;
3224 int i = 0;
1da177e4 3225
44877b4e 3226 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
3227
3228 /* software reset. causes dev0 to be selected */
0d5ff566
TH
3229 iowrite8(ap->ctl, ioaddr->ctl_addr);
3230 udelay(20); /* FIXME: flush */
3231 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3232 udelay(20); /* FIXME: flush */
3233 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4 3234
681c80b5
AC
3235 /* If we issued an SRST then an ATA drive (not ATAPI)
3236 * may have changed configuration and be in PIO0 timing. If
3237 * we did a hard reset (or are coming from power on) this is
3238 * true for ATA or ATAPI. Until we've set a suitable controller
3239 * mode we should not touch the bus as we may be talking too fast.
3240 */
3241
3242 ata_link_for_each_dev(dev, &ap->link)
3243 dev->pio_mode = XFER_PIO_0;
3244
3245 /* If the controller has a pio mode setup function then use
3246 it to set the chipset to rights. Don't touch the DMA setup
3247 as that will be dealt with when revalidating */
3248 if (ap->ops->set_piomode) {
3249 ata_link_for_each_dev(dev, &ap->link)
3250 if (devmask & (1 << i++))
3251 ap->ops->set_piomode(ap, dev);
3252 }
3253
1da177e4
LT
3254 /* spec mandates ">= 2ms" before checking status.
3255 * We wait 150ms, because that was the magic delay used for
3256 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3257 * between when the ATA command register is written, and then
3258 * status is checked. Because waiting for "a while" before
3259 * checking status is fine, post SRST, we perform this magic
3260 * delay here as well.
09c7ad79
AC
3261 *
3262 * Old drivers/ide uses the 2mS rule and then waits for ready
1da177e4
LT
3263 */
3264 msleep(150);
3265
2e9edbf8 3266 /* Before we perform post reset processing we want to see if
298a41ca
TH
3267 * the bus shows 0xFF because the odd clown forgets the D7
3268 * pulldown resistor.
3269 */
150981b0 3270 if (ata_chk_status(ap) == 0xFF)
9b89391c 3271 return -ENODEV;
09c7ad79 3272
d4b2bab4 3273 return ata_bus_post_reset(ap, devmask, deadline);
1da177e4
LT
3274}
3275
3276/**
3277 * ata_bus_reset - reset host port and associated ATA channel
3278 * @ap: port to reset
3279 *
3280 * This is typically the first time we actually start issuing
3281 * commands to the ATA channel. We wait for BSY to clear, then
3282 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3283 * result. Determine what devices, if any, are on the channel
3284 * by looking at the device 0/1 error register. Look at the signature
3285 * stored in each device's taskfile registers, to determine if
3286 * the device is ATA or ATAPI.
3287 *
3288 * LOCKING:
0cba632b 3289 * PCI/etc. bus probe sem.
cca3974e 3290 * Obtains host lock.
1da177e4
LT
3291 *
3292 * SIDE EFFECTS:
198e0fed 3293 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3294 */
3295
3296void ata_bus_reset(struct ata_port *ap)
3297{
9af5c9c9 3298 struct ata_device *device = ap->link.device;
1da177e4
LT
3299 struct ata_ioports *ioaddr = &ap->ioaddr;
3300 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3301 u8 err;
aec5c3c1 3302 unsigned int dev0, dev1 = 0, devmask = 0;
9b89391c 3303 int rc;
1da177e4 3304
44877b4e 3305 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3306
3307 /* determine if device 0/1 are present */
3308 if (ap->flags & ATA_FLAG_SATA_RESET)
3309 dev0 = 1;
3310 else {
3311 dev0 = ata_devchk(ap, 0);
3312 if (slave_possible)
3313 dev1 = ata_devchk(ap, 1);
3314 }
3315
3316 if (dev0)
3317 devmask |= (1 << 0);
3318 if (dev1)
3319 devmask |= (1 << 1);
3320
3321 /* select device 0 again */
3322 ap->ops->dev_select(ap, 0);
3323
3324 /* issue bus reset */
9b89391c
TH
3325 if (ap->flags & ATA_FLAG_SRST) {
3326 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3327 if (rc && rc != -ENODEV)
aec5c3c1 3328 goto err_out;
9b89391c 3329 }
1da177e4
LT
3330
3331 /*
3332 * determine by signature whether we have ATA or ATAPI devices
3333 */
3f19859e 3334 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
1da177e4 3335 if ((slave_possible) && (err != 0x81))
3f19859e 3336 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
1da177e4 3337
1da177e4 3338 /* is double-select really necessary? */
9af5c9c9 3339 if (device[1].class != ATA_DEV_NONE)
1da177e4 3340 ap->ops->dev_select(ap, 1);
9af5c9c9 3341 if (device[0].class != ATA_DEV_NONE)
1da177e4
LT
3342 ap->ops->dev_select(ap, 0);
3343
3344 /* if no devices were detected, disable this port */
9af5c9c9
TH
3345 if ((device[0].class == ATA_DEV_NONE) &&
3346 (device[1].class == ATA_DEV_NONE))
1da177e4
LT
3347 goto err_out;
3348
3349 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3350 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3351 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3352 }
3353
3354 DPRINTK("EXIT\n");
3355 return;
3356
3357err_out:
f15a1daf 3358 ata_port_printk(ap, KERN_ERR, "disabling port\n");
ac8869d5 3359 ata_port_disable(ap);
1da177e4
LT
3360
3361 DPRINTK("EXIT\n");
3362}
3363
d7bb4cc7 3364/**
936fd732
TH
3365 * sata_link_debounce - debounce SATA phy status
3366 * @link: ATA link to debounce SATA phy status for
d7bb4cc7 3367 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3368 * @deadline: deadline jiffies for the operation
d7bb4cc7 3369 *
936fd732 3370* Make sure SStatus of @link reaches stable state, determined by
d7bb4cc7
TH
3371 * holding the same value where DET is not 1 for @duration polled
3372 * every @interval, before @timeout. Timeout constraints the
d4b2bab4
TH
3373 * beginning of the stable state. Because DET gets stuck at 1 on
3374 * some controllers after hot unplugging, this functions waits
d7bb4cc7
TH
3375 * until timeout then returns 0 if DET is stable at 1.
3376 *
d4b2bab4
TH
3377 * @timeout is further limited by @deadline. The sooner of the
3378 * two is used.
3379 *
d7bb4cc7
TH
3380 * LOCKING:
3381 * Kernel thread context (may sleep)
3382 *
3383 * RETURNS:
3384 * 0 on success, -errno on failure.
3385 */
936fd732
TH
3386int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3387 unsigned long deadline)
7a7921e8 3388{
d7bb4cc7 3389 unsigned long interval_msec = params[0];
d4b2bab4
TH
3390 unsigned long duration = msecs_to_jiffies(params[1]);
3391 unsigned long last_jiffies, t;
d7bb4cc7
TH
3392 u32 last, cur;
3393 int rc;
3394
d4b2bab4
TH
3395 t = jiffies + msecs_to_jiffies(params[2]);
3396 if (time_before(t, deadline))
3397 deadline = t;
3398
936fd732 3399 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3400 return rc;
3401 cur &= 0xf;
3402
3403 last = cur;
3404 last_jiffies = jiffies;
3405
3406 while (1) {
3407 msleep(interval_msec);
936fd732 3408 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3409 return rc;
3410 cur &= 0xf;
3411
3412 /* DET stable? */
3413 if (cur == last) {
d4b2bab4 3414 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3415 continue;
3416 if (time_after(jiffies, last_jiffies + duration))
3417 return 0;
3418 continue;
3419 }
3420
3421 /* unstable, start over */
3422 last = cur;
3423 last_jiffies = jiffies;
3424
f1545154
TH
3425 /* Check deadline. If debouncing failed, return
3426 * -EPIPE to tell upper layer to lower link speed.
3427 */
d4b2bab4 3428 if (time_after(jiffies, deadline))
f1545154 3429 return -EPIPE;
d7bb4cc7
TH
3430 }
3431}
3432
3433/**
936fd732
TH
3434 * sata_link_resume - resume SATA link
3435 * @link: ATA link to resume SATA
d7bb4cc7 3436 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3437 * @deadline: deadline jiffies for the operation
d7bb4cc7 3438 *
936fd732 3439 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3440 *
3441 * LOCKING:
3442 * Kernel thread context (may sleep)
3443 *
3444 * RETURNS:
3445 * 0 on success, -errno on failure.
3446 */
936fd732
TH
3447int sata_link_resume(struct ata_link *link, const unsigned long *params,
3448 unsigned long deadline)
d7bb4cc7
TH
3449{
3450 u32 scontrol;
81952c54
TH
3451 int rc;
3452
936fd732 3453 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3454 return rc;
7a7921e8 3455
852ee16a 3456 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54 3457
936fd732 3458 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54 3459 return rc;
7a7921e8 3460
d7bb4cc7
TH
3461 /* Some PHYs react badly if SStatus is pounded immediately
3462 * after resuming. Delay 200ms before debouncing.
3463 */
3464 msleep(200);
7a7921e8 3465
936fd732 3466 return sata_link_debounce(link, params, deadline);
7a7921e8
TH
3467}
3468
f5914a46
TH
3469/**
3470 * ata_std_prereset - prepare for reset
cc0680a5 3471 * @link: ATA link to be reset
d4b2bab4 3472 * @deadline: deadline jiffies for the operation
f5914a46 3473 *
cc0680a5 3474 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3475 * prereset makes libata abort whole reset sequence and give up
3476 * that port, so prereset should be best-effort. It does its
3477 * best to prepare for reset sequence but if things go wrong, it
3478 * should just whine, not fail.
f5914a46
TH
3479 *
3480 * LOCKING:
3481 * Kernel thread context (may sleep)
3482 *
3483 * RETURNS:
3484 * 0 on success, -errno otherwise.
3485 */
cc0680a5 3486int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3487{
cc0680a5 3488 struct ata_port *ap = link->ap;
936fd732 3489 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3490 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3491 int rc;
3492
31daabda 3493 /* handle link resume */
28324304 3494 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
0c88758b 3495 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
28324304
TH
3496 ehc->i.action |= ATA_EH_HARDRESET;
3497
633273a3
TH
3498 /* Some PMPs don't work with only SRST, force hardreset if PMP
3499 * is supported.
3500 */
3501 if (ap->flags & ATA_FLAG_PMP)
3502 ehc->i.action |= ATA_EH_HARDRESET;
3503
f5914a46
TH
3504 /* if we're about to do hardreset, nothing more to do */
3505 if (ehc->i.action & ATA_EH_HARDRESET)
3506 return 0;
3507
936fd732 3508 /* if SATA, resume link */
a16abc0b 3509 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3510 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3511 /* whine about phy resume failure but proceed */
3512 if (rc && rc != -EOPNOTSUPP)
cc0680a5 3513 ata_link_printk(link, KERN_WARNING, "failed to resume "
f5914a46 3514 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3515 }
3516
3517 /* Wait for !BSY if the controller can wait for the first D2H
3518 * Reg FIS and we don't know that no device is attached.
3519 */
0c88758b 3520 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
b8cffc6a 3521 rc = ata_wait_ready(ap, deadline);
6dffaf61 3522 if (rc && rc != -ENODEV) {
cc0680a5 3523 ata_link_printk(link, KERN_WARNING, "device not ready "
b8cffc6a
TH
3524 "(errno=%d), forcing hardreset\n", rc);
3525 ehc->i.action |= ATA_EH_HARDRESET;
3526 }
3527 }
f5914a46
TH
3528
3529 return 0;
3530}
3531
c2bd5804
TH
3532/**
3533 * ata_std_softreset - reset host port via ATA SRST
cc0680a5 3534 * @link: ATA link to reset
c2bd5804 3535 * @classes: resulting classes of attached devices
d4b2bab4 3536 * @deadline: deadline jiffies for the operation
c2bd5804 3537 *
52783c5d 3538 * Reset host port using ATA SRST.
c2bd5804
TH
3539 *
3540 * LOCKING:
3541 * Kernel thread context (may sleep)
3542 *
3543 * RETURNS:
3544 * 0 on success, -errno otherwise.
3545 */
cc0680a5 3546int ata_std_softreset(struct ata_link *link, unsigned int *classes,
d4b2bab4 3547 unsigned long deadline)
c2bd5804 3548{
cc0680a5 3549 struct ata_port *ap = link->ap;
c2bd5804 3550 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
d4b2bab4
TH
3551 unsigned int devmask = 0;
3552 int rc;
c2bd5804
TH
3553 u8 err;
3554
3555 DPRINTK("ENTER\n");
3556
936fd732 3557 if (ata_link_offline(link)) {
3a39746a
TH
3558 classes[0] = ATA_DEV_NONE;
3559 goto out;
3560 }
3561
c2bd5804
TH
3562 /* determine if device 0/1 are present */
3563 if (ata_devchk(ap, 0))
3564 devmask |= (1 << 0);
3565 if (slave_possible && ata_devchk(ap, 1))
3566 devmask |= (1 << 1);
3567
c2bd5804
TH
3568 /* select device 0 again */
3569 ap->ops->dev_select(ap, 0);
3570
3571 /* issue bus reset */
3572 DPRINTK("about to softreset, devmask=%x\n", devmask);
d4b2bab4 3573 rc = ata_bus_softreset(ap, devmask, deadline);
9b89391c 3574 /* if link is occupied, -ENODEV too is an error */
936fd732 3575 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
cc0680a5 3576 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
d4b2bab4 3577 return rc;
c2bd5804
TH
3578 }
3579
3580 /* determine by signature whether we have ATA or ATAPI devices */
3f19859e
TH
3581 classes[0] = ata_dev_try_classify(&link->device[0],
3582 devmask & (1 << 0), &err);
c2bd5804 3583 if (slave_possible && err != 0x81)
3f19859e
TH
3584 classes[1] = ata_dev_try_classify(&link->device[1],
3585 devmask & (1 << 1), &err);
c2bd5804 3586
3a39746a 3587 out:
c2bd5804
TH
3588 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3589 return 0;
3590}
3591
3592/**
cc0680a5
TH
3593 * sata_link_hardreset - reset link via SATA phy reset
3594 * @link: link to reset
b6103f6d 3595 * @timing: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3596 * @deadline: deadline jiffies for the operation
c2bd5804 3597 *
cc0680a5 3598 * SATA phy-reset @link using DET bits of SControl register.
c2bd5804
TH
3599 *
3600 * LOCKING:
3601 * Kernel thread context (may sleep)
3602 *
3603 * RETURNS:
3604 * 0 on success, -errno otherwise.
3605 */
cc0680a5 3606int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
d4b2bab4 3607 unsigned long deadline)
c2bd5804 3608{
852ee16a 3609 u32 scontrol;
81952c54 3610 int rc;
852ee16a 3611
c2bd5804
TH
3612 DPRINTK("ENTER\n");
3613
936fd732 3614 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
3615 /* SATA spec says nothing about how to reconfigure
3616 * spd. To be on the safe side, turn off phy during
3617 * reconfiguration. This works for at least ICH7 AHCI
3618 * and Sil3124.
3619 */
936fd732 3620 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3621 goto out;
81952c54 3622
a34b6fc0 3623 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 3624
936fd732 3625 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 3626 goto out;
1c3fae4d 3627
936fd732 3628 sata_set_spd(link);
1c3fae4d
TH
3629 }
3630
3631 /* issue phy wake/reset */
936fd732 3632 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3633 goto out;
81952c54 3634
852ee16a 3635 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 3636
936fd732 3637 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 3638 goto out;
c2bd5804 3639
1c3fae4d 3640 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3641 * 10.4.2 says at least 1 ms.
3642 */
3643 msleep(1);
3644
936fd732
TH
3645 /* bring link back */
3646 rc = sata_link_resume(link, timing, deadline);
b6103f6d
TH
3647 out:
3648 DPRINTK("EXIT, rc=%d\n", rc);
3649 return rc;
3650}
3651
3652/**
3653 * sata_std_hardreset - reset host port via SATA phy reset
cc0680a5 3654 * @link: link to reset
b6103f6d 3655 * @class: resulting class of attached device
d4b2bab4 3656 * @deadline: deadline jiffies for the operation
b6103f6d
TH
3657 *
3658 * SATA phy-reset host port using DET bits of SControl register,
3659 * wait for !BSY and classify the attached device.
3660 *
3661 * LOCKING:
3662 * Kernel thread context (may sleep)
3663 *
3664 * RETURNS:
3665 * 0 on success, -errno otherwise.
3666 */
cc0680a5 3667int sata_std_hardreset(struct ata_link *link, unsigned int *class,
d4b2bab4 3668 unsigned long deadline)
b6103f6d 3669{
cc0680a5 3670 struct ata_port *ap = link->ap;
936fd732 3671 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
b6103f6d
TH
3672 int rc;
3673
3674 DPRINTK("ENTER\n");
3675
3676 /* do hardreset */
cc0680a5 3677 rc = sata_link_hardreset(link, timing, deadline);
b6103f6d 3678 if (rc) {
cc0680a5 3679 ata_link_printk(link, KERN_ERR,
b6103f6d
TH
3680 "COMRESET failed (errno=%d)\n", rc);
3681 return rc;
3682 }
c2bd5804 3683
c2bd5804 3684 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 3685 if (ata_link_offline(link)) {
c2bd5804
TH
3686 *class = ATA_DEV_NONE;
3687 DPRINTK("EXIT, link offline\n");
3688 return 0;
3689 }
3690
34fee227
TH
3691 /* wait a while before checking status, see SRST for more info */
3692 msleep(150);
3693
633273a3
TH
3694 /* If PMP is supported, we have to do follow-up SRST. Note
3695 * that some PMPs don't send D2H Reg FIS after hardreset at
3696 * all if the first port is empty. Wait for it just for a
3697 * second and request follow-up SRST.
3698 */
3699 if (ap->flags & ATA_FLAG_PMP) {
3700 ata_wait_ready(ap, jiffies + HZ);
3701 return -EAGAIN;
3702 }
3703
d4b2bab4 3704 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3705 /* link occupied, -ENODEV too is an error */
3706 if (rc) {
cc0680a5 3707 ata_link_printk(link, KERN_ERR,
d4b2bab4
TH
3708 "COMRESET failed (errno=%d)\n", rc);
3709 return rc;
c2bd5804
TH
3710 }
3711
3a39746a
TH
3712 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3713
3f19859e 3714 *class = ata_dev_try_classify(link->device, 1, NULL);
c2bd5804
TH
3715
3716 DPRINTK("EXIT, class=%u\n", *class);
3717 return 0;
3718}
3719
3720/**
3721 * ata_std_postreset - standard postreset callback
cc0680a5 3722 * @link: the target ata_link
c2bd5804
TH
3723 * @classes: classes of attached devices
3724 *
3725 * This function is invoked after a successful reset. Note that
3726 * the device might have been reset more than once using
3727 * different reset methods before postreset is invoked.
c2bd5804 3728 *
c2bd5804
TH
3729 * LOCKING:
3730 * Kernel thread context (may sleep)
3731 */
cc0680a5 3732void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 3733{
cc0680a5 3734 struct ata_port *ap = link->ap;
dc2b3515
TH
3735 u32 serror;
3736
c2bd5804
TH
3737 DPRINTK("ENTER\n");
3738
c2bd5804 3739 /* print link status */
936fd732 3740 sata_print_link_status(link);
c2bd5804 3741
dc2b3515 3742 /* clear SError */
936fd732
TH
3743 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3744 sata_scr_write(link, SCR_ERROR, serror);
dc2b3515 3745
c2bd5804
TH
3746 /* is double-select really necessary? */
3747 if (classes[0] != ATA_DEV_NONE)
3748 ap->ops->dev_select(ap, 1);
3749 if (classes[1] != ATA_DEV_NONE)
3750 ap->ops->dev_select(ap, 0);
3751
3a39746a
TH
3752 /* bail out if no device is present */
3753 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3754 DPRINTK("EXIT, no device\n");
3755 return;
3756 }
3757
3758 /* set up device control */
0d5ff566
TH
3759 if (ap->ioaddr.ctl_addr)
3760 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3761
3762 DPRINTK("EXIT\n");
3763}
3764
623a3128
TH
3765/**
3766 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3767 * @dev: device to compare against
3768 * @new_class: class of the new device
3769 * @new_id: IDENTIFY page of the new device
3770 *
3771 * Compare @new_class and @new_id against @dev and determine
3772 * whether @dev is the device indicated by @new_class and
3773 * @new_id.
3774 *
3775 * LOCKING:
3776 * None.
3777 *
3778 * RETURNS:
3779 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3780 */
3373efd8
TH
3781static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3782 const u16 *new_id)
623a3128
TH
3783{
3784 const u16 *old_id = dev->id;
a0cf733b
TH
3785 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3786 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3787
3788 if (dev->class != new_class) {
f15a1daf
TH
3789 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3790 dev->class, new_class);
623a3128
TH
3791 return 0;
3792 }
3793
a0cf733b
TH
3794 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3795 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3796 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3797 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3798
3799 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3800 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3801 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3802 return 0;
3803 }
3804
3805 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3806 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3807 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3808 return 0;
3809 }
3810
623a3128
TH
3811 return 1;
3812}
3813
3814/**
fe30911b 3815 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3816 * @dev: target ATA device
bff04647 3817 * @readid_flags: read ID flags
623a3128
TH
3818 *
3819 * Re-read IDENTIFY page and make sure @dev is still attached to
3820 * the port.
3821 *
3822 * LOCKING:
3823 * Kernel thread context (may sleep)
3824 *
3825 * RETURNS:
3826 * 0 on success, negative errno otherwise
3827 */
fe30911b 3828int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3829{
5eb45c02 3830 unsigned int class = dev->class;
9af5c9c9 3831 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3832 int rc;
3833
fe635c7e 3834 /* read ID data */
bff04647 3835 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3836 if (rc)
fe30911b 3837 return rc;
623a3128
TH
3838
3839 /* is the device still there? */
fe30911b
TH
3840 if (!ata_dev_same_device(dev, class, id))
3841 return -ENODEV;
623a3128 3842
fe635c7e 3843 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3844 return 0;
3845}
3846
3847/**
3848 * ata_dev_revalidate - Revalidate ATA device
3849 * @dev: device to revalidate
422c9daa 3850 * @new_class: new class code
fe30911b
TH
3851 * @readid_flags: read ID flags
3852 *
3853 * Re-read IDENTIFY page, make sure @dev is still attached to the
3854 * port and reconfigure it according to the new IDENTIFY page.
3855 *
3856 * LOCKING:
3857 * Kernel thread context (may sleep)
3858 *
3859 * RETURNS:
3860 * 0 on success, negative errno otherwise
3861 */
422c9daa
TH
3862int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3863 unsigned int readid_flags)
fe30911b 3864{
6ddcd3b0 3865 u64 n_sectors = dev->n_sectors;
fe30911b
TH
3866 int rc;
3867
3868 if (!ata_dev_enabled(dev))
3869 return -ENODEV;
3870
422c9daa
TH
3871 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3872 if (ata_class_enabled(new_class) &&
3873 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3874 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3875 dev->class, new_class);
3876 rc = -ENODEV;
3877 goto fail;
3878 }
3879
fe30911b
TH
3880 /* re-read ID */
3881 rc = ata_dev_reread_id(dev, readid_flags);
3882 if (rc)
3883 goto fail;
623a3128
TH
3884
3885 /* configure device according to the new ID */
efdaedc4 3886 rc = ata_dev_configure(dev);
6ddcd3b0
TH
3887 if (rc)
3888 goto fail;
3889
3890 /* verify n_sectors hasn't changed */
b54eebd6
TH
3891 if (dev->class == ATA_DEV_ATA && n_sectors &&
3892 dev->n_sectors != n_sectors) {
6ddcd3b0
TH
3893 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3894 "%llu != %llu\n",
3895 (unsigned long long)n_sectors,
3896 (unsigned long long)dev->n_sectors);
8270bec4
TH
3897
3898 /* restore original n_sectors */
3899 dev->n_sectors = n_sectors;
3900
6ddcd3b0
TH
3901 rc = -ENODEV;
3902 goto fail;
3903 }
3904
3905 return 0;
623a3128
TH
3906
3907 fail:
f15a1daf 3908 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3909 return rc;
3910}
3911
6919a0a6
AC
3912struct ata_blacklist_entry {
3913 const char *model_num;
3914 const char *model_rev;
3915 unsigned long horkage;
3916};
3917
/* Quirk table consulted by ata_dev_blacklisted().  First matching entry
 * wins, so more specific (model + firmware) entries should precede the
 * broader model-only patterns.  Terminated by an all-zero entry. */
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ "IOMEGA ZIP 250 ATAPI", NULL,	ATA_HORKAGE_NODMA }, /* temporary fix */
	{ "IOMEGA ZIP 250 ATAPI Floppy",
				NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config Disk",	NULL,		ATA_HORKAGE_NODMA |
						ATA_HORKAGE_SKIP_PM },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7250SASUN500G*", NULL,	ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7225SBSUN250G*", NULL,	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
	/* Drives which do spurious command completion */
	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
	{ "HDT722516DLA380",	"V43OA96A",	ATA_HORKAGE_NONCQ, },
	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	{ "WDC WD3200AAJS-00RYA0", "12.01B01",	ATA_HORKAGE_NONCQ, },
	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
	{ "ST9120822AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.ALD",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.CCD",	ATA_HORKAGE_NONCQ, },
	{ "ST3160812AS",	"3.ADJ",	ATA_HORKAGE_NONCQ, },
	{ "ST980813AS",		"3.ADB",	ATA_HORKAGE_NONCQ, },
	{ "SAMSUNG HD401LJ",	"ZZ100-15",	ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* End Marker */
	{ }
};
2e9edbf8 4008
/**
 *	strn_pattern_cmp - compare a string against a simple pattern
 *	@patt: pattern; only a single *trailing* @wildchar is honoured
 *	@name: string to test
 *	@wildchar: wildcard character (normally '*')
 *
 *	With a trailing wildcard only the prefix before it is compared.
 *	Without one, comparison is bounded by strlen(@name), so an empty
 *	@name matches only an empty @patt.
 *
 *	RETURNS:
 *	0 on match, non-zero otherwise (strncmp() convention).
 */
int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *wild = strchr(patt, wildchar);
	size_t cmp_len;

	if (wild && wild[1] == '\0') {
		/* trailing wildcard: match the literal prefix only */
		cmp_len = wild - patt;
	} else {
		cmp_len = strlen(name);
		if (cmp_len == 0)
			return *patt ? -1 : 0;
	}

	return strncmp(patt, name, cmp_len);
}
4031
75683fe7 4032static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4033{
8bfa79fc
TH
4034 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4035 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4036 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4037
8bfa79fc
TH
4038 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4039 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4040
6919a0a6 4041 while (ad->model_num) {
539cc7c7 4042 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
4043 if (ad->model_rev == NULL)
4044 return ad->horkage;
539cc7c7 4045 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4046 return ad->horkage;
f4b15fef 4047 }
6919a0a6 4048 ad++;
f4b15fef 4049 }
1da177e4
LT
4050 return 0;
4051}
4052
6919a0a6
AC
4053static int ata_dma_blacklisted(const struct ata_device *dev)
4054{
4055 /* We don't support polling DMA.
4056 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4057 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4058 */
9af5c9c9 4059 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4060 (dev->flags & ATA_DFLAG_CDB_INTR))
4061 return 1;
75683fe7 4062 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4063}
4064
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	The limits are applied as successive AND-masks; the order is
 *	significant only for the final cable check (see below).
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one port may use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLDD a final veto over the mode mask */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if ((ap->cbl == ATA_CBL_PATA40) ||
		    (ata_drive_40wire(dev->id) &&
		    (ap->cbl == ATA_CBL_PATA_UNK ||
		     ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				 "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4145
1da177e4
LT
4146/**
4147 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4148 * @dev: Device to which command will be sent
4149 *
780a87f7
JG
4150 * Issue SET FEATURES - XFER MODE command to device @dev
4151 * on port @ap.
4152 *
1da177e4 4153 * LOCKING:
0cba632b 4154 * PCI/etc. bus probe sem.
83206a29
TH
4155 *
4156 * RETURNS:
4157 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4158 */
4159
3373efd8 4160static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4161{
a0123703 4162 struct ata_taskfile tf;
83206a29 4163 unsigned int err_mask;
1da177e4
LT
4164
4165 /* set up set-features taskfile */
4166 DPRINTK("set features - xfer mode\n");
4167
464cf177
TH
4168 /* Some controllers and ATAPI devices show flaky interrupt
4169 * behavior after setting xfer mode. Use polling instead.
4170 */
3373efd8 4171 ata_tf_init(dev, &tf);
a0123703
TH
4172 tf.command = ATA_CMD_SET_FEATURES;
4173 tf.feature = SETFEATURES_XFER;
464cf177 4174 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703
TH
4175 tf.protocol = ATA_PROT_NODATA;
4176 tf.nsect = dev->xfer_mode;
1da177e4 4177
2b789108 4178 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
9f45cbd3
KCA
4179
4180 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4181 return err_mask;
4182}
4183
4184/**
4185 * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
4186 * @dev: Device to which command will be sent
4187 * @enable: Whether to enable or disable the feature
4188 *
4189 * Issue SET FEATURES - SATA FEATURES command to device @dev
4190 * on port @ap with sector count set to indicate Asynchronous
4191 * Notification feature
4192 *
4193 * LOCKING:
4194 * PCI/etc. bus probe sem.
4195 *
4196 * RETURNS:
4197 * 0 on success, AC_ERR_* mask otherwise.
4198 */
4199static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
4200{
4201 struct ata_taskfile tf;
4202 unsigned int err_mask;
4203
4204 /* set up set-features taskfile */
4205 DPRINTK("set features - SATA features\n");
4206
4207 ata_tf_init(dev, &tf);
4208 tf.command = ATA_CMD_SET_FEATURES;
4209 tf.feature = enable;
4210 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4211 tf.protocol = ATA_PROT_NODATA;
4212 tf.nsect = SATA_AN;
4213
2b789108 4214 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1da177e4 4215
83206a29
TH
4216 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4217 return err_mask;
1da177e4
LT
4218}
4219
8bf62ece
AL
4220/**
4221 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4222 * @dev: Device to which command will be sent
e2a7f77a
RD
4223 * @heads: Number of heads (taskfile parameter)
4224 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4225 *
4226 * LOCKING:
6aff8f1f
TH
4227 * Kernel thread context (may sleep)
4228 *
4229 * RETURNS:
4230 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4231 */
3373efd8
TH
4232static unsigned int ata_dev_init_params(struct ata_device *dev,
4233 u16 heads, u16 sectors)
8bf62ece 4234{
a0123703 4235 struct ata_taskfile tf;
6aff8f1f 4236 unsigned int err_mask;
8bf62ece
AL
4237
4238 /* Number of sectors per track 1-255. Number of heads 1-16 */
4239 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4240 return AC_ERR_INVALID;
8bf62ece
AL
4241
4242 /* set up init dev params taskfile */
4243 DPRINTK("init dev params \n");
4244
3373efd8 4245 ata_tf_init(dev, &tf);
a0123703
TH
4246 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4247 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4248 tf.protocol = ATA_PROT_NODATA;
4249 tf.nsect = sectors;
4250 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4251
2b789108 4252 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4253 /* A clean abort indicates an original or just out of spec drive
4254 and we should continue as we issue the setup based on the
4255 drive reported working geometry */
4256 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4257 err_mask = 0;
8bf62ece 4258
6aff8f1f
TH
4259 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4260 return err_mask;
8bf62ece
AL
4261}
4262
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command,
 *	undo the 32-bit padding applied by ata_sg_setup{,_one}(), and
 *	for reads copy any padded tail back into the caller's buffer.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		/* scatter-gather case: n_elem may be 0 if the whole
		 * transfer was absorbed by the pad buffer */
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg (its length was trimmed at setup) */
		sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		/* single-buffer case */
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
4320
4321/**
4322 * ata_fill_sg - Fill PCI IDE PRD table
4323 * @qc: Metadata associated with taskfile to be transferred
4324 *
780a87f7
JG
4325 * Fill PCI IDE PRD (scatter-gather) table with segments
4326 * associated with the current disk command.
4327 *
1da177e4 4328 * LOCKING:
cca3974e 4329 * spin_lock_irqsave(host lock)
1da177e4
LT
4330 *
4331 */
4332static void ata_fill_sg(struct ata_queued_cmd *qc)
4333{
1da177e4 4334 struct ata_port *ap = qc->ap;
cedc9a47
JG
4335 struct scatterlist *sg;
4336 unsigned int idx;
1da177e4 4337
a4631474 4338 WARN_ON(qc->__sg == NULL);
f131883e 4339 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4340
4341 idx = 0;
cedc9a47 4342 ata_for_each_sg(sg, qc) {
1da177e4
LT
4343 u32 addr, offset;
4344 u32 sg_len, len;
4345
4346 /* determine if physical DMA addr spans 64K boundary.
4347 * Note h/w doesn't support 64-bit, so we unconditionally
4348 * truncate dma_addr_t to u32.
4349 */
4350 addr = (u32) sg_dma_address(sg);
4351 sg_len = sg_dma_len(sg);
4352
4353 while (sg_len) {
4354 offset = addr & 0xffff;
4355 len = sg_len;
4356 if ((offset + sg_len) > 0x10000)
4357 len = 0x10000 - offset;
4358
4359 ap->prd[idx].addr = cpu_to_le32(addr);
4360 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4361 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4362
4363 idx++;
4364 sg_len -= len;
4365 addr += len;
4366 }
4367 }
4368
4369 if (idx)
4370 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4371}
b9a4197e 4372
/**
 *	ata_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command. Perform the fill
 *	so that we avoid writing any length 64K records for
 *	controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			/* blen == 0 means a full 64K chunk */
			blen = len & 0xffff;
			ap->prd[idx].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec says */
				/* split into two 32K PRD entries: this slot
				 * keeps the first half, and the pre-increment
				 * below moves idx to the second-half entry,
				 * which receives blen via the common store */
				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[idx].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
4434
1da177e4
LT
4435/**
4436 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4437 * @qc: Metadata associated with taskfile to check
4438 *
780a87f7
JG
4439 * Allow low-level driver to filter ATA PACKET commands, returning
4440 * a status indicating whether or not it is OK to use DMA for the
4441 * supplied PACKET command.
4442 *
1da177e4 4443 * LOCKING:
cca3974e 4444 * spin_lock_irqsave(host lock)
0cba632b 4445 *
1da177e4
LT
4446 * RETURNS: 0 when ATAPI DMA can be used
4447 * nonzero otherwise
4448 */
4449int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4450{
4451 struct ata_port *ap = qc->ap;
b9a4197e
TH
4452
4453 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4454 * few ATAPI devices choke on such DMA requests.
4455 */
4456 if (unlikely(qc->nbytes & 15))
4457 return 1;
6f23a31d 4458
1da177e4 4459 if (ap->ops->check_atapi_dma)
b9a4197e 4460 return ap->ops->check_atapi_dma(qc);
1da177e4 4461
b9a4197e 4462 return 0;
1da177e4 4463}
b9a4197e 4464
31cc23b3
TH
4465/**
4466 * ata_std_qc_defer - Check whether a qc needs to be deferred
4467 * @qc: ATA command in question
4468 *
4469 * Non-NCQ commands cannot run with any other command, NCQ or
4470 * not. As upper layer only knows the queue depth, we are
4471 * responsible for maintaining exclusion. This function checks
4472 * whether a new command @qc can be issued.
4473 *
4474 * LOCKING:
4475 * spin_lock_irqsave(host lock)
4476 *
4477 * RETURNS:
4478 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4479 */
4480int ata_std_qc_defer(struct ata_queued_cmd *qc)
4481{
4482 struct ata_link *link = qc->dev->link;
4483
4484 if (qc->tf.protocol == ATA_PROT_NCQ) {
4485 if (!ata_tag_valid(link->active_tag))
4486 return 0;
4487 } else {
4488 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4489 return 0;
4490 }
4491
4492 return ATA_DEFER_LINK;
4493}
4494
1da177e4
LT
4495/**
4496 * ata_qc_prep - Prepare taskfile for submission
4497 * @qc: Metadata associated with taskfile to be prepared
4498 *
780a87f7
JG
4499 * Prepare ATA taskfile for submission.
4500 *
1da177e4 4501 * LOCKING:
cca3974e 4502 * spin_lock_irqsave(host lock)
1da177e4
LT
4503 */
4504void ata_qc_prep(struct ata_queued_cmd *qc)
4505{
4506 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4507 return;
4508
4509 ata_fill_sg(qc);
4510}
4511
d26fc955
AC
4512/**
4513 * ata_dumb_qc_prep - Prepare taskfile for submission
4514 * @qc: Metadata associated with taskfile to be prepared
4515 *
4516 * Prepare ATA taskfile for submission.
4517 *
4518 * LOCKING:
4519 * spin_lock_irqsave(host lock)
4520 */
4521void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4522{
4523 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4524 return;
4525
4526 ata_fill_sg_dumb(qc);
4527}
4528
/* ->qc_prep stub for controllers that need no PRD/DMA setup */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4530
0cba632b
JG
4531/**
4532 * ata_sg_init_one - Associate command with memory buffer
4533 * @qc: Command to be associated
4534 * @buf: Memory buffer
4535 * @buflen: Length of memory buffer, in bytes.
4536 *
4537 * Initialize the data-related elements of queued_cmd @qc
4538 * to point to a single memory buffer, @buf of byte length @buflen.
4539 *
4540 * LOCKING:
cca3974e 4541 * spin_lock_irqsave(host lock)
0cba632b
JG
4542 */
4543
1da177e4
LT
4544void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4545{
1da177e4
LT
4546 qc->flags |= ATA_QCFLAG_SINGLE;
4547
cedc9a47 4548 qc->__sg = &qc->sgent;
1da177e4 4549 qc->n_elem = 1;
cedc9a47 4550 qc->orig_n_elem = 1;
1da177e4 4551 qc->buf_virt = buf;
233277ca 4552 qc->nbytes = buflen;
87260216 4553 qc->cursg = qc->__sg;
1da177e4 4554
61c0596c 4555 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4556}
4557
0cba632b
JG
4558/**
4559 * ata_sg_init - Associate command with scatter-gather table.
4560 * @qc: Command to be associated
4561 * @sg: Scatter-gather table.
4562 * @n_elem: Number of elements in s/g table.
4563 *
4564 * Initialize the data-related elements of queued_cmd @qc
4565 * to point to a scatter-gather table @sg, containing @n_elem
4566 * elements.
4567 *
4568 * LOCKING:
cca3974e 4569 * spin_lock_irqsave(host lock)
0cba632b
JG
4570 */
4571
1da177e4
LT
4572void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4573 unsigned int n_elem)
4574{
4575 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4576 qc->__sg = sg;
1da177e4 4577 qc->n_elem = n_elem;
cedc9a47 4578 qc->orig_n_elem = n_elem;
87260216 4579 qc->cursg = qc->__sg;
1da177e4
LT
4580}
4581
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *	If the buffer length is not a multiple of 4, the last 1-3
 *	bytes are redirected through the per-tag pad buffer so the
 *	transfer ends on a 32-bit boundary; ata_sg_clean() undoes this.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* padding only ever applies to ATAPI transfers */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, preload the pad buffer with the tail bytes;
		 * for reads ata_sg_clean() copies them back afterwards */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		/* a fully-absorbed sg entry must not be DMA-mapped at all */
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
4650
4651/**
0cba632b
JG
4652 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4653 * @qc: Command with scatter-gather table to be mapped.
4654 *
4655 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
4656 *
4657 * LOCKING:
cca3974e 4658 * spin_lock_irqsave(host lock)
1da177e4
LT
4659 *
4660 * RETURNS:
0cba632b 4661 * Zero on success, negative on error.
1da177e4
LT
4662 *
4663 */
4664
4665static int ata_sg_setup(struct ata_queued_cmd *qc)
4666{
4667 struct ata_port *ap = qc->ap;
cedc9a47 4668 struct scatterlist *sg = qc->__sg;
87260216 4669 struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
e1410f2d 4670 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4 4671
44877b4e 4672 VPRINTK("ENTER, ata%u\n", ap->print_id);
a4631474 4673 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 4674
cedc9a47
JG
4675 /* we must lengthen transfers to end on a 32-bit boundary */
4676 qc->pad_len = lsg->length & 3;
4677 if (qc->pad_len) {
4678 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4679 struct scatterlist *psg = &qc->pad_sgent;
4680 unsigned int offset;
4681
a4631474 4682 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4683
4684 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4685
4686 /*
4687 * psg->page/offset are used to copy to-be-written
4688 * data in this function or read data in ata_sg_clean.
4689 */
4690 offset = lsg->offset + lsg->length - qc->pad_len;
45711f1a 4691 sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT));
cedc9a47
JG
4692 psg->offset = offset_in_page(offset);
4693
4694 if (qc->tf.flags & ATA_TFLAG_WRITE) {
45711f1a 4695 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
cedc9a47 4696 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 4697 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4698 }
4699
4700 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4701 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4702 /* trim last sg */
4703 lsg->length -= qc->pad_len;
e1410f2d
JG
4704 if (lsg->length == 0)
4705 trim_sg = 1;
cedc9a47
JG
4706
4707 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4708 qc->n_elem - 1, lsg->length, qc->pad_len);
4709 }
4710
e1410f2d
JG
4711 pre_n_elem = qc->n_elem;
4712 if (trim_sg && pre_n_elem)
4713 pre_n_elem--;
4714
4715 if (!pre_n_elem) {
4716 n_elem = 0;
4717 goto skip_map;
4718 }
4719
1da177e4 4720 dir = qc->dma_dir;
2f1f610b 4721 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
4722 if (n_elem < 1) {
4723 /* restore last sg */
4724 lsg->length += qc->pad_len;
1da177e4 4725 return -1;
537a95d9 4726 }
1da177e4
LT
4727
4728 DPRINTK("%d sg elements mapped\n", n_elem);
4729
e1410f2d 4730skip_map:
1da177e4
LT
4731 qc->n_elem = n_elem;
4732
4733 return 0;
4734}
4735
0baab86b 4736/**
c893a3ae 4737 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4738 * @buf: Buffer to swap
4739 * @buf_words: Number of 16-bit words in buffer.
4740 *
4741 * Swap halves of 16-bit words if needed to convert from
4742 * little-endian byte order to native cpu byte order, or
4743 * vice-versa.
4744 *
4745 * LOCKING:
6f0ef4fa 4746 * Inherited from caller.
0baab86b 4747 */
1da177e4
LT
4748void swap_buf_le16(u16 *buf, unsigned int buf_words)
4749{
4750#ifdef __BIG_ENDIAN
4751 unsigned int i;
4752
4753 for (i = 0; i < buf_words; i++)
4754 buf[i] = le16_to_cpu(buf[i]);
4755#endif /* __BIG_ENDIAN */
4756}
4757
6ae4cfb5 4758/**
0d5ff566 4759 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4760 * @adev: device to target
6ae4cfb5
AL
4761 * @buf: data buffer
4762 * @buflen: buffer length
344babaa 4763 * @write_data: read/write
6ae4cfb5
AL
4764 *
4765 * Transfer data from/to the device data register by PIO.
4766 *
4767 * LOCKING:
4768 * Inherited from caller.
6ae4cfb5 4769 */
0d5ff566
TH
4770void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4771 unsigned int buflen, int write_data)
1da177e4 4772{
9af5c9c9 4773 struct ata_port *ap = adev->link->ap;
6ae4cfb5 4774 unsigned int words = buflen >> 1;
1da177e4 4775
6ae4cfb5 4776 /* Transfer multiple of 2 bytes */
1da177e4 4777 if (write_data)
0d5ff566 4778 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4779 else
0d5ff566 4780 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4781
4782 /* Transfer trailing 1 byte, if any. */
4783 if (unlikely(buflen & 0x01)) {
4784 u16 align_buf[1] = { 0 };
4785 unsigned char *trailing_buf = buf + buflen - 1;
4786
4787 if (write_data) {
4788 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4789 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4790 } else {
0d5ff566 4791 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4792 memcpy(trailing_buf, align_buf, 1);
4793 }
4794 }
1da177e4
LT
4795}
4796
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.  Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long irq_flags;

	local_irq_save(irq_flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(irq_flags);
}
4818
4819
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device,
 *	advancing the command's scatterlist cursor (cursg/cursg_ofs)
 *	and byte counter as it goes.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* this transfer finishes the command -> move the HSM on */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		/* highmem pages need a temporary atomic mapping, with
		 * local interrupts off while it is held */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem pages are permanently mapped */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* current sg entry exhausted -> step to the next one */
	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}
1da177e4 4875
/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.  For READ/WRITE MULTIPLE
 *	commands a whole block of up to multi_count sectors is
 *	moved per DRQ; otherwise a single sector is moved.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		/* transfer up to multi_count sectors, clamped so we
		 * never run past the end of the request */
		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_altstatus(qc->ap); /* flush */
}
4904
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB, then set
 *	the next HSM state according to the ATAPI protocol variant
 *	(PIO data, no-data, or DMA) and kick off bmdma if needed.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);	/* ATAPI CDBs are 12 or 16 bytes */

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	/* next HSM state depends on the transfer protocol */
	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		/* PIO data phase follows */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
4940
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device, walking the qc's
 *	scatterlist one page-bounded chunk at a time.  If the device
 *	requests more bytes than the scatterlist holds, the excess is
 *	drained (read) or zero-padded (write) so the byte count
 *	register is honored without overrunning qc->sg.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;
	int no_more_sg = 0;

	/* this chunk satisfies the request: HSM moves to last state */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(no_more_sg)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		/* drain/pad one 16-bit word at a time through pad_buf */
		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = qc->cursg;

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		/* temporary atomic mapping for highmem pages, IRQs off */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		/* remember when the final sg entry has been consumed so
		 * the drain/pad path above can take over */
		if (qc->cursg == lsg)
			no_more_sg = 1;

		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}
5040
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.  Reads the byte count
 *	and interrupt-reason registers from the device, validates that
 *	the device agrees about transfer phase and direction, then hands
 *	the actual data movement to __atapi_pio_bytes().  On protocol
 *	violation the HSM is moved to HSM_ST_ERR with AC_ERR_HSM set.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* byte count the device wants for this DRQ, from LBAm/LBAh */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);
	ata_altstatus(ap); /* flush */

	return;

err_out:
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
5091
5092/**
c234fb00
AL
5093 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5094 * @ap: the target ata_port
5095 * @qc: qc on going
1da177e4 5096 *
c234fb00
AL
5097 * RETURNS:
5098 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5099 */
c234fb00
AL
5100
5101static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5102{
c234fb00
AL
5103 if (qc->tf.flags & ATA_TFLAG_POLLING)
5104 return 1;
1da177e4 5105
c234fb00
AL
5106 if (ap->hsm_task_state == HSM_ST_FIRST) {
5107 if (qc->tf.protocol == ATA_PROT_PIO &&
5108 (qc->tf.flags & ATA_TFLAG_WRITE))
5109 return 1;
1da177e4 5110
c234fb00
AL
5111 if (is_atapi_taskfile(&qc->tf) &&
5112 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5113 return 1;
fe79e683
AL
5114 }
5115
c234fb00
AL
5116 return 0;
5117}
1da177e4 5118
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.  With new-style EH
 *	a qc that failed with AC_ERR_HSM freezes the port instead of
 *	completing normally.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			/* re-fetch by tag: the qc pointer may have been
			 * invalidated by EH; NULL means EH took it over */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			/* caller already holds the host lock */
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-style EH: no freezing, just complete */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5168
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Advance the taskfile host state machine one or more steps for
 *	@qc based on the device @status.  States that need no further
 *	device interaction are chained immediately via the fsm_start
 *	label.  Completion and error handling are delegated to
 *	ata_hsm_qc_complete().
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5402
/**
 *	ata_pio_task - workqueue entry point for polled/first-block PIO
 *	@work: embedded work_struct of the owning ata_port
 *
 *	Wait for BSY to clear (short busy-wait, then a 2 ms snooze,
 *	then requeue as delayed work if still busy) and drive the HSM
 *	via ata_hsm_move() until it no longer asks for polling.
 *
 *	LOCKING:
 *	None; runs in process context from the port workqueue.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy: back off and retry later */
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
5440
1da177e4
LT
5441/**
5442 * ata_qc_new - Request an available ATA command, for queueing
5443 * @ap: Port associated with device @dev
5444 * @dev: Device from whom we request an available command structure
5445 *
5446 * LOCKING:
0cba632b 5447 * None.
1da177e4
LT
5448 */
5449
5450static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5451{
5452 struct ata_queued_cmd *qc = NULL;
5453 unsigned int i;
5454
e3180499 5455 /* no command while frozen */
b51e9e5d 5456 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5457 return NULL;
5458
2ab7db1f
TH
5459 /* the last tag is reserved for internal command. */
5460 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5461 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5462 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5463 break;
5464 }
5465
5466 if (qc)
5467 qc->tag = i;
5468
5469 return qc;
5470}
5471
5472/**
5473 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5474 * @dev: Device from whom we request an available command structure
5475 *
5476 * LOCKING:
0cba632b 5477 * None.
1da177e4
LT
5478 */
5479
3373efd8 5480struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5481{
9af5c9c9 5482 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5483 struct ata_queued_cmd *qc;
5484
5485 qc = ata_qc_new(ap);
5486 if (qc) {
1da177e4
LT
5487 qc->scsicmd = NULL;
5488 qc->ap = ap;
5489 qc->dev = dev;
1da177e4 5490
2c13b7ce 5491 ata_qc_reinit(qc);
1da177e4
LT
5492 }
5493
5494 return qc;
5495}
5496
1da177e4
LT
5497/**
5498 * ata_qc_free - free unused ata_queued_cmd
5499 * @qc: Command to complete
5500 *
5501 * Designed to free unused ata_queued_cmd object
5502 * in case something prevents using it.
5503 *
5504 * LOCKING:
cca3974e 5505 * spin_lock_irqsave(host lock)
1da177e4
LT
5506 */
5507void ata_qc_free(struct ata_queued_cmd *qc)
5508{
4ba946e9
TH
5509 struct ata_port *ap = qc->ap;
5510 unsigned int tag;
5511
a4631474 5512 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5513
4ba946e9
TH
5514 qc->flags = 0;
5515 tag = qc->tag;
5516 if (likely(ata_tag_valid(tag))) {
4ba946e9 5517 qc->tag = ATA_TAG_POISON;
6cec4a39 5518 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5519 }
1da177e4
LT
5520}
5521
/**
 *	__ata_qc_complete - low-level qc completion
 *	@qc: Command to complete
 *
 *	Tear down DMA mappings, clear the qc's active/sactive
 *	bookkeeping on its link and port, then invoke the qc's
 *	completion callback.  Must run with the qc still marked
 *	ATA_QCFLAG_ACTIVE.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	/* NOTE(review): qc is dereferenced here before the WARN_ON(qc
	 * == NULL) below can fire — a NULL qc would oops first. */
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5558
39599a53
TH
5559static void fill_result_tf(struct ata_queued_cmd *qc)
5560{
5561 struct ata_port *ap = qc->ap;
5562
39599a53 5563 qc->result_tf.flags = qc->tf.flags;
4742d54f 5564 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5565}
5566
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *	(Note: the stale @err_mask parameter formerly documented here
 *	does not exist; error state is carried in qc->err_mask.)
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc. libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
	 * not synchronize with interrupt handler. Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		/* new EH path */
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* internal commands are recovered by their issuer,
			 * not scheduled through EH */
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		/* old EH path */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5626
dedaf2b0
TH
5627/**
5628 * ata_qc_complete_multiple - Complete multiple qcs successfully
5629 * @ap: port in question
5630 * @qc_active: new qc_active mask
5631 * @finish_qc: LLDD callback invoked before completing a qc
5632 *
5633 * Complete in-flight commands. This functions is meant to be
5634 * called from low-level driver's interrupt routine to complete
5635 * requests normally. ap->qc_active and @qc_active is compared
5636 * and commands are completed accordingly.
5637 *
5638 * LOCKING:
cca3974e 5639 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5640 *
5641 * RETURNS:
5642 * Number of completed commands on success, -errno otherwise.
5643 */
5644int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5645 void (*finish_qc)(struct ata_queued_cmd *))
5646{
5647 int nr_done = 0;
5648 u32 done_mask;
5649 int i;
5650
5651 done_mask = ap->qc_active ^ qc_active;
5652
5653 if (unlikely(done_mask & qc_active)) {
5654 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5655 "(%08x->%08x)\n", ap->qc_active, qc_active);
5656 return -EINVAL;
5657 }
5658
5659 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5660 struct ata_queued_cmd *qc;
5661
5662 if (!(done_mask & (1 << i)))
5663 continue;
5664
5665 if ((qc = ata_qc_from_tag(ap, i))) {
5666 if (finish_qc)
5667 finish_qc(qc);
5668 ata_qc_complete(qc);
5669 nr_done++;
5670 }
5671 }
5672
5673 return nr_done;
5674}
5675
1da177e4
LT
5676static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5677{
5678 struct ata_port *ap = qc->ap;
5679
5680 switch (qc->tf.protocol) {
3dc1d881 5681 case ATA_PROT_NCQ:
1da177e4
LT
5682 case ATA_PROT_DMA:
5683 case ATA_PROT_ATAPI_DMA:
5684 return 1;
5685
5686 case ATA_PROT_ATAPI:
5687 case ATA_PROT_PIO:
1da177e4
LT
5688 if (ap->flags & ATA_FLAG_PIO_DMA)
5689 return 1;
5690
5691 /* fall through */
5692
5693 default:
5694 return 0;
5695 }
5696
5697 /* never reached */
5698}
5699
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *	On failure the qc is completed immediately with
 *	AC_ERR_SYSTEM / the issue error mask set.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Make sure only one non-NCQ command is outstanding. The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* mark the qc active on its link before touching hardware */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* set up DMA mapping for protocols that need it */
	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	/* sg setup failed: nothing was mapped */
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	/* complete with error; undoes the active bookkeeping above */
	ata_qc_complete(qc);
}
5764
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.  Sets the initial HSM state and, when
 *	polling or when the first step must run in process context,
 *	queues ata_pio_task() on the port workqueue.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		/* DMA is always interrupt driven */
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
5896
1da177e4
LT
5897/**
5898 * ata_host_intr - Handle host interrupt for given (port, task)
5899 * @ap: Port on which interrupt arrived (possibly...)
5900 * @qc: Taskfile currently active in engine
5901 *
5902 * Handle host interrupt for given queued command. Currently,
5903 * only DMA interrupts are handled. All other commands are
5904 * handled via polling with interrupts disabled (nIEN bit).
5905 *
5906 * LOCKING:
cca3974e 5907 * spin_lock_irqsave(host lock)
1da177e4
LT
5908 *
5909 * RETURNS:
5910 * One if interrupt was handled, zero if not (shared irq).
5911 */
5912
2dcb407e
JG
5913inline unsigned int ata_host_intr(struct ata_port *ap,
5914 struct ata_queued_cmd *qc)
1da177e4 5915{
9af5c9c9 5916 struct ata_eh_info *ehi = &ap->link.eh_info;
312f7da2 5917 u8 status, host_stat = 0;
1da177e4 5918
312f7da2 5919 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 5920 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 5921
312f7da2
AL
5922 /* Check whether we are expecting interrupt in this state */
5923 switch (ap->hsm_task_state) {
5924 case HSM_ST_FIRST:
6912ccd5
AL
5925 /* Some pre-ATAPI-4 devices assert INTRQ
5926 * at this state when ready to receive CDB.
5927 */
1da177e4 5928
312f7da2
AL
5929 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
5930 * The flag was turned on only for atapi devices.
5931 * No need to check is_atapi_taskfile(&qc->tf) again.
5932 */
5933 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 5934 goto idle_irq;
1da177e4 5935 break;
312f7da2
AL
5936 case HSM_ST_LAST:
5937 if (qc->tf.protocol == ATA_PROT_DMA ||
5938 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5939 /* check status of DMA engine */
5940 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
5941 VPRINTK("ata%u: host_stat 0x%X\n",
5942 ap->print_id, host_stat);
312f7da2
AL
5943
5944 /* if it's not our irq... */
5945 if (!(host_stat & ATA_DMA_INTR))
5946 goto idle_irq;
5947
5948 /* before we do anything else, clear DMA-Start bit */
5949 ap->ops->bmdma_stop(qc);
a4f16610
AL
5950
5951 if (unlikely(host_stat & ATA_DMA_ERR)) {
5952 /* error when transfering data to/from memory */
5953 qc->err_mask |= AC_ERR_HOST_BUS;
5954 ap->hsm_task_state = HSM_ST_ERR;
5955 }
312f7da2
AL
5956 }
5957 break;
5958 case HSM_ST:
5959 break;
1da177e4
LT
5960 default:
5961 goto idle_irq;
5962 }
5963
312f7da2
AL
5964 /* check altstatus */
5965 status = ata_altstatus(ap);
5966 if (status & ATA_BUSY)
5967 goto idle_irq;
1da177e4 5968
312f7da2
AL
5969 /* check main status, clearing INTRQ */
5970 status = ata_chk_status(ap);
5971 if (unlikely(status & ATA_BUSY))
5972 goto idle_irq;
1da177e4 5973
312f7da2
AL
5974 /* ack bmdma irq events */
5975 ap->ops->irq_clear(ap);
1da177e4 5976
bb5cb290 5977 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5978
5979 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5980 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5981 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5982
1da177e4
LT
5983 return 1; /* irq handled */
5984
5985idle_irq:
5986 ap->stats.idle_irq++;
5987
5988#ifdef ATA_IRQ_TRAP
5989 if ((ap->stats.idle_irq % 1000) == 0) {
6d32d30f
JG
5990 ata_chk_status(ap);
5991 ap->ops->irq_clear(ap);
f15a1daf 5992 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5993 return 1;
1da177e4
LT
5994 }
5995#endif
5996 return 0; /* irq not handled */
5997}
5998
5999/**
6000 * ata_interrupt - Default ATA host interrupt handler
0cba632b 6001 * @irq: irq line (unused)
cca3974e 6002 * @dev_instance: pointer to our ata_host information structure
1da177e4 6003 *
0cba632b
JG
6004 * Default interrupt handler for PCI IDE devices. Calls
6005 * ata_host_intr() for each port that is not disabled.
6006 *
1da177e4 6007 * LOCKING:
cca3974e 6008 * Obtains host lock during operation.
1da177e4
LT
6009 *
6010 * RETURNS:
0cba632b 6011 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
6012 */
6013
2dcb407e 6014irqreturn_t ata_interrupt(int irq, void *dev_instance)
1da177e4 6015{
cca3974e 6016 struct ata_host *host = dev_instance;
1da177e4
LT
6017 unsigned int i;
6018 unsigned int handled = 0;
6019 unsigned long flags;
6020
6021 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 6022 spin_lock_irqsave(&host->lock, flags);
1da177e4 6023
cca3974e 6024 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
6025 struct ata_port *ap;
6026
cca3974e 6027 ap = host->ports[i];
c1389503 6028 if (ap &&
029f5468 6029 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
6030 struct ata_queued_cmd *qc;
6031
9af5c9c9 6032 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 6033 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 6034 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
6035 handled |= ata_host_intr(ap, qc);
6036 }
6037 }
6038
cca3974e 6039 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
6040
6041 return IRQ_RETVAL(handled);
6042}
6043
34bf2170
TH
6044/**
6045 * sata_scr_valid - test whether SCRs are accessible
936fd732 6046 * @link: ATA link to test SCR accessibility for
34bf2170 6047 *
936fd732 6048 * Test whether SCRs are accessible for @link.
34bf2170
TH
6049 *
6050 * LOCKING:
6051 * None.
6052 *
6053 * RETURNS:
6054 * 1 if SCRs are accessible, 0 otherwise.
6055 */
936fd732 6056int sata_scr_valid(struct ata_link *link)
34bf2170 6057{
936fd732
TH
6058 struct ata_port *ap = link->ap;
6059
a16abc0b 6060 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
6061}
6062
6063/**
6064 * sata_scr_read - read SCR register of the specified port
936fd732 6065 * @link: ATA link to read SCR for
34bf2170
TH
6066 * @reg: SCR to read
6067 * @val: Place to store read value
6068 *
936fd732 6069 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
6070 * guaranteed to succeed if @link is ap->link, the cable type of
6071 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6072 *
6073 * LOCKING:
633273a3 6074 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6075 *
6076 * RETURNS:
6077 * 0 on success, negative errno on failure.
6078 */
936fd732 6079int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 6080{
633273a3
TH
6081 if (ata_is_host_link(link)) {
6082 struct ata_port *ap = link->ap;
936fd732 6083
633273a3
TH
6084 if (sata_scr_valid(link))
6085 return ap->ops->scr_read(ap, reg, val);
6086 return -EOPNOTSUPP;
6087 }
6088
6089 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
6090}
6091
6092/**
6093 * sata_scr_write - write SCR register of the specified port
936fd732 6094 * @link: ATA link to write SCR for
34bf2170
TH
6095 * @reg: SCR to write
6096 * @val: value to write
6097 *
936fd732 6098 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
6099 * guaranteed to succeed if @link is ap->link, the cable type of
6100 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6101 *
6102 * LOCKING:
633273a3 6103 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6104 *
6105 * RETURNS:
6106 * 0 on success, negative errno on failure.
6107 */
936fd732 6108int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 6109{
633273a3
TH
6110 if (ata_is_host_link(link)) {
6111 struct ata_port *ap = link->ap;
6112
6113 if (sata_scr_valid(link))
6114 return ap->ops->scr_write(ap, reg, val);
6115 return -EOPNOTSUPP;
6116 }
936fd732 6117
633273a3 6118 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6119}
6120
6121/**
6122 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6123 * @link: ATA link to write SCR for
34bf2170
TH
6124 * @reg: SCR to write
6125 * @val: value to write
6126 *
6127 * This function is identical to sata_scr_write() except that this
6128 * function performs flush after writing to the register.
6129 *
6130 * LOCKING:
633273a3 6131 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6132 *
6133 * RETURNS:
6134 * 0 on success, negative errno on failure.
6135 */
936fd732 6136int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6137{
633273a3
TH
6138 if (ata_is_host_link(link)) {
6139 struct ata_port *ap = link->ap;
6140 int rc;
da3dbb17 6141
633273a3
TH
6142 if (sata_scr_valid(link)) {
6143 rc = ap->ops->scr_write(ap, reg, val);
6144 if (rc == 0)
6145 rc = ap->ops->scr_read(ap, reg, &val);
6146 return rc;
6147 }
6148 return -EOPNOTSUPP;
34bf2170 6149 }
633273a3
TH
6150
6151 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6152}
6153
6154/**
936fd732
TH
6155 * ata_link_online - test whether the given link is online
6156 * @link: ATA link to test
34bf2170 6157 *
936fd732
TH
6158 * Test whether @link is online. Note that this function returns
6159 * 0 if online status of @link cannot be obtained, so
6160 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6161 *
6162 * LOCKING:
6163 * None.
6164 *
6165 * RETURNS:
6166 * 1 if the port online status is available and online.
6167 */
936fd732 6168int ata_link_online(struct ata_link *link)
34bf2170
TH
6169{
6170 u32 sstatus;
6171
936fd732
TH
6172 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6173 (sstatus & 0xf) == 0x3)
34bf2170
TH
6174 return 1;
6175 return 0;
6176}
6177
6178/**
936fd732
TH
6179 * ata_link_offline - test whether the given link is offline
6180 * @link: ATA link to test
34bf2170 6181 *
936fd732
TH
6182 * Test whether @link is offline. Note that this function
6183 * returns 0 if offline status of @link cannot be obtained, so
6184 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6185 *
6186 * LOCKING:
6187 * None.
6188 *
6189 * RETURNS:
6190 * 1 if the port offline status is available and offline.
6191 */
936fd732 6192int ata_link_offline(struct ata_link *link)
34bf2170
TH
6193{
6194 u32 sstatus;
6195
936fd732
TH
6196 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6197 (sstatus & 0xf) != 0x3)
34bf2170
TH
6198 return 1;
6199 return 0;
6200}
0baab86b 6201
77b08fb5 6202int ata_flush_cache(struct ata_device *dev)
9b847548 6203{
977e6b9f 6204 unsigned int err_mask;
9b847548
JA
6205 u8 cmd;
6206
6207 if (!ata_try_flush_cache(dev))
6208 return 0;
6209
6fc49adb 6210 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6211 cmd = ATA_CMD_FLUSH_EXT;
6212 else
6213 cmd = ATA_CMD_FLUSH;
6214
4f34337b
AC
6215 /* This is wrong. On a failed flush we get back the LBA of the lost
6216 sector and we should (assuming it wasn't aborted as unknown) issue
2dcb407e 6217 a further flush command to continue the writeback until it
4f34337b 6218 does not error */
977e6b9f
TH
6219 err_mask = ata_do_simple_cmd(dev, cmd);
6220 if (err_mask) {
6221 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6222 return -EIO;
6223 }
6224
6225 return 0;
9b847548
JA
6226}
6227
6ffa01d8 6228#ifdef CONFIG_PM
cca3974e
JG
/* ata_host_request_pm - ask EH to perform a PM operation on every port
 * @host: host to operate on
 * @mesg: PM message recorded in each port's ap->pm_mesg
 * @action: EH actions to schedule on every link of every port
 * @ehi_flags: EH info flags to set on every link of every port
 * @wait: if non-zero, wait for each port's EH to finish and fail fast
 *
 * The actual suspend/resume work is done by EH; this function only
 * queues the request (under the port lock) and optionally waits for
 * completion.  Returns 0 on success, the first port's error otherwise.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH reports its result through this pointer */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
6278
6279/**
cca3974e
JG
6280 * ata_host_suspend - suspend host
6281 * @host: host to suspend
500530f6
TH
6282 * @mesg: PM message
6283 *
cca3974e 6284 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6285 * function requests EH to perform PM operations and waits for EH
6286 * to finish.
6287 *
6288 * LOCKING:
6289 * Kernel thread context (may sleep).
6290 *
6291 * RETURNS:
6292 * 0 on success, -errno on failure.
6293 */
cca3974e 6294int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6295{
9666f400 6296 int rc;
500530f6 6297
cca3974e 6298 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
9666f400
TH
6299 if (rc == 0)
6300 host->dev->power.power_state = mesg;
500530f6
TH
6301 return rc;
6302}
6303
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely;
 *	unlike ata_host_suspend() this does not wait for completion.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	/* soft-reset each link quietly; errors surface through EH */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
6ffa01d8 6321#endif
500530f6 6322
c893a3ae
RD
6323/**
6324 * ata_port_start - Set port up for dma.
6325 * @ap: Port to initialize
6326 *
6327 * Called just after data structures for each port are
6328 * initialized. Allocates space for PRD table.
6329 *
6330 * May be used as the port_start() entry in ata_port_operations.
6331 *
6332 * LOCKING:
6333 * Inherited from caller.
6334 */
f0d36efd 6335int ata_port_start(struct ata_port *ap)
1da177e4 6336{
2f1f610b 6337 struct device *dev = ap->dev;
6037d6bb 6338 int rc;
1da177e4 6339
f0d36efd
TH
6340 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6341 GFP_KERNEL);
1da177e4
LT
6342 if (!ap->prd)
6343 return -ENOMEM;
6344
6037d6bb 6345 rc = ata_pad_alloc(ap, dev);
f0d36efd 6346 if (rc)
6037d6bb 6347 return rc;
1da177e4 6348
f0d36efd
TH
6349 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6350 (unsigned long long)ap->prd_dma);
1da177e4
LT
6351 return 0;
6352}
6353
3ef3b43d
TH
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* Clear only the probe-volatile tail of the structure; fields
	 * before ATA_DEVICE_CLEAR_OFFSET persist across re-init.
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	/* start with all transfer modes allowed; probing narrows them */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
6388
4fb37a25
TH
6389/**
6390 * ata_link_init - Initialize an ata_link structure
6391 * @ap: ATA port link is attached to
6392 * @link: Link structure to initialize
8989805d 6393 * @pmp: Port multiplier port number
4fb37a25
TH
6394 *
6395 * Initialize @link.
6396 *
6397 * LOCKING:
6398 * Kernel thread context (may sleep)
6399 */
fb7fd614 6400void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6401{
6402 int i;
6403
6404 /* clear everything except for devices */
6405 memset(link, 0, offsetof(struct ata_link, device[0]));
6406
6407 link->ap = ap;
8989805d 6408 link->pmp = pmp;
4fb37a25
TH
6409 link->active_tag = ATA_TAG_POISON;
6410 link->hw_sata_spd_limit = UINT_MAX;
6411
6412 /* can't use iterator, ap isn't initialized yet */
6413 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6414 struct ata_device *dev = &link->device[i];
6415
6416 dev->link = link;
6417 dev->devno = dev - link->device;
6418 ata_dev_init(dev);
6419 }
6420}
6421
6422/**
6423 * sata_link_init_spd - Initialize link->sata_spd_limit
6424 * @link: Link to configure sata_spd_limit for
6425 *
6426 * Initialize @link->[hw_]sata_spd_limit to the currently
6427 * configured value.
6428 *
6429 * LOCKING:
6430 * Kernel thread context (may sleep).
6431 *
6432 * RETURNS:
6433 * 0 on success, -errno on failure.
6434 */
fb7fd614 6435int sata_link_init_spd(struct ata_link *link)
4fb37a25
TH
6436{
6437 u32 scontrol, spd;
6438 int rc;
6439
6440 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6441 if (rc)
6442 return rc;
6443
6444 spd = (scontrol >> 4) & 0xf;
6445 if (spd)
6446 link->hw_sata_spd_limit &= (1 << spd) - 1;
6447
6448 link->sata_spd_limit = link->hw_sata_spd_limit;
6449
6450 return 0;
6451}
6452
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* INITIALIZING is cleared once registration finishes */
	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;	/* ports share the host's lock */
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;	/* real id assigned at registration */
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	/* deferrable: fastdrain need not wake an idle CPU exactly on time */
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	/* host link is pmp 0 */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
6512
f0d36efd
TH
/* ata_host_release - devres release callback freeing an ATA host
 * @gendev: generic device the host is attached to
 * @res: devres payload (unused; host is fetched from drvdata)
 *
 * Stops all ports and the host (only if the host was actually
 * started), then frees per-port resources.  Port stop must complete
 * for every port before host_stop runs, hence the two passes.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	/* pass 1: stop all started ports */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
		host->ops->host_stop(host);

	/* pass 2: free port memory, only after everything is stopped */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap->pmp_link);
		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
6547
f3187195
TH
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * +1 leaves a NULL sentinel slot after the last port
	 */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	/* releasing the group frees host and any ports via devres */
	devres_release_group(dev, NULL);
	return NULL;
}
6612
f5cda257
TH
6613/**
6614 * ata_host_alloc_pinfo - alloc host and init with port_info array
6615 * @dev: generic device this host is associated with
6616 * @ppi: array of ATA port_info to initialize host with
6617 * @n_ports: number of ATA ports attached to this host
6618 *
6619 * Allocate ATA host and initialize with info from @ppi. If NULL
6620 * terminated, @ppi may contain fewer entries than @n_ports. The
6621 * last entry will be used for the remaining ports.
6622 *
6623 * RETURNS:
6624 * Allocate ATA host on success, NULL on failure.
6625 *
6626 * LOCKING:
6627 * Inherited from calling layer (may sleep).
6628 */
6629struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6630 const struct ata_port_info * const * ppi,
6631 int n_ports)
6632{
6633 const struct ata_port_info *pi;
6634 struct ata_host *host;
6635 int i, j;
6636
6637 host = ata_host_alloc(dev, n_ports);
6638 if (!host)
6639 return NULL;
6640
6641 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6642 struct ata_port *ap = host->ports[i];
6643
6644 if (ppi[j])
6645 pi = ppi[j++];
6646
6647 ap->pio_mask = pi->pio_mask;
6648 ap->mwdma_mask = pi->mwdma_mask;
6649 ap->udma_mask = pi->udma_mask;
6650 ap->flags |= pi->flags;
0c88758b 6651 ap->link.flags |= pi->link_flags;
f5cda257
TH
6652 ap->ops = pi->port_ops;
6653
6654 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6655 host->ops = pi->port_ops;
6656 if (!host->private_data && pi->private_data)
6657 host->private_data = pi->private_data;
6658 }
6659
6660 return host;
6661}
6662
ecef7253
TH
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int i, rc;

	/* idempotent: a started host is not started again */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		ata_eh_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop every port started before the failure */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	return rc;
}
6716
/**
 *	ata_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	Minimal host initialization for SAS-attached ATA; does not
 *	allocate ports.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
6737
f3187195
TH
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int rc;

		/* probe */
		if (ap->ops->error_handler) {
			/* new-style EH: kick EH to do boot probing */
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask =
				(1 << ata_link_max_devices(&ap->link)) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			/* old-style host without EH: probe directly */
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
	}

	return 0;
}
6866
f5cda257
TH
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessasry
 *	arguments and performs the three steps in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* devm-managed: IRQ is released automatically with host->dev */
	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	/* record the irq in each port's description for dmesg */
	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6911
720ba126
TH
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* old-style ports without EH need no quiescing */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	/* stop pending hotplug work before tearing down the SCSI host */
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6966
0529c159
TH
6967/**
6968 * ata_host_detach - Detach all ports of an ATA host
6969 * @host: Host to detach
6970 *
6971 * Detach all ports of @host.
6972 *
6973 * LOCKING:
6974 * Kernel thread context (may sleep).
6975 */
6976void ata_host_detach(struct ata_host *host)
6977{
6978 int i;
6979
6980 for (i = 0; i < host->n_ports; i++)
6981 ata_port_detach(host->ports[i]);
6982}
6983
1da177e4
LT
6984/**
6985 * ata_std_ports - initialize ioaddr with standard port offsets.
6986 * @ioaddr: IO address structure to be initialized
0baab86b
EF
6987 *
6988 * Utility function which initializes data_addr, error_addr,
6989 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6990 * device_addr, status_addr, and command_addr to standard offsets
6991 * relative to cmd_addr.
6992 *
6993 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6994 */
0baab86b 6995
1da177e4
LT
6996void ata_std_ports(struct ata_ioports *ioaddr)
6997{
6998 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6999 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7000 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7001 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7002 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7003 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7004 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7005 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7006 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7007 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7008}
7009
0baab86b 7010
374b1873
JG
7011#ifdef CONFIG_PCI
7012
1da177e4
LT
7013/**
7014 * ata_pci_remove_one - PCI layer callback for device removal
7015 * @pdev: PCI device that was removed
7016 *
b878ca5d
TH
7017 * PCI layer indicates to libata via this hook that hot-unplug or
7018 * module unload event has occurred. Detach all ports. Resource
7019 * release is handled via devres.
1da177e4
LT
7020 *
7021 * LOCKING:
7022 * Inherited from PCI layer (may sleep).
7023 */
f0d36efd 7024void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 7025{
2855568b 7026 struct device *dev = &pdev->dev;
cca3974e 7027 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 7028
b878ca5d 7029 ata_host_detach(host);
1da177e4
LT
7030}
7031
7032/* move to PCI subsystem */
057ace5e 7033int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
7034{
7035 unsigned long tmp = 0;
7036
7037 switch (bits->width) {
7038 case 1: {
7039 u8 tmp8 = 0;
7040 pci_read_config_byte(pdev, bits->reg, &tmp8);
7041 tmp = tmp8;
7042 break;
7043 }
7044 case 2: {
7045 u16 tmp16 = 0;
7046 pci_read_config_word(pdev, bits->reg, &tmp16);
7047 tmp = tmp16;
7048 break;
7049 }
7050 case 4: {
7051 u32 tmp32 = 0;
7052 pci_read_config_dword(pdev, bits->reg, &tmp32);
7053 tmp = tmp32;
7054 break;
7055 }
7056
7057 default:
7058 return -EINVAL;
7059 }
7060
7061 tmp &= bits->mask;
7062
7063 return (tmp == bits->val) ? 1 : 0;
7064}
9b847548 7065
6ffa01d8 7066#ifdef CONFIG_PM
/* Save PCI state and power the device down for suspend.
 * Only a real suspend (PM_EVENT_SUSPEND) drops to D3hot; other
 * events (e.g. freeze) leave the power state alone.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}
7075
/* Power the device back up, restore its config space and re-enable
 * it (managed via pcim).  Returns 0 on success, -errno otherwise.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
7093
3c5100c1 7094int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 7095{
cca3974e 7096 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
7097 int rc = 0;
7098
cca3974e 7099 rc = ata_host_suspend(host, mesg);
500530f6
TH
7100 if (rc)
7101 return rc;
7102
3c5100c1 7103 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
7104
7105 return 0;
7106}
7107
7108int ata_pci_device_resume(struct pci_dev *pdev)
7109{
cca3974e 7110 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 7111 int rc;
500530f6 7112
553c4aa6
TH
7113 rc = ata_pci_device_do_resume(pdev);
7114 if (rc == 0)
7115 ata_host_resume(host);
7116 return rc;
9b847548 7117}
6ffa01d8
TH
7118#endif /* CONFIG_PM */
7119
1da177e4
LT
7120#endif /* CONFIG_PCI */
7121
7122
1da177e4
LT
7123static int __init ata_init(void)
7124{
a8601e5f 7125 ata_probe_timeout *= HZ;
1da177e4
LT
7126 ata_wq = create_workqueue("ata");
7127 if (!ata_wq)
7128 return -ENOMEM;
7129
453b07ac
TH
7130 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7131 if (!ata_aux_wq) {
7132 destroy_workqueue(ata_wq);
7133 return -ENOMEM;
7134 }
7135
1da177e4
LT
7136 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7137 return 0;
7138}
7139
static void __exit ata_exit(void)
{
	/* tear down the workqueues created in ata_init() */
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
7145
/* NOTE(review): registered as a subsys initcall, presumably so libata
 * is initialized before dependent ATA driver initcalls run -- confirm */
subsys_initcall(ata_init);
module_exit(ata_exit);
7148
67846b30 7149static unsigned long ratelimit_time;
34af946a 7150static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7151
7152int ata_ratelimit(void)
7153{
7154 int rc;
7155 unsigned long flags;
7156
7157 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7158
7159 if (time_after(jiffies, ratelimit_time)) {
7160 rc = 1;
7161 ratelimit_time = jiffies + (HZ/5);
7162 } else
7163 rc = 0;
7164
7165 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7166
7167 return rc;
7168}
7169
c22daff4
TH
7170/**
7171 * ata_wait_register - wait until register value changes
7172 * @reg: IO-mapped register
7173 * @mask: Mask to apply to read register value
7174 * @val: Wait condition
7175 * @interval_msec: polling interval in milliseconds
7176 * @timeout_msec: timeout in milliseconds
7177 *
7178 * Waiting for some bits of register to change is a common
7179 * operation for ATA controllers. This function reads 32bit LE
7180 * IO-mapped register @reg and tests for the following condition.
7181 *
7182 * (*@reg & mask) != val
7183 *
7184 * If the condition is met, it returns; otherwise, the process is
7185 * repeated after @interval_msec until timeout.
7186 *
7187 * LOCKING:
7188 * Kernel thread context (may sleep)
7189 *
7190 * RETURNS:
7191 * The final register value.
7192 */
7193u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7194 unsigned long interval_msec,
7195 unsigned long timeout_msec)
7196{
7197 unsigned long timeout;
7198 u32 tmp;
7199
7200 tmp = ioread32(reg);
7201
7202 /* Calculate timeout _after_ the first read to make sure
7203 * preceding writes reach the controller before starting to
7204 * eat away the timeout.
7205 */
7206 timeout = jiffies + (timeout_msec * HZ) / 1000;
7207
7208 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7209 msleep(interval_msec);
7210 tmp = ioread32(reg);
7211 }
7212
7213 return tmp;
7214}
7215
dd5b06c4
TH
/*
 * Dummy port_ops: harmless no-op implementations so code can call
 * through ->ops without NULL checks.  qc_issue rejects every command
 * with AC_ERR_SYSTEM and check_status always reports device-ready.
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* always report DRDY: device ready, no BSY, no error */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* fail every queued command immediately with a system error */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.check_status = ata_dummy_check_status,
	.check_altstatus = ata_dummy_check_status,
	.dev_select = ata_noop_dev_select,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.freeze = ata_dummy_noret,
	.thaw = ata_dummy_noret,
	.error_handler = ata_dummy_noret,
	.post_internal_cmd = ata_dummy_qc_noret,
	.irq_clear = ata_dummy_noret,
	.port_start = ata_dummy_ret0,
	.port_stop = ata_dummy_noret,
};

/* minimal port_info wrapping the dummy ops */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
7251
1da177e4
LT
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

/* debounce timing tables and dummy port objects */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
/* host allocation, registration and teardown */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
/* command, SG and taskfile handling */
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
/* BMDMA helpers */
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
/* probing, reset and link management */
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
/* SCSI midlayer glue */
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
/* SCR access and link status */
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

/* transfer mode timing computation */
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

/* port multiplier (PMP) support */
EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
EXPORT_SYMBOL_GPL(sata_pmp_do_eh);

/* error handling (EH) */
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

/* cable type detection helpers */
EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);
This page took 1.582016 seconds and 5 git commands to generate.