ata/sata_fsl: Remove ata_scsi_suspend/resume callbacks
[deliverable/linux.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
2dcb407e 52#include <linux/io.h>
1da177e4 53#include <scsi/scsi.h>
193515d5 54#include <scsi/scsi_cmnd.h>
1da177e4
LT
55#include <scsi/scsi_host.h>
56#include <linux/libata.h>
1da177e4
LT
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
fda0efc5 62
d7bb4cc7 63/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
64const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
65const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
66const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 67
3373efd8
TH
68static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
218f3d30
JG
71static unsigned int ata_dev_set_feature(struct ata_device *dev,
72 u8 enable, u8 feature);
3373efd8 73static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 74static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 75
f3187195 76unsigned int ata_print_id = 1;
1da177e4
LT
77static struct workqueue_struct *ata_wq;
78
453b07ac
TH
79struct workqueue_struct *ata_aux_wq;
80
418dc1f5 81int atapi_enabled = 1;
1623c81e
JG
82module_param(atapi_enabled, int, 0444);
83MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84
95de719a
AL
85int atapi_dmadir = 0;
86module_param(atapi_dmadir, int, 0444);
87MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88
baf4fdfa
ML
89int atapi_passthru16 = 1;
90module_param(atapi_passthru16, int, 0444);
91MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
92
c3c013a2
JG
93int libata_fua = 0;
94module_param_named(fua, libata_fua, int, 0444);
95MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
96
2dcb407e 97static int ata_ignore_hpa;
1e999736
AC
98module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
99MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
100
b3a70601
AC
101static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
102module_param_named(dma, libata_dma_mask, int, 0444);
103MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
104
a8601e5f
AM
105static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
106module_param(ata_probe_timeout, int, 0444);
107MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
108
6ebe9d86 109int libata_noacpi = 0;
d7d0dad6 110module_param_named(noacpi, libata_noacpi, int, 0444);
6ebe9d86 111MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11ef697b 112
1da177e4
LT
113MODULE_AUTHOR("Jeff Garzik");
114MODULE_DESCRIPTION("Library module for ATA devices");
115MODULE_LICENSE("GPL");
116MODULE_VERSION(DRV_VERSION);
117
0baab86b 118
1da177e4
LT
119/**
120 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
121 * @tf: Taskfile to convert
1da177e4 122 * @pmp: Port multiplier port
9977126c
TH
123 * @is_cmd: This FIS is for command
124 * @fis: Buffer into which data will output
1da177e4
LT
125 *
126 * Converts a standard ATA taskfile to a Serial ATA
127 * FIS structure (Register - Host to Device).
128 *
129 * LOCKING:
130 * Inherited from caller.
131 */
9977126c 132void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 133{
9977126c
TH
134 fis[0] = 0x27; /* Register - Host to Device FIS */
135 fis[1] = pmp & 0xf; /* Port multiplier number*/
136 if (is_cmd)
137 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
138
1da177e4
LT
139 fis[2] = tf->command;
140 fis[3] = tf->feature;
141
142 fis[4] = tf->lbal;
143 fis[5] = tf->lbam;
144 fis[6] = tf->lbah;
145 fis[7] = tf->device;
146
147 fis[8] = tf->hob_lbal;
148 fis[9] = tf->hob_lbam;
149 fis[10] = tf->hob_lbah;
150 fis[11] = tf->hob_feature;
151
152 fis[12] = tf->nsect;
153 fis[13] = tf->hob_nsect;
154 fis[14] = 0;
155 fis[15] = tf->ctl;
156
157 fis[16] = 0;
158 fis[17] = 0;
159 fis[18] = 0;
160 fis[19] = 0;
161}
162
163/**
164 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
165 * @fis: Buffer from which data will be input
166 * @tf: Taskfile to output
167 *
e12a1be6 168 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
169 *
170 * LOCKING:
171 * Inherited from caller.
172 */
173
057ace5e 174void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
175{
176 tf->command = fis[2]; /* status */
177 tf->feature = fis[3]; /* error */
178
179 tf->lbal = fis[4];
180 tf->lbam = fis[5];
181 tf->lbah = fis[6];
182 tf->device = fis[7];
183
184 tf->hob_lbal = fis[8];
185 tf->hob_lbam = fis[9];
186 tf->hob_lbah = fis[10];
187
188 tf->nsect = fis[12];
189 tf->hob_nsect = fis[13];
190}
191
8cbd6df1
AL
192static const u8 ata_rw_cmds[] = {
193 /* pio multi */
194 ATA_CMD_READ_MULTI,
195 ATA_CMD_WRITE_MULTI,
196 ATA_CMD_READ_MULTI_EXT,
197 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
198 0,
199 0,
200 0,
201 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
202 /* pio */
203 ATA_CMD_PIO_READ,
204 ATA_CMD_PIO_WRITE,
205 ATA_CMD_PIO_READ_EXT,
206 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
207 0,
208 0,
209 0,
210 0,
8cbd6df1
AL
211 /* dma */
212 ATA_CMD_READ,
213 ATA_CMD_WRITE,
214 ATA_CMD_READ_EXT,
9a3dccc4
TH
215 ATA_CMD_WRITE_EXT,
216 0,
217 0,
218 0,
219 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 220};
1da177e4
LT
221
222/**
8cbd6df1 223 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
224 * @tf: command to examine and configure
225 * @dev: device tf belongs to
1da177e4 226 *
2e9edbf8 227 * Examine the device configuration and tf->flags to calculate
8cbd6df1 228 * the proper read/write commands and protocol to use.
1da177e4
LT
229 *
230 * LOCKING:
231 * caller.
232 */
bd056d7e 233static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 234{
9a3dccc4 235 u8 cmd;
1da177e4 236
9a3dccc4 237 int index, fua, lba48, write;
2e9edbf8 238
9a3dccc4 239 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
240 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
241 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 242
8cbd6df1
AL
243 if (dev->flags & ATA_DFLAG_PIO) {
244 tf->protocol = ATA_PROT_PIO;
9a3dccc4 245 index = dev->multi_count ? 0 : 8;
9af5c9c9 246 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
247 /* Unable to use DMA due to host limitation */
248 tf->protocol = ATA_PROT_PIO;
0565c26d 249 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
250 } else {
251 tf->protocol = ATA_PROT_DMA;
9a3dccc4 252 index = 16;
8cbd6df1 253 }
1da177e4 254
9a3dccc4
TH
255 cmd = ata_rw_cmds[index + fua + lba48 + write];
256 if (cmd) {
257 tf->command = cmd;
258 return 0;
259 }
260 return -1;
1da177e4
LT
261}
262
35b649fe
TH
263/**
264 * ata_tf_read_block - Read block address from ATA taskfile
265 * @tf: ATA taskfile of interest
266 * @dev: ATA device @tf belongs to
267 *
268 * LOCKING:
269 * None.
270 *
271 * Read block address from @tf. This function can handle all
272 * three address formats - LBA, LBA48 and CHS. tf->protocol and
273 * flags select the address format to use.
274 *
275 * RETURNS:
276 * Block address read from @tf.
277 */
278u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
279{
280 u64 block = 0;
281
282 if (tf->flags & ATA_TFLAG_LBA) {
283 if (tf->flags & ATA_TFLAG_LBA48) {
284 block |= (u64)tf->hob_lbah << 40;
285 block |= (u64)tf->hob_lbam << 32;
286 block |= tf->hob_lbal << 24;
287 } else
288 block |= (tf->device & 0xf) << 24;
289
290 block |= tf->lbah << 16;
291 block |= tf->lbam << 8;
292 block |= tf->lbal;
293 } else {
294 u32 cyl, head, sect;
295
296 cyl = tf->lbam | (tf->lbah << 8);
297 head = tf->device & 0xf;
298 sect = tf->lbal;
299
300 block = (cyl * dev->heads + head) * dev->sectors + sect;
301 }
302
303 return block;
304}
305
bd056d7e
TH
306/**
307 * ata_build_rw_tf - Build ATA taskfile for given read/write request
308 * @tf: Target ATA taskfile
309 * @dev: ATA device @tf belongs to
310 * @block: Block address
311 * @n_block: Number of blocks
312 * @tf_flags: RW/FUA etc...
313 * @tag: tag
314 *
315 * LOCKING:
316 * None.
317 *
318 * Build ATA taskfile @tf for read/write request described by
319 * @block, @n_block, @tf_flags and @tag on @dev.
320 *
321 * RETURNS:
322 *
323 * 0 on success, -ERANGE if the request is too large for @dev,
324 * -EINVAL if the request is invalid.
325 */
326int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
327 u64 block, u32 n_block, unsigned int tf_flags,
328 unsigned int tag)
329{
330 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
331 tf->flags |= tf_flags;
332
6d1245bf 333 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
334 /* yay, NCQ */
335 if (!lba_48_ok(block, n_block))
336 return -ERANGE;
337
338 tf->protocol = ATA_PROT_NCQ;
339 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
340
341 if (tf->flags & ATA_TFLAG_WRITE)
342 tf->command = ATA_CMD_FPDMA_WRITE;
343 else
344 tf->command = ATA_CMD_FPDMA_READ;
345
346 tf->nsect = tag << 3;
347 tf->hob_feature = (n_block >> 8) & 0xff;
348 tf->feature = n_block & 0xff;
349
350 tf->hob_lbah = (block >> 40) & 0xff;
351 tf->hob_lbam = (block >> 32) & 0xff;
352 tf->hob_lbal = (block >> 24) & 0xff;
353 tf->lbah = (block >> 16) & 0xff;
354 tf->lbam = (block >> 8) & 0xff;
355 tf->lbal = block & 0xff;
356
357 tf->device = 1 << 6;
358 if (tf->flags & ATA_TFLAG_FUA)
359 tf->device |= 1 << 7;
360 } else if (dev->flags & ATA_DFLAG_LBA) {
361 tf->flags |= ATA_TFLAG_LBA;
362
363 if (lba_28_ok(block, n_block)) {
364 /* use LBA28 */
365 tf->device |= (block >> 24) & 0xf;
366 } else if (lba_48_ok(block, n_block)) {
367 if (!(dev->flags & ATA_DFLAG_LBA48))
368 return -ERANGE;
369
370 /* use LBA48 */
371 tf->flags |= ATA_TFLAG_LBA48;
372
373 tf->hob_nsect = (n_block >> 8) & 0xff;
374
375 tf->hob_lbah = (block >> 40) & 0xff;
376 tf->hob_lbam = (block >> 32) & 0xff;
377 tf->hob_lbal = (block >> 24) & 0xff;
378 } else
379 /* request too large even for LBA48 */
380 return -ERANGE;
381
382 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
383 return -EINVAL;
384
385 tf->nsect = n_block & 0xff;
386
387 tf->lbah = (block >> 16) & 0xff;
388 tf->lbam = (block >> 8) & 0xff;
389 tf->lbal = block & 0xff;
390
391 tf->device |= ATA_LBA;
392 } else {
393 /* CHS */
394 u32 sect, head, cyl, track;
395
396 /* The request -may- be too large for CHS addressing. */
397 if (!lba_28_ok(block, n_block))
398 return -ERANGE;
399
400 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
401 return -EINVAL;
402
403 /* Convert LBA to CHS */
404 track = (u32)block / dev->sectors;
405 cyl = track / dev->heads;
406 head = track % dev->heads;
407 sect = (u32)block % dev->sectors + 1;
408
409 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
410 (u32)block, track, cyl, head, sect);
411
412 /* Check whether the converted CHS can fit.
413 Cylinder: 0-65535
414 Head: 0-15
415 Sector: 1-255*/
416 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
417 return -ERANGE;
418
419 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
420 tf->lbal = sect;
421 tf->lbam = cyl;
422 tf->lbah = cyl >> 8;
423 tf->device |= head;
424 }
425
426 return 0;
427}
428
cb95d562
TH
429/**
430 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
431 * @pio_mask: pio_mask
432 * @mwdma_mask: mwdma_mask
433 * @udma_mask: udma_mask
434 *
435 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
436 * unsigned int xfer_mask.
437 *
438 * LOCKING:
439 * None.
440 *
441 * RETURNS:
442 * Packed xfer_mask.
443 */
444static unsigned int ata_pack_xfermask(unsigned int pio_mask,
445 unsigned int mwdma_mask,
446 unsigned int udma_mask)
447{
448 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
449 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
450 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
451}
452
c0489e4e
TH
453/**
454 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
455 * @xfer_mask: xfer_mask to unpack
456 * @pio_mask: resulting pio_mask
457 * @mwdma_mask: resulting mwdma_mask
458 * @udma_mask: resulting udma_mask
459 *
460 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
461 * Any NULL distination masks will be ignored.
462 */
463static void ata_unpack_xfermask(unsigned int xfer_mask,
464 unsigned int *pio_mask,
465 unsigned int *mwdma_mask,
466 unsigned int *udma_mask)
467{
468 if (pio_mask)
469 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
470 if (mwdma_mask)
471 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
472 if (udma_mask)
473 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
474}
475
cb95d562 476static const struct ata_xfer_ent {
be9a50c8 477 int shift, bits;
cb95d562
TH
478 u8 base;
479} ata_xfer_tbl[] = {
480 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
481 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
482 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
483 { -1, },
484};
485
486/**
487 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
488 * @xfer_mask: xfer_mask of interest
489 *
490 * Return matching XFER_* value for @xfer_mask. Only the highest
491 * bit of @xfer_mask is considered.
492 *
493 * LOCKING:
494 * None.
495 *
496 * RETURNS:
497 * Matching XFER_* value, 0 if no match found.
498 */
499static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
500{
501 int highbit = fls(xfer_mask) - 1;
502 const struct ata_xfer_ent *ent;
503
504 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
505 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
506 return ent->base + highbit - ent->shift;
507 return 0;
508}
509
510/**
511 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
512 * @xfer_mode: XFER_* of interest
513 *
514 * Return matching xfer_mask for @xfer_mode.
515 *
516 * LOCKING:
517 * None.
518 *
519 * RETURNS:
520 * Matching xfer_mask, 0 if no match found.
521 */
522static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
523{
524 const struct ata_xfer_ent *ent;
525
526 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
527 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
528 return 1 << (ent->shift + xfer_mode - ent->base);
529 return 0;
530}
531
532/**
533 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
534 * @xfer_mode: XFER_* of interest
535 *
536 * Return matching xfer_shift for @xfer_mode.
537 *
538 * LOCKING:
539 * None.
540 *
541 * RETURNS:
542 * Matching xfer_shift, -1 if no match found.
543 */
544static int ata_xfer_mode2shift(unsigned int xfer_mode)
545{
546 const struct ata_xfer_ent *ent;
547
548 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
549 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
550 return ent->shift;
551 return -1;
552}
553
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* ordered by bit position within an xfer_mask */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";
	return xfer_mode_str[highbit];
}
599
/* Human-readable name for a SATA link speed value (1-based). */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	/* spd == 0 means "no restriction/unknown"; clamp out-of-range too */
	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
611
3373efd8 612void ata_dev_disable(struct ata_device *dev)
0b8efb0a 613{
09d7f9b0 614 if (ata_dev_enabled(dev)) {
9af5c9c9 615 if (ata_msg_drv(dev->link->ap))
09d7f9b0 616 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
4ae72a1e
TH
617 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
618 ATA_DNXFER_QUIET);
0b8efb0a
TH
619 dev->class++;
620 }
621}
622
ca77329f
KCA
623static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
624{
625 struct ata_link *link = dev->link;
626 struct ata_port *ap = link->ap;
627 u32 scontrol;
628 unsigned int err_mask;
629 int rc;
630
631 /*
632 * disallow DIPM for drivers which haven't set
633 * ATA_FLAG_IPM. This is because when DIPM is enabled,
634 * phy ready will be set in the interrupt status on
635 * state changes, which will cause some drivers to
636 * think there are errors - additionally drivers will
637 * need to disable hot plug.
638 */
639 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
640 ap->pm_policy = NOT_AVAILABLE;
641 return -EINVAL;
642 }
643
644 /*
645 * For DIPM, we will only enable it for the
646 * min_power setting.
647 *
648 * Why? Because Disks are too stupid to know that
649 * If the host rejects a request to go to SLUMBER
650 * they should retry at PARTIAL, and instead it
651 * just would give up. So, for medium_power to
652 * work at all, we need to only allow HIPM.
653 */
654 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
655 if (rc)
656 return rc;
657
658 switch (policy) {
659 case MIN_POWER:
660 /* no restrictions on IPM transitions */
661 scontrol &= ~(0x3 << 8);
662 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
663 if (rc)
664 return rc;
665
666 /* enable DIPM */
667 if (dev->flags & ATA_DFLAG_DIPM)
668 err_mask = ata_dev_set_feature(dev,
669 SETFEATURES_SATA_ENABLE, SATA_DIPM);
670 break;
671 case MEDIUM_POWER:
672 /* allow IPM to PARTIAL */
673 scontrol &= ~(0x1 << 8);
674 scontrol |= (0x2 << 8);
675 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
676 if (rc)
677 return rc;
678
679 /* disable DIPM */
680 if (ata_dev_enabled(dev) && (dev->flags & ATA_DFLAG_DIPM))
681 err_mask = ata_dev_set_feature(dev,
682 SETFEATURES_SATA_DISABLE, SATA_DIPM);
683 break;
684 case NOT_AVAILABLE:
685 case MAX_PERFORMANCE:
686 /* disable all IPM transitions */
687 scontrol |= (0x3 << 8);
688 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
689 if (rc)
690 return rc;
691
692 /* disable DIPM */
693 if (ata_dev_enabled(dev) && (dev->flags & ATA_DFLAG_DIPM))
694 err_mask = ata_dev_set_feature(dev,
695 SETFEATURES_SATA_DISABLE, SATA_DIPM);
696 break;
697 }
698
699 /* FIXME: handle SET FEATURES failure */
700 (void) err_mask;
701
702 return 0;
703}
704
705/**
706 * ata_dev_enable_pm - enable SATA interface power management
707 * @device - device to enable ipm for
708 * @policy - the link power management policy
709 *
710 * Enable SATA Interface power management. This will enable
711 * Device Interface Power Management (DIPM) for min_power
712 * policy, and then call driver specific callbacks for
713 * enabling Host Initiated Power management.
714 *
715 * Locking: Caller.
716 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
717 */
718void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
719{
720 int rc = 0;
721 struct ata_port *ap = dev->link->ap;
722
723 /* set HIPM first, then DIPM */
724 if (ap->ops->enable_pm)
725 rc = ap->ops->enable_pm(ap, policy);
726 if (rc)
727 goto enable_pm_out;
728 rc = ata_dev_set_dipm(dev, policy);
729
730enable_pm_out:
731 if (rc)
732 ap->pm_policy = MAX_PERFORMANCE;
733 else
734 ap->pm_policy = policy;
735 return /* rc */; /* hopefully we can use 'rc' eventually */
736}
737
738/**
739 * ata_dev_disable_pm - disable SATA interface power management
740 * @device - device to enable ipm for
741 *
742 * Disable SATA Interface power management. This will disable
743 * Device Interface Power Management (DIPM) without changing
744 * policy, call driver specific callbacks for disabling Host
745 * Initiated Power management.
746 *
747 * Locking: Caller.
748 * Returns: void
749 */
750static void ata_dev_disable_pm(struct ata_device *dev)
751{
752 struct ata_port *ap = dev->link->ap;
753
754 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
755 if (ap->ops->disable_pm)
756 ap->ops->disable_pm(ap);
757}
758
759void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
760{
761 ap->pm_policy = policy;
762 ap->link.eh_info.action |= ATA_EHI_LPM;
763 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
764 ata_port_schedule_eh(ap);
765}
766
767static void ata_lpm_enable(struct ata_host *host)
768{
769 struct ata_link *link;
770 struct ata_port *ap;
771 struct ata_device *dev;
772 int i;
773
774 for (i = 0; i < host->n_ports; i++) {
775 ap = host->ports[i];
776 ata_port_for_each_link(link, ap) {
777 ata_link_for_each_dev(dev, link)
778 ata_dev_disable_pm(dev);
779 }
780 }
781}
782
783static void ata_lpm_disable(struct ata_host *host)
784{
785 int i;
786
787 for (i = 0; i < host->n_ports; i++) {
788 struct ata_port *ap = host->ports[i];
789 ata_lpm_schedule(ap, ap->pm_policy);
790 }
791}
792
793
1da177e4 794/**
0d5ff566 795 * ata_devchk - PATA device presence detection
1da177e4
LT
796 * @ap: ATA channel to examine
797 * @device: Device to examine (starting at zero)
798 *
799 * This technique was originally described in
800 * Hale Landis's ATADRVR (www.ata-atapi.com), and
801 * later found its way into the ATA/ATAPI spec.
802 *
803 * Write a pattern to the ATA shadow registers,
804 * and if a device is present, it will respond by
805 * correctly storing and echoing back the
806 * ATA shadow register contents.
807 *
808 * LOCKING:
809 * caller.
810 */
811
0d5ff566 812static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
813{
814 struct ata_ioports *ioaddr = &ap->ioaddr;
815 u8 nsect, lbal;
816
817 ap->ops->dev_select(ap, device);
818
0d5ff566
TH
819 iowrite8(0x55, ioaddr->nsect_addr);
820 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 821
0d5ff566
TH
822 iowrite8(0xaa, ioaddr->nsect_addr);
823 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 824
0d5ff566
TH
825 iowrite8(0x55, ioaddr->nsect_addr);
826 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 827
0d5ff566
TH
828 nsect = ioread8(ioaddr->nsect_addr);
829 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
830
831 if ((nsect == 0x55) && (lbal == 0xaa))
832 return 1; /* we found a device */
833
834 return 0; /* nothing found */
835}
836
1da177e4
LT
837/**
838 * ata_dev_classify - determine device type based on ATA-spec signature
839 * @tf: ATA taskfile register set for device to be identified
840 *
841 * Determine from taskfile register contents whether a device is
842 * ATA or ATAPI, as per "Signature and persistence" section
843 * of ATA/PI spec (volume 1, sect 5.14).
844 *
845 * LOCKING:
846 * None.
847 *
848 * RETURNS:
633273a3
TH
849 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
850 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 851 */
057ace5e 852unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
853{
854 /* Apple's open source Darwin code hints that some devices only
855 * put a proper signature into the LBA mid/high registers,
856 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
857 *
858 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
859 * signatures for ATA and ATAPI devices attached on SerialATA,
860 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
861 * spec has never mentioned about using different signatures
862 * for ATA/ATAPI devices. Then, Serial ATA II: Port
863 * Multiplier specification began to use 0x69/0x96 to identify
864 * port multpliers and 0x3c/0xc3 to identify SEMB device.
865 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
866 * 0x69/0x96 shortly and described them as reserved for
867 * SerialATA.
868 *
869 * We follow the current spec and consider that 0x69/0x96
870 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 871 */
633273a3 872 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
873 DPRINTK("found ATA device by sig\n");
874 return ATA_DEV_ATA;
875 }
876
633273a3 877 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
878 DPRINTK("found ATAPI device by sig\n");
879 return ATA_DEV_ATAPI;
880 }
881
633273a3
TH
882 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
883 DPRINTK("found PMP device by sig\n");
884 return ATA_DEV_PMP;
885 }
886
887 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 888 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
889 return ATA_DEV_SEMB_UNSUP; /* not yet */
890 }
891
1da177e4
LT
892 DPRINTK("unknown device\n");
893 return ATA_DEV_UNKNOWN;
894}
895
896/**
897 * ata_dev_try_classify - Parse returned ATA device signature
3f19859e
TH
898 * @dev: ATA device to classify (starting at zero)
899 * @present: device seems present
b4dc7623 900 * @r_err: Value of error register on completion
1da177e4
LT
901 *
902 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
903 * an ATA/ATAPI-defined set of values is placed in the ATA
904 * shadow registers, indicating the results of device detection
905 * and diagnostics.
906 *
907 * Select the ATA device, and read the values from the ATA shadow
908 * registers. Then parse according to the Error register value,
909 * and the spec-defined values examined by ata_dev_classify().
910 *
911 * LOCKING:
912 * caller.
b4dc7623
TH
913 *
914 * RETURNS:
915 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4 916 */
3f19859e
TH
917unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
918 u8 *r_err)
1da177e4 919{
3f19859e 920 struct ata_port *ap = dev->link->ap;
1da177e4
LT
921 struct ata_taskfile tf;
922 unsigned int class;
923 u8 err;
924
3f19859e 925 ap->ops->dev_select(ap, dev->devno);
1da177e4
LT
926
927 memset(&tf, 0, sizeof(tf));
928
1da177e4 929 ap->ops->tf_read(ap, &tf);
0169e284 930 err = tf.feature;
b4dc7623
TH
931 if (r_err)
932 *r_err = err;
1da177e4 933
93590859 934 /* see if device passed diags: if master then continue and warn later */
3f19859e 935 if (err == 0 && dev->devno == 0)
93590859 936 /* diagnostic fail : do nothing _YET_ */
3f19859e 937 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
93590859 938 else if (err == 1)
1da177e4 939 /* do nothing */ ;
3f19859e 940 else if ((dev->devno == 0) && (err == 0x81))
1da177e4
LT
941 /* do nothing */ ;
942 else
b4dc7623 943 return ATA_DEV_NONE;
1da177e4 944
b4dc7623 945 /* determine if device is ATA or ATAPI */
1da177e4 946 class = ata_dev_classify(&tf);
b4dc7623 947
d7fbee05
TH
948 if (class == ATA_DEV_UNKNOWN) {
949 /* If the device failed diagnostic, it's likely to
950 * have reported incorrect device signature too.
951 * Assume ATA device if the device seems present but
952 * device signature is invalid with diagnostic
953 * failure.
954 */
955 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
956 class = ATA_DEV_ATA;
957 else
958 class = ATA_DEV_NONE;
959 } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
960 class = ATA_DEV_NONE;
961
b4dc7623 962 return class;
1da177e4
LT
963}
964
965/**
6a62a04d 966 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
967 * @id: IDENTIFY DEVICE results we will examine
968 * @s: string into which data is output
969 * @ofs: offset into identify device page
970 * @len: length of string to return. must be an even number.
971 *
972 * The strings in the IDENTIFY DEVICE page are broken up into
973 * 16-bit chunks. Run through the string, and output each
974 * 8-bit chunk linearly, regardless of platform.
975 *
976 * LOCKING:
977 * caller.
978 */
979
6a62a04d
TH
980void ata_id_string(const u16 *id, unsigned char *s,
981 unsigned int ofs, unsigned int len)
1da177e4
LT
982{
983 unsigned int c;
984
985 while (len > 0) {
986 c = id[ofs] >> 8;
987 *s = c;
988 s++;
989
990 c = id[ofs] & 0xff;
991 *s = c;
992 s++;
993
994 ofs++;
995 len -= 2;
996 }
997}
998
0e949ff3 999/**
6a62a04d 1000 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1001 * @id: IDENTIFY DEVICE results we will examine
1002 * @s: string into which data is output
1003 * @ofs: offset into identify device page
1004 * @len: length of string to return. must be an odd number.
1005 *
6a62a04d 1006 * This function is identical to ata_id_string except that it
0e949ff3
TH
1007 * trims trailing spaces and terminates the resulting string with
1008 * null. @len must be actual maximum length (even number) + 1.
1009 *
1010 * LOCKING:
1011 * caller.
1012 */
6a62a04d
TH
1013void ata_id_c_string(const u16 *id, unsigned char *s,
1014 unsigned int ofs, unsigned int len)
0e949ff3
TH
1015{
1016 unsigned char *p;
1017
1018 WARN_ON(!(len & 1));
1019
6a62a04d 1020 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1021
1022 p = s + strnlen(s, len - 1);
1023 while (p > s && p[-1] == ' ')
1024 p--;
1025 *p = '\0';
1026}
0baab86b 1027
db6f8759
TH
1028static u64 ata_id_n_sectors(const u16 *id)
1029{
1030 if (ata_id_has_lba(id)) {
1031 if (ata_id_has_lba48(id))
1032 return ata_id_u64(id, 100);
1033 else
1034 return ata_id_u32(id, 60);
1035 } else {
1036 if (ata_id_current_chs_valid(id))
1037 return ata_id_u32(id, 57);
1038 else
1039 return id[1] * id[3] * id[6];
1040 }
1041}
1042
1e999736
AC
1043static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
1044{
1045 u64 sectors = 0;
1046
1047 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1048 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1049 sectors |= (tf->hob_lbal & 0xff) << 24;
1050 sectors |= (tf->lbah & 0xff) << 16;
1051 sectors |= (tf->lbam & 0xff) << 8;
1052 sectors |= (tf->lbal & 0xff);
1053
1054 return ++sectors;
1055}
1056
1057static u64 ata_tf_to_lba(struct ata_taskfile *tf)
1058{
1059 u64 sectors = 0;
1060
1061 sectors |= (tf->device & 0x0f) << 24;
1062 sectors |= (tf->lbah & 0xff) << 16;
1063 sectors |= (tf->lbam & 0xff) << 8;
1064 sectors |= (tf->lbal & 0xff);
1065
1066 return ++sectors;
1067}
1068
1069/**
c728a914
TH
1070 * ata_read_native_max_address - Read native max address
1071 * @dev: target device
1072 * @max_sectors: out parameter for the result native max address
1e999736 1073 *
c728a914
TH
1074 * Perform an LBA48 or LBA28 native size query upon the device in
1075 * question.
1e999736 1076 *
c728a914
TH
1077 * RETURNS:
1078 * 0 on success, -EACCES if command is aborted by the drive.
1079 * -EIO on other errors.
1e999736 1080 */
c728a914 1081static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 1082{
c728a914 1083 unsigned int err_mask;
1e999736 1084 struct ata_taskfile tf;
c728a914 1085 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1086
1087 ata_tf_init(dev, &tf);
1088
c728a914 1089 /* always clear all address registers */
1e999736 1090 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 1091
c728a914
TH
1092 if (lba48) {
1093 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1094 tf.flags |= ATA_TFLAG_LBA48;
1095 } else
1096 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 1097
1e999736 1098 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
1099 tf.device |= ATA_LBA;
1100
2b789108 1101 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1102 if (err_mask) {
1103 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1104 "max address (err_mask=0x%x)\n", err_mask);
1105 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1106 return -EACCES;
1107 return -EIO;
1108 }
1e999736 1109
c728a914
TH
1110 if (lba48)
1111 *max_sectors = ata_tf_to_lba48(&tf);
1112 else
1113 *max_sectors = ata_tf_to_lba(&tf);
2dcb407e 1114 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
93328e11 1115 (*max_sectors)--;
c728a914 1116 return 0;
1e999736
AC
1117}
1118
1119/**
c728a914
TH
1120 * ata_set_max_sectors - Set max sectors
1121 * @dev: target device
6b38d1d1 1122 * @new_sectors: new max sectors value to set for the device
1e999736 1123 *
c728a914
TH
1124 * Set max sectors of @dev to @new_sectors.
1125 *
1126 * RETURNS:
1127 * 0 on success, -EACCES if command is aborted or denied (due to
1128 * previous non-volatile SET_MAX) by the drive. -EIO on other
1129 * errors.
1e999736 1130 */
05027adc 1131static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 1132{
c728a914 1133 unsigned int err_mask;
1e999736 1134 struct ata_taskfile tf;
c728a914 1135 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1136
1137 new_sectors--;
1138
1139 ata_tf_init(dev, &tf);
1140
1e999736 1141 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
1142
1143 if (lba48) {
1144 tf.command = ATA_CMD_SET_MAX_EXT;
1145 tf.flags |= ATA_TFLAG_LBA48;
1146
1147 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1148 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1149 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1e582ba4 1150 } else {
c728a914
TH
1151 tf.command = ATA_CMD_SET_MAX;
1152
1e582ba4
TH
1153 tf.device |= (new_sectors >> 24) & 0xf;
1154 }
1155
1e999736 1156 tf.protocol |= ATA_PROT_NODATA;
c728a914 1157 tf.device |= ATA_LBA;
1e999736
AC
1158
1159 tf.lbal = (new_sectors >> 0) & 0xff;
1160 tf.lbam = (new_sectors >> 8) & 0xff;
1161 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 1162
2b789108 1163 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1164 if (err_mask) {
1165 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1166 "max address (err_mask=0x%x)\n", err_mask);
1167 if (err_mask == AC_ERR_DEV &&
1168 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1169 return -EACCES;
1170 return -EIO;
1171 }
1172
c728a914 1173 return 0;
1e999736
AC
1174}
1175
1176/**
1177 * ata_hpa_resize - Resize a device with an HPA set
1178 * @dev: Device to resize
1179 *
1180 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1181 * it if required to the full size of the media. The caller must check
1182 * the drive has the HPA feature set enabled.
05027adc
TH
1183 *
1184 * RETURNS:
1185 * 0 on success, -errno on failure.
1e999736 1186 */
05027adc 1187static int ata_hpa_resize(struct ata_device *dev)
1e999736 1188{
05027adc
TH
1189 struct ata_eh_context *ehc = &dev->link->eh_context;
1190 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1191 u64 sectors = ata_id_n_sectors(dev->id);
1192 u64 native_sectors;
c728a914 1193 int rc;
a617c09f 1194
05027adc
TH
1195 /* do we need to do it? */
1196 if (dev->class != ATA_DEV_ATA ||
1197 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1198 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
c728a914 1199 return 0;
1e999736 1200
05027adc
TH
1201 /* read native max address */
1202 rc = ata_read_native_max_address(dev, &native_sectors);
1203 if (rc) {
1204 /* If HPA isn't going to be unlocked, skip HPA
1205 * resizing from the next try.
1206 */
1207 if (!ata_ignore_hpa) {
1208 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1209 "broken, will skip HPA handling\n");
1210 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1211
1212 /* we can continue if device aborted the command */
1213 if (rc == -EACCES)
1214 rc = 0;
1e999736 1215 }
37301a55 1216
05027adc
TH
1217 return rc;
1218 }
1219
1220 /* nothing to do? */
1221 if (native_sectors <= sectors || !ata_ignore_hpa) {
1222 if (!print_info || native_sectors == sectors)
1223 return 0;
1224
1225 if (native_sectors > sectors)
1226 ata_dev_printk(dev, KERN_INFO,
1227 "HPA detected: current %llu, native %llu\n",
1228 (unsigned long long)sectors,
1229 (unsigned long long)native_sectors);
1230 else if (native_sectors < sectors)
1231 ata_dev_printk(dev, KERN_WARNING,
1232 "native sectors (%llu) is smaller than "
1233 "sectors (%llu)\n",
1234 (unsigned long long)native_sectors,
1235 (unsigned long long)sectors);
1236 return 0;
1237 }
1238
1239 /* let's unlock HPA */
1240 rc = ata_set_max_sectors(dev, native_sectors);
1241 if (rc == -EACCES) {
1242 /* if device aborted the command, skip HPA resizing */
1243 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1244 "(%llu -> %llu), skipping HPA handling\n",
1245 (unsigned long long)sectors,
1246 (unsigned long long)native_sectors);
1247 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1248 return 0;
1249 } else if (rc)
1250 return rc;
1251
1252 /* re-read IDENTIFY data */
1253 rc = ata_dev_reread_id(dev, 0);
1254 if (rc) {
1255 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1256 "data after HPA resizing\n");
1257 return rc;
1258 }
1259
1260 if (print_info) {
1261 u64 new_sectors = ata_id_n_sectors(dev->id);
1262 ata_dev_printk(dev, KERN_INFO,
1263 "HPA unlocked: %llu -> %llu, native %llu\n",
1264 (unsigned long long)sectors,
1265 (unsigned long long)new_sectors,
1266 (unsigned long long)native_sectors);
1267 }
1268
1269 return 0;
1e999736
AC
1270}
1271
10305f0f
A
1272/**
1273 * ata_id_to_dma_mode - Identify DMA mode from id block
1274 * @dev: device to identify
cc261267 1275 * @unknown: mode to assume if we cannot tell
10305f0f
A
1276 *
1277 * Set up the timing values for the device based upon the identify
1278 * reported values for the DMA mode. This function is used by drivers
1279 * which rely upon firmware configured modes, but wish to report the
1280 * mode correctly when possible.
1281 *
1282 * In addition we emit similarly formatted messages to the default
1283 * ata_dev_set_mode handler, in order to provide consistency of
1284 * presentation.
1285 */
1286
1287void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1288{
1289 unsigned int mask;
1290 u8 mode;
1291
1292 /* Pack the DMA modes */
1293 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1294 if (dev->id[53] & 0x04)
1295 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1296
1297 /* Select the mode in use */
1298 mode = ata_xfer_mask2mode(mask);
1299
1300 if (mode != 0) {
1301 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1302 ata_mode_string(mask));
1303 } else {
1304 /* SWDMA perhaps ? */
1305 mode = unknown;
1306 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1307 }
1308
1309 /* Configure the device reporting */
1310 dev->xfer_mode = mode;
1311 dev->xfer_shift = ata_xfer_mode2shift(mode);
1312}
1313
0baab86b
EF
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
	/* intentionally empty: placeholder for hosts that need no
	 * device selection */
}
1329
0baab86b 1330
1da177e4
LT
1331/**
1332 * ata_std_dev_select - Select device 0/1 on ATA bus
1333 * @ap: ATA channel to manipulate
1334 * @device: ATA device (numbered from zero) to select
1335 *
1336 * Use the method defined in the ATA specification to
1337 * make either device 0, or device 1, active on the
0baab86b
EF
1338 * ATA channel. Works with both PIO and MMIO.
1339 *
1340 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1341 *
1342 * LOCKING:
1343 * caller.
1344 */
1345
2dcb407e 1346void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1da177e4
LT
1347{
1348 u8 tmp;
1349
1350 if (device == 0)
1351 tmp = ATA_DEVICE_OBS;
1352 else
1353 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1354
0d5ff566 1355 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1356 ata_pause(ap); /* needed; also flushes, for mmio */
1357}
1358
1359/**
1360 * ata_dev_select - Select device 0/1 on ATA bus
1361 * @ap: ATA channel to manipulate
1362 * @device: ATA device (numbered from zero) to select
1363 * @wait: non-zero to wait for Status register BSY bit to clear
1364 * @can_sleep: non-zero if context allows sleeping
1365 *
1366 * Use the method defined in the ATA specification to
1367 * make either device 0, or device 1, active on the
1368 * ATA channel.
1369 *
1370 * This is a high-level version of ata_std_dev_select(),
1371 * which additionally provides the services of inserting
1372 * the proper pauses and status polling, where needed.
1373 *
1374 * LOCKING:
1375 * caller.
1376 */
1377
1378void ata_dev_select(struct ata_port *ap, unsigned int device,
1379 unsigned int wait, unsigned int can_sleep)
1380{
88574551 1381 if (ata_msg_probe(ap))
44877b4e
TH
1382 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1383 "device %u, wait %u\n", device, wait);
1da177e4
LT
1384
1385 if (wait)
1386 ata_wait_idle(ap);
1387
1388 ap->ops->dev_select(ap, device);
1389
1390 if (wait) {
9af5c9c9 1391 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1392 msleep(150);
1393 ata_wait_idle(ap);
1394 }
1395}
1396
1397/**
1398 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1399 * @id: IDENTIFY DEVICE page to dump
1da177e4 1400 *
0bd3300a
TH
1401 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1402 * page.
1da177e4
LT
1403 *
1404 * LOCKING:
1405 * caller.
1406 */
1407
0bd3300a 1408static inline void ata_dump_id(const u16 *id)
1da177e4
LT
1409{
1410 DPRINTK("49==0x%04x "
1411 "53==0x%04x "
1412 "63==0x%04x "
1413 "64==0x%04x "
1414 "75==0x%04x \n",
0bd3300a
TH
1415 id[49],
1416 id[53],
1417 id[63],
1418 id[64],
1419 id[75]);
1da177e4
LT
1420 DPRINTK("80==0x%04x "
1421 "81==0x%04x "
1422 "82==0x%04x "
1423 "83==0x%04x "
1424 "84==0x%04x \n",
0bd3300a
TH
1425 id[80],
1426 id[81],
1427 id[82],
1428 id[83],
1429 id[84]);
1da177e4
LT
1430 DPRINTK("88==0x%04x "
1431 "93==0x%04x\n",
0bd3300a
TH
1432 id[88],
1433 id[93]);
1da177e4
LT
1434}
1435
cb95d562
TH
1436/**
1437 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1438 * @id: IDENTIFY data to compute xfer mask from
1439 *
1440 * Compute the xfermask for this device. This is not as trivial
1441 * as it seems if we must consider early devices correctly.
1442 *
1443 * FIXME: pre IDE drive timing (do we care ?).
1444 *
1445 * LOCKING:
1446 * None.
1447 *
1448 * RETURNS:
1449 * Computed xfermask
1450 */
1451static unsigned int ata_id_xfermask(const u16 *id)
1452{
1453 unsigned int pio_mask, mwdma_mask, udma_mask;
1454
1455 /* Usual case. Word 53 indicates word 64 is valid */
1456 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1457 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1458 pio_mask <<= 3;
1459 pio_mask |= 0x7;
1460 } else {
1461 /* If word 64 isn't valid then Word 51 high byte holds
1462 * the PIO timing number for the maximum. Turn it into
1463 * a mask.
1464 */
7a0f1c8a 1465 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb 1466 if (mode < 5) /* Valid PIO range */
2dcb407e 1467 pio_mask = (2 << mode) - 1;
46767aeb
AC
1468 else
1469 pio_mask = 1;
cb95d562
TH
1470
1471 /* But wait.. there's more. Design your standards by
1472 * committee and you too can get a free iordy field to
1473 * process. However its the speeds not the modes that
1474 * are supported... Note drivers using the timing API
1475 * will get this right anyway
1476 */
1477 }
1478
1479 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1480
b352e57d
AC
1481 if (ata_id_is_cfa(id)) {
1482 /*
1483 * Process compact flash extended modes
1484 */
1485 int pio = id[163] & 0x7;
1486 int dma = (id[163] >> 3) & 7;
1487
1488 if (pio)
1489 pio_mask |= (1 << 5);
1490 if (pio > 1)
1491 pio_mask |= (1 << 6);
1492 if (dma)
1493 mwdma_mask |= (1 << 3);
1494 if (dma > 1)
1495 mwdma_mask |= (1 << 4);
1496 }
1497
fb21f0d0
TH
1498 udma_mask = 0;
1499 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1500 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1501
1502 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1503}
1504
86e45b6b
TH
1505/**
1506 * ata_port_queue_task - Queue port_task
1507 * @ap: The ata_port to queue port_task for
e2a7f77a 1508 * @fn: workqueue function to be scheduled
65f27f38 1509 * @data: data for @fn to use
e2a7f77a 1510 * @delay: delay time for workqueue function
86e45b6b
TH
1511 *
1512 * Schedule @fn(@data) for execution after @delay jiffies using
1513 * port_task. There is one port_task per port and it's the
1514 * user(low level driver)'s responsibility to make sure that only
1515 * one task is active at any given time.
1516 *
1517 * libata core layer takes care of synchronization between
1518 * port_task and EH. ata_port_queue_task() may be ignored for EH
1519 * synchronization.
1520 *
1521 * LOCKING:
1522 * Inherited from caller.
1523 */
65f27f38 1524void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
86e45b6b
TH
1525 unsigned long delay)
1526{
65f27f38
DH
1527 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1528 ap->port_task_data = data;
86e45b6b 1529
45a66c1c
ON
1530 /* may fail if ata_port_flush_task() in progress */
1531 queue_delayed_work(ata_wq, &ap->port_task, delay);
86e45b6b
TH
1532}
1533
1534/**
1535 * ata_port_flush_task - Flush port_task
1536 * @ap: The ata_port to flush port_task for
1537 *
1538 * After this function completes, port_task is guranteed not to
1539 * be running or scheduled.
1540 *
1541 * LOCKING:
1542 * Kernel thread context (may sleep)
1543 */
1544void ata_port_flush_task(struct ata_port *ap)
1545{
86e45b6b
TH
1546 DPRINTK("ENTER\n");
1547
45a66c1c 1548 cancel_rearming_delayed_work(&ap->port_task);
86e45b6b 1549
0dd4b21f
BP
1550 if (ata_msg_ctl(ap))
1551 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
86e45b6b
TH
1552}
1553
7102d230 1554static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1555{
77853bf2 1556 struct completion *waiting = qc->private_data;
a2a7a662 1557
a2a7a662 1558 complete(waiting);
a2a7a662
TH
1559}
1560
1561/**
2432697b 1562 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1563 * @dev: Device to which the command is sent
1564 * @tf: Taskfile registers for the command and the result
d69cf37d 1565 * @cdb: CDB for packet command
a2a7a662 1566 * @dma_dir: Data tranfer direction of the command
5c1ad8b3 1567 * @sgl: sg list for the data buffer of the command
2432697b 1568 * @n_elem: Number of sg entries
2b789108 1569 * @timeout: Timeout in msecs (0 for default)
a2a7a662
TH
1570 *
1571 * Executes libata internal command with timeout. @tf contains
1572 * command on entry and result on return. Timeout and error
1573 * conditions are reported via return value. No recovery action
1574 * is taken after a command times out. It's caller's duty to
1575 * clean up after timeout.
1576 *
1577 * LOCKING:
1578 * None. Should be called with kernel context, might sleep.
551e8889
TH
1579 *
1580 * RETURNS:
1581 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1582 */
2432697b
TH
1583unsigned ata_exec_internal_sg(struct ata_device *dev,
1584 struct ata_taskfile *tf, const u8 *cdb,
87260216 1585 int dma_dir, struct scatterlist *sgl,
2b789108 1586 unsigned int n_elem, unsigned long timeout)
a2a7a662 1587{
9af5c9c9
TH
1588 struct ata_link *link = dev->link;
1589 struct ata_port *ap = link->ap;
a2a7a662
TH
1590 u8 command = tf->command;
1591 struct ata_queued_cmd *qc;
2ab7db1f 1592 unsigned int tag, preempted_tag;
dedaf2b0 1593 u32 preempted_sactive, preempted_qc_active;
da917d69 1594 int preempted_nr_active_links;
60be6b9a 1595 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1596 unsigned long flags;
77853bf2 1597 unsigned int err_mask;
d95a717f 1598 int rc;
a2a7a662 1599
ba6a1308 1600 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1601
e3180499 1602 /* no internal command while frozen */
b51e9e5d 1603 if (ap->pflags & ATA_PFLAG_FROZEN) {
ba6a1308 1604 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1605 return AC_ERR_SYSTEM;
1606 }
1607
2ab7db1f 1608 /* initialize internal qc */
a2a7a662 1609
2ab7db1f
TH
1610 /* XXX: Tag 0 is used for drivers with legacy EH as some
1611 * drivers choke if any other tag is given. This breaks
1612 * ata_tag_internal() test for those drivers. Don't use new
1613 * EH stuff without converting to it.
1614 */
1615 if (ap->ops->error_handler)
1616 tag = ATA_TAG_INTERNAL;
1617 else
1618 tag = 0;
1619
6cec4a39 1620 if (test_and_set_bit(tag, &ap->qc_allocated))
2ab7db1f 1621 BUG();
f69499f4 1622 qc = __ata_qc_from_tag(ap, tag);
2ab7db1f
TH
1623
1624 qc->tag = tag;
1625 qc->scsicmd = NULL;
1626 qc->ap = ap;
1627 qc->dev = dev;
1628 ata_qc_reinit(qc);
1629
9af5c9c9
TH
1630 preempted_tag = link->active_tag;
1631 preempted_sactive = link->sactive;
dedaf2b0 1632 preempted_qc_active = ap->qc_active;
da917d69 1633 preempted_nr_active_links = ap->nr_active_links;
9af5c9c9
TH
1634 link->active_tag = ATA_TAG_POISON;
1635 link->sactive = 0;
dedaf2b0 1636 ap->qc_active = 0;
da917d69 1637 ap->nr_active_links = 0;
2ab7db1f
TH
1638
1639 /* prepare & issue qc */
a2a7a662 1640 qc->tf = *tf;
d69cf37d
TH
1641 if (cdb)
1642 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e61e0672 1643 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1644 qc->dma_dir = dma_dir;
1645 if (dma_dir != DMA_NONE) {
2432697b 1646 unsigned int i, buflen = 0;
87260216 1647 struct scatterlist *sg;
2432697b 1648
87260216
JA
1649 for_each_sg(sgl, sg, n_elem, i)
1650 buflen += sg->length;
2432697b 1651
87260216 1652 ata_sg_init(qc, sgl, n_elem);
49c80429 1653 qc->nbytes = buflen;
a2a7a662
TH
1654 }
1655
77853bf2 1656 qc->private_data = &wait;
a2a7a662
TH
1657 qc->complete_fn = ata_qc_complete_internal;
1658
8e0e694a 1659 ata_qc_issue(qc);
a2a7a662 1660
ba6a1308 1661 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1662
2b789108
TH
1663 if (!timeout)
1664 timeout = ata_probe_timeout * 1000 / HZ;
1665
1666 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
d95a717f
TH
1667
1668 ata_port_flush_task(ap);
41ade50c 1669
d95a717f 1670 if (!rc) {
ba6a1308 1671 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1672
1673 /* We're racing with irq here. If we lose, the
1674 * following test prevents us from completing the qc
d95a717f
TH
1675 * twice. If we win, the port is frozen and will be
1676 * cleaned up by ->post_internal_cmd().
a2a7a662 1677 */
77853bf2 1678 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1679 qc->err_mask |= AC_ERR_TIMEOUT;
1680
1681 if (ap->ops->error_handler)
1682 ata_port_freeze(ap);
1683 else
1684 ata_qc_complete(qc);
f15a1daf 1685
0dd4b21f
BP
1686 if (ata_msg_warn(ap))
1687 ata_dev_printk(dev, KERN_WARNING,
88574551 1688 "qc timeout (cmd 0x%x)\n", command);
a2a7a662
TH
1689 }
1690
ba6a1308 1691 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1692 }
1693
d95a717f
TH
1694 /* do post_internal_cmd */
1695 if (ap->ops->post_internal_cmd)
1696 ap->ops->post_internal_cmd(qc);
1697
a51d644a
TH
1698 /* perform minimal error analysis */
1699 if (qc->flags & ATA_QCFLAG_FAILED) {
1700 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1701 qc->err_mask |= AC_ERR_DEV;
1702
1703 if (!qc->err_mask)
1704 qc->err_mask |= AC_ERR_OTHER;
1705
1706 if (qc->err_mask & ~AC_ERR_OTHER)
1707 qc->err_mask &= ~AC_ERR_OTHER;
d95a717f
TH
1708 }
1709
15869303 1710 /* finish up */
ba6a1308 1711 spin_lock_irqsave(ap->lock, flags);
15869303 1712
e61e0672 1713 *tf = qc->result_tf;
77853bf2
TH
1714 err_mask = qc->err_mask;
1715
1716 ata_qc_free(qc);
9af5c9c9
TH
1717 link->active_tag = preempted_tag;
1718 link->sactive = preempted_sactive;
dedaf2b0 1719 ap->qc_active = preempted_qc_active;
da917d69 1720 ap->nr_active_links = preempted_nr_active_links;
77853bf2 1721
1f7dd3e9
TH
1722 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1723 * Until those drivers are fixed, we detect the condition
1724 * here, fail the command with AC_ERR_SYSTEM and reenable the
1725 * port.
1726 *
1727 * Note that this doesn't change any behavior as internal
1728 * command failure results in disabling the device in the
1729 * higher layer for LLDDs without new reset/EH callbacks.
1730 *
1731 * Kill the following code as soon as those drivers are fixed.
1732 */
198e0fed 1733 if (ap->flags & ATA_FLAG_DISABLED) {
1f7dd3e9
TH
1734 err_mask |= AC_ERR_SYSTEM;
1735 ata_port_probe(ap);
1736 }
1737
ba6a1308 1738 spin_unlock_irqrestore(ap->lock, flags);
15869303 1739
77853bf2 1740 return err_mask;
a2a7a662
TH
1741}
1742
2432697b 1743/**
33480a0e 1744 * ata_exec_internal - execute libata internal command
2432697b
TH
1745 * @dev: Device to which the command is sent
1746 * @tf: Taskfile registers for the command and the result
1747 * @cdb: CDB for packet command
1748 * @dma_dir: Data tranfer direction of the command
1749 * @buf: Data buffer of the command
1750 * @buflen: Length of data buffer
2b789108 1751 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1752 *
1753 * Wrapper around ata_exec_internal_sg() which takes simple
1754 * buffer instead of sg list.
1755 *
1756 * LOCKING:
1757 * None. Should be called with kernel context, might sleep.
1758 *
1759 * RETURNS:
1760 * Zero on success, AC_ERR_* mask on failure
1761 */
1762unsigned ata_exec_internal(struct ata_device *dev,
1763 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1764 int dma_dir, void *buf, unsigned int buflen,
1765 unsigned long timeout)
2432697b 1766{
33480a0e
TH
1767 struct scatterlist *psg = NULL, sg;
1768 unsigned int n_elem = 0;
2432697b 1769
33480a0e
TH
1770 if (dma_dir != DMA_NONE) {
1771 WARN_ON(!buf);
1772 sg_init_one(&sg, buf, buflen);
1773 psg = &sg;
1774 n_elem++;
1775 }
2432697b 1776
2b789108
TH
1777 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1778 timeout);
2432697b
TH
1779}
1780
977e6b9f
TH
1781/**
1782 * ata_do_simple_cmd - execute simple internal command
1783 * @dev: Device to which the command is sent
1784 * @cmd: Opcode to execute
1785 *
1786 * Execute a 'simple' command, that only consists of the opcode
1787 * 'cmd' itself, without filling any other registers
1788 *
1789 * LOCKING:
1790 * Kernel thread context (may sleep).
1791 *
1792 * RETURNS:
1793 * Zero on success, AC_ERR_* mask on failure
e58eb583 1794 */
77b08fb5 1795unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1796{
1797 struct ata_taskfile tf;
e58eb583
TH
1798
1799 ata_tf_init(dev, &tf);
1800
1801 tf.command = cmd;
1802 tf.flags |= ATA_TFLAG_DEVICE;
1803 tf.protocol = ATA_PROT_NODATA;
1804
2b789108 1805 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1806}
1807
1bc4ccff
AC
1808/**
1809 * ata_pio_need_iordy - check if iordy needed
1810 * @adev: ATA device
1811 *
1812 * Check if the current speed of the device requires IORDY. Used
1813 * by various controllers for chip configuration.
1814 */
a617c09f 1815
1bc4ccff
AC
1816unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1817{
432729f0
AC
1818 /* Controller doesn't support IORDY. Probably a pointless check
1819 as the caller should know this */
9af5c9c9 1820 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1821 return 0;
432729f0
AC
1822 /* PIO3 and higher it is mandatory */
1823 if (adev->pio_mode > XFER_PIO_2)
1824 return 1;
1825 /* We turn it on when possible */
1826 if (ata_id_has_iordy(adev->id))
1bc4ccff 1827 return 1;
432729f0
AC
1828 return 0;
1829}
2e9edbf8 1830
432729f0
AC
1831/**
1832 * ata_pio_mask_no_iordy - Return the non IORDY mask
1833 * @adev: ATA device
1834 *
1835 * Compute the highest mode possible if we are not using iordy. Return
1836 * -1 if no iordy mode is available.
1837 */
a617c09f 1838
432729f0
AC
1839static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1840{
1bc4ccff 1841 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1842 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1843 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1844 /* Is the speed faster than the drive allows non IORDY ? */
1845 if (pio) {
1846 /* This is cycle times not frequency - watch the logic! */
1847 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1848 return 3 << ATA_SHIFT_PIO;
1849 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1850 }
1851 }
432729f0 1852 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1853}
1854
1da177e4 1855/**
49016aca 1856 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1857 * @dev: target device
1858 * @p_class: pointer to class of the target device (may be changed)
bff04647 1859 * @flags: ATA_READID_* flags
fe635c7e 1860 * @id: buffer to read IDENTIFY data into
1da177e4 1861 *
49016aca
TH
1862 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1863 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1864 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1865 * for pre-ATA4 drives.
1da177e4 1866 *
50a99018 1867 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 1868 * now we abort if we hit that case.
50a99018 1869 *
1da177e4 1870 * LOCKING:
49016aca
TH
1871 * Kernel thread context (may sleep)
1872 *
1873 * RETURNS:
1874 * 0 on success, -errno otherwise.
1da177e4 1875 */
a9beec95 1876int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1877 unsigned int flags, u16 *id)
1da177e4 1878{
9af5c9c9 1879 struct ata_port *ap = dev->link->ap;
49016aca 1880 unsigned int class = *p_class;
a0123703 1881 struct ata_taskfile tf;
49016aca
TH
1882 unsigned int err_mask = 0;
1883 const char *reason;
54936f8b 1884 int may_fallback = 1, tried_spinup = 0;
49016aca 1885 int rc;
1da177e4 1886
0dd4b21f 1887 if (ata_msg_ctl(ap))
44877b4e 1888 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1889
49016aca 1890 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
49016aca 1891 retry:
3373efd8 1892 ata_tf_init(dev, &tf);
a0123703 1893
49016aca
TH
1894 switch (class) {
1895 case ATA_DEV_ATA:
a0123703 1896 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1897 break;
1898 case ATA_DEV_ATAPI:
a0123703 1899 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1900 break;
1901 default:
1902 rc = -ENODEV;
1903 reason = "unsupported class";
1904 goto err_out;
1da177e4
LT
1905 }
1906
a0123703 1907 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1908
1909 /* Some devices choke if TF registers contain garbage. Make
1910 * sure those are properly initialized.
1911 */
1912 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1913
1914 /* Device presence detection is unreliable on some
1915 * controllers. Always poll IDENTIFY if available.
1916 */
1917 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 1918
3373efd8 1919 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2b789108 1920 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
a0123703 1921 if (err_mask) {
800b3996 1922 if (err_mask & AC_ERR_NODEV_HINT) {
55a8e2c8 1923 DPRINTK("ata%u.%d: NODEV after polling detection\n",
44877b4e 1924 ap->print_id, dev->devno);
55a8e2c8
TH
1925 return -ENOENT;
1926 }
1927
54936f8b
TH
1928 /* Device or controller might have reported the wrong
1929 * device class. Give a shot at the other IDENTIFY if
1930 * the current one is aborted by the device.
1931 */
1932 if (may_fallback &&
1933 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1934 may_fallback = 0;
1935
1936 if (class == ATA_DEV_ATA)
1937 class = ATA_DEV_ATAPI;
1938 else
1939 class = ATA_DEV_ATA;
1940 goto retry;
1941 }
1942
49016aca
TH
1943 rc = -EIO;
1944 reason = "I/O error";
1da177e4
LT
1945 goto err_out;
1946 }
1947
54936f8b
TH
1948 /* Falling back doesn't make sense if ID data was read
1949 * successfully at least once.
1950 */
1951 may_fallback = 0;
1952
49016aca 1953 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1954
49016aca 1955 /* sanity check */
a4f5749b 1956 rc = -EINVAL;
6070068b 1957 reason = "device reports invalid type";
a4f5749b
TH
1958
1959 if (class == ATA_DEV_ATA) {
1960 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1961 goto err_out;
1962 } else {
1963 if (ata_id_is_ata(id))
1964 goto err_out;
49016aca
TH
1965 }
1966
169439c2
ML
1967 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1968 tried_spinup = 1;
1969 /*
1970 * Drive powered-up in standby mode, and requires a specific
1971 * SET_FEATURES spin-up subcommand before it will accept
1972 * anything other than the original IDENTIFY command.
1973 */
218f3d30 1974 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 1975 if (err_mask && id[2] != 0x738c) {
169439c2
ML
1976 rc = -EIO;
1977 reason = "SPINUP failed";
1978 goto err_out;
1979 }
1980 /*
1981 * If the drive initially returned incomplete IDENTIFY info,
1982 * we now must reissue the IDENTIFY command.
1983 */
1984 if (id[2] == 0x37c8)
1985 goto retry;
1986 }
1987
bff04647 1988 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
1989 /*
1990 * The exact sequence expected by certain pre-ATA4 drives is:
1991 * SRST RESET
50a99018
AC
1992 * IDENTIFY (optional in early ATA)
1993 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
1994 * anything else..
1995 * Some drives were very specific about that exact sequence.
50a99018
AC
1996 *
1997 * Note that ATA4 says lba is mandatory so the second check
1998 * shoud never trigger.
49016aca
TH
1999 */
2000 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 2001 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
2002 if (err_mask) {
2003 rc = -EIO;
2004 reason = "INIT_DEV_PARAMS failed";
2005 goto err_out;
2006 }
2007
2008 /* current CHS translation info (id[53-58]) might be
2009 * changed. reread the identify device info.
2010 */
bff04647 2011 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
2012 goto retry;
2013 }
2014 }
2015
2016 *p_class = class;
fe635c7e 2017
49016aca
TH
2018 return 0;
2019
2020 err_out:
88574551 2021 if (ata_msg_warn(ap))
0dd4b21f 2022 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 2023 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
2024 return rc;
2025}
2026
3373efd8 2027static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2028{
9af5c9c9
TH
2029 struct ata_port *ap = dev->link->ap;
2030 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2031}
2032
a6e6ce8e
TH
2033static void ata_dev_config_ncq(struct ata_device *dev,
2034 char *desc, size_t desc_sz)
2035{
9af5c9c9 2036 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
2037 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2038
2039 if (!ata_id_has_ncq(dev->id)) {
2040 desc[0] = '\0';
2041 return;
2042 }
75683fe7 2043 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
2044 snprintf(desc, desc_sz, "NCQ (not used)");
2045 return;
2046 }
a6e6ce8e 2047 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2048 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2049 dev->flags |= ATA_DFLAG_NCQ;
2050 }
2051
2052 if (hdepth >= ddepth)
2053 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2054 else
2055 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2056}
2057
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.  The ordering below is
 *	significant: horkage and ACPI/HPA processing may rewrite the
 *	IDENTIFY data that the later per-class configuration reads.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	/* only chatter to dmesg when EH asked for it */
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	/* nothing to do for a disabled device */
	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		} else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* word 59 bit 8 set -> low byte holds the current
		 * multi-sector setting */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				/* FLUSH CACHE EXT only matters past the
				 * 28-bit LBA boundary */
				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* link power management capabilities, unless blacklisted */
	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	/* give the LLDD a chance to apply controller-specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
2328
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.  Intended for use as a ->cable_detect callback.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40, unconditionally.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2341
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.  Intended for use as a ->cable_detect callback.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80, unconditionally.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2354
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *	Intended for use as a ->cable_detect callback.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK, unconditionally.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2366
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 *	Intended for use as a ->cable_detect callback.
 *
 *	RETURNS:
 *	ATA_CBL_SATA, unconditionally.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2378
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.  Failed devices are retried up to ATA_PROBE_MAX_TRIES
 *	times, with the link speed and transfer mode stepped down on
 *	the final attempt.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	/* every device starts with the full retry budget */
	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_link_for_each_dev(dev, &ap->link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		/* class is restored per-device below, once the retry
		 * budget says the device is still worth probing */
		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* @dev points at the device that failed; charge its retry budget */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through - -ENODEV is retried like -EIO */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2527
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing DISABLED is sufficient; per-device state is untouched */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2543
3be680b7
TH
2544/**
2545 * sata_print_link_status - Print SATA link status
936fd732 2546 * @link: SATA link to printk link status about
3be680b7
TH
2547 *
2548 * This function prints link speed and status of a SATA link.
2549 *
2550 * LOCKING:
2551 * None.
2552 */
936fd732 2553void sata_print_link_status(struct ata_link *link)
3be680b7 2554{
6d5f9732 2555 u32 sstatus, scontrol, tmp;
3be680b7 2556
936fd732 2557 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2558 return;
936fd732 2559 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2560
936fd732 2561 if (ata_link_online(link)) {
3be680b7 2562 tmp = (sstatus >> 4) & 0xf;
936fd732 2563 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2564 "SATA link up %s (SStatus %X SControl %X)\n",
2565 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2566 } else {
936fd732 2567 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2568 "SATA link down (SStatus %X SControl %X)\n",
2569 sstatus, scontrol);
3be680b7
TH
2570 }
2571}
2572
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.  On success the port is
 *	probed and @ap->cbl is set to ATA_CBL_SATA; on failure
 *	the port is disabled.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	struct ata_link *link = &ap->link;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus;

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(link, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(link, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary; DET == 1 means
	 * device presence detected but PHY communication not established */
	do {
		msleep(200);
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(link);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_link_offline(link))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device itself to come out of BSY */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
2628
2629/**
780a87f7
JG
2630 * sata_phy_reset - Reset SATA bus.
2631 * @ap: SATA port associated with target SATA PHY.
1da177e4 2632 *
780a87f7
JG
2633 * This function resets the SATA bus, and then probes
2634 * the bus for devices.
1da177e4
LT
2635 *
2636 * LOCKING:
0cba632b 2637 * PCI/etc. bus probe sem.
1da177e4
LT
2638 *
2639 */
2640void sata_phy_reset(struct ata_port *ap)
2641{
2642 __sata_phy_reset(ap);
198e0fed 2643 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2644 return;
2645 ata_bus_reset(ap);
2646}
2647
ebdfca6e
AC
2648/**
2649 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2650 * @adev: device
2651 *
2652 * Obtain the other device on the same cable, or if none is
2653 * present NULL is returned
2654 */
2e9edbf8 2655
3373efd8 2656struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2657{
9af5c9c9
TH
2658 struct ata_link *link = adev->link;
2659 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2660 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2661 return NULL;
2662 return pair;
2663}
2664
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	/* mark both legacy device slots empty and flag the port disabled */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2684
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	/* no SCR access -> cannot manage the speed limit */
	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	/* a single-bit (or empty) mask cannot be lowered further */
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2743
936fd732 2744static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d
TH
2745{
2746 u32 spd, limit;
2747
936fd732 2748 if (link->sata_spd_limit == UINT_MAX)
1c3fae4d
TH
2749 limit = 0;
2750 else
936fd732 2751 limit = fls(link->sata_spd_limit);
1c3fae4d
TH
2752
2753 spd = (*scontrol >> 4) & 0xf;
2754 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2755
2756 return spd != limit;
2757}
2758
2759/**
3c567b7d 2760 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2761 * @link: Link in question
1c3fae4d
TH
2762 *
2763 * Test whether the spd limit in SControl matches
936fd732 2764 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2765 * whether hardreset is necessary to apply SATA spd
2766 * configuration.
2767 *
2768 * LOCKING:
2769 * Inherited from caller.
2770 *
2771 * RETURNS:
2772 * 1 if SATA spd configuration is needed, 0 otherwise.
2773 */
936fd732 2774int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2775{
2776 u32 scontrol;
2777
936fd732 2778 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2779 return 0;
2780
936fd732 2781 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2782}
2783
2784/**
3c567b7d 2785 * sata_set_spd - set SATA spd according to spd limit
936fd732 2786 * @link: Link to set SATA spd for
1c3fae4d 2787 *
936fd732 2788 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2789 *
2790 * LOCKING:
2791 * Inherited from caller.
2792 *
2793 * RETURNS:
2794 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2795 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2796 */
936fd732 2797int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2798{
2799 u32 scontrol;
81952c54 2800 int rc;
1c3fae4d 2801
936fd732 2802 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2803 return rc;
1c3fae4d 2804
936fd732 2805 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2806 return 0;
2807
936fd732 2808 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2809 return rc;
2810
1c3fae4d
TH
2811 return 1;
2812}
2813
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {
	/* { mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma },
	 * terminated by a sentinel entry with mode == 0xFF */

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};
2862
/* Round @v up to a whole number of @unit-sized clocks (minimum 1).
 * NOTE: evaluates @v and @unit more than once - no side effects. */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
/* Like ENOUGH(), but a zero value (field unused) quantizes to zero. */
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)

/* Convert timing @t into clock counts in @q, scaling each nanosecond
 * field by 1000 and dividing by the clock period argument: @T for the
 * PIO/MWDMA fields and @UT for the UDMA field.  @q may alias @t
 * (callers pass the same struct for both).
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2877
/* Merge timings @a and @b into @m: for every field selected by a bit in
 * @what (ATA_TIMING_* flags), take the larger (i.e. slower, safer) of
 * the two values.  Fields not selected are left untouched in @m.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
2890
2dcb407e 2891static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
452503f9
AC
2892{
2893 const struct ata_timing *t;
2894
2895 for (t = ata_timing; t->mode != speed; t++)
91190758 2896 if (t->mode == 0xFF)
452503f9 2897 return NULL;
2e9edbf8 2898 return t;
452503f9
AC
2899}
2900
/* Compute the quantized bus timing for @adev at transfer mode @speed
 * into @t.  @T and @UT are the clock period arguments passed through to
 * ata_timing_quantize() for PIO/MWDMA and UDMA fields respectively.
 * Returns 0 on success, -EINVAL if @speed has no table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* drive-requested cycle times can only lengthen the table's */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 * (single level of recursion: PIO modes don't recurse further)
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2971
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector (may be ORed with ATA_DNXFER_QUIET)
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* the QUIET flag only suppresses the warning printk below */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest DMA mode, preferring UDMA over MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to modes safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - FORCE_PIO0 also clears all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* PIO0 must always survive; a no-op adjustment is also an error */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3060
/* Issue SET FEATURES - XFER MODE for @dev's pre-computed xfer mode and
 * revalidate the device afterwards.  Several classes of devices that
 * mishandle the command are forgiven (err_mask cleared) below.
 *
 * RETURNS: 0 on success, -EIO if the device rejected the mode,
 * or the error from revalidation.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		err_mask &= ~AC_ERR_DEV;

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0
	   (IDENTIFY word 63, bit 8 = MWDMA0 selected) */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* tell EH revalidation that a mode change just happened */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
3109
1da177e4 3110/**
04351821 3111 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3112 * @link: link on which timings will be programmed
e82cbdb9 3113 * @r_failed_dev: out paramter for failed device
1da177e4 3114 *
04351821
A
3115 * Standard implementation of the function used to tune and set
3116 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3117 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3118 * returned in @r_failed_dev.
780a87f7 3119 *
1da177e4 3120 * LOCKING:
0cba632b 3121 * PCI/etc. bus probe sem.
e82cbdb9
TH
3122 *
3123 * RETURNS:
3124 * 0 on success, negative errno otherwise
1da177e4 3125 */
04351821 3126
0260731f 3127int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3128{
0260731f 3129 struct ata_port *ap = link->ap;
e8e0619f 3130 struct ata_device *dev;
f58229f8 3131 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3132
a6d5a51c 3133 /* step 1: calculate xfer_mask */
f58229f8 3134 ata_link_for_each_dev(dev, link) {
acf356b1 3135 unsigned int pio_mask, dma_mask;
b3a70601 3136 unsigned int mode_mask;
a6d5a51c 3137
e1211e3f 3138 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3139 continue;
3140
b3a70601
AC
3141 mode_mask = ATA_DMA_MASK_ATA;
3142 if (dev->class == ATA_DEV_ATAPI)
3143 mode_mask = ATA_DMA_MASK_ATAPI;
3144 else if (ata_id_is_cfa(dev->id))
3145 mode_mask = ATA_DMA_MASK_CFA;
3146
3373efd8 3147 ata_dev_xfermask(dev);
1da177e4 3148
acf356b1
TH
3149 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3150 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3151
3152 if (libata_dma_mask & mode_mask)
3153 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3154 else
3155 dma_mask = 0;
3156
acf356b1
TH
3157 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3158 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3159
4f65977d 3160 found = 1;
5444a6f4
AC
3161 if (dev->dma_mode)
3162 used_dma = 1;
a6d5a51c 3163 }
4f65977d 3164 if (!found)
e82cbdb9 3165 goto out;
a6d5a51c
TH
3166
3167 /* step 2: always set host PIO timings */
f58229f8 3168 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3169 if (!ata_dev_enabled(dev))
3170 continue;
3171
3172 if (!dev->pio_mode) {
f15a1daf 3173 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3174 rc = -EINVAL;
e82cbdb9 3175 goto out;
e8e0619f
TH
3176 }
3177
3178 dev->xfer_mode = dev->pio_mode;
3179 dev->xfer_shift = ATA_SHIFT_PIO;
3180 if (ap->ops->set_piomode)
3181 ap->ops->set_piomode(ap, dev);
3182 }
1da177e4 3183
a6d5a51c 3184 /* step 3: set host DMA timings */
f58229f8 3185 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3186 if (!ata_dev_enabled(dev) || !dev->dma_mode)
3187 continue;
3188
3189 dev->xfer_mode = dev->dma_mode;
3190 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3191 if (ap->ops->set_dmamode)
3192 ap->ops->set_dmamode(ap, dev);
3193 }
1da177e4
LT
3194
3195 /* step 4: update devices' xfer mode */
f58229f8 3196 ata_link_for_each_dev(dev, link) {
18d90deb 3197 /* don't update suspended devices' xfer mode */
9666f400 3198 if (!ata_dev_enabled(dev))
83206a29
TH
3199 continue;
3200
3373efd8 3201 rc = ata_dev_set_mode(dev);
5bbc53f4 3202 if (rc)
e82cbdb9 3203 goto out;
83206a29 3204 }
1da177e4 3205
e8e0619f
TH
3206 /* Record simplex status. If we selected DMA then the other
3207 * host channels are not permitted to do so.
5444a6f4 3208 */
cca3974e 3209 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3210 ap->host->simplex_claimed = ap;
5444a6f4 3211
e82cbdb9
TH
3212 out:
3213 if (rc)
3214 *r_failed_dev = dev;
3215 return rc;
1da177e4
LT
3216}
3217
04351821
A
3218/**
3219 * ata_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3220 * @link: link on which timings will be programmed
04351821
A
3221 * @r_failed_dev: out paramter for failed device
3222 *
3223 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3224 * ata_set_mode() fails, pointer to the failing device is
3225 * returned in @r_failed_dev.
3226 *
3227 * LOCKING:
3228 * PCI/etc. bus probe sem.
3229 *
3230 * RETURNS:
3231 * 0 on success, negative errno otherwise
3232 */
0260731f 3233int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
04351821 3234{
0260731f
TH
3235 struct ata_port *ap = link->ap;
3236
04351821
A
3237 /* has private set_mode? */
3238 if (ap->ops->set_mode)
0260731f
TH
3239 return ap->ops->set_mode(link, r_failed_dev);
3240 return ata_do_set_mode(link, r_failed_dev);
04351821
A
3241}
3242
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load the taskfile registers first; writing the command
	 * register is what actually starts execution on the device,
	 * so the order of these two calls must not change
	 */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
3262
1da177e4
LT
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (jiffies); after this we warn once
 *	@tmout: overall timeout (jiffies)
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.  A status of 0xff is treated as "no
 *	device present" (floating bus) and aborts the wait.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -ENODEV if the port reads 0xff,
 *	-EBUSY if BSY never cleared within @tmout.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	/* first phase: poll quietly until the impatience timeout */
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* second phase: keep polling until the hard timeout */
	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
3317
/**
 *	ata_wait_after_reset - wait before checking status after reset
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	After reset, we need to pause a while before reading status.
 *	Also, certain combination of controller and device report 0xff
 *	for some duration (e.g. until SATA PHY is up and running)
 *	which is interpreted as empty port in ATA world.  This
 *	function also waits for such devices to get out of 0xff
 *	status.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
{
	/* cap the 0xff-wait at ATA_TMOUT_FF_WAIT even if @deadline
	 * allows longer
	 */
	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;

	if (time_before(until, deadline))
		deadline = until;

	/* Spec mandates ">= 2ms" before checking status.  We wait
	 * 150ms, because that was the magic delay used for ATAPI
	 * devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready.
	 */
	msleep(150);

	/* Wait for 0xff to clear.  Some SATA devices take a long time
	 * to clear 0xff after reset.  For example, HHD424020F7SV00
	 * iVDR needs >= 800ms while.  Quantum GoVault needs even more
	 * than that.
	 */
	while (1) {
		u8 status = ata_chk_status(ap);

		if (status != 0xff || time_after(jiffies, deadline))
			return;

		msleep(50);
	}
}
3366
/**
 *	ata_wait_ready - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.  Warns once if the port is slow (more than 5s busy
 *	with more than 3s of deadline still remaining).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -ENODEV if the link is offline and the port
 *	reads 0xff, -EBUSY on timeout.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		/* 0xff with link offline means nothing is attached */
		if (!ata_link_online(&ap->link) && status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}
3408
/* Post-softreset wait: wait for the devices found by ata_devchk() to
 * come out of BSY, then re-run the dev_select dance.
 *
 * RETURNS: 0 if all present devices became ready, -ENODEV if a device
 * disappeared (non-fatal, recorded in @ret), other negative errno on
 * hard failure.
 */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			/* -ENODEV is remembered but does not abort */
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
3468
/* Perform an ATA software reset (SRST) by pulsing the SRST bit in the
 * device control register, then hand off to ata_bus_post_reset() to
 * wait for the devices in @devmask to become ready.
 *
 * RETURNS: 0 on success, -ENODEV if the bus floats at 0xFF,
 * otherwise the result of ata_bus_post_reset().
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_chk_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
3495
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present;
	 * SATA-reset ports assume dev0 is there
	 */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset; -ENODEV here is tolerated (empty port) */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}
3583
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline = min(@deadline, now + params[2]) */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field of SStatus matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 only counts as stable once deadline hits */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3652
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* DET = 0 (no action), IPM mask preserved -> phy resumed */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_link_debounce(link, params, deadline);
}
3688
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume: some controllers need a hardreset to
	 * bring the link back after resume
	 */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* Some PMPs don't work with only SRST, force hardreset if PMP
	 * is supported.
	 */
	if (ap->flags & ATA_FLAG_PMP)
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
3751
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link -> report empty port, nothing to reset */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
3811
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 4: disable the phy while changing speed */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET = 1: perform interface communication initialization */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3871
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* If PMP is supported, we have to do follow-up SRST.  Note
	 * that some PMPs don't send D2H Reg FIS after hardreset at
	 * all if the first port is empty.  Wait for it just for a
	 * second and request follow-up SRST.
	 */
	if (ap->flags & ATA_FLAG_PMP) {
		ata_wait_ready(ap, jiffies + HZ);
		return -EAGAIN;
	}

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
3939
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	/* clear SError (SError bits are write-1-to-clear) */
	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
3984
623a3128
TH
3985/**
3986 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3987 * @dev: device to compare against
3988 * @new_class: class of the new device
3989 * @new_id: IDENTIFY page of the new device
3990 *
3991 * Compare @new_class and @new_id against @dev and determine
3992 * whether @dev is the device indicated by @new_class and
3993 * @new_id.
3994 *
3995 * LOCKING:
3996 * None.
3997 *
3998 * RETURNS:
3999 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4000 */
3373efd8
TH
4001static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4002 const u16 *new_id)
623a3128
TH
4003{
4004 const u16 *old_id = dev->id;
a0cf733b
TH
4005 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4006 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
4007
4008 if (dev->class != new_class) {
f15a1daf
TH
4009 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
4010 dev->class, new_class);
623a3128
TH
4011 return 0;
4012 }
4013
a0cf733b
TH
4014 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4015 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4016 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4017 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
4018
4019 if (strcmp(model[0], model[1])) {
f15a1daf
TH
4020 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4021 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
4022 return 0;
4023 }
4024
4025 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
4026 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4027 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
4028 return 0;
4029 }
4030
623a3128
TH
4031 return 1;
4032}
4033
4034/**
fe30911b 4035 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 4036 * @dev: target ATA device
bff04647 4037 * @readid_flags: read ID flags
623a3128
TH
4038 *
4039 * Re-read IDENTIFY page and make sure @dev is still attached to
4040 * the port.
4041 *
4042 * LOCKING:
4043 * Kernel thread context (may sleep)
4044 *
4045 * RETURNS:
4046 * 0 on success, negative errno otherwise
4047 */
fe30911b 4048int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 4049{
5eb45c02 4050 unsigned int class = dev->class;
9af5c9c9 4051 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
4052 int rc;
4053
fe635c7e 4054 /* read ID data */
bff04647 4055 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 4056 if (rc)
fe30911b 4057 return rc;
623a3128
TH
4058
4059 /* is the device still there? */
fe30911b
TH
4060 if (!ata_dev_same_device(dev, class, id))
4061 return -ENODEV;
623a3128 4062
fe635c7e 4063 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
4064 return 0;
4065}
4066
4067/**
4068 * ata_dev_revalidate - Revalidate ATA device
4069 * @dev: device to revalidate
422c9daa 4070 * @new_class: new class code
fe30911b
TH
4071 * @readid_flags: read ID flags
4072 *
4073 * Re-read IDENTIFY page, make sure @dev is still attached to the
4074 * port and reconfigure it according to the new IDENTIFY page.
4075 *
4076 * LOCKING:
4077 * Kernel thread context (may sleep)
4078 *
4079 * RETURNS:
4080 * 0 on success, negative errno otherwise
4081 */
422c9daa
TH
4082int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4083 unsigned int readid_flags)
fe30911b 4084{
6ddcd3b0 4085 u64 n_sectors = dev->n_sectors;
fe30911b
TH
4086 int rc;
4087
4088 if (!ata_dev_enabled(dev))
4089 return -ENODEV;
4090
422c9daa
TH
4091 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4092 if (ata_class_enabled(new_class) &&
4093 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4094 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4095 dev->class, new_class);
4096 rc = -ENODEV;
4097 goto fail;
4098 }
4099
fe30911b
TH
4100 /* re-read ID */
4101 rc = ata_dev_reread_id(dev, readid_flags);
4102 if (rc)
4103 goto fail;
623a3128
TH
4104
4105 /* configure device according to the new ID */
efdaedc4 4106 rc = ata_dev_configure(dev);
6ddcd3b0
TH
4107 if (rc)
4108 goto fail;
4109
4110 /* verify n_sectors hasn't changed */
b54eebd6
TH
4111 if (dev->class == ATA_DEV_ATA && n_sectors &&
4112 dev->n_sectors != n_sectors) {
6ddcd3b0
TH
4113 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
4114 "%llu != %llu\n",
4115 (unsigned long long)n_sectors,
4116 (unsigned long long)dev->n_sectors);
8270bec4
TH
4117
4118 /* restore original n_sectors */
4119 dev->n_sectors = n_sectors;
4120
6ddcd3b0
TH
4121 rc = -ENODEV;
4122 goto fail;
4123 }
4124
4125 return 0;
623a3128
TH
4126
4127 fail:
f15a1daf 4128 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
4129 return rc;
4130}
4131
6919a0a6
AC
4132struct ata_blacklist_entry {
4133 const char *model_num;
4134 const char *model_rev;
4135 unsigned long horkage;
4136};
4137
4138static const struct ata_blacklist_entry ata_device_blacklist [] = {
4139 /* Devices with DMA related problems under Linux */
4140 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4141 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4142 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4143 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4144 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4145 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4146 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4147 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4148 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4149 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
4150 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
4151 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4152 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4153 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4154 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4155 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4156 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
4157 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
4158 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4159 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4160 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4161 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4162 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4163 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4164 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4165 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4166 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4167 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
2dcb407e 4168 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
39f19886 4169 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3af9a77a
TH
4170 /* Odd clown on sil3726/4726 PMPs */
4171 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
4172 ATA_HORKAGE_SKIP_PM },
6919a0a6 4173
18d6e9d5 4174 /* Weird ATAPI devices */
40a1d531 4175 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 4176
6919a0a6
AC
4177 /* Devices we expect to fail diagnostics */
4178
4179 /* Devices where NCQ should be avoided */
4180 /* NCQ is slow */
2dcb407e 4181 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
4182 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4183 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 4184 /* NCQ is broken */
539cc7c7 4185 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 4186 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
0b0a43e0
DM
4187 { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
4188 { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
da6f0ec2 4189 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
539cc7c7 4190
36e337d0
RH
4191 /* Blacklist entries taken from Silicon Image 3124/3132
4192 Windows driver .inf file - also several Linux problem reports */
4193 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4194 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4195 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
bd9c5a39
TH
4196 /* Drives which do spurious command completion */
4197 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
2f8fcebb 4198 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
70edb185 4199 { "HDT722516DLA380", "V43OA96A", ATA_HORKAGE_NONCQ, },
e14cbfa6 4200 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
0c173174 4201 { "Hitachi HTS542525K9SA00", "BBFOC31P", ATA_HORKAGE_NONCQ, },
2f8fcebb 4202 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
7f567620 4203 { "WDC WD3200AAJS-00RYA0", "12.01B01", ATA_HORKAGE_NONCQ, },
a520f261 4204 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
7f567620 4205 { "ST9120822AS", "3.CLF", ATA_HORKAGE_NONCQ, },
3fb6589c 4206 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
954bb005 4207 { "ST9160821AS", "3.ALD", ATA_HORKAGE_NONCQ, },
13587960 4208 { "ST9160821AS", "3.CCD", ATA_HORKAGE_NONCQ, },
7f567620
TH
4209 { "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, },
4210 { "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, },
5d6aca8d 4211 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
12850ffe 4212 { "Maxtor 7V300F0", "VA111900", ATA_HORKAGE_NONCQ, },
6919a0a6 4213
16c55b03
TH
4214 /* devices which puke on READ_NATIVE_MAX */
4215 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4216 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4217 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4218 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 4219
93328e11
AC
4220 /* Devices which report 1 sector over size HPA */
4221 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4222 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4223
6919a0a6
AC
4224 /* End Marker */
4225 { }
1da177e4 4226};
2e9edbf8 4227
/* Compare @name against @patt, where a trailing @wildchar in @patt
 * matches any suffix.  Returns 0 on match, non-zero otherwise
 * (strncmp() semantics). */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *wild;
	int cmp_len;

	/* trailing "*\0" wildcard: compare only the prefix before it */
	wild = strchr(patt, wildchar);
	if (wild != NULL && wild[1] == '\0') {
		cmp_len = wild - patt;
	} else {
		cmp_len = strlen(name);
		if (cmp_len == 0)
			return *patt ? -1 : 0;
	}

	return strncmp(patt, name, cmp_len);
}
4250
75683fe7 4251static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4252{
8bfa79fc
TH
4253 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4254 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4255 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4256
8bfa79fc
TH
4257 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4258 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4259
6919a0a6 4260 while (ad->model_num) {
539cc7c7 4261 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
4262 if (ad->model_rev == NULL)
4263 return ad->horkage;
539cc7c7 4264 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4265 return ad->horkage;
f4b15fef 4266 }
6919a0a6 4267 ad++;
f4b15fef 4268 }
1da177e4
LT
4269 return 0;
4270}
4271
6919a0a6
AC
4272static int ata_dma_blacklisted(const struct ata_device *dev)
4273{
4274 /* We don't support polling DMA.
4275 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4276 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4277 */
9af5c9c9 4278 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4279 (dev->flags & ATA_DFLAG_CDB_INTR))
4280 return 1;
75683fe7 4281 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4282}
4283
a6d5a51c
TH
4284/**
4285 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4286 * @dev: Device to compute xfermask for
4287 *
acf356b1
TH
4288 * Compute supported xfermask of @dev and store it in
4289 * dev->*_mask. This function is responsible for applying all
4290 * known limits including host controller limits, device
4291 * blacklist, etc...
a6d5a51c
TH
4292 *
4293 * LOCKING:
4294 * None.
a6d5a51c 4295 */
3373efd8 4296static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 4297{
9af5c9c9
TH
4298 struct ata_link *link = dev->link;
4299 struct ata_port *ap = link->ap;
cca3974e 4300 struct ata_host *host = ap->host;
a6d5a51c 4301 unsigned long xfer_mask;
1da177e4 4302
37deecb5 4303 /* controller modes available */
565083e1
TH
4304 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4305 ap->mwdma_mask, ap->udma_mask);
4306
8343f889 4307 /* drive modes available */
37deecb5
TH
4308 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4309 dev->mwdma_mask, dev->udma_mask);
4310 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 4311
b352e57d
AC
4312 /*
4313 * CFA Advanced TrueIDE timings are not allowed on a shared
4314 * cable
4315 */
4316 if (ata_dev_pair(dev)) {
4317 /* No PIO5 or PIO6 */
4318 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4319 /* No MWDMA3 or MWDMA 4 */
4320 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4321 }
4322
37deecb5
TH
4323 if (ata_dma_blacklisted(dev)) {
4324 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
4325 ata_dev_printk(dev, KERN_WARNING,
4326 "device is on DMA blacklist, disabling DMA\n");
37deecb5 4327 }
a6d5a51c 4328
14d66ab7 4329 if ((host->flags & ATA_HOST_SIMPLEX) &&
2dcb407e 4330 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
4331 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4332 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4333 "other device, disabling DMA\n");
5444a6f4 4334 }
565083e1 4335
e424675f
JG
4336 if (ap->flags & ATA_FLAG_NO_IORDY)
4337 xfer_mask &= ata_pio_mask_no_iordy(dev);
4338
5444a6f4 4339 if (ap->ops->mode_filter)
a76b62ca 4340 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4341
8343f889
RH
4342 /* Apply cable rule here. Don't apply it early because when
4343 * we handle hot plug the cable type can itself change.
4344 * Check this last so that we know if the transfer rate was
4345 * solely limited by the cable.
4346 * Unknown or 80 wire cables reported host side are checked
4347 * drive side as well. Cases where we know a 40wire cable
4348 * is used safely for 80 are not checked here.
4349 */
4350 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4351 /* UDMA/44 or higher would be available */
2dcb407e
JG
4352 if ((ap->cbl == ATA_CBL_PATA40) ||
4353 (ata_drive_40wire(dev->id) &&
4354 (ap->cbl == ATA_CBL_PATA_UNK ||
4355 ap->cbl == ATA_CBL_PATA80))) {
4356 ata_dev_printk(dev, KERN_WARNING,
8343f889
RH
4357 "limited to UDMA/33 due to 40-wire cable\n");
4358 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4359 }
4360
565083e1
TH
4361 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4362 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4363}
4364
1da177e4
LT
4365/**
4366 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4367 * @dev: Device to which command will be sent
4368 *
780a87f7
JG
4369 * Issue SET FEATURES - XFER MODE command to device @dev
4370 * on port @ap.
4371 *
1da177e4 4372 * LOCKING:
0cba632b 4373 * PCI/etc. bus probe sem.
83206a29
TH
4374 *
4375 * RETURNS:
4376 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4377 */
4378
3373efd8 4379static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4380{
a0123703 4381 struct ata_taskfile tf;
83206a29 4382 unsigned int err_mask;
1da177e4
LT
4383
4384 /* set up set-features taskfile */
4385 DPRINTK("set features - xfer mode\n");
4386
464cf177
TH
4387 /* Some controllers and ATAPI devices show flaky interrupt
4388 * behavior after setting xfer mode. Use polling instead.
4389 */
3373efd8 4390 ata_tf_init(dev, &tf);
a0123703
TH
4391 tf.command = ATA_CMD_SET_FEATURES;
4392 tf.feature = SETFEATURES_XFER;
464cf177 4393 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703
TH
4394 tf.protocol = ATA_PROT_NODATA;
4395 tf.nsect = dev->xfer_mode;
1da177e4 4396
2b789108 4397 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
9f45cbd3
KCA
4398
4399 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4400 return err_mask;
4401}
9f45cbd3 4402/**
218f3d30 4403 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4404 * @dev: Device to which command will be sent
4405 * @enable: Whether to enable or disable the feature
218f3d30 4406 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4407 *
4408 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4409 * on port @ap with sector count
9f45cbd3
KCA
4410 *
4411 * LOCKING:
4412 * PCI/etc. bus probe sem.
4413 *
4414 * RETURNS:
4415 * 0 on success, AC_ERR_* mask otherwise.
4416 */
218f3d30
JG
4417static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4418 u8 feature)
9f45cbd3
KCA
4419{
4420 struct ata_taskfile tf;
4421 unsigned int err_mask;
4422
4423 /* set up set-features taskfile */
4424 DPRINTK("set features - SATA features\n");
4425
4426 ata_tf_init(dev, &tf);
4427 tf.command = ATA_CMD_SET_FEATURES;
4428 tf.feature = enable;
4429 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4430 tf.protocol = ATA_PROT_NODATA;
218f3d30 4431 tf.nsect = feature;
9f45cbd3 4432
2b789108 4433 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1da177e4 4434
83206a29
TH
4435 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4436 return err_mask;
1da177e4
LT
4437}
4438
8bf62ece
AL
4439/**
4440 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4441 * @dev: Device to which command will be sent
e2a7f77a
RD
4442 * @heads: Number of heads (taskfile parameter)
4443 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4444 *
4445 * LOCKING:
6aff8f1f
TH
4446 * Kernel thread context (may sleep)
4447 *
4448 * RETURNS:
4449 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4450 */
3373efd8
TH
4451static unsigned int ata_dev_init_params(struct ata_device *dev,
4452 u16 heads, u16 sectors)
8bf62ece 4453{
a0123703 4454 struct ata_taskfile tf;
6aff8f1f 4455 unsigned int err_mask;
8bf62ece
AL
4456
4457 /* Number of sectors per track 1-255. Number of heads 1-16 */
4458 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4459 return AC_ERR_INVALID;
8bf62ece
AL
4460
4461 /* set up init dev params taskfile */
4462 DPRINTK("init dev params \n");
4463
3373efd8 4464 ata_tf_init(dev, &tf);
a0123703
TH
4465 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4466 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4467 tf.protocol = ATA_PROT_NODATA;
4468 tf.nsect = sectors;
4469 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4470
2b789108 4471 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4472 /* A clean abort indicates an original or just out of spec drive
4473 and we should continue as we issue the setup based on the
4474 drive reported working geometry */
4475 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4476 err_mask = 0;
8bf62ece 4477
6aff8f1f
TH
4478 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4479 return err_mask;
8bf62ece
AL
4480}
4481
1da177e4 4482/**
0cba632b
JG
4483 * ata_sg_clean - Unmap DMA memory associated with command
4484 * @qc: Command containing DMA memory to be released
4485 *
4486 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4487 *
4488 * LOCKING:
cca3974e 4489 * spin_lock_irqsave(host lock)
1da177e4 4490 */
70e6ad0c 4491void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4492{
4493 struct ata_port *ap = qc->ap;
cedc9a47 4494 struct scatterlist *sg = qc->__sg;
1da177e4 4495 int dir = qc->dma_dir;
cedc9a47 4496 void *pad_buf = NULL;
1da177e4 4497
a4631474
TH
4498 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4499 WARN_ON(sg == NULL);
1da177e4
LT
4500
4501 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 4502 WARN_ON(qc->n_elem > 1);
1da177e4 4503
2c13b7ce 4504 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4505
cedc9a47
JG
4506 /* if we padded the buffer out to 32-bit bound, and data
4507 * xfer direction is from-device, we must copy from the
4508 * pad buffer back into the supplied buffer
4509 */
4510 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4511 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4512
4513 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 4514 if (qc->n_elem)
2f1f610b 4515 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47 4516 /* restore last sg */
87260216 4517 sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
cedc9a47
JG
4518 if (pad_buf) {
4519 struct scatterlist *psg = &qc->pad_sgent;
45711f1a 4520 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
cedc9a47 4521 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 4522 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4523 }
4524 } else {
2e242fa9 4525 if (qc->n_elem)
2f1f610b 4526 dma_unmap_single(ap->dev,
e1410f2d
JG
4527 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4528 dir);
cedc9a47
JG
4529 /* restore sg */
4530 sg->length += qc->pad_len;
4531 if (pad_buf)
4532 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4533 pad_buf, qc->pad_len);
4534 }
1da177e4
LT
4535
4536 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 4537 qc->__sg = NULL;
1da177e4
LT
4538}
4539
4540/**
4541 * ata_fill_sg - Fill PCI IDE PRD table
4542 * @qc: Metadata associated with taskfile to be transferred
4543 *
780a87f7
JG
4544 * Fill PCI IDE PRD (scatter-gather) table with segments
4545 * associated with the current disk command.
4546 *
1da177e4 4547 * LOCKING:
cca3974e 4548 * spin_lock_irqsave(host lock)
1da177e4
LT
4549 *
4550 */
4551static void ata_fill_sg(struct ata_queued_cmd *qc)
4552{
1da177e4 4553 struct ata_port *ap = qc->ap;
cedc9a47
JG
4554 struct scatterlist *sg;
4555 unsigned int idx;
1da177e4 4556
a4631474 4557 WARN_ON(qc->__sg == NULL);
f131883e 4558 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4559
4560 idx = 0;
cedc9a47 4561 ata_for_each_sg(sg, qc) {
1da177e4
LT
4562 u32 addr, offset;
4563 u32 sg_len, len;
4564
4565 /* determine if physical DMA addr spans 64K boundary.
4566 * Note h/w doesn't support 64-bit, so we unconditionally
4567 * truncate dma_addr_t to u32.
4568 */
4569 addr = (u32) sg_dma_address(sg);
4570 sg_len = sg_dma_len(sg);
4571
4572 while (sg_len) {
4573 offset = addr & 0xffff;
4574 len = sg_len;
4575 if ((offset + sg_len) > 0x10000)
4576 len = 0x10000 - offset;
4577
4578 ap->prd[idx].addr = cpu_to_le32(addr);
4579 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4580 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4581
4582 idx++;
4583 sg_len -= len;
4584 addr += len;
4585 }
4586 }
4587
4588 if (idx)
4589 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4590}
b9a4197e 4591
d26fc955
AC
4592/**
4593 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4594 * @qc: Metadata associated with taskfile to be transferred
4595 *
4596 * Fill PCI IDE PRD (scatter-gather) table with segments
4597 * associated with the current disk command. Perform the fill
4598 * so that we avoid writing any length 64K records for
4599 * controllers that don't follow the spec.
4600 *
4601 * LOCKING:
4602 * spin_lock_irqsave(host lock)
4603 *
4604 */
4605static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4606{
4607 struct ata_port *ap = qc->ap;
4608 struct scatterlist *sg;
4609 unsigned int idx;
4610
4611 WARN_ON(qc->__sg == NULL);
4612 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4613
4614 idx = 0;
4615 ata_for_each_sg(sg, qc) {
4616 u32 addr, offset;
4617 u32 sg_len, len, blen;
4618
2dcb407e 4619 /* determine if physical DMA addr spans 64K boundary.
d26fc955
AC
4620 * Note h/w doesn't support 64-bit, so we unconditionally
4621 * truncate dma_addr_t to u32.
4622 */
4623 addr = (u32) sg_dma_address(sg);
4624 sg_len = sg_dma_len(sg);
4625
4626 while (sg_len) {
4627 offset = addr & 0xffff;
4628 len = sg_len;
4629 if ((offset + sg_len) > 0x10000)
4630 len = 0x10000 - offset;
4631
4632 blen = len & 0xffff;
4633 ap->prd[idx].addr = cpu_to_le32(addr);
4634 if (blen == 0) {
4635 /* Some PATA chipsets like the CS5530 can't
4636 cope with 0x0000 meaning 64K as the spec says */
4637 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4638 blen = 0x8000;
4639 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4640 }
4641 ap->prd[idx].flags_len = cpu_to_le32(blen);
4642 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4643
4644 idx++;
4645 sg_len -= len;
4646 addr += len;
4647 }
4648 }
4649
4650 if (idx)
4651 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4652}
4653
1da177e4
LT
4654/**
4655 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4656 * @qc: Metadata associated with taskfile to check
4657 *
780a87f7
JG
4658 * Allow low-level driver to filter ATA PACKET commands, returning
4659 * a status indicating whether or not it is OK to use DMA for the
4660 * supplied PACKET command.
4661 *
1da177e4 4662 * LOCKING:
cca3974e 4663 * spin_lock_irqsave(host lock)
0cba632b 4664 *
1da177e4
LT
4665 * RETURNS: 0 when ATAPI DMA can be used
4666 * nonzero otherwise
4667 */
4668int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4669{
4670 struct ata_port *ap = qc->ap;
b9a4197e
TH
4671
4672 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4673 * few ATAPI devices choke on such DMA requests.
4674 */
4675 if (unlikely(qc->nbytes & 15))
4676 return 1;
6f23a31d 4677
1da177e4 4678 if (ap->ops->check_atapi_dma)
b9a4197e 4679 return ap->ops->check_atapi_dma(qc);
1da177e4 4680
b9a4197e 4681 return 0;
1da177e4 4682}
b9a4197e 4683
31cc23b3
TH
4684/**
4685 * ata_std_qc_defer - Check whether a qc needs to be deferred
4686 * @qc: ATA command in question
4687 *
4688 * Non-NCQ commands cannot run with any other command, NCQ or
4689 * not. As upper layer only knows the queue depth, we are
4690 * responsible for maintaining exclusion. This function checks
4691 * whether a new command @qc can be issued.
4692 *
4693 * LOCKING:
4694 * spin_lock_irqsave(host lock)
4695 *
4696 * RETURNS:
4697 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4698 */
4699int ata_std_qc_defer(struct ata_queued_cmd *qc)
4700{
4701 struct ata_link *link = qc->dev->link;
4702
4703 if (qc->tf.protocol == ATA_PROT_NCQ) {
4704 if (!ata_tag_valid(link->active_tag))
4705 return 0;
4706 } else {
4707 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4708 return 0;
4709 }
4710
4711 return ATA_DEFER_LINK;
4712}
4713
1da177e4
LT
4714/**
4715 * ata_qc_prep - Prepare taskfile for submission
4716 * @qc: Metadata associated with taskfile to be prepared
4717 *
780a87f7
JG
4718 * Prepare ATA taskfile for submission.
4719 *
1da177e4 4720 * LOCKING:
cca3974e 4721 * spin_lock_irqsave(host lock)
1da177e4
LT
4722 */
4723void ata_qc_prep(struct ata_queued_cmd *qc)
4724{
4725 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4726 return;
4727
4728 ata_fill_sg(qc);
4729}
4730
d26fc955
AC
4731/**
4732 * ata_dumb_qc_prep - Prepare taskfile for submission
4733 * @qc: Metadata associated with taskfile to be prepared
4734 *
4735 * Prepare ATA taskfile for submission.
4736 *
4737 * LOCKING:
4738 * spin_lock_irqsave(host lock)
4739 */
4740void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4741{
4742 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4743 return;
4744
4745 ata_fill_sg_dumb(qc);
4746}
4747
/* ->qc_prep stub for controllers that need no PRD setup. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4749
0cba632b
JG
4750/**
4751 * ata_sg_init_one - Associate command with memory buffer
4752 * @qc: Command to be associated
4753 * @buf: Memory buffer
4754 * @buflen: Length of memory buffer, in bytes.
4755 *
4756 * Initialize the data-related elements of queued_cmd @qc
4757 * to point to a single memory buffer, @buf of byte length @buflen.
4758 *
4759 * LOCKING:
cca3974e 4760 * spin_lock_irqsave(host lock)
0cba632b
JG
4761 */
4762
1da177e4
LT
4763void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4764{
1da177e4
LT
4765 qc->flags |= ATA_QCFLAG_SINGLE;
4766
cedc9a47 4767 qc->__sg = &qc->sgent;
1da177e4 4768 qc->n_elem = 1;
cedc9a47 4769 qc->orig_n_elem = 1;
1da177e4 4770 qc->buf_virt = buf;
233277ca 4771 qc->nbytes = buflen;
87260216 4772 qc->cursg = qc->__sg;
1da177e4 4773
61c0596c 4774 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4775}
4776
0cba632b
JG
4777/**
4778 * ata_sg_init - Associate command with scatter-gather table.
4779 * @qc: Command to be associated
4780 * @sg: Scatter-gather table.
4781 * @n_elem: Number of elements in s/g table.
4782 *
4783 * Initialize the data-related elements of queued_cmd @qc
4784 * to point to a scatter-gather table @sg, containing @n_elem
4785 * elements.
4786 *
4787 * LOCKING:
cca3974e 4788 * spin_lock_irqsave(host lock)
0cba632b
JG
4789 */
4790
1da177e4
LT
4791void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4792 unsigned int n_elem)
4793{
4794 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4795 qc->__sg = sg;
1da177e4 4796 qc->n_elem = n_elem;
cedc9a47 4797 qc->orig_n_elem = n_elem;
87260216 4798 qc->cursg = qc->__sg;
1da177e4
LT
4799}
4800
4801/**
0cba632b
JG
4802 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4803 * @qc: Command with memory buffer to be mapped.
4804 *
4805 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
4806 *
4807 * LOCKING:
cca3974e 4808 * spin_lock_irqsave(host lock)
1da177e4
LT
4809 *
4810 * RETURNS:
0cba632b 4811 * Zero on success, negative on error.
1da177e4
LT
4812 */
4813
4814static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4815{
4816 struct ata_port *ap = qc->ap;
4817 int dir = qc->dma_dir;
cedc9a47 4818 struct scatterlist *sg = qc->__sg;
1da177e4 4819 dma_addr_t dma_address;
2e242fa9 4820 int trim_sg = 0;
1da177e4 4821
cedc9a47
JG
4822 /* we must lengthen transfers to end on a 32-bit boundary */
4823 qc->pad_len = sg->length & 3;
4824 if (qc->pad_len) {
4825 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4826 struct scatterlist *psg = &qc->pad_sgent;
4827
a4631474 4828 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4829
4830 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4831
4832 if (qc->tf.flags & ATA_TFLAG_WRITE)
4833 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4834 qc->pad_len);
4835
4836 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4837 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4838 /* trim sg */
4839 sg->length -= qc->pad_len;
2e242fa9
TH
4840 if (sg->length == 0)
4841 trim_sg = 1;
cedc9a47
JG
4842
4843 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4844 sg->length, qc->pad_len);
4845 }
4846
2e242fa9
TH
4847 if (trim_sg) {
4848 qc->n_elem--;
e1410f2d
JG
4849 goto skip_map;
4850 }
4851
2f1f610b 4852 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 4853 sg->length, dir);
537a95d9
TH
4854 if (dma_mapping_error(dma_address)) {
4855 /* restore sg */
4856 sg->length += qc->pad_len;
1da177e4 4857 return -1;
537a95d9 4858 }
1da177e4
LT
4859
4860 sg_dma_address(sg) = dma_address;
32529e01 4861 sg_dma_len(sg) = sg->length;
1da177e4 4862
2e242fa9 4863skip_map:
1da177e4
LT
4864 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4865 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4866
4867 return 0;
4868}
4869
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *	Because some controllers require transfers to end on a 32-bit
 *	boundary, a short final element is trimmed and replaced by the
 *	per-tag pad buffer (qc->pad_sgent), which carries the trailing
 *	qc->pad_len bytes.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* each qc owns one ATA_DMA_PAD_SZ slot, indexed by tag */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI transfers may be misaligned like this */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		sg_init_table(psg, 1);
		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
				qc->pad_len, offset_in_page(offset));

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* for writes, stage the trailing bytes into the
			 * pad buffer now; reads are copied back by
			 * ata_sg_clean() after completion.
			 */
			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* if the last element was trimmed to nothing, don't map it */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
4955
0baab86b 4956/**
c893a3ae 4957 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4958 * @buf: Buffer to swap
4959 * @buf_words: Number of 16-bit words in buffer.
4960 *
4961 * Swap halves of 16-bit words if needed to convert from
4962 * little-endian byte order to native cpu byte order, or
4963 * vice-versa.
4964 *
4965 * LOCKING:
6f0ef4fa 4966 * Inherited from caller.
0baab86b 4967 */
1da177e4
LT
4968void swap_buf_le16(u16 *buf, unsigned int buf_words)
4969{
4970#ifdef __BIG_ENDIAN
4971 unsigned int i;
4972
4973 for (i = 0; i < buf_words; i++)
4974 buf[i] = le16_to_cpu(buf[i]);
4975#endif /* __BIG_ENDIAN */
4976}
4977
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO,
 *	16 bits at a time.  An odd trailing byte is handled by
 *	transferring one extra padded word.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->link->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		/* zero-initialized so the pad byte written out is 0 */
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			/* place the odd byte in a full word and write it */
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			/* read a full word, keep only the first byte */
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
5016
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.  Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long irq_state;

	/* shut out interrupts for the duration of the PIO transfer */
	local_irq_save(irq_state);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(irq_state);
}
5038
5039
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device,
 *	advancing qc->curbytes/qc->cursg_ofs and stepping to the next
 *	scatterlist element when the current one is exhausted.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* this is the final sector -> the HSM moves to LAST afterwards */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page is always mapped; no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* current sg element exhausted -> advance to the next one */
	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}
1da177e4 5095
07f6f7d0 5096/**
5a5dbd18 5097 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
5098 * @qc: Command on going
5099 *
5a5dbd18 5100 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
5101 * ATA device for the DRQ request.
5102 *
5103 * LOCKING:
5104 * Inherited from caller.
5105 */
1da177e4 5106
07f6f7d0
AL
5107static void ata_pio_sectors(struct ata_queued_cmd *qc)
5108{
5109 if (is_multi_taskfile(&qc->tf)) {
5110 /* READ/WRITE MULTIPLE */
5111 unsigned int nsect;
5112
587005de 5113 WARN_ON(qc->dev->multi_count == 0);
1da177e4 5114
5a5dbd18 5115 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 5116 qc->dev->multi_count);
07f6f7d0
AL
5117 while (nsect--)
5118 ata_pio_sector(qc);
5119 } else
5120 ata_pio_sector(qc);
4cc980b3
AL
5121
5122 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
5123}
5124
c71c1857
AL
5125/**
5126 * atapi_send_cdb - Write CDB bytes to hardware
5127 * @ap: Port to which ATAPI device is attached.
5128 * @qc: Taskfile currently active
5129 *
5130 * When device has indicated its readiness to accept
5131 * a CDB, this function is called. Send the CDB.
5132 *
5133 * LOCKING:
5134 * caller.
5135 */
5136
5137static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5138{
5139 /* send SCSI cdb */
5140 DPRINTK("send cdb\n");
db024d53 5141 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 5142
a6b2c5d4 5143 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
5144 ata_altstatus(ap); /* flush */
5145
5146 switch (qc->tf.protocol) {
5147 case ATA_PROT_ATAPI:
5148 ap->hsm_task_state = HSM_ST;
5149 break;
5150 case ATA_PROT_ATAPI_NODATA:
5151 ap->hsm_task_state = HSM_ST_LAST;
5152 break;
5153 case ATA_PROT_ATAPI_DMA:
5154 ap->hsm_task_state = HSM_ST_LAST;
5155 /* initiate bmdma */
5156 ap->ops->bmdma_start(qc);
5157 break;
5158 }
1da177e4
LT
5159}
5160
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device, walking the
 *	scatterlist one page-bounded chunk at a time.  If the device
 *	requests more bytes than the scatterlist holds, the excess is
 *	drained (read) or zero-padded (write) so the byte count
 *	register is always satisfied.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;
	int no_more_sg = 0;

	/* this chunk satisfies the whole command -> LAST afterwards */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(no_more_sg)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = qc->cursg;

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page is permanently mapped */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		/* remember when we step past the final element so the
		 * drain/pad path above can take over on the next pass
		 */
		if (qc->cursg == lsg)
			no_more_sg = 1;

		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}
5260
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Read the interrupt reason and byte count from the device,
 *	validate them against the expected transfer direction, then
 *	transfer that many bytes via __atapi_pio_bytes().  On a
 *	protocol violation the command is failed with AC_ERR_HSM.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* byte count is split across the LBA mid/high registers */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);
	ata_altstatus(ap); /* flush */

	return;

err_out:
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
5311
5312/**
c234fb00
AL
5313 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5314 * @ap: the target ata_port
5315 * @qc: qc on going
1da177e4 5316 *
c234fb00
AL
5317 * RETURNS:
5318 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5319 */
c234fb00
AL
5320
5321static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5322{
c234fb00
AL
5323 if (qc->tf.flags & ATA_TFLAG_POLLING)
5324 return 1;
1da177e4 5325
c234fb00
AL
5326 if (ap->hsm_task_state == HSM_ST_FIRST) {
5327 if (qc->tf.protocol == ATA_PROT_PIO &&
5328 (qc->tf.flags & ATA_TFLAG_WRITE))
5329 return 1;
1da177e4 5330
c234fb00
AL
5331 if (is_atapi_taskfile(&qc->tf) &&
5332 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5333 return 1;
fe79e683
AL
5334 }
5335
c234fb00
AL
5336 return 0;
5337}
1da177e4 5338
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.  With new-style
 *	EH, an HSM violation freezes the port instead of completing
 *	the qc; without EH the qc is completed unconditionally.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable interrupts before completing */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-style EH: always complete, no freezing */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5388
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Advance the PIO/ATAPI host state machine one step based on
 *	@status.  Error conditions route through HSM_ST_ERR; normal
 *	completion routes through HSM_ST_LAST.
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrputed */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5622
/* Workqueue entry point that drives the PIO HSM for one qc.
 * Polls device status, then repeatedly steps ata_hsm_move() for
 * as long as it asks to be polled again.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy; re-queue ourselves after a short pause */
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
5660
1da177e4
LT
5661/**
5662 * ata_qc_new - Request an available ATA command, for queueing
5663 * @ap: Port associated with device @dev
5664 * @dev: Device from whom we request an available command structure
5665 *
5666 * LOCKING:
0cba632b 5667 * None.
1da177e4
LT
5668 */
5669
5670static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5671{
5672 struct ata_queued_cmd *qc = NULL;
5673 unsigned int i;
5674
e3180499 5675 /* no command while frozen */
b51e9e5d 5676 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5677 return NULL;
5678
2ab7db1f
TH
5679 /* the last tag is reserved for internal command. */
5680 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5681 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5682 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5683 break;
5684 }
5685
5686 if (qc)
5687 qc->tag = i;
5688
5689 return qc;
5690}
5691
5692/**
5693 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5694 * @dev: Device from whom we request an available command structure
5695 *
5696 * LOCKING:
0cba632b 5697 * None.
1da177e4
LT
5698 */
5699
3373efd8 5700struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5701{
9af5c9c9 5702 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5703 struct ata_queued_cmd *qc;
5704
5705 qc = ata_qc_new(ap);
5706 if (qc) {
1da177e4
LT
5707 qc->scsicmd = NULL;
5708 qc->ap = ap;
5709 qc->dev = dev;
1da177e4 5710
2c13b7ce 5711 ata_qc_reinit(qc);
1da177e4
LT
5712 }
5713
5714 return qc;
5715}
5716
1da177e4
LT
5717/**
5718 * ata_qc_free - free unused ata_queued_cmd
5719 * @qc: Command to complete
5720 *
5721 * Designed to free unused ata_queued_cmd object
5722 * in case something prevents using it.
5723 *
5724 * LOCKING:
cca3974e 5725 * spin_lock_irqsave(host lock)
1da177e4
LT
5726 */
5727void ata_qc_free(struct ata_queued_cmd *qc)
5728{
4ba946e9
TH
5729 struct ata_port *ap = qc->ap;
5730 unsigned int tag;
5731
a4631474 5732 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5733
4ba946e9
TH
5734 qc->flags = 0;
5735 tag = qc->tag;
5736 if (likely(ata_tag_valid(tag))) {
4ba946e9 5737 qc->tag = ATA_TAG_POISON;
6cec4a39 5738 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5739 }
1da177e4
LT
5740}
5741
/* Low-level qc completion: unmap DMA, retire the tag from the
 * link/port active bookkeeping and invoke the completion callback.
 * Callers hold the host lock.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		/* NCQ: clear this tag's bit; link idle when none remain */
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		/* non-NCQ: at most one active command per link */
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5778
39599a53
TH
5779static void fill_result_tf(struct ata_queued_cmd *qc)
5780{
5781 struct ata_port *ap = qc->ap;
5782
39599a53 5783 qc->result_tf.flags = qc->tf.flags;
4742d54f 5784 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5785}
5786
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *	Failed qcs are routed to the error handler (new EH) instead
 *	of being completed directly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* internal commands are completed even on failure;
			 * everything else goes to EH
			 */
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5870
dedaf2b0
TH
5871/**
5872 * ata_qc_complete_multiple - Complete multiple qcs successfully
5873 * @ap: port in question
5874 * @qc_active: new qc_active mask
5875 * @finish_qc: LLDD callback invoked before completing a qc
5876 *
5877 * Complete in-flight commands. This functions is meant to be
5878 * called from low-level driver's interrupt routine to complete
5879 * requests normally. ap->qc_active and @qc_active is compared
5880 * and commands are completed accordingly.
5881 *
5882 * LOCKING:
cca3974e 5883 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5884 *
5885 * RETURNS:
5886 * Number of completed commands on success, -errno otherwise.
5887 */
5888int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5889 void (*finish_qc)(struct ata_queued_cmd *))
5890{
5891 int nr_done = 0;
5892 u32 done_mask;
5893 int i;
5894
5895 done_mask = ap->qc_active ^ qc_active;
5896
5897 if (unlikely(done_mask & qc_active)) {
5898 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5899 "(%08x->%08x)\n", ap->qc_active, qc_active);
5900 return -EINVAL;
5901 }
5902
5903 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5904 struct ata_queued_cmd *qc;
5905
5906 if (!(done_mask & (1 << i)))
5907 continue;
5908
5909 if ((qc = ata_qc_from_tag(ap, i))) {
5910 if (finish_qc)
5911 finish_qc(qc);
5912 ata_qc_complete(qc);
5913 nr_done++;
5914 }
5915 }
5916
5917 return nr_done;
5918}
5919
1da177e4
LT
5920static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5921{
5922 struct ata_port *ap = qc->ap;
5923
5924 switch (qc->tf.protocol) {
3dc1d881 5925 case ATA_PROT_NCQ:
1da177e4
LT
5926 case ATA_PROT_DMA:
5927 case ATA_PROT_ATAPI_DMA:
5928 return 1;
5929
5930 case ATA_PROT_ATAPI:
5931 case ATA_PROT_PIO:
1da177e4
LT
5932 if (ap->flags & ATA_FLAG_PIO_DMA)
5933 return 1;
5934
5935 /* fall through */
5936
5937 default:
5938 return 0;
5939 }
5940
5941 /* never reached */
5942}
5943
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	On failure the qc is completed with an error mask set;
 *	this function itself returns nothing.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* account for the new command in the per-link tag bookkeeping;
	 * nr_active_links counts links with any command outstanding */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* set up DMA mapping (scatter/gather or single buffer) if the
	 * protocol needs it; otherwise make sure DMAMAP is clear */
	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	/* if device is sleeping, schedule softreset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	/* S/G setup failed: drop the mapping flag and fail the qc */
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
6016
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command; each protocol sets the initial HSM
	 * (host state machine) state and decides whether the PIO
	 * task needs to be queued for polling */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ap->ops->bmdma_start(qc);	/* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
6148
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus first: reading it does not clear INTRQ */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	/* advance the host state machine with the freshly read status */
	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
6250
6251/**
6252 * ata_interrupt - Default ATA host interrupt handler
0cba632b 6253 * @irq: irq line (unused)
cca3974e 6254 * @dev_instance: pointer to our ata_host information structure
1da177e4 6255 *
0cba632b
JG
6256 * Default interrupt handler for PCI IDE devices. Calls
6257 * ata_host_intr() for each port that is not disabled.
6258 *
1da177e4 6259 * LOCKING:
cca3974e 6260 * Obtains host lock during operation.
1da177e4
LT
6261 *
6262 * RETURNS:
0cba632b 6263 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
6264 */
6265
2dcb407e 6266irqreturn_t ata_interrupt(int irq, void *dev_instance)
1da177e4 6267{
cca3974e 6268 struct ata_host *host = dev_instance;
1da177e4
LT
6269 unsigned int i;
6270 unsigned int handled = 0;
6271 unsigned long flags;
6272
6273 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 6274 spin_lock_irqsave(&host->lock, flags);
1da177e4 6275
cca3974e 6276 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
6277 struct ata_port *ap;
6278
cca3974e 6279 ap = host->ports[i];
c1389503 6280 if (ap &&
029f5468 6281 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
6282 struct ata_queued_cmd *qc;
6283
9af5c9c9 6284 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 6285 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 6286 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
6287 handled |= ata_host_intr(ap, qc);
6288 }
6289 }
6290
cca3974e 6291 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
6292
6293 return IRQ_RETVAL(handled);
6294}
6295
34bf2170
TH
6296/**
6297 * sata_scr_valid - test whether SCRs are accessible
936fd732 6298 * @link: ATA link to test SCR accessibility for
34bf2170 6299 *
936fd732 6300 * Test whether SCRs are accessible for @link.
34bf2170
TH
6301 *
6302 * LOCKING:
6303 * None.
6304 *
6305 * RETURNS:
6306 * 1 if SCRs are accessible, 0 otherwise.
6307 */
936fd732 6308int sata_scr_valid(struct ata_link *link)
34bf2170 6309{
936fd732
TH
6310 struct ata_port *ap = link->ap;
6311
a16abc0b 6312 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
6313}
6314
6315/**
6316 * sata_scr_read - read SCR register of the specified port
936fd732 6317 * @link: ATA link to read SCR for
34bf2170
TH
6318 * @reg: SCR to read
6319 * @val: Place to store read value
6320 *
936fd732 6321 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
6322 * guaranteed to succeed if @link is ap->link, the cable type of
6323 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6324 *
6325 * LOCKING:
633273a3 6326 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6327 *
6328 * RETURNS:
6329 * 0 on success, negative errno on failure.
6330 */
936fd732 6331int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 6332{
633273a3
TH
6333 if (ata_is_host_link(link)) {
6334 struct ata_port *ap = link->ap;
936fd732 6335
633273a3
TH
6336 if (sata_scr_valid(link))
6337 return ap->ops->scr_read(ap, reg, val);
6338 return -EOPNOTSUPP;
6339 }
6340
6341 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
6342}
6343
6344/**
6345 * sata_scr_write - write SCR register of the specified port
936fd732 6346 * @link: ATA link to write SCR for
34bf2170
TH
6347 * @reg: SCR to write
6348 * @val: value to write
6349 *
936fd732 6350 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
6351 * guaranteed to succeed if @link is ap->link, the cable type of
6352 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6353 *
6354 * LOCKING:
633273a3 6355 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6356 *
6357 * RETURNS:
6358 * 0 on success, negative errno on failure.
6359 */
936fd732 6360int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 6361{
633273a3
TH
6362 if (ata_is_host_link(link)) {
6363 struct ata_port *ap = link->ap;
6364
6365 if (sata_scr_valid(link))
6366 return ap->ops->scr_write(ap, reg, val);
6367 return -EOPNOTSUPP;
6368 }
936fd732 6369
633273a3 6370 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6371}
6372
6373/**
6374 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6375 * @link: ATA link to write SCR for
34bf2170
TH
6376 * @reg: SCR to write
6377 * @val: value to write
6378 *
6379 * This function is identical to sata_scr_write() except that this
6380 * function performs flush after writing to the register.
6381 *
6382 * LOCKING:
633273a3 6383 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6384 *
6385 * RETURNS:
6386 * 0 on success, negative errno on failure.
6387 */
936fd732 6388int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6389{
633273a3
TH
6390 if (ata_is_host_link(link)) {
6391 struct ata_port *ap = link->ap;
6392 int rc;
da3dbb17 6393
633273a3
TH
6394 if (sata_scr_valid(link)) {
6395 rc = ap->ops->scr_write(ap, reg, val);
6396 if (rc == 0)
6397 rc = ap->ops->scr_read(ap, reg, &val);
6398 return rc;
6399 }
6400 return -EOPNOTSUPP;
34bf2170 6401 }
633273a3
TH
6402
6403 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6404}
6405
6406/**
936fd732
TH
6407 * ata_link_online - test whether the given link is online
6408 * @link: ATA link to test
34bf2170 6409 *
936fd732
TH
6410 * Test whether @link is online. Note that this function returns
6411 * 0 if online status of @link cannot be obtained, so
6412 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6413 *
6414 * LOCKING:
6415 * None.
6416 *
6417 * RETURNS:
6418 * 1 if the port online status is available and online.
6419 */
936fd732 6420int ata_link_online(struct ata_link *link)
34bf2170
TH
6421{
6422 u32 sstatus;
6423
936fd732
TH
6424 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6425 (sstatus & 0xf) == 0x3)
34bf2170
TH
6426 return 1;
6427 return 0;
6428}
6429
6430/**
936fd732
TH
6431 * ata_link_offline - test whether the given link is offline
6432 * @link: ATA link to test
34bf2170 6433 *
936fd732
TH
6434 * Test whether @link is offline. Note that this function
6435 * returns 0 if offline status of @link cannot be obtained, so
6436 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6437 *
6438 * LOCKING:
6439 * None.
6440 *
6441 * RETURNS:
6442 * 1 if the port offline status is available and offline.
6443 */
936fd732 6444int ata_link_offline(struct ata_link *link)
34bf2170
TH
6445{
6446 u32 sstatus;
6447
936fd732
TH
6448 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6449 (sstatus & 0xf) != 0x3)
34bf2170
TH
6450 return 1;
6451 return 0;
6452}
0baab86b 6453
77b08fb5 6454int ata_flush_cache(struct ata_device *dev)
9b847548 6455{
977e6b9f 6456 unsigned int err_mask;
9b847548
JA
6457 u8 cmd;
6458
6459 if (!ata_try_flush_cache(dev))
6460 return 0;
6461
6fc49adb 6462 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6463 cmd = ATA_CMD_FLUSH_EXT;
6464 else
6465 cmd = ATA_CMD_FLUSH;
6466
4f34337b
AC
6467 /* This is wrong. On a failed flush we get back the LBA of the lost
6468 sector and we should (assuming it wasn't aborted as unknown) issue
2dcb407e 6469 a further flush command to continue the writeback until it
4f34337b 6470 does not error */
977e6b9f
TH
6471 err_mask = ata_do_simple_cmd(dev, cmd);
6472 if (err_mask) {
6473 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6474 return -EIO;
6475 }
6476
6477 return 0;
9b847548
JA
6478}
6479
6ffa01d8 6480#ifdef CONFIG_PM
/* Request a PM operation (@mesg with EH @action/@ehi_flags) on every
 * port of @host via the error handler.  When @wait is non-zero, each
 * port's EH is waited for in turn and the first failure is returned.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			/* EH writes its result through ap->pm_result;
			 * rc stays on our stack until EH finishes */
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
6530
6531/**
cca3974e
JG
6532 * ata_host_suspend - suspend host
6533 * @host: host to suspend
500530f6
TH
6534 * @mesg: PM message
6535 *
cca3974e 6536 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6537 * function requests EH to perform PM operations and waits for EH
6538 * to finish.
6539 *
6540 * LOCKING:
6541 * Kernel thread context (may sleep).
6542 *
6543 * RETURNS:
6544 * 0 on success, -errno on failure.
6545 */
cca3974e 6546int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6547{
9666f400 6548 int rc;
500530f6 6549
ca77329f
KCA
6550 /*
6551 * disable link pm on all ports before requesting
6552 * any pm activity
6553 */
6554 ata_lpm_enable(host);
6555
cca3974e 6556 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
9666f400
TH
6557 if (rc == 0)
6558 host->dev->power.power_state = mesg;
500530f6
TH
6559 return rc;
6560}
6561
6562/**
cca3974e
JG
6563 * ata_host_resume - resume host
6564 * @host: host to resume
500530f6 6565 *
cca3974e 6566 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
6567 * function requests EH to perform PM operations and returns.
6568 * Note that all resume operations are performed parallely.
6569 *
6570 * LOCKING:
6571 * Kernel thread context (may sleep).
6572 */
cca3974e 6573void ata_host_resume(struct ata_host *host)
500530f6 6574{
cca3974e
JG
6575 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6576 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6577 host->dev->power.power_state = PMSG_ON;
ca77329f
KCA
6578
6579 /* reenable link pm */
6580 ata_lpm_disable(host);
500530f6 6581}
6ffa01d8 6582#endif
500530f6 6583
c893a3ae
RD
6584/**
6585 * ata_port_start - Set port up for dma.
6586 * @ap: Port to initialize
6587 *
6588 * Called just after data structures for each port are
6589 * initialized. Allocates space for PRD table.
6590 *
6591 * May be used as the port_start() entry in ata_port_operations.
6592 *
6593 * LOCKING:
6594 * Inherited from caller.
6595 */
f0d36efd 6596int ata_port_start(struct ata_port *ap)
1da177e4 6597{
2f1f610b 6598 struct device *dev = ap->dev;
6037d6bb 6599 int rc;
1da177e4 6600
f0d36efd
TH
6601 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6602 GFP_KERNEL);
1da177e4
LT
6603 if (!ap->prd)
6604 return -ENOMEM;
6605
6037d6bb 6606 rc = ata_pad_alloc(ap, dev);
f0d36efd 6607 if (rc)
6037d6bb 6608 return rc;
1da177e4 6609
f0d36efd
TH
6610 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6611 (unsigned long long)ap->prd_dma);
1da177e4
LT
6612 return 0;
6613}
6614
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe only the probe-volatile tail of the structure;
	 * fields before ATA_DEVICE_CLEAR_OFFSET survive re-init */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	/* start with all transfer modes allowed; probing narrows them */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
6649
4fb37a25
TH
6650/**
6651 * ata_link_init - Initialize an ata_link structure
6652 * @ap: ATA port link is attached to
6653 * @link: Link structure to initialize
8989805d 6654 * @pmp: Port multiplier port number
4fb37a25
TH
6655 *
6656 * Initialize @link.
6657 *
6658 * LOCKING:
6659 * Kernel thread context (may sleep)
6660 */
fb7fd614 6661void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6662{
6663 int i;
6664
6665 /* clear everything except for devices */
6666 memset(link, 0, offsetof(struct ata_link, device[0]));
6667
6668 link->ap = ap;
8989805d 6669 link->pmp = pmp;
4fb37a25
TH
6670 link->active_tag = ATA_TAG_POISON;
6671 link->hw_sata_spd_limit = UINT_MAX;
6672
6673 /* can't use iterator, ap isn't initialized yet */
6674 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6675 struct ata_device *dev = &link->device[i];
6676
6677 dev->link = link;
6678 dev->devno = dev - link->device;
6679 ata_dev_init(dev);
6680 }
6681}
6682
6683/**
6684 * sata_link_init_spd - Initialize link->sata_spd_limit
6685 * @link: Link to configure sata_spd_limit for
6686 *
6687 * Initialize @link->[hw_]sata_spd_limit to the currently
6688 * configured value.
6689 *
6690 * LOCKING:
6691 * Kernel thread context (may sleep).
6692 *
6693 * RETURNS:
6694 * 0 on success, -errno on failure.
6695 */
fb7fd614 6696int sata_link_init_spd(struct ata_link *link)
4fb37a25
TH
6697{
6698 u32 scontrol, spd;
6699 int rc;
6700
6701 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6702 if (rc)
6703 return rc;
6704
6705 spd = (scontrol >> 4) & 0xf;
6706 if (spd)
6707 link->hw_sata_spd_limit &= (1 << spd) - 1;
6708
6709 link->sata_spd_limit = link->hw_sata_spd_limit;
6710
6711 return 0;
6712}
6713
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* port starts disabled; probing enables it later */
	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;	/* real id assigned at registration */
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	/* EH / hotplug work items and synchronization primitives */
	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	/* initialize the host link (pmp 0) */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
6773
/* devres release callback for an ATA host: stop all ports and the
 * host, then free per-port resources.  Teardown is two-pass on
 * purpose: every port_stop runs before host_stop, and only after
 * both may the port structures themselves be freed.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	/* pass 1: stop each started port */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
		host->ops->host_stop(host);

	/* pass 2: release per-port memory now that nothing uses it */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap->pmp_link);
		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
6808
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * the +1 leaves a NULL sentinel slot after the last port */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
6873
f5cda257
TH
6874/**
6875 * ata_host_alloc_pinfo - alloc host and init with port_info array
6876 * @dev: generic device this host is associated with
6877 * @ppi: array of ATA port_info to initialize host with
6878 * @n_ports: number of ATA ports attached to this host
6879 *
6880 * Allocate ATA host and initialize with info from @ppi. If NULL
6881 * terminated, @ppi may contain fewer entries than @n_ports. The
6882 * last entry will be used for the remaining ports.
6883 *
6884 * RETURNS:
6885 * Allocate ATA host on success, NULL on failure.
6886 *
6887 * LOCKING:
6888 * Inherited from calling layer (may sleep).
6889 */
6890struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6891 const struct ata_port_info * const * ppi,
6892 int n_ports)
6893{
6894 const struct ata_port_info *pi;
6895 struct ata_host *host;
6896 int i, j;
6897
6898 host = ata_host_alloc(dev, n_ports);
6899 if (!host)
6900 return NULL;
6901
6902 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6903 struct ata_port *ap = host->ports[i];
6904
6905 if (ppi[j])
6906 pi = ppi[j++];
6907
6908 ap->pio_mask = pi->pio_mask;
6909 ap->mwdma_mask = pi->mwdma_mask;
6910 ap->udma_mask = pi->udma_mask;
6911 ap->flags |= pi->flags;
0c88758b 6912 ap->link.flags |= pi->link_flags;
f5cda257
TH
6913 ap->ops = pi->port_ops;
6914
6915 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6916 host->ops = pi->port_ops;
6917 if (!host->private_data && pi->private_data)
6918 host->private_data = pi->private_data;
6919 }
6920
6921 return host;
6922}
6923
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int i, rc;

	/* idempotent: a second call is a no-op */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		ata_eh_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop only the ports already started (indices < i) */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	return rc;
}
6977
/**
 *	ata_sas_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
f3187195 6989/* KILLME - the only user left is ipr */
cca3974e
JG
/* Minimal legacy initializer used by SAS-style drivers that bypass
 * ata_host_alloc(); see the KILLME note above.
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
6998
f3187195
TH
6999/**
7000 * ata_host_register - register initialized ATA host
7001 * @host: ATA host to register
7002 * @sht: template for SCSI host
7003 *
7004 * Register initialized ATA host. @host is allocated using
7005 * ata_host_alloc() and fully initialized by LLD. This function
7006 * starts ports, registers @host with ATA and SCSI layers and
7007 * probe registered devices.
7008 *
7009 * LOCKING:
7010 * Inherited from calling layer (may sleep).
7011 *
7012 * RETURNS:
7013 * 0 on success, -errno otherwise.
7014 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		/* NOTE(review): this rc shadows the outer rc; a bus-probe
		 * failure is deliberately not propagated (see FIXME below).
		 */
		int rc;

		/* probe */
		if (ap->ops->error_handler) {
			/* new-style (EH-capable) path: schedule EH to do
			 * the boot probing and wait for it synchronously
			 */
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			/* probe every possible device on the link */
			ehi->probe_mask =
				(1 << ata_link_max_devices(&ap->link)) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			/* old-style polling probe */
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
		ata_lpm_schedule(ap, ap->pm_policy);
	}

	return 0;
}
7128
f5cda257
TH
7129/**
7130 * ata_host_activate - start host, request IRQ and register it
7131 * @host: target ATA host
7132 * @irq: IRQ to request
7133 * @irq_handler: irq_handler used when requesting IRQ
7134 * @irq_flags: irq_flags used when requesting IRQ
7135 * @sht: scsi_host_template to use when registering the host
7136 *
7137 * After allocating an ATA host and initializing it, most libata
7138 * LLDs perform three steps to activate the host - start host,
7139 * request IRQ and register it. This helper takes necessasry
7140 * arguments and performs the three steps in one go.
7141 *
7142 * LOCKING:
7143 * Inherited from calling layer (may sleep).
7144 *
7145 * RETURNS:
7146 * 0 on success, -errno otherwise.
7147 */
7148int ata_host_activate(struct ata_host *host, int irq,
7149 irq_handler_t irq_handler, unsigned long irq_flags,
7150 struct scsi_host_template *sht)
7151{
cbcdd875 7152 int i, rc;
f5cda257
TH
7153
7154 rc = ata_host_start(host);
7155 if (rc)
7156 return rc;
7157
7158 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7159 dev_driver_string(host->dev), host);
7160 if (rc)
7161 return rc;
7162
cbcdd875
TH
7163 for (i = 0; i < host->n_ports; i++)
7164 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 7165
f5cda257
TH
7166 rc = ata_host_register(host, sht);
7167 /* if failed, just free the IRQ and leave ports alone */
7168 if (rc)
7169 devm_free_irq(host->dev, irq, host);
7170
7171 return rc;
7172}
7173
720ba126
TH
7174/**
7175 * ata_port_detach - Detach ATA port in prepration of device removal
7176 * @ap: ATA port to be detached
7177 *
7178 * Detach all ATA devices and the associated SCSI devices of @ap;
7179 * then, remove the associated SCSI host. @ap is guaranteed to
7180 * be quiescent on return from this function.
7181 *
7182 * LOCKING:
7183 * Kernel thread context (may sleep).
7184 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* ports without EH (old-style drivers) go straight to SCSI removal */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	/* make sure no deferred hotplug work fires after teardown */
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
7228
0529c159
TH
7229/**
7230 * ata_host_detach - Detach all ports of an ATA host
7231 * @host: Host to detach
7232 *
7233 * Detach all ports of @host.
7234 *
7235 * LOCKING:
7236 * Kernel thread context (may sleep).
7237 */
7238void ata_host_detach(struct ata_host *host)
7239{
7240 int i;
7241
7242 for (i = 0; i < host->n_ports; i++)
7243 ata_port_detach(host->ports[i]);
7244}
7245
1da177e4
LT
7246/**
7247 * ata_std_ports - initialize ioaddr with standard port offsets.
7248 * @ioaddr: IO address structure to be initialized
0baab86b
EF
7249 *
7250 * Utility function which initializes data_addr, error_addr,
7251 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7252 * device_addr, status_addr, and command_addr to standard offsets
7253 * relative to cmd_addr.
7254 *
7255 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 7256 */
0baab86b 7257
1da177e4
LT
7258void ata_std_ports(struct ata_ioports *ioaddr)
7259{
7260 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7261 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7262 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7263 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7264 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7265 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7266 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7267 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7268 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7269 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7270}
7271
0baab86b 7272
374b1873
JG
7273#ifdef CONFIG_PCI
7274
1da177e4
LT
7275/**
7276 * ata_pci_remove_one - PCI layer callback for device removal
7277 * @pdev: PCI device that was removed
7278 *
b878ca5d
TH
7279 * PCI layer indicates to libata via this hook that hot-unplug or
7280 * module unload event has occurred. Detach all ports. Resource
7281 * release is handled via devres.
1da177e4
LT
7282 *
7283 * LOCKING:
7284 * Inherited from PCI layer (may sleep).
7285 */
f0d36efd 7286void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 7287{
2855568b 7288 struct device *dev = &pdev->dev;
cca3974e 7289 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 7290
b878ca5d 7291 ata_host_detach(host);
1da177e4
LT
7292}
7293
7294/* move to PCI subsystem */
057ace5e 7295int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
7296{
7297 unsigned long tmp = 0;
7298
7299 switch (bits->width) {
7300 case 1: {
7301 u8 tmp8 = 0;
7302 pci_read_config_byte(pdev, bits->reg, &tmp8);
7303 tmp = tmp8;
7304 break;
7305 }
7306 case 2: {
7307 u16 tmp16 = 0;
7308 pci_read_config_word(pdev, bits->reg, &tmp16);
7309 tmp = tmp16;
7310 break;
7311 }
7312 case 4: {
7313 u32 tmp32 = 0;
7314 pci_read_config_dword(pdev, bits->reg, &tmp32);
7315 tmp = tmp32;
7316 break;
7317 }
7318
7319 default:
7320 return -EINVAL;
7321 }
7322
7323 tmp &= bits->mask;
7324
7325 return (tmp == bits->val) ? 1 : 0;
7326}
9b847548 7327
6ffa01d8 7328#ifdef CONFIG_PM
/* PCI-side half of suspend: save config space, disable the device and,
 * for a real suspend event, drop it into D3hot.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	/* only power down for actual suspend, not other PM events */
	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}
7337
553c4aa6 7338int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 7339{
553c4aa6
TH
7340 int rc;
7341
9b847548
JA
7342 pci_set_power_state(pdev, PCI_D0);
7343 pci_restore_state(pdev);
553c4aa6 7344
b878ca5d 7345 rc = pcim_enable_device(pdev);
553c4aa6
TH
7346 if (rc) {
7347 dev_printk(KERN_ERR, &pdev->dev,
7348 "failed to enable device after resume (%d)\n", rc);
7349 return rc;
7350 }
7351
9b847548 7352 pci_set_master(pdev);
553c4aa6 7353 return 0;
500530f6
TH
7354}
7355
3c5100c1 7356int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 7357{
cca3974e 7358 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
7359 int rc = 0;
7360
cca3974e 7361 rc = ata_host_suspend(host, mesg);
500530f6
TH
7362 if (rc)
7363 return rc;
7364
3c5100c1 7365 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
7366
7367 return 0;
7368}
7369
7370int ata_pci_device_resume(struct pci_dev *pdev)
7371{
cca3974e 7372 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 7373 int rc;
500530f6 7374
553c4aa6
TH
7375 rc = ata_pci_device_do_resume(pdev);
7376 if (rc == 0)
7377 ata_host_resume(host);
7378 return rc;
9b847548 7379}
6ffa01d8
TH
7380#endif /* CONFIG_PM */
7381
1da177e4
LT
7382#endif /* CONFIG_PCI */
7383
7384
1da177e4
LT
7385static int __init ata_init(void)
7386{
a8601e5f 7387 ata_probe_timeout *= HZ;
1da177e4
LT
7388 ata_wq = create_workqueue("ata");
7389 if (!ata_wq)
7390 return -ENOMEM;
7391
453b07ac
TH
7392 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7393 if (!ata_aux_wq) {
7394 destroy_workqueue(ata_wq);
7395 return -ENOMEM;
7396 }
7397
1da177e4
LT
7398 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7399 return 0;
7400}
7401
7402static void __exit ata_exit(void)
7403{
7404 destroy_workqueue(ata_wq);
453b07ac 7405 destroy_workqueue(ata_aux_wq);
1da177e4
LT
7406}
7407
/* subsystem-level initcall: libata must be up before ATA LLDs initialize */
subsys_initcall(ata_init);
module_exit(ata_exit);
7410
67846b30 7411static unsigned long ratelimit_time;
34af946a 7412static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7413
7414int ata_ratelimit(void)
7415{
7416 int rc;
7417 unsigned long flags;
7418
7419 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7420
7421 if (time_after(jiffies, ratelimit_time)) {
7422 rc = 1;
7423 ratelimit_time = jiffies + (HZ/5);
7424 } else
7425 rc = 0;
7426
7427 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7428
7429 return rc;
7430}
7431
c22daff4
TH
7432/**
7433 * ata_wait_register - wait until register value changes
7434 * @reg: IO-mapped register
7435 * @mask: Mask to apply to read register value
7436 * @val: Wait condition
7437 * @interval_msec: polling interval in milliseconds
7438 * @timeout_msec: timeout in milliseconds
7439 *
7440 * Waiting for some bits of register to change is a common
7441 * operation for ATA controllers. This function reads 32bit LE
7442 * IO-mapped register @reg and tests for the following condition.
7443 *
7444 * (*@reg & mask) != val
7445 *
7446 * If the condition is met, it returns; otherwise, the process is
7447 * repeated after @interval_msec until timeout.
7448 *
7449 * LOCKING:
7450 * Kernel thread context (may sleep)
7451 *
7452 * RETURNS:
7453 * The final register value.
7454 */
7455u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7456 unsigned long interval_msec,
7457 unsigned long timeout_msec)
7458{
7459 unsigned long timeout;
7460 u32 tmp;
7461
7462 tmp = ioread32(reg);
7463
7464 /* Calculate timeout _after_ the first read to make sure
7465 * preceding writes reach the controller before starting to
7466 * eat away the timeout.
7467 */
7468 timeout = jiffies + (timeout_msec * HZ) / 1000;
7469
7470 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7471 msleep(interval_msec);
7472 tmp = ioread32(reg);
7473 }
7474
7475 return tmp;
7476}
7477
dd5b06c4
TH
7478/*
7479 * Dummy port_ops
7480 */
/* no-op port callback */
static void ata_dummy_noret(struct ata_port *ap) { }
/* no-op port callback that reports success */
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
/* no-op queued-command callback */
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* always report ATA_DRDY (device ready) */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* fail every command issued to a dummy port */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
7494
/* Port operations for dummy (unoccupied) ports: status reads report
 * "ready", every issued command fails with AC_ERR_SYSTEM, and all
 * remaining hooks are no-ops.
 */
const struct ata_port_operations ata_dummy_port_ops = {
	.check_status = ata_dummy_check_status,
	.check_altstatus = ata_dummy_check_status,
	.dev_select = ata_noop_dev_select,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.freeze = ata_dummy_noret,
	.thaw = ata_dummy_noret,
	.error_handler = ata_dummy_noret,
	.post_internal_cmd = ata_dummy_qc_noret,
	.irq_clear = ata_dummy_noret,
	.port_start = ata_dummy_ret0,
	.port_stop = ata_dummy_noret,
};
7509
21b0ad4f
TH
7510const struct ata_port_info ata_dummy_port_info = {
7511 .port_ops = &ata_dummy_port_ops,
7512};
7513
1da177e4
LT
7514/*
7515 * libata is essentially a library of internal helper functions for
7516 * low-level ATA host controller drivers. As such, the API/ABI is
7517 * likely to change as new drivers are added and updated.
7518 * Do not depend on ABI/API stability.
7519 */
e9c83914
TH
7520EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7521EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7522EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 7523EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 7524EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4
LT
7525EXPORT_SYMBOL_GPL(ata_std_bios_param);
7526EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 7527EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 7528EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 7529EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 7530EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 7531EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 7532EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 7533EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
7534EXPORT_SYMBOL_GPL(ata_sg_init);
7535EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 7536EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 7537EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 7538EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 7539EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
7540EXPORT_SYMBOL_GPL(ata_tf_load);
7541EXPORT_SYMBOL_GPL(ata_tf_read);
7542EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7543EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 7544EXPORT_SYMBOL_GPL(sata_print_link_status);
1da177e4
LT
7545EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7546EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7547EXPORT_SYMBOL_GPL(ata_check_status);
7548EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
7549EXPORT_SYMBOL_GPL(ata_exec_command);
7550EXPORT_SYMBOL_GPL(ata_port_start);
d92e74d3 7551EXPORT_SYMBOL_GPL(ata_sff_port_start);
1da177e4 7552EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 7553EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
7554EXPORT_SYMBOL_GPL(ata_data_xfer);
7555EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
31cc23b3 7556EXPORT_SYMBOL_GPL(ata_std_qc_defer);
1da177e4 7557EXPORT_SYMBOL_GPL(ata_qc_prep);
d26fc955 7558EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
e46834cd 7559EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
7560EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7561EXPORT_SYMBOL_GPL(ata_bmdma_start);
7562EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7563EXPORT_SYMBOL_GPL(ata_bmdma_status);
7564EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
7565EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7566EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7567EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7568EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7569EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 7570EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 7571EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 7572EXPORT_SYMBOL_GPL(sata_set_spd);
936fd732
TH
7573EXPORT_SYMBOL_GPL(sata_link_debounce);
7574EXPORT_SYMBOL_GPL(sata_link_resume);
1da177e4
LT
7575EXPORT_SYMBOL_GPL(sata_phy_reset);
7576EXPORT_SYMBOL_GPL(__sata_phy_reset);
7577EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 7578EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 7579EXPORT_SYMBOL_GPL(ata_std_softreset);
cc0680a5 7580EXPORT_SYMBOL_GPL(sata_link_hardreset);
c2bd5804
TH
7581EXPORT_SYMBOL_GPL(sata_std_hardreset);
7582EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
7583EXPORT_SYMBOL_GPL(ata_dev_classify);
7584EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 7585EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 7586EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 7587EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 7588EXPORT_SYMBOL_GPL(ata_busy_sleep);
88ff6eaf 7589EXPORT_SYMBOL_GPL(ata_wait_after_reset);
d4b2bab4 7590EXPORT_SYMBOL_GPL(ata_wait_ready);
86e45b6b 7591EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
7592EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7593EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 7594EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 7595EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 7596EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 7597EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
7598EXPORT_SYMBOL_GPL(sata_scr_valid);
7599EXPORT_SYMBOL_GPL(sata_scr_read);
7600EXPORT_SYMBOL_GPL(sata_scr_write);
7601EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
7602EXPORT_SYMBOL_GPL(ata_link_online);
7603EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 7604#ifdef CONFIG_PM
cca3974e
JG
7605EXPORT_SYMBOL_GPL(ata_host_suspend);
7606EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 7607#endif /* CONFIG_PM */
6a62a04d
TH
7608EXPORT_SYMBOL_GPL(ata_id_string);
7609EXPORT_SYMBOL_GPL(ata_id_c_string);
10305f0f 7610EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
1da177e4
LT
7611EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7612
1bc4ccff 7613EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
7614EXPORT_SYMBOL_GPL(ata_timing_compute);
7615EXPORT_SYMBOL_GPL(ata_timing_merge);
7616
1da177e4
LT
7617#ifdef CONFIG_PCI
7618EXPORT_SYMBOL_GPL(pci_test_config_bits);
d583bc18 7619EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
1626aeb8 7620EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
d583bc18 7621EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
1da177e4
LT
7622EXPORT_SYMBOL_GPL(ata_pci_init_one);
7623EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 7624#ifdef CONFIG_PM
500530f6
TH
7625EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7626EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
7627EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7628EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 7629#endif /* CONFIG_PM */
67951ade
AC
7630EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7631EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 7632#endif /* CONFIG_PCI */
9b847548 7633
31f88384 7634EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
3af9a77a
TH
7635EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
7636EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
7637EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
7638EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
7639
b64bbc39
TH
7640EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7641EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7642EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
7643EXPORT_SYMBOL_GPL(ata_port_desc);
7644#ifdef CONFIG_PCI
7645EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7646#endif /* CONFIG_PCI */
ece1d636 7647EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03 7648EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 7649EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 7650EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 7651EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 7652EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
7653EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7654EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
7655EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7656EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 7657EXPORT_SYMBOL_GPL(ata_do_eh);
83625006 7658EXPORT_SYMBOL_GPL(ata_irq_on);
a619f981 7659EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
7660
7661EXPORT_SYMBOL_GPL(ata_cable_40wire);
7662EXPORT_SYMBOL_GPL(ata_cable_80wire);
7663EXPORT_SYMBOL_GPL(ata_cable_unknown);
7664EXPORT_SYMBOL_GPL(ata_cable_sata);
This page took 1.489283 seconds and 5 git commands to generate.