git-libata-all: forward declare struct device
[deliverable/linux.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
fda0efc5
JG
62#define DRV_VERSION "2.10" /* must be exactly four chars */
63
64
d7bb4cc7 65/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
66const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
67const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
68const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 69
3373efd8
TH
70static unsigned int ata_dev_init_params(struct ata_device *dev,
71 u16 heads, u16 sectors);
72static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73static void ata_dev_xfermask(struct ata_device *dev);
1da177e4
LT
74
75static unsigned int ata_unique_id = 1;
76static struct workqueue_struct *ata_wq;
77
453b07ac
TH
78struct workqueue_struct *ata_aux_wq;
79
418dc1f5 80int atapi_enabled = 1;
1623c81e
JG
81module_param(atapi_enabled, int, 0444);
82MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83
95de719a
AL
84int atapi_dmadir = 0;
85module_param(atapi_dmadir, int, 0444);
86MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87
c3c013a2
JG
88int libata_fua = 0;
89module_param_named(fua, libata_fua, int, 0444);
90MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
91
a8601e5f
AM
92static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
93module_param(ata_probe_timeout, int, 0444);
94MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
95
1da177e4
LT
96MODULE_AUTHOR("Jeff Garzik");
97MODULE_DESCRIPTION("Library module for ATA devices");
98MODULE_LICENSE("GPL");
99MODULE_VERSION(DRV_VERSION);
100
0baab86b 101
1da177e4
LT
102/**
103 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
104 * @tf: Taskfile to convert
105 * @fis: Buffer into which data will output
106 * @pmp: Port multiplier port
107 *
108 * Converts a standard ATA taskfile to a Serial ATA
109 * FIS structure (Register - Host to Device).
110 *
111 * LOCKING:
112 * Inherited from caller.
113 */
114
057ace5e 115void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
1da177e4
LT
116{
117 fis[0] = 0x27; /* Register - Host to Device FIS */
118 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
119 bit 7 indicates Command FIS */
120 fis[2] = tf->command;
121 fis[3] = tf->feature;
122
123 fis[4] = tf->lbal;
124 fis[5] = tf->lbam;
125 fis[6] = tf->lbah;
126 fis[7] = tf->device;
127
128 fis[8] = tf->hob_lbal;
129 fis[9] = tf->hob_lbam;
130 fis[10] = tf->hob_lbah;
131 fis[11] = tf->hob_feature;
132
133 fis[12] = tf->nsect;
134 fis[13] = tf->hob_nsect;
135 fis[14] = 0;
136 fis[15] = tf->ctl;
137
138 fis[16] = 0;
139 fis[17] = 0;
140 fis[18] = 0;
141 fis[19] = 0;
142}
143
144/**
145 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
146 * @fis: Buffer from which data will be input
147 * @tf: Taskfile to output
148 *
e12a1be6 149 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
150 *
151 * LOCKING:
152 * Inherited from caller.
153 */
154
057ace5e 155void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
156{
157 tf->command = fis[2]; /* status */
158 tf->feature = fis[3]; /* error */
159
160 tf->lbal = fis[4];
161 tf->lbam = fis[5];
162 tf->lbah = fis[6];
163 tf->device = fis[7];
164
165 tf->hob_lbal = fis[8];
166 tf->hob_lbam = fis[9];
167 tf->hob_lbah = fis[10];
168
169 tf->nsect = fis[12];
170 tf->hob_nsect = fis[13];
171}
172
8cbd6df1
AL
173static const u8 ata_rw_cmds[] = {
174 /* pio multi */
175 ATA_CMD_READ_MULTI,
176 ATA_CMD_WRITE_MULTI,
177 ATA_CMD_READ_MULTI_EXT,
178 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
179 0,
180 0,
181 0,
182 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
183 /* pio */
184 ATA_CMD_PIO_READ,
185 ATA_CMD_PIO_WRITE,
186 ATA_CMD_PIO_READ_EXT,
187 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
188 0,
189 0,
190 0,
191 0,
8cbd6df1
AL
192 /* dma */
193 ATA_CMD_READ,
194 ATA_CMD_WRITE,
195 ATA_CMD_READ_EXT,
9a3dccc4
TH
196 ATA_CMD_WRITE_EXT,
197 0,
198 0,
199 0,
200 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 201};
1da177e4
LT
202
203/**
8cbd6df1 204 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
205 * @tf: command to examine and configure
206 * @dev: device tf belongs to
1da177e4 207 *
2e9edbf8 208 * Examine the device configuration and tf->flags to calculate
8cbd6df1 209 * the proper read/write commands and protocol to use.
1da177e4
LT
210 *
211 * LOCKING:
212 * caller.
213 */
bd056d7e 214static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 215{
9a3dccc4 216 u8 cmd;
1da177e4 217
9a3dccc4 218 int index, fua, lba48, write;
2e9edbf8 219
9a3dccc4 220 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
221 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
222 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 223
8cbd6df1
AL
224 if (dev->flags & ATA_DFLAG_PIO) {
225 tf->protocol = ATA_PROT_PIO;
9a3dccc4 226 index = dev->multi_count ? 0 : 8;
bd056d7e 227 } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
228 /* Unable to use DMA due to host limitation */
229 tf->protocol = ATA_PROT_PIO;
0565c26d 230 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
231 } else {
232 tf->protocol = ATA_PROT_DMA;
9a3dccc4 233 index = 16;
8cbd6df1 234 }
1da177e4 235
9a3dccc4
TH
236 cmd = ata_rw_cmds[index + fua + lba48 + write];
237 if (cmd) {
238 tf->command = cmd;
239 return 0;
240 }
241 return -1;
1da177e4
LT
242}
243
35b649fe
TH
244/**
245 * ata_tf_read_block - Read block address from ATA taskfile
246 * @tf: ATA taskfile of interest
247 * @dev: ATA device @tf belongs to
248 *
249 * LOCKING:
250 * None.
251 *
252 * Read block address from @tf. This function can handle all
253 * three address formats - LBA, LBA48 and CHS. tf->protocol and
254 * flags select the address format to use.
255 *
256 * RETURNS:
257 * Block address read from @tf.
258 */
259u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
260{
261 u64 block = 0;
262
263 if (tf->flags & ATA_TFLAG_LBA) {
264 if (tf->flags & ATA_TFLAG_LBA48) {
265 block |= (u64)tf->hob_lbah << 40;
266 block |= (u64)tf->hob_lbam << 32;
267 block |= tf->hob_lbal << 24;
268 } else
269 block |= (tf->device & 0xf) << 24;
270
271 block |= tf->lbah << 16;
272 block |= tf->lbam << 8;
273 block |= tf->lbal;
274 } else {
275 u32 cyl, head, sect;
276
277 cyl = tf->lbam | (tf->lbah << 8);
278 head = tf->device & 0xf;
279 sect = tf->lbal;
280
281 block = (cyl * dev->heads + head) * dev->sectors + sect;
282 }
283
284 return block;
285}
286
bd056d7e
TH
287/**
288 * ata_build_rw_tf - Build ATA taskfile for given read/write request
289 * @tf: Target ATA taskfile
290 * @dev: ATA device @tf belongs to
291 * @block: Block address
292 * @n_block: Number of blocks
293 * @tf_flags: RW/FUA etc...
294 * @tag: tag
295 *
296 * LOCKING:
297 * None.
298 *
299 * Build ATA taskfile @tf for read/write request described by
300 * @block, @n_block, @tf_flags and @tag on @dev.
301 *
302 * RETURNS:
303 *
304 * 0 on success, -ERANGE if the request is too large for @dev,
305 * -EINVAL if the request is invalid.
306 */
307int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
308 u64 block, u32 n_block, unsigned int tf_flags,
309 unsigned int tag)
310{
311 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
312 tf->flags |= tf_flags;
313
314 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
70e6ad0c
TH
315 ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
316 likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
317 /* yay, NCQ */
318 if (!lba_48_ok(block, n_block))
319 return -ERANGE;
320
321 tf->protocol = ATA_PROT_NCQ;
322 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
323
324 if (tf->flags & ATA_TFLAG_WRITE)
325 tf->command = ATA_CMD_FPDMA_WRITE;
326 else
327 tf->command = ATA_CMD_FPDMA_READ;
328
329 tf->nsect = tag << 3;
330 tf->hob_feature = (n_block >> 8) & 0xff;
331 tf->feature = n_block & 0xff;
332
333 tf->hob_lbah = (block >> 40) & 0xff;
334 tf->hob_lbam = (block >> 32) & 0xff;
335 tf->hob_lbal = (block >> 24) & 0xff;
336 tf->lbah = (block >> 16) & 0xff;
337 tf->lbam = (block >> 8) & 0xff;
338 tf->lbal = block & 0xff;
339
340 tf->device = 1 << 6;
341 if (tf->flags & ATA_TFLAG_FUA)
342 tf->device |= 1 << 7;
343 } else if (dev->flags & ATA_DFLAG_LBA) {
344 tf->flags |= ATA_TFLAG_LBA;
345
346 if (lba_28_ok(block, n_block)) {
347 /* use LBA28 */
348 tf->device |= (block >> 24) & 0xf;
349 } else if (lba_48_ok(block, n_block)) {
350 if (!(dev->flags & ATA_DFLAG_LBA48))
351 return -ERANGE;
352
353 /* use LBA48 */
354 tf->flags |= ATA_TFLAG_LBA48;
355
356 tf->hob_nsect = (n_block >> 8) & 0xff;
357
358 tf->hob_lbah = (block >> 40) & 0xff;
359 tf->hob_lbam = (block >> 32) & 0xff;
360 tf->hob_lbal = (block >> 24) & 0xff;
361 } else
362 /* request too large even for LBA48 */
363 return -ERANGE;
364
365 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
366 return -EINVAL;
367
368 tf->nsect = n_block & 0xff;
369
370 tf->lbah = (block >> 16) & 0xff;
371 tf->lbam = (block >> 8) & 0xff;
372 tf->lbal = block & 0xff;
373
374 tf->device |= ATA_LBA;
375 } else {
376 /* CHS */
377 u32 sect, head, cyl, track;
378
379 /* The request -may- be too large for CHS addressing. */
380 if (!lba_28_ok(block, n_block))
381 return -ERANGE;
382
383 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
384 return -EINVAL;
385
386 /* Convert LBA to CHS */
387 track = (u32)block / dev->sectors;
388 cyl = track / dev->heads;
389 head = track % dev->heads;
390 sect = (u32)block % dev->sectors + 1;
391
392 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
393 (u32)block, track, cyl, head, sect);
394
395 /* Check whether the converted CHS can fit.
396 Cylinder: 0-65535
397 Head: 0-15
398 Sector: 1-255*/
399 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
400 return -ERANGE;
401
402 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
403 tf->lbal = sect;
404 tf->lbam = cyl;
405 tf->lbah = cyl >> 8;
406 tf->device |= head;
407 }
408
409 return 0;
410}
411
cb95d562
TH
412/**
413 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
414 * @pio_mask: pio_mask
415 * @mwdma_mask: mwdma_mask
416 * @udma_mask: udma_mask
417 *
418 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
419 * unsigned int xfer_mask.
420 *
421 * LOCKING:
422 * None.
423 *
424 * RETURNS:
425 * Packed xfer_mask.
426 */
427static unsigned int ata_pack_xfermask(unsigned int pio_mask,
428 unsigned int mwdma_mask,
429 unsigned int udma_mask)
430{
431 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
432 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
433 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
434}
435
c0489e4e
TH
436/**
437 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
438 * @xfer_mask: xfer_mask to unpack
439 * @pio_mask: resulting pio_mask
440 * @mwdma_mask: resulting mwdma_mask
441 * @udma_mask: resulting udma_mask
442 *
443 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
444 * Any NULL distination masks will be ignored.
445 */
446static void ata_unpack_xfermask(unsigned int xfer_mask,
447 unsigned int *pio_mask,
448 unsigned int *mwdma_mask,
449 unsigned int *udma_mask)
450{
451 if (pio_mask)
452 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
453 if (mwdma_mask)
454 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
455 if (udma_mask)
456 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
457}
458
cb95d562 459static const struct ata_xfer_ent {
be9a50c8 460 int shift, bits;
cb95d562
TH
461 u8 base;
462} ata_xfer_tbl[] = {
463 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
464 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
465 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
466 { -1, },
467};
468
469/**
470 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
471 * @xfer_mask: xfer_mask of interest
472 *
473 * Return matching XFER_* value for @xfer_mask. Only the highest
474 * bit of @xfer_mask is considered.
475 *
476 * LOCKING:
477 * None.
478 *
479 * RETURNS:
480 * Matching XFER_* value, 0 if no match found.
481 */
482static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
483{
484 int highbit = fls(xfer_mask) - 1;
485 const struct ata_xfer_ent *ent;
486
487 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
488 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
489 return ent->base + highbit - ent->shift;
490 return 0;
491}
492
493/**
494 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
495 * @xfer_mode: XFER_* of interest
496 *
497 * Return matching xfer_mask for @xfer_mode.
498 *
499 * LOCKING:
500 * None.
501 *
502 * RETURNS:
503 * Matching xfer_mask, 0 if no match found.
504 */
505static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
506{
507 const struct ata_xfer_ent *ent;
508
509 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
510 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
511 return 1 << (ent->shift + xfer_mode - ent->base);
512 return 0;
513}
514
515/**
516 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
517 * @xfer_mode: XFER_* of interest
518 *
519 * Return matching xfer_shift for @xfer_mode.
520 *
521 * LOCKING:
522 * None.
523 *
524 * RETURNS:
525 * Matching xfer_shift, -1 if no match found.
526 */
527static int ata_xfer_mode2shift(unsigned int xfer_mode)
528{
529 const struct ata_xfer_ent *ent;
530
531 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
532 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
533 return ent->shift;
534 return -1;
535}
536
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* Indexed by bit position within a packed xfer_mask. */
	static const char * const xfer_mode_str[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5", "PIO6",
		"MWDMA0", "MWDMA1", "MWDMA2", "MWDMA3", "MWDMA4",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
		"UDMA/66", "UDMA/100", "UDMA/133", "UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
582
4c360c81
TH
/* Translate a SATA link speed number (1-based) into a human-readable
 * string; unknown or zero speeds map to "<unknown>".
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
594
3373efd8 595void ata_dev_disable(struct ata_device *dev)
0b8efb0a 596{
0dd4b21f 597 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
f15a1daf 598 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
0b8efb0a
TH
599 dev->class++;
600 }
601}
602
1da177e4 603/**
0d5ff566 604 * ata_devchk - PATA device presence detection
1da177e4
LT
605 * @ap: ATA channel to examine
606 * @device: Device to examine (starting at zero)
607 *
608 * This technique was originally described in
609 * Hale Landis's ATADRVR (www.ata-atapi.com), and
610 * later found its way into the ATA/ATAPI spec.
611 *
612 * Write a pattern to the ATA shadow registers,
613 * and if a device is present, it will respond by
614 * correctly storing and echoing back the
615 * ATA shadow register contents.
616 *
617 * LOCKING:
618 * caller.
619 */
620
0d5ff566 621static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
622{
623 struct ata_ioports *ioaddr = &ap->ioaddr;
624 u8 nsect, lbal;
625
626 ap->ops->dev_select(ap, device);
627
0d5ff566
TH
628 iowrite8(0x55, ioaddr->nsect_addr);
629 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 630
0d5ff566
TH
631 iowrite8(0xaa, ioaddr->nsect_addr);
632 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 633
0d5ff566
TH
634 iowrite8(0x55, ioaddr->nsect_addr);
635 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 636
0d5ff566
TH
637 nsect = ioread8(ioaddr->nsect_addr);
638 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
639
640 if ((nsect == 0x55) && (lbal == 0xaa))
641 return 1; /* we found a device */
642
643 return 0; /* nothing found */
644}
645
1da177e4
LT
646/**
647 * ata_dev_classify - determine device type based on ATA-spec signature
648 * @tf: ATA taskfile register set for device to be identified
649 *
650 * Determine from taskfile register contents whether a device is
651 * ATA or ATAPI, as per "Signature and persistence" section
652 * of ATA/PI spec (volume 1, sect 5.14).
653 *
654 * LOCKING:
655 * None.
656 *
657 * RETURNS:
658 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
659 * the event of failure.
660 */
661
057ace5e 662unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
663{
664 /* Apple's open source Darwin code hints that some devices only
665 * put a proper signature into the LBA mid/high registers,
666 * So, we only check those. It's sufficient for uniqueness.
667 */
668
669 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
670 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
671 DPRINTK("found ATA device by sig\n");
672 return ATA_DEV_ATA;
673 }
674
675 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
676 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
677 DPRINTK("found ATAPI device by sig\n");
678 return ATA_DEV_ATAPI;
679 }
680
681 DPRINTK("unknown device\n");
682 return ATA_DEV_UNKNOWN;
683}
684
685/**
686 * ata_dev_try_classify - Parse returned ATA device signature
687 * @ap: ATA channel to examine
688 * @device: Device to examine (starting at zero)
b4dc7623 689 * @r_err: Value of error register on completion
1da177e4
LT
690 *
691 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
692 * an ATA/ATAPI-defined set of values is placed in the ATA
693 * shadow registers, indicating the results of device detection
694 * and diagnostics.
695 *
696 * Select the ATA device, and read the values from the ATA shadow
697 * registers. Then parse according to the Error register value,
698 * and the spec-defined values examined by ata_dev_classify().
699 *
700 * LOCKING:
701 * caller.
b4dc7623
TH
702 *
703 * RETURNS:
704 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4
LT
705 */
706
b4dc7623
TH
707static unsigned int
708ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
1da177e4 709{
1da177e4
LT
710 struct ata_taskfile tf;
711 unsigned int class;
712 u8 err;
713
714 ap->ops->dev_select(ap, device);
715
716 memset(&tf, 0, sizeof(tf));
717
1da177e4 718 ap->ops->tf_read(ap, &tf);
0169e284 719 err = tf.feature;
b4dc7623
TH
720 if (r_err)
721 *r_err = err;
1da177e4 722
93590859
AC
723 /* see if device passed diags: if master then continue and warn later */
724 if (err == 0 && device == 0)
725 /* diagnostic fail : do nothing _YET_ */
726 ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
727 else if (err == 1)
1da177e4
LT
728 /* do nothing */ ;
729 else if ((device == 0) && (err == 0x81))
730 /* do nothing */ ;
731 else
b4dc7623 732 return ATA_DEV_NONE;
1da177e4 733
b4dc7623 734 /* determine if device is ATA or ATAPI */
1da177e4 735 class = ata_dev_classify(&tf);
b4dc7623 736
1da177e4 737 if (class == ATA_DEV_UNKNOWN)
b4dc7623 738 return ATA_DEV_NONE;
1da177e4 739 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
b4dc7623
TH
740 return ATA_DEV_NONE;
741 return class;
1da177e4
LT
742}
743
744/**
6a62a04d 745 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
746 * @id: IDENTIFY DEVICE results we will examine
747 * @s: string into which data is output
748 * @ofs: offset into identify device page
749 * @len: length of string to return. must be an even number.
750 *
751 * The strings in the IDENTIFY DEVICE page are broken up into
752 * 16-bit chunks. Run through the string, and output each
753 * 8-bit chunk linearly, regardless of platform.
754 *
755 * LOCKING:
756 * caller.
757 */
758
6a62a04d
TH
759void ata_id_string(const u16 *id, unsigned char *s,
760 unsigned int ofs, unsigned int len)
1da177e4
LT
761{
762 unsigned int c;
763
764 while (len > 0) {
765 c = id[ofs] >> 8;
766 *s = c;
767 s++;
768
769 c = id[ofs] & 0xff;
770 *s = c;
771 s++;
772
773 ofs++;
774 len -= 2;
775 }
776}
777
0e949ff3 778/**
6a62a04d 779 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
780 * @id: IDENTIFY DEVICE results we will examine
781 * @s: string into which data is output
782 * @ofs: offset into identify device page
783 * @len: length of string to return. must be an odd number.
784 *
6a62a04d 785 * This function is identical to ata_id_string except that it
0e949ff3
TH
786 * trims trailing spaces and terminates the resulting string with
787 * null. @len must be actual maximum length (even number) + 1.
788 *
789 * LOCKING:
790 * caller.
791 */
6a62a04d
TH
792void ata_id_c_string(const u16 *id, unsigned char *s,
793 unsigned int ofs, unsigned int len)
0e949ff3
TH
794{
795 unsigned char *p;
796
797 WARN_ON(!(len & 1));
798
6a62a04d 799 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
800
801 p = s + strnlen(s, len - 1);
802 while (p > s && p[-1] == ' ')
803 p--;
804 *p = '\0';
805}
0baab86b 806
2940740b
TH
807static u64 ata_id_n_sectors(const u16 *id)
808{
809 if (ata_id_has_lba(id)) {
810 if (ata_id_has_lba48(id))
811 return ata_id_u64(id, 100);
812 else
813 return ata_id_u32(id, 60);
814 } else {
815 if (ata_id_current_chs_valid(id))
816 return ata_id_u32(id, 57);
817 else
818 return id[1] * id[3] * id[6];
819 }
820}
821
0baab86b
EF
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty: some hosts need no device selection */
}
837
0baab86b 838
1da177e4
LT
839/**
840 * ata_std_dev_select - Select device 0/1 on ATA bus
841 * @ap: ATA channel to manipulate
842 * @device: ATA device (numbered from zero) to select
843 *
844 * Use the method defined in the ATA specification to
845 * make either device 0, or device 1, active on the
0baab86b
EF
846 * ATA channel. Works with both PIO and MMIO.
847 *
848 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
849 *
850 * LOCKING:
851 * caller.
852 */
853
854void ata_std_dev_select (struct ata_port *ap, unsigned int device)
855{
856 u8 tmp;
857
858 if (device == 0)
859 tmp = ATA_DEVICE_OBS;
860 else
861 tmp = ATA_DEVICE_OBS | ATA_DEV1;
862
0d5ff566 863 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
864 ata_pause(ap); /* needed; also flushes, for mmio */
865}
866
867/**
868 * ata_dev_select - Select device 0/1 on ATA bus
869 * @ap: ATA channel to manipulate
870 * @device: ATA device (numbered from zero) to select
871 * @wait: non-zero to wait for Status register BSY bit to clear
872 * @can_sleep: non-zero if context allows sleeping
873 *
874 * Use the method defined in the ATA specification to
875 * make either device 0, or device 1, active on the
876 * ATA channel.
877 *
878 * This is a high-level version of ata_std_dev_select(),
879 * which additionally provides the services of inserting
880 * the proper pauses and status polling, where needed.
881 *
882 * LOCKING:
883 * caller.
884 */
885
886void ata_dev_select(struct ata_port *ap, unsigned int device,
887 unsigned int wait, unsigned int can_sleep)
888{
88574551 889 if (ata_msg_probe(ap))
0dd4b21f 890 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
88574551 891 "device %u, wait %u\n", ap->id, device, wait);
1da177e4
LT
892
893 if (wait)
894 ata_wait_idle(ap);
895
896 ap->ops->dev_select(ap, device);
897
898 if (wait) {
899 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
900 msleep(150);
901 ata_wait_idle(ap);
902 }
903}
904
905/**
906 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 907 * @id: IDENTIFY DEVICE page to dump
1da177e4 908 *
0bd3300a
TH
909 * Dump selected 16-bit words from the given IDENTIFY DEVICE
910 * page.
1da177e4
LT
911 *
912 * LOCKING:
913 * caller.
914 */
915
0bd3300a 916static inline void ata_dump_id(const u16 *id)
1da177e4
LT
917{
918 DPRINTK("49==0x%04x "
919 "53==0x%04x "
920 "63==0x%04x "
921 "64==0x%04x "
922 "75==0x%04x \n",
0bd3300a
TH
923 id[49],
924 id[53],
925 id[63],
926 id[64],
927 id[75]);
1da177e4
LT
928 DPRINTK("80==0x%04x "
929 "81==0x%04x "
930 "82==0x%04x "
931 "83==0x%04x "
932 "84==0x%04x \n",
0bd3300a
TH
933 id[80],
934 id[81],
935 id[82],
936 id[83],
937 id[84]);
1da177e4
LT
938 DPRINTK("88==0x%04x "
939 "93==0x%04x\n",
0bd3300a
TH
940 id[88],
941 id[93]);
1da177e4
LT
942}
943
cb95d562
TH
944/**
945 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
946 * @id: IDENTIFY data to compute xfer mask from
947 *
948 * Compute the xfermask for this device. This is not as trivial
949 * as it seems if we must consider early devices correctly.
950 *
951 * FIXME: pre IDE drive timing (do we care ?).
952 *
953 * LOCKING:
954 * None.
955 *
956 * RETURNS:
957 * Computed xfermask
958 */
959static unsigned int ata_id_xfermask(const u16 *id)
960{
961 unsigned int pio_mask, mwdma_mask, udma_mask;
962
963 /* Usual case. Word 53 indicates word 64 is valid */
964 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
965 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
966 pio_mask <<= 3;
967 pio_mask |= 0x7;
968 } else {
969 /* If word 64 isn't valid then Word 51 high byte holds
970 * the PIO timing number for the maximum. Turn it into
971 * a mask.
972 */
7a0f1c8a 973 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb
AC
974 if (mode < 5) /* Valid PIO range */
975 pio_mask = (2 << mode) - 1;
976 else
977 pio_mask = 1;
cb95d562
TH
978
979 /* But wait.. there's more. Design your standards by
980 * committee and you too can get a free iordy field to
981 * process. However its the speeds not the modes that
982 * are supported... Note drivers using the timing API
983 * will get this right anyway
984 */
985 }
986
987 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 988
b352e57d
AC
989 if (ata_id_is_cfa(id)) {
990 /*
991 * Process compact flash extended modes
992 */
993 int pio = id[163] & 0x7;
994 int dma = (id[163] >> 3) & 7;
995
996 if (pio)
997 pio_mask |= (1 << 5);
998 if (pio > 1)
999 pio_mask |= (1 << 6);
1000 if (dma)
1001 mwdma_mask |= (1 << 3);
1002 if (dma > 1)
1003 mwdma_mask |= (1 << 4);
1004 }
1005
fb21f0d0
TH
1006 udma_mask = 0;
1007 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1008 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1009
1010 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1011}
1012
86e45b6b
TH
1013/**
1014 * ata_port_queue_task - Queue port_task
1015 * @ap: The ata_port to queue port_task for
e2a7f77a 1016 * @fn: workqueue function to be scheduled
65f27f38 1017 * @data: data for @fn to use
e2a7f77a 1018 * @delay: delay time for workqueue function
86e45b6b
TH
1019 *
1020 * Schedule @fn(@data) for execution after @delay jiffies using
1021 * port_task. There is one port_task per port and it's the
1022 * user(low level driver)'s responsibility to make sure that only
1023 * one task is active at any given time.
1024 *
1025 * libata core layer takes care of synchronization between
1026 * port_task and EH. ata_port_queue_task() may be ignored for EH
1027 * synchronization.
1028 *
1029 * LOCKING:
1030 * Inherited from caller.
1031 */
65f27f38 1032void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
86e45b6b
TH
1033 unsigned long delay)
1034{
1035 int rc;
1036
b51e9e5d 1037 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
86e45b6b
TH
1038 return;
1039
65f27f38
DH
1040 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1041 ap->port_task_data = data;
86e45b6b 1042
52bad64d 1043 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
86e45b6b
TH
1044
1045 /* rc == 0 means that another user is using port task */
1046 WARN_ON(rc == 0);
1047}
1048
1049/**
1050 * ata_port_flush_task - Flush port_task
1051 * @ap: The ata_port to flush port_task for
1052 *
1053 * After this function completes, port_task is guranteed not to
1054 * be running or scheduled.
1055 *
1056 * LOCKING:
1057 * Kernel thread context (may sleep)
1058 */
1059void ata_port_flush_task(struct ata_port *ap)
1060{
1061 unsigned long flags;
1062
1063 DPRINTK("ENTER\n");
1064
ba6a1308 1065 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 1066 ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
ba6a1308 1067 spin_unlock_irqrestore(ap->lock, flags);
86e45b6b
TH
1068
1069 DPRINTK("flush #1\n");
1070 flush_workqueue(ata_wq);
1071
1072 /*
1073 * At this point, if a task is running, it's guaranteed to see
1074 * the FLUSH flag; thus, it will never queue pio tasks again.
1075 * Cancel and flush.
1076 */
1077 if (!cancel_delayed_work(&ap->port_task)) {
0dd4b21f 1078 if (ata_msg_ctl(ap))
88574551
TH
1079 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
1080 __FUNCTION__);
86e45b6b
TH
1081 flush_workqueue(ata_wq);
1082 }
1083
ba6a1308 1084 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 1085 ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
ba6a1308 1086 spin_unlock_irqrestore(ap->lock, flags);
86e45b6b 1087
0dd4b21f
BP
1088 if (ata_msg_ctl(ap))
1089 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
86e45b6b
TH
1090}
1091
7102d230 1092static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1093{
77853bf2 1094 struct completion *waiting = qc->private_data;
a2a7a662 1095
a2a7a662 1096 complete(waiting);
a2a7a662
TH
1097}
1098
1099/**
2432697b 1100 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1101 * @dev: Device to which the command is sent
1102 * @tf: Taskfile registers for the command and the result
d69cf37d 1103 * @cdb: CDB for packet command
a2a7a662 1104 * @dma_dir: Data tranfer direction of the command
2432697b
TH
1105 * @sg: sg list for the data buffer of the command
1106 * @n_elem: Number of sg entries
a2a7a662
TH
1107 *
1108 * Executes libata internal command with timeout. @tf contains
1109 * command on entry and result on return. Timeout and error
1110 * conditions are reported via return value. No recovery action
1111 * is taken after a command times out. It's caller's duty to
1112 * clean up after timeout.
1113 *
1114 * LOCKING:
1115 * None. Should be called with kernel context, might sleep.
551e8889
TH
1116 *
1117 * RETURNS:
1118 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1119 */
2432697b
TH
1120unsigned ata_exec_internal_sg(struct ata_device *dev,
1121 struct ata_taskfile *tf, const u8 *cdb,
1122 int dma_dir, struct scatterlist *sg,
1123 unsigned int n_elem)
a2a7a662 1124{
3373efd8 1125 struct ata_port *ap = dev->ap;
a2a7a662
TH
1126 u8 command = tf->command;
1127 struct ata_queued_cmd *qc;
2ab7db1f 1128 unsigned int tag, preempted_tag;
dedaf2b0 1129 u32 preempted_sactive, preempted_qc_active;
60be6b9a 1130 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1131 unsigned long flags;
77853bf2 1132 unsigned int err_mask;
d95a717f 1133 int rc;
a2a7a662 1134
ba6a1308 1135 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1136
e3180499 1137 /* no internal command while frozen */
b51e9e5d 1138 if (ap->pflags & ATA_PFLAG_FROZEN) {
ba6a1308 1139 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1140 return AC_ERR_SYSTEM;
1141 }
1142
2ab7db1f 1143 /* initialize internal qc */
a2a7a662 1144
2ab7db1f
TH
1145 /* XXX: Tag 0 is used for drivers with legacy EH as some
1146 * drivers choke if any other tag is given. This breaks
1147 * ata_tag_internal() test for those drivers. Don't use new
1148 * EH stuff without converting to it.
1149 */
1150 if (ap->ops->error_handler)
1151 tag = ATA_TAG_INTERNAL;
1152 else
1153 tag = 0;
1154
6cec4a39 1155 if (test_and_set_bit(tag, &ap->qc_allocated))
2ab7db1f 1156 BUG();
f69499f4 1157 qc = __ata_qc_from_tag(ap, tag);
2ab7db1f
TH
1158
1159 qc->tag = tag;
1160 qc->scsicmd = NULL;
1161 qc->ap = ap;
1162 qc->dev = dev;
1163 ata_qc_reinit(qc);
1164
1165 preempted_tag = ap->active_tag;
dedaf2b0
TH
1166 preempted_sactive = ap->sactive;
1167 preempted_qc_active = ap->qc_active;
2ab7db1f 1168 ap->active_tag = ATA_TAG_POISON;
dedaf2b0
TH
1169 ap->sactive = 0;
1170 ap->qc_active = 0;
2ab7db1f
TH
1171
1172 /* prepare & issue qc */
a2a7a662 1173 qc->tf = *tf;
d69cf37d
TH
1174 if (cdb)
1175 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e61e0672 1176 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1177 qc->dma_dir = dma_dir;
1178 if (dma_dir != DMA_NONE) {
2432697b
TH
1179 unsigned int i, buflen = 0;
1180
1181 for (i = 0; i < n_elem; i++)
1182 buflen += sg[i].length;
1183
1184 ata_sg_init(qc, sg, n_elem);
49c80429 1185 qc->nbytes = buflen;
a2a7a662
TH
1186 }
1187
77853bf2 1188 qc->private_data = &wait;
a2a7a662
TH
1189 qc->complete_fn = ata_qc_complete_internal;
1190
8e0e694a 1191 ata_qc_issue(qc);
a2a7a662 1192
ba6a1308 1193 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1194
a8601e5f 1195 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
d95a717f
TH
1196
1197 ata_port_flush_task(ap);
41ade50c 1198
d95a717f 1199 if (!rc) {
ba6a1308 1200 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1201
1202 /* We're racing with irq here. If we lose, the
1203 * following test prevents us from completing the qc
d95a717f
TH
1204 * twice. If we win, the port is frozen and will be
1205 * cleaned up by ->post_internal_cmd().
a2a7a662 1206 */
77853bf2 1207 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1208 qc->err_mask |= AC_ERR_TIMEOUT;
1209
1210 if (ap->ops->error_handler)
1211 ata_port_freeze(ap);
1212 else
1213 ata_qc_complete(qc);
f15a1daf 1214
0dd4b21f
BP
1215 if (ata_msg_warn(ap))
1216 ata_dev_printk(dev, KERN_WARNING,
88574551 1217 "qc timeout (cmd 0x%x)\n", command);
a2a7a662
TH
1218 }
1219
ba6a1308 1220 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1221 }
1222
d95a717f
TH
1223 /* do post_internal_cmd */
1224 if (ap->ops->post_internal_cmd)
1225 ap->ops->post_internal_cmd(qc);
1226
18d90deb 1227 if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
0dd4b21f 1228 if (ata_msg_warn(ap))
88574551 1229 ata_dev_printk(dev, KERN_WARNING,
0dd4b21f 1230 "zero err_mask for failed "
88574551 1231 "internal command, assuming AC_ERR_OTHER\n");
d95a717f
TH
1232 qc->err_mask |= AC_ERR_OTHER;
1233 }
1234
15869303 1235 /* finish up */
ba6a1308 1236 spin_lock_irqsave(ap->lock, flags);
15869303 1237
e61e0672 1238 *tf = qc->result_tf;
77853bf2
TH
1239 err_mask = qc->err_mask;
1240
1241 ata_qc_free(qc);
2ab7db1f 1242 ap->active_tag = preempted_tag;
dedaf2b0
TH
1243 ap->sactive = preempted_sactive;
1244 ap->qc_active = preempted_qc_active;
77853bf2 1245
1f7dd3e9
TH
1246 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1247 * Until those drivers are fixed, we detect the condition
1248 * here, fail the command with AC_ERR_SYSTEM and reenable the
1249 * port.
1250 *
1251 * Note that this doesn't change any behavior as internal
1252 * command failure results in disabling the device in the
1253 * higher layer for LLDDs without new reset/EH callbacks.
1254 *
1255 * Kill the following code as soon as those drivers are fixed.
1256 */
198e0fed 1257 if (ap->flags & ATA_FLAG_DISABLED) {
1f7dd3e9
TH
1258 err_mask |= AC_ERR_SYSTEM;
1259 ata_port_probe(ap);
1260 }
1261
ba6a1308 1262 spin_unlock_irqrestore(ap->lock, flags);
15869303 1263
77853bf2 1264 return err_mask;
a2a7a662
TH
1265}
1266
2432697b 1267/**
33480a0e 1268 * ata_exec_internal - execute libata internal command
2432697b
TH
1269 * @dev: Device to which the command is sent
1270 * @tf: Taskfile registers for the command and the result
1271 * @cdb: CDB for packet command
1272 * @dma_dir: Data tranfer direction of the command
1273 * @buf: Data buffer of the command
1274 * @buflen: Length of data buffer
1275 *
1276 * Wrapper around ata_exec_internal_sg() which takes simple
1277 * buffer instead of sg list.
1278 *
1279 * LOCKING:
1280 * None. Should be called with kernel context, might sleep.
1281 *
1282 * RETURNS:
1283 * Zero on success, AC_ERR_* mask on failure
1284 */
1285unsigned ata_exec_internal(struct ata_device *dev,
1286 struct ata_taskfile *tf, const u8 *cdb,
1287 int dma_dir, void *buf, unsigned int buflen)
1288{
33480a0e
TH
1289 struct scatterlist *psg = NULL, sg;
1290 unsigned int n_elem = 0;
2432697b 1291
33480a0e
TH
1292 if (dma_dir != DMA_NONE) {
1293 WARN_ON(!buf);
1294 sg_init_one(&sg, buf, buflen);
1295 psg = &sg;
1296 n_elem++;
1297 }
2432697b 1298
33480a0e 1299 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1300}
1301
977e6b9f
TH
1302/**
1303 * ata_do_simple_cmd - execute simple internal command
1304 * @dev: Device to which the command is sent
1305 * @cmd: Opcode to execute
1306 *
1307 * Execute a 'simple' command, that only consists of the opcode
1308 * 'cmd' itself, without filling any other registers
1309 *
1310 * LOCKING:
1311 * Kernel thread context (may sleep).
1312 *
1313 * RETURNS:
1314 * Zero on success, AC_ERR_* mask on failure
e58eb583 1315 */
77b08fb5 1316unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1317{
1318 struct ata_taskfile tf;
e58eb583
TH
1319
1320 ata_tf_init(dev, &tf);
1321
1322 tf.command = cmd;
1323 tf.flags |= ATA_TFLAG_DEVICE;
1324 tf.protocol = ATA_PROT_NODATA;
1325
977e6b9f 1326 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1327}
1328
1bc4ccff
AC
1329/**
1330 * ata_pio_need_iordy - check if iordy needed
1331 * @adev: ATA device
1332 *
1333 * Check if the current speed of the device requires IORDY. Used
1334 * by various controllers for chip configuration.
1335 */
1336
1337unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1338{
1339 int pio;
1340 int speed = adev->pio_mode - XFER_PIO_0;
1341
1342 if (speed < 2)
1343 return 0;
1344 if (speed > 2)
1345 return 1;
2e9edbf8 1346
1bc4ccff
AC
1347 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1348
1349 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1350 pio = adev->id[ATA_ID_EIDE_PIO];
1351 /* Is the speed faster than the drive allows non IORDY ? */
1352 if (pio) {
1353 /* This is cycle times not frequency - watch the logic! */
1354 if (pio > 240) /* PIO2 is 240nS per cycle */
1355 return 1;
1356 return 0;
1357 }
1358 }
1359 return 0;
1360}
1361
1da177e4 1362/**
49016aca 1363 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1364 * @dev: target device
1365 * @p_class: pointer to class of the target device (may be changed)
bff04647 1366 * @flags: ATA_READID_* flags
fe635c7e 1367 * @id: buffer to read IDENTIFY data into
1da177e4 1368 *
49016aca
TH
1369 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1370 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1371 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1372 * for pre-ATA4 drives.
1da177e4
LT
1373 *
1374 * LOCKING:
49016aca
TH
1375 * Kernel thread context (may sleep)
1376 *
1377 * RETURNS:
1378 * 0 on success, -errno otherwise.
1da177e4 1379 */
a9beec95 1380int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1381 unsigned int flags, u16 *id)
1da177e4 1382{
3373efd8 1383 struct ata_port *ap = dev->ap;
49016aca 1384 unsigned int class = *p_class;
a0123703 1385 struct ata_taskfile tf;
49016aca
TH
1386 unsigned int err_mask = 0;
1387 const char *reason;
1388 int rc;
1da177e4 1389
0dd4b21f 1390 if (ata_msg_ctl(ap))
88574551
TH
1391 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1392 __FUNCTION__, ap->id, dev->devno);
1da177e4 1393
49016aca 1394 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1da177e4 1395
49016aca 1396 retry:
3373efd8 1397 ata_tf_init(dev, &tf);
a0123703 1398
49016aca
TH
1399 switch (class) {
1400 case ATA_DEV_ATA:
a0123703 1401 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1402 break;
1403 case ATA_DEV_ATAPI:
a0123703 1404 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1405 break;
1406 default:
1407 rc = -ENODEV;
1408 reason = "unsupported class";
1409 goto err_out;
1da177e4
LT
1410 }
1411
a0123703 1412 tf.protocol = ATA_PROT_PIO;
800b3996 1413 tf.flags |= ATA_TFLAG_POLLING; /* for polling presence detection */
1da177e4 1414
3373efd8 1415 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
49016aca 1416 id, sizeof(id[0]) * ATA_ID_WORDS);
a0123703 1417 if (err_mask) {
800b3996 1418 if (err_mask & AC_ERR_NODEV_HINT) {
55a8e2c8
TH
1419 DPRINTK("ata%u.%d: NODEV after polling detection\n",
1420 ap->id, dev->devno);
1421 return -ENOENT;
1422 }
1423
49016aca
TH
1424 rc = -EIO;
1425 reason = "I/O error";
1da177e4
LT
1426 goto err_out;
1427 }
1428
49016aca 1429 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1430
49016aca 1431 /* sanity check */
a4f5749b
TH
1432 rc = -EINVAL;
1433 reason = "device reports illegal type";
1434
1435 if (class == ATA_DEV_ATA) {
1436 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1437 goto err_out;
1438 } else {
1439 if (ata_id_is_ata(id))
1440 goto err_out;
49016aca
TH
1441 }
1442
bff04647 1443 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
1444 /*
1445 * The exact sequence expected by certain pre-ATA4 drives is:
1446 * SRST RESET
1447 * IDENTIFY
1448 * INITIALIZE DEVICE PARAMETERS
1449 * anything else..
1450 * Some drives were very specific about that exact sequence.
1451 */
1452 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 1453 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
1454 if (err_mask) {
1455 rc = -EIO;
1456 reason = "INIT_DEV_PARAMS failed";
1457 goto err_out;
1458 }
1459
1460 /* current CHS translation info (id[53-58]) might be
1461 * changed. reread the identify device info.
1462 */
bff04647 1463 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
1464 goto retry;
1465 }
1466 }
1467
1468 *p_class = class;
fe635c7e 1469
49016aca
TH
1470 return 0;
1471
1472 err_out:
88574551 1473 if (ata_msg_warn(ap))
0dd4b21f 1474 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 1475 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
1476 return rc;
1477}
1478
3373efd8 1479static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1480{
3373efd8 1481 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1482}
1483
a6e6ce8e
TH
1484static void ata_dev_config_ncq(struct ata_device *dev,
1485 char *desc, size_t desc_sz)
1486{
1487 struct ata_port *ap = dev->ap;
1488 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1489
1490 if (!ata_id_has_ncq(dev->id)) {
1491 desc[0] = '\0';
1492 return;
1493 }
6919a0a6
AC
1494 if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
1495 snprintf(desc, desc_sz, "NCQ (not used)");
1496 return;
1497 }
a6e6ce8e 1498 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1499 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1500 dev->flags |= ATA_DFLAG_NCQ;
1501 }
1502
1503 if (hdepth >= ddepth)
1504 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1505 else
1506 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1507}
1508
e6d902a3
BK
1509static void ata_set_port_max_cmd_len(struct ata_port *ap)
1510{
1511 int i;
1512
cca3974e
JG
1513 if (ap->scsi_host) {
1514 unsigned int len = 0;
1515
e6d902a3 1516 for (i = 0; i < ATA_MAX_DEVICES; i++)
cca3974e
JG
1517 len = max(len, ap->device[i].cdb_len);
1518
1519 ap->scsi_host->max_cmd_len = len;
e6d902a3
BK
1520 }
1521}
1522
49016aca 1523/**
ffeae418 1524 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1525 * @dev: Target device to configure
1526 *
1527 * Configure @dev according to @dev->id. Generic and low-level
1528 * driver specific fixups are also applied.
49016aca
TH
1529 *
1530 * LOCKING:
ffeae418
TH
1531 * Kernel thread context (may sleep)
1532 *
1533 * RETURNS:
1534 * 0 on success, -errno otherwise
49016aca 1535 */
efdaedc4 1536int ata_dev_configure(struct ata_device *dev)
49016aca 1537{
3373efd8 1538 struct ata_port *ap = dev->ap;
efdaedc4 1539 int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
1148c3a7 1540 const u16 *id = dev->id;
ff8854b2 1541 unsigned int xfer_mask;
b352e57d 1542 char revbuf[7]; /* XYZ-99\0 */
e6d902a3 1543 int rc;
49016aca 1544
0dd4b21f 1545 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
88574551
TH
1546 ata_dev_printk(dev, KERN_INFO,
1547 "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1548 __FUNCTION__, ap->id, dev->devno);
ffeae418 1549 return 0;
49016aca
TH
1550 }
1551
0dd4b21f 1552 if (ata_msg_probe(ap))
88574551
TH
1553 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1554 __FUNCTION__, ap->id, dev->devno);
1da177e4 1555
c39f5ebe 1556 /* print device capabilities */
0dd4b21f 1557 if (ata_msg_probe(ap))
88574551
TH
1558 ata_dev_printk(dev, KERN_DEBUG,
1559 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1560 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1561 __FUNCTION__,
f15a1daf
TH
1562 id[49], id[82], id[83], id[84],
1563 id[85], id[86], id[87], id[88]);
c39f5ebe 1564
208a9933 1565 /* initialize to-be-configured parameters */
ea1dd4e1 1566 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1567 dev->max_sectors = 0;
1568 dev->cdb_len = 0;
1569 dev->n_sectors = 0;
1570 dev->cylinders = 0;
1571 dev->heads = 0;
1572 dev->sectors = 0;
1573
1da177e4
LT
1574 /*
1575 * common ATA, ATAPI feature tests
1576 */
1577
ff8854b2 1578 /* find max transfer mode; for printk only */
1148c3a7 1579 xfer_mask = ata_id_xfermask(id);
1da177e4 1580
0dd4b21f
BP
1581 if (ata_msg_probe(ap))
1582 ata_dump_id(id);
1da177e4
LT
1583
1584 /* ATA-specific feature tests */
1585 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
1586 if (ata_id_is_cfa(id)) {
1587 if (id[162] & 1) /* CPRM may make this media unusable */
1588 ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessable.\n",
1589 ap->id, dev->devno);
1590 snprintf(revbuf, 7, "CFA");
1591 }
1592 else
1593 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1594
1148c3a7 1595 dev->n_sectors = ata_id_n_sectors(id);
2940740b 1596
1148c3a7 1597 if (ata_id_has_lba(id)) {
4c2d721a 1598 const char *lba_desc;
a6e6ce8e 1599 char ncq_desc[20];
8bf62ece 1600
4c2d721a
TH
1601 lba_desc = "LBA";
1602 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1603 if (ata_id_has_lba48(id)) {
8bf62ece 1604 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 1605 lba_desc = "LBA48";
6fc49adb
TH
1606
1607 if (dev->n_sectors >= (1UL << 28) &&
1608 ata_id_has_flush_ext(id))
1609 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 1610 }
8bf62ece 1611
a6e6ce8e
TH
1612 /* config NCQ */
1613 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1614
8bf62ece 1615 /* print device info to dmesg */
5afc8142 1616 if (ata_msg_drv(ap) && print_info)
b352e57d 1617 ata_dev_printk(dev, KERN_INFO, "%s, "
a6e6ce8e 1618 "max %s, %Lu sectors: %s %s\n",
b352e57d 1619 revbuf,
f15a1daf
TH
1620 ata_mode_string(xfer_mask),
1621 (unsigned long long)dev->n_sectors,
a6e6ce8e 1622 lba_desc, ncq_desc);
ffeae418 1623 } else {
8bf62ece
AL
1624 /* CHS */
1625
1626 /* Default translation */
1148c3a7
TH
1627 dev->cylinders = id[1];
1628 dev->heads = id[3];
1629 dev->sectors = id[6];
8bf62ece 1630
1148c3a7 1631 if (ata_id_current_chs_valid(id)) {
8bf62ece 1632 /* Current CHS translation is valid. */
1148c3a7
TH
1633 dev->cylinders = id[54];
1634 dev->heads = id[55];
1635 dev->sectors = id[56];
8bf62ece
AL
1636 }
1637
1638 /* print device info to dmesg */
5afc8142 1639 if (ata_msg_drv(ap) && print_info)
b352e57d 1640 ata_dev_printk(dev, KERN_INFO, "%s, "
f15a1daf 1641 "max %s, %Lu sectors: CHS %u/%u/%u\n",
b352e57d 1642 revbuf,
f15a1daf
TH
1643 ata_mode_string(xfer_mask),
1644 (unsigned long long)dev->n_sectors,
88574551
TH
1645 dev->cylinders, dev->heads,
1646 dev->sectors);
1da177e4
LT
1647 }
1648
07f6f7d0
AL
1649 if (dev->id[59] & 0x100) {
1650 dev->multi_count = dev->id[59] & 0xff;
5afc8142 1651 if (ata_msg_drv(ap) && print_info)
88574551
TH
1652 ata_dev_printk(dev, KERN_INFO,
1653 "ata%u: dev %u multi count %u\n",
1654 ap->id, dev->devno, dev->multi_count);
07f6f7d0
AL
1655 }
1656
6e7846e9 1657 dev->cdb_len = 16;
1da177e4
LT
1658 }
1659
1660 /* ATAPI-specific feature tests */
2c13b7ce 1661 else if (dev->class == ATA_DEV_ATAPI) {
08a556db
AL
1662 char *cdb_intr_string = "";
1663
1148c3a7 1664 rc = atapi_cdb_len(id);
1da177e4 1665 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 1666 if (ata_msg_warn(ap))
88574551
TH
1667 ata_dev_printk(dev, KERN_WARNING,
1668 "unsupported CDB len\n");
ffeae418 1669 rc = -EINVAL;
1da177e4
LT
1670 goto err_out_nosup;
1671 }
6e7846e9 1672 dev->cdb_len = (unsigned int) rc;
1da177e4 1673
08a556db 1674 if (ata_id_cdb_intr(dev->id)) {
312f7da2 1675 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
1676 cdb_intr_string = ", CDB intr";
1677 }
312f7da2 1678
1da177e4 1679 /* print device info to dmesg */
5afc8142 1680 if (ata_msg_drv(ap) && print_info)
12436c30
TH
1681 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1682 ata_mode_string(xfer_mask),
1683 cdb_intr_string);
1da177e4
LT
1684 }
1685
914ed354
TH
1686 /* determine max_sectors */
1687 dev->max_sectors = ATA_MAX_SECTORS;
1688 if (dev->flags & ATA_DFLAG_LBA48)
1689 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
1690
93590859
AC
1691 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
1692 /* Let the user know. We don't want to disallow opens for
1693 rescue purposes, or in case the vendor is just a blithering
1694 idiot */
1695 if (print_info) {
1696 ata_dev_printk(dev, KERN_WARNING,
1697"Drive reports diagnostics failure. This may indicate a drive\n");
1698 ata_dev_printk(dev, KERN_WARNING,
1699"fault or invalid emulation. Contact drive vendor for information.\n");
1700 }
1701 }
1702
e6d902a3 1703 ata_set_port_max_cmd_len(ap);
6e7846e9 1704
4b2f3ede 1705 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 1706 if (ata_dev_knobble(dev)) {
5afc8142 1707 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
1708 ata_dev_printk(dev, KERN_INFO,
1709 "applying bridge limits\n");
5a529139 1710 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
1711 dev->max_sectors = ATA_MAX_SECTORS;
1712 }
1713
1714 if (ap->ops->dev_config)
1715 ap->ops->dev_config(ap, dev);
1716
0dd4b21f
BP
1717 if (ata_msg_probe(ap))
1718 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1719 __FUNCTION__, ata_chk_status(ap));
ffeae418 1720 return 0;
1da177e4
LT
1721
1722err_out_nosup:
0dd4b21f 1723 if (ata_msg_probe(ap))
88574551
TH
1724 ata_dev_printk(dev, KERN_DEBUG,
1725 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 1726 return rc;
1da177e4
LT
1727}
1728
1729/**
1730 * ata_bus_probe - Reset and probe ATA bus
1731 * @ap: Bus to probe
1732 *
0cba632b
JG
1733 * Master ATA bus probing function. Initiates a hardware-dependent
1734 * bus reset, then attempts to identify any devices found on
1735 * the bus.
1736 *
1da177e4 1737 * LOCKING:
0cba632b 1738 * PCI/etc. bus probe sem.
1da177e4
LT
1739 *
1740 * RETURNS:
96072e69 1741 * Zero on success, negative errno otherwise.
1da177e4
LT
1742 */
1743
80289167 1744int ata_bus_probe(struct ata_port *ap)
1da177e4 1745{
28ca5c57 1746 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1
TH
1747 int tries[ATA_MAX_DEVICES];
1748 int i, rc, down_xfermask;
e82cbdb9 1749 struct ata_device *dev;
1da177e4 1750
28ca5c57 1751 ata_port_probe(ap);
c19ba8af 1752
14d2bac1
TH
1753 for (i = 0; i < ATA_MAX_DEVICES; i++)
1754 tries[i] = ATA_PROBE_MAX_TRIES;
1755
1756 retry:
1757 down_xfermask = 0;
1758
2044470c 1759 /* reset and determine device classes */
52783c5d 1760 ap->ops->phy_reset(ap);
2061a47a 1761
52783c5d
TH
1762 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1763 dev = &ap->device[i];
c19ba8af 1764
52783c5d
TH
1765 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1766 dev->class != ATA_DEV_UNKNOWN)
1767 classes[dev->devno] = dev->class;
1768 else
1769 classes[dev->devno] = ATA_DEV_NONE;
2044470c 1770
52783c5d 1771 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 1772 }
1da177e4 1773
52783c5d 1774 ata_port_probe(ap);
2044470c 1775
b6079ca4
AC
1776 /* after the reset the device state is PIO 0 and the controller
1777 state is undefined. Record the mode */
1778
1779 for (i = 0; i < ATA_MAX_DEVICES; i++)
1780 ap->device[i].pio_mode = XFER_PIO_0;
1781
28ca5c57 1782 /* read IDENTIFY page and configure devices */
1da177e4 1783 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e82cbdb9 1784 dev = &ap->device[i];
28ca5c57 1785
ec573755
TH
1786 if (tries[i])
1787 dev->class = classes[i];
ffeae418 1788
14d2bac1 1789 if (!ata_dev_enabled(dev))
ffeae418 1790 continue;
ffeae418 1791
bff04647
TH
1792 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
1793 dev->id);
14d2bac1
TH
1794 if (rc)
1795 goto fail;
1796
efdaedc4
TH
1797 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
1798 rc = ata_dev_configure(dev);
1799 ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
1800 if (rc)
1801 goto fail;
1da177e4
LT
1802 }
1803
e82cbdb9 1804 /* configure transfer mode */
3adcebb2 1805 rc = ata_set_mode(ap, &dev);
51713d35
TH
1806 if (rc) {
1807 down_xfermask = 1;
1808 goto fail;
e82cbdb9 1809 }
1da177e4 1810
e82cbdb9
TH
1811 for (i = 0; i < ATA_MAX_DEVICES; i++)
1812 if (ata_dev_enabled(&ap->device[i]))
1813 return 0;
1da177e4 1814
e82cbdb9
TH
1815 /* no device present, disable port */
1816 ata_port_disable(ap);
1da177e4 1817 ap->ops->port_disable(ap);
96072e69 1818 return -ENODEV;
14d2bac1
TH
1819
1820 fail:
1821 switch (rc) {
1822 case -EINVAL:
1823 case -ENODEV:
1824 tries[dev->devno] = 0;
1825 break;
1826 case -EIO:
3c567b7d 1827 sata_down_spd_limit(ap);
14d2bac1
TH
1828 /* fall through */
1829 default:
1830 tries[dev->devno]--;
1831 if (down_xfermask &&
3373efd8 1832 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
14d2bac1
TH
1833 tries[dev->devno] = 0;
1834 }
1835
ec573755 1836 if (!tries[dev->devno]) {
3373efd8
TH
1837 ata_down_xfermask_limit(dev, 1);
1838 ata_dev_disable(dev);
ec573755
TH
1839 }
1840
14d2bac1 1841 goto retry;
1da177e4
LT
1842}
1843
1844/**
0cba632b
JG
1845 * ata_port_probe - Mark port as enabled
1846 * @ap: Port for which we indicate enablement
1da177e4 1847 *
0cba632b
JG
1848 * Modify @ap data structure such that the system
1849 * thinks that the entire port is enabled.
1850 *
cca3974e 1851 * LOCKING: host lock, or some other form of
0cba632b 1852 * serialization.
1da177e4
LT
1853 */
1854
1855void ata_port_probe(struct ata_port *ap)
1856{
198e0fed 1857 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
1858}
1859
3be680b7
TH
1860/**
1861 * sata_print_link_status - Print SATA link status
1862 * @ap: SATA port to printk link status about
1863 *
1864 * This function prints link speed and status of a SATA link.
1865 *
1866 * LOCKING:
1867 * None.
1868 */
1869static void sata_print_link_status(struct ata_port *ap)
1870{
6d5f9732 1871 u32 sstatus, scontrol, tmp;
3be680b7 1872
81952c54 1873 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1874 return;
81952c54 1875 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1876
81952c54 1877 if (ata_port_online(ap)) {
3be680b7 1878 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1879 ata_port_printk(ap, KERN_INFO,
1880 "SATA link up %s (SStatus %X SControl %X)\n",
1881 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1882 } else {
f15a1daf
TH
1883 ata_port_printk(ap, KERN_INFO,
1884 "SATA link down (SStatus %X SControl %X)\n",
1885 sstatus, scontrol);
3be680b7
TH
1886 }
1887}
1888
1da177e4 1889/**
780a87f7
JG
1890 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1891 * @ap: SATA port associated with target SATA PHY.
1da177e4 1892 *
780a87f7
JG
1893 * This function issues commands to standard SATA Sxxx
1894 * PHY registers, to wake up the phy (and device), and
1895 * clear any reset condition.
1da177e4
LT
1896 *
1897 * LOCKING:
0cba632b 1898 * PCI/etc. bus probe sem.
1da177e4
LT
1899 *
1900 */
1901void __sata_phy_reset(struct ata_port *ap)
1902{
1903 u32 sstatus;
1904 unsigned long timeout = jiffies + (HZ * 5);
1905
1906 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 1907 /* issue phy wake/reset */
81952c54 1908 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
62ba2841
TH
1909 /* Couldn't find anything in SATA I/II specs, but
1910 * AHCI-1.1 10.4.2 says at least 1 ms. */
1911 mdelay(1);
1da177e4 1912 }
81952c54
TH
1913 /* phy wake/clear reset */
1914 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1da177e4
LT
1915
1916 /* wait for phy to become ready, if necessary */
1917 do {
1918 msleep(200);
81952c54 1919 sata_scr_read(ap, SCR_STATUS, &sstatus);
1da177e4
LT
1920 if ((sstatus & 0xf) != 1)
1921 break;
1922 } while (time_before(jiffies, timeout));
1923
3be680b7
TH
1924 /* print link status */
1925 sata_print_link_status(ap);
656563e3 1926
3be680b7 1927 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 1928 if (!ata_port_offline(ap))
1da177e4 1929 ata_port_probe(ap);
3be680b7 1930 else
1da177e4 1931 ata_port_disable(ap);
1da177e4 1932
198e0fed 1933 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1934 return;
1935
1936 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1937 ata_port_disable(ap);
1938 return;
1939 }
1940
1941 ap->cbl = ATA_CBL_SATA;
1942}
1943
1944/**
780a87f7
JG
1945 * sata_phy_reset - Reset SATA bus.
1946 * @ap: SATA port associated with target SATA PHY.
1da177e4 1947 *
780a87f7
JG
1948 * This function resets the SATA bus, and then probes
1949 * the bus for devices.
1da177e4
LT
1950 *
1951 * LOCKING:
0cba632b 1952 * PCI/etc. bus probe sem.
1da177e4
LT
1953 *
1954 */
1955void sata_phy_reset(struct ata_port *ap)
1956{
1957 __sata_phy_reset(ap);
198e0fed 1958 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1959 return;
1960 ata_bus_reset(ap);
1961}
1962
ebdfca6e
AC
1963/**
1964 * ata_dev_pair - return other device on cable
ebdfca6e
AC
1965 * @adev: device
1966 *
1967 * Obtain the other device on the same cable, or if none is
1968 * present NULL is returned
1969 */
2e9edbf8 1970
3373efd8 1971struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 1972{
3373efd8 1973 struct ata_port *ap = adev->ap;
ebdfca6e 1974 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 1975 if (!ata_dev_enabled(pair))
ebdfca6e
AC
1976 return NULL;
1977 return pair;
1978}
1979
1da177e4 1980/**
780a87f7
JG
1981 * ata_port_disable - Disable port.
1982 * @ap: Port to be disabled.
1da177e4 1983 *
780a87f7
JG
1984 * Modify @ap data structure such that the system
1985 * thinks that the entire port is disabled, and should
1986 * never attempt to probe or communicate with devices
1987 * on this port.
1988 *
cca3974e 1989 * LOCKING: host lock, or some other form of
780a87f7 1990 * serialization.
1da177e4
LT
1991 */
1992
1993void ata_port_disable(struct ata_port *ap)
1994{
1995 ap->device[0].class = ATA_DEV_NONE;
1996 ap->device[1].class = ATA_DEV_NONE;
198e0fed 1997 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
1998}
1999
1c3fae4d 2000/**
3c567b7d 2001 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
2002 * @ap: Port to adjust SATA spd limit for
2003 *
2004 * Adjust SATA spd limit of @ap downward. Note that this
2005 * function only adjusts the limit. The change must be applied
3c567b7d 2006 * using sata_set_spd().
1c3fae4d
TH
2007 *
2008 * LOCKING:
2009 * Inherited from caller.
2010 *
2011 * RETURNS:
2012 * 0 on success, negative errno on failure
2013 */
3c567b7d 2014int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 2015{
81952c54
TH
2016 u32 sstatus, spd, mask;
2017 int rc, highbit;
1c3fae4d 2018
81952c54
TH
2019 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2020 if (rc)
2021 return rc;
1c3fae4d
TH
2022
2023 mask = ap->sata_spd_limit;
2024 if (mask <= 1)
2025 return -EINVAL;
2026 highbit = fls(mask) - 1;
2027 mask &= ~(1 << highbit);
2028
81952c54 2029 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
2030 if (spd <= 1)
2031 return -EINVAL;
2032 spd--;
2033 mask &= (1 << spd) - 1;
2034 if (!mask)
2035 return -EINVAL;
2036
2037 ap->sata_spd_limit = mask;
2038
f15a1daf
TH
2039 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2040 sata_spd_string(fls(mask)));
1c3fae4d
TH
2041
2042 return 0;
2043}
2044
3c567b7d 2045static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2046{
2047 u32 spd, limit;
2048
2049 if (ap->sata_spd_limit == UINT_MAX)
2050 limit = 0;
2051 else
2052 limit = fls(ap->sata_spd_limit);
2053
2054 spd = (*scontrol >> 4) & 0xf;
2055 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2056
2057 return spd != limit;
2058}
2059
2060/**
3c567b7d 2061 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2062 * @ap: Port in question
2063 *
2064 * Test whether the spd limit in SControl matches
2065 * @ap->sata_spd_limit. This function is used to determine
2066 * whether hardreset is necessary to apply SATA spd
2067 * configuration.
2068 *
2069 * LOCKING:
2070 * Inherited from caller.
2071 *
2072 * RETURNS:
2073 * 1 if SATA spd configuration is needed, 0 otherwise.
2074 */
3c567b7d 2075int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2076{
2077 u32 scontrol;
2078
81952c54 2079 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2080 return 0;
2081
3c567b7d 2082 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2083}
2084
2085/**
3c567b7d 2086 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2087 * @ap: Port to set SATA spd for
2088 *
2089 * Set SATA spd of @ap according to sata_spd_limit.
2090 *
2091 * LOCKING:
2092 * Inherited from caller.
2093 *
2094 * RETURNS:
2095 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2096 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2097 */
3c567b7d 2098int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2099{
2100 u32 scontrol;
81952c54 2101 int rc;
1c3fae4d 2102
81952c54
TH
2103 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2104 return rc;
1c3fae4d 2105
3c567b7d 2106 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2107 return 0;
2108
81952c54
TH
2109 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2110 return rc;
2111
1c3fae4d
TH
2112 return 1;
2113}
2114
452503f9
AC
2115/*
2116 * This mode timing computation functionality is ported over from
2117 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2118 */
2119/*
b352e57d 2120 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2121 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2122 * for UDMA6, which is currently supported only by Maxtor drives.
2123 *
2124 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2125 */
2126
2127static const struct ata_timing ata_timing[] = {
2128
2129 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2130 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2131 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2132 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2133
b352e57d
AC
2134 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2135 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2136 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2137 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2138 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2139
2140/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2141
452503f9
AC
2142 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2143 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2144 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2145
452503f9
AC
2146 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2147 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2148 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2149
b352e57d
AC
2150 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2151 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2152 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2153 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2154
2155 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2156 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2157 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2158
2159/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2160
2161 { 0xFF }
2162};
2163
2164#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2165#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2166
2167static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2168{
2169 q->setup = EZ(t->setup * 1000, T);
2170 q->act8b = EZ(t->act8b * 1000, T);
2171 q->rec8b = EZ(t->rec8b * 1000, T);
2172 q->cyc8b = EZ(t->cyc8b * 1000, T);
2173 q->active = EZ(t->active * 1000, T);
2174 q->recover = EZ(t->recover * 1000, T);
2175 q->cycle = EZ(t->cycle * 1000, T);
2176 q->udma = EZ(t->udma * 1000, UT);
2177}
2178
2179void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2180 struct ata_timing *m, unsigned int what)
2181{
2182 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2183 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2184 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2185 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2186 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2187 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2188 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2189 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2190}
2191
2192static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2193{
2194 const struct ata_timing *t;
2195
2196 for (t = ata_timing; t->mode != speed; t++)
91190758 2197 if (t->mode == 0xFF)
452503f9 2198 return NULL;
2e9edbf8 2199 return t;
452503f9
AC
2200}
2201
2202int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2203 struct ata_timing *t, int T, int UT)
2204{
2205 const struct ata_timing *s;
2206 struct ata_timing p;
2207
2208 /*
2e9edbf8 2209 * Find the mode.
75b1f2f8 2210 */
452503f9
AC
2211
2212 if (!(s = ata_timing_find_mode(speed)))
2213 return -EINVAL;
2214
75b1f2f8
AL
2215 memcpy(t, s, sizeof(*s));
2216
452503f9
AC
2217 /*
2218 * If the drive is an EIDE drive, it can tell us it needs extended
2219 * PIO/MW_DMA cycle timing.
2220 */
2221
2222 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2223 memset(&p, 0, sizeof(p));
2224 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2225 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2226 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2227 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2228 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2229 }
2230 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2231 }
2232
2233 /*
2234 * Convert the timing to bus clock counts.
2235 */
2236
75b1f2f8 2237 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2238
2239 /*
c893a3ae
RD
2240 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2241 * S.M.A.R.T * and some other commands. We have to ensure that the
2242 * DMA cycle timing is slower/equal than the fastest PIO timing.
452503f9
AC
2243 */
2244
fd3367af 2245 if (speed > XFER_PIO_6) {
452503f9
AC
2246 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2247 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2248 }
2249
2250 /*
c893a3ae 2251 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2252 */
2253
2254 if (t->act8b + t->rec8b < t->cyc8b) {
2255 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2256 t->rec8b = t->cyc8b - t->act8b;
2257 }
2258
2259 if (t->active + t->recover < t->cycle) {
2260 t->active += (t->cycle - (t->active + t->recover)) / 2;
2261 t->recover = t->cycle - t->active;
2262 }
2263
2264 return 0;
2265}
2266
cf176e1a
TH
2267/**
2268 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a
TH
2269 * @dev: Device to adjust xfer masks
2270 * @force_pio0: Force PIO0
2271 *
2272 * Adjust xfer masks of @dev downward. Note that this function
2273 * does not apply the change. Invoking ata_set_mode() afterwards
2274 * will apply the limit.
2275 *
2276 * LOCKING:
2277 * Inherited from caller.
2278 *
2279 * RETURNS:
2280 * 0 on success, negative errno on failure
2281 */
3373efd8 2282int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
cf176e1a
TH
2283{
2284 unsigned long xfer_mask;
2285 int highbit;
2286
2287 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2288 dev->udma_mask);
2289
2290 if (!xfer_mask)
2291 goto fail;
2292 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2293 if (xfer_mask & ATA_MASK_UDMA)
2294 xfer_mask &= ~ATA_MASK_MWDMA;
2295
2296 highbit = fls(xfer_mask) - 1;
2297 xfer_mask &= ~(1 << highbit);
2298 if (force_pio0)
2299 xfer_mask &= 1 << ATA_SHIFT_PIO;
2300 if (!xfer_mask)
2301 goto fail;
2302
2303 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2304 &dev->udma_mask);
2305
f15a1daf
TH
2306 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2307 ata_mode_string(xfer_mask));
cf176e1a
TH
2308
2309 return 0;
2310
2311 fail:
2312 return -EINVAL;
2313}
2314
3373efd8 2315static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2316{
baa1e78a 2317 struct ata_eh_context *ehc = &dev->ap->eh_context;
83206a29
TH
2318 unsigned int err_mask;
2319 int rc;
1da177e4 2320
e8384607 2321 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2322 if (dev->xfer_shift == ATA_SHIFT_PIO)
2323 dev->flags |= ATA_DFLAG_PIO;
2324
3373efd8 2325 err_mask = ata_dev_set_xfermode(dev);
83206a29 2326 if (err_mask) {
f15a1daf
TH
2327 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2328 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2329 return -EIO;
2330 }
1da177e4 2331
baa1e78a 2332 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3373efd8 2333 rc = ata_dev_revalidate(dev, 0);
baa1e78a 2334 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2335 if (rc)
83206a29 2336 return rc;
48a8a14f 2337
23e71c3d
TH
2338 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2339 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2340
f15a1daf
TH
2341 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2342 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2343 return 0;
1da177e4
LT
2344}
2345
1da177e4
LT
2346/**
2347 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2348 * @ap: port on which timings will be programmed
e82cbdb9 2349 * @r_failed_dev: out paramter for failed device
1da177e4 2350 *
e82cbdb9
TH
2351 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2352 * ata_set_mode() fails, pointer to the failing device is
2353 * returned in @r_failed_dev.
780a87f7 2354 *
1da177e4 2355 * LOCKING:
0cba632b 2356 * PCI/etc. bus probe sem.
e82cbdb9
TH
2357 *
2358 * RETURNS:
2359 * 0 on success, negative errno otherwise
1da177e4 2360 */
1ad8e7f9 2361int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1da177e4 2362{
e8e0619f 2363 struct ata_device *dev;
e82cbdb9 2364 int i, rc = 0, used_dma = 0, found = 0;
1da177e4 2365
3adcebb2 2366 /* has private set_mode? */
b229a7b0
A
2367 if (ap->ops->set_mode)
2368 return ap->ops->set_mode(ap, r_failed_dev);
3adcebb2 2369
a6d5a51c
TH
2370 /* step 1: calculate xfer_mask */
2371 for (i = 0; i < ATA_MAX_DEVICES; i++) {
acf356b1 2372 unsigned int pio_mask, dma_mask;
a6d5a51c 2373
e8e0619f
TH
2374 dev = &ap->device[i];
2375
e1211e3f 2376 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2377 continue;
2378
3373efd8 2379 ata_dev_xfermask(dev);
1da177e4 2380
acf356b1
TH
2381 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2382 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2383 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2384 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2385
4f65977d 2386 found = 1;
5444a6f4
AC
2387 if (dev->dma_mode)
2388 used_dma = 1;
a6d5a51c 2389 }
4f65977d 2390 if (!found)
e82cbdb9 2391 goto out;
a6d5a51c
TH
2392
2393 /* step 2: always set host PIO timings */
e8e0619f
TH
2394 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2395 dev = &ap->device[i];
2396 if (!ata_dev_enabled(dev))
2397 continue;
2398
2399 if (!dev->pio_mode) {
f15a1daf 2400 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2401 rc = -EINVAL;
e82cbdb9 2402 goto out;
e8e0619f
TH
2403 }
2404
2405 dev->xfer_mode = dev->pio_mode;
2406 dev->xfer_shift = ATA_SHIFT_PIO;
2407 if (ap->ops->set_piomode)
2408 ap->ops->set_piomode(ap, dev);
2409 }
1da177e4 2410
a6d5a51c 2411 /* step 3: set host DMA timings */
e8e0619f
TH
2412 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2413 dev = &ap->device[i];
2414
2415 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2416 continue;
2417
2418 dev->xfer_mode = dev->dma_mode;
2419 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2420 if (ap->ops->set_dmamode)
2421 ap->ops->set_dmamode(ap, dev);
2422 }
1da177e4
LT
2423
2424 /* step 4: update devices' xfer mode */
83206a29 2425 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e8e0619f 2426 dev = &ap->device[i];
1da177e4 2427
18d90deb 2428 /* don't update suspended devices' xfer mode */
02670bf3 2429 if (!ata_dev_ready(dev))
83206a29
TH
2430 continue;
2431
3373efd8 2432 rc = ata_dev_set_mode(dev);
5bbc53f4 2433 if (rc)
e82cbdb9 2434 goto out;
83206a29 2435 }
1da177e4 2436
e8e0619f
TH
2437 /* Record simplex status. If we selected DMA then the other
2438 * host channels are not permitted to do so.
5444a6f4 2439 */
cca3974e
JG
2440 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2441 ap->host->simplex_claimed = 1;
5444a6f4 2442
e8e0619f 2443 /* step5: chip specific finalisation */
1da177e4
LT
2444 if (ap->ops->post_set_mode)
2445 ap->ops->post_set_mode(ap);
2446
e82cbdb9
TH
2447 out:
2448 if (rc)
2449 *r_failed_dev = dev;
2450 return rc;
1da177e4
LT
2451}
2452
1fdffbce
JG
2453/**
2454 * ata_tf_to_host - issue ATA taskfile to host controller
2455 * @ap: port to which command is being issued
2456 * @tf: ATA taskfile register set
2457 *
2458 * Issues ATA taskfile register set to ATA host controller,
2459 * with proper synchronization with interrupt handler and
2460 * other threads.
2461 *
2462 * LOCKING:
cca3974e 2463 * spin_lock_irqsave(host lock)
1fdffbce
JG
2464 */
2465
2466static inline void ata_tf_to_host(struct ata_port *ap,
2467 const struct ata_taskfile *tf)
2468{
2469 ap->ops->tf_load(ap, tf);
2470 ap->ops->exec_command(ap, tf);
2471}
2472
1da177e4
LT
2473/**
2474 * ata_busy_sleep - sleep until BSY clears, or timeout
2475 * @ap: port containing status register to be polled
2476 * @tmout_pat: impatience timeout
2477 * @tmout: overall timeout
2478 *
780a87f7
JG
2479 * Sleep until ATA Status register bit BSY clears,
2480 * or a timeout occurs.
2481 *
d1adc1bb
TH
2482 * LOCKING:
2483 * Kernel thread context (may sleep).
2484 *
2485 * RETURNS:
2486 * 0 on success, -errno otherwise.
1da177e4 2487 */
d1adc1bb
TH
2488int ata_busy_sleep(struct ata_port *ap,
2489 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
2490{
2491 unsigned long timer_start, timeout;
2492 u8 status;
2493
2494 status = ata_busy_wait(ap, ATA_BUSY, 300);
2495 timer_start = jiffies;
2496 timeout = timer_start + tmout_pat;
d1adc1bb
TH
2497 while (status != 0xff && (status & ATA_BUSY) &&
2498 time_before(jiffies, timeout)) {
1da177e4
LT
2499 msleep(50);
2500 status = ata_busy_wait(ap, ATA_BUSY, 3);
2501 }
2502
d1adc1bb 2503 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 2504 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
2505 "port is slow to respond, please be patient "
2506 "(Status 0x%x)\n", status);
1da177e4
LT
2507
2508 timeout = timer_start + tmout;
d1adc1bb
TH
2509 while (status != 0xff && (status & ATA_BUSY) &&
2510 time_before(jiffies, timeout)) {
1da177e4
LT
2511 msleep(50);
2512 status = ata_chk_status(ap);
2513 }
2514
d1adc1bb
TH
2515 if (status == 0xff)
2516 return -ENODEV;
2517
1da177e4 2518 if (status & ATA_BUSY) {
f15a1daf 2519 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
2520 "(%lu secs, Status 0x%x)\n",
2521 tmout / HZ, status);
d1adc1bb 2522 return -EBUSY;
1da177e4
LT
2523 }
2524
2525 return 0;
2526}
2527
2528static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2529{
2530 struct ata_ioports *ioaddr = &ap->ioaddr;
2531 unsigned int dev0 = devmask & (1 << 0);
2532 unsigned int dev1 = devmask & (1 << 1);
2533 unsigned long timeout;
2534
2535 /* if device 0 was found in ata_devchk, wait for its
2536 * BSY bit to clear
2537 */
2538 if (dev0)
2539 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2540
2541 /* if device 1 was found in ata_devchk, wait for
2542 * register access, then wait for BSY to clear
2543 */
2544 timeout = jiffies + ATA_TMOUT_BOOT;
2545 while (dev1) {
2546 u8 nsect, lbal;
2547
2548 ap->ops->dev_select(ap, 1);
0d5ff566
TH
2549 nsect = ioread8(ioaddr->nsect_addr);
2550 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
2551 if ((nsect == 1) && (lbal == 1))
2552 break;
2553 if (time_after(jiffies, timeout)) {
2554 dev1 = 0;
2555 break;
2556 }
2557 msleep(50); /* give drive a breather */
2558 }
2559 if (dev1)
2560 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2561
2562 /* is all this really necessary? */
2563 ap->ops->dev_select(ap, 0);
2564 if (dev1)
2565 ap->ops->dev_select(ap, 1);
2566 if (dev0)
2567 ap->ops->dev_select(ap, 0);
2568}
2569
1da177e4
LT
2570static unsigned int ata_bus_softreset(struct ata_port *ap,
2571 unsigned int devmask)
2572{
2573 struct ata_ioports *ioaddr = &ap->ioaddr;
2574
2575 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2576
2577 /* software reset. causes dev0 to be selected */
0d5ff566
TH
2578 iowrite8(ap->ctl, ioaddr->ctl_addr);
2579 udelay(20); /* FIXME: flush */
2580 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2581 udelay(20); /* FIXME: flush */
2582 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
2583
2584 /* spec mandates ">= 2ms" before checking status.
2585 * We wait 150ms, because that was the magic delay used for
2586 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2587 * between when the ATA command register is written, and then
2588 * status is checked. Because waiting for "a while" before
2589 * checking status is fine, post SRST, we perform this magic
2590 * delay here as well.
09c7ad79
AC
2591 *
2592 * Old drivers/ide uses the 2mS rule and then waits for ready
1da177e4
LT
2593 */
2594 msleep(150);
2595
2e9edbf8 2596 /* Before we perform post reset processing we want to see if
298a41ca
TH
2597 * the bus shows 0xFF because the odd clown forgets the D7
2598 * pulldown resistor.
2599 */
d1adc1bb
TH
2600 if (ata_check_status(ap) == 0xFF)
2601 return 0;
09c7ad79 2602
1da177e4
LT
2603 ata_bus_post_reset(ap, devmask);
2604
2605 return 0;
2606}
2607
2608/**
2609 * ata_bus_reset - reset host port and associated ATA channel
2610 * @ap: port to reset
2611 *
2612 * This is typically the first time we actually start issuing
2613 * commands to the ATA channel. We wait for BSY to clear, then
2614 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2615 * result. Determine what devices, if any, are on the channel
2616 * by looking at the device 0/1 error register. Look at the signature
2617 * stored in each device's taskfile registers, to determine if
2618 * the device is ATA or ATAPI.
2619 *
2620 * LOCKING:
0cba632b 2621 * PCI/etc. bus probe sem.
cca3974e 2622 * Obtains host lock.
1da177e4
LT
2623 *
2624 * SIDE EFFECTS:
198e0fed 2625 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
2626 */
2627
2628void ata_bus_reset(struct ata_port *ap)
2629{
2630 struct ata_ioports *ioaddr = &ap->ioaddr;
2631 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2632 u8 err;
aec5c3c1 2633 unsigned int dev0, dev1 = 0, devmask = 0;
1da177e4
LT
2634
2635 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2636
2637 /* determine if device 0/1 are present */
2638 if (ap->flags & ATA_FLAG_SATA_RESET)
2639 dev0 = 1;
2640 else {
2641 dev0 = ata_devchk(ap, 0);
2642 if (slave_possible)
2643 dev1 = ata_devchk(ap, 1);
2644 }
2645
2646 if (dev0)
2647 devmask |= (1 << 0);
2648 if (dev1)
2649 devmask |= (1 << 1);
2650
2651 /* select device 0 again */
2652 ap->ops->dev_select(ap, 0);
2653
2654 /* issue bus reset */
2655 if (ap->flags & ATA_FLAG_SRST)
aec5c3c1
TH
2656 if (ata_bus_softreset(ap, devmask))
2657 goto err_out;
1da177e4
LT
2658
2659 /*
2660 * determine by signature whether we have ATA or ATAPI devices
2661 */
b4dc7623 2662 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
1da177e4 2663 if ((slave_possible) && (err != 0x81))
b4dc7623 2664 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
1da177e4
LT
2665
2666 /* re-enable interrupts */
2667 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2668 ata_irq_on(ap);
2669
2670 /* is double-select really necessary? */
2671 if (ap->device[1].class != ATA_DEV_NONE)
2672 ap->ops->dev_select(ap, 1);
2673 if (ap->device[0].class != ATA_DEV_NONE)
2674 ap->ops->dev_select(ap, 0);
2675
2676 /* if no devices were detected, disable this port */
2677 if ((ap->device[0].class == ATA_DEV_NONE) &&
2678 (ap->device[1].class == ATA_DEV_NONE))
2679 goto err_out;
2680
2681 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2682 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 2683 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
2684 }
2685
2686 DPRINTK("EXIT\n");
2687 return;
2688
2689err_out:
f15a1daf 2690 ata_port_printk(ap, KERN_ERR, "disabling port\n");
1da177e4
LT
2691 ap->ops->port_disable(ap);
2692
2693 DPRINTK("EXIT\n");
2694}
2695
d7bb4cc7
TH
2696/**
2697 * sata_phy_debounce - debounce SATA phy status
2698 * @ap: ATA port to debounce SATA phy status for
2699 * @params: timing parameters { interval, duratinon, timeout } in msec
2700 *
2701 * Make sure SStatus of @ap reaches stable state, determined by
2702 * holding the same value where DET is not 1 for @duration polled
2703 * every @interval, before @timeout. Timeout constraints the
2704 * beginning of the stable state. Because, after hot unplugging,
2705 * DET gets stuck at 1 on some controllers, this functions waits
2706 * until timeout then returns 0 if DET is stable at 1.
2707 *
2708 * LOCKING:
2709 * Kernel thread context (may sleep)
2710 *
2711 * RETURNS:
2712 * 0 on success, -errno on failure.
2713 */
2714int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
7a7921e8 2715{
d7bb4cc7
TH
2716 unsigned long interval_msec = params[0];
2717 unsigned long duration = params[1] * HZ / 1000;
2718 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2719 unsigned long last_jiffies;
2720 u32 last, cur;
2721 int rc;
2722
2723 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2724 return rc;
2725 cur &= 0xf;
2726
2727 last = cur;
2728 last_jiffies = jiffies;
2729
2730 while (1) {
2731 msleep(interval_msec);
2732 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2733 return rc;
2734 cur &= 0xf;
2735
2736 /* DET stable? */
2737 if (cur == last) {
2738 if (cur == 1 && time_before(jiffies, timeout))
2739 continue;
2740 if (time_after(jiffies, last_jiffies + duration))
2741 return 0;
2742 continue;
2743 }
2744
2745 /* unstable, start over */
2746 last = cur;
2747 last_jiffies = jiffies;
2748
2749 /* check timeout */
2750 if (time_after(jiffies, timeout))
2751 return -EBUSY;
2752 }
2753}
2754
2755/**
2756 * sata_phy_resume - resume SATA phy
2757 * @ap: ATA port to resume SATA phy for
2758 * @params: timing parameters { interval, duratinon, timeout } in msec
2759 *
2760 * Resume SATA phy of @ap and debounce it.
2761 *
2762 * LOCKING:
2763 * Kernel thread context (may sleep)
2764 *
2765 * RETURNS:
2766 * 0 on success, -errno on failure.
2767 */
2768int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2769{
2770 u32 scontrol;
81952c54
TH
2771 int rc;
2772
2773 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2774 return rc;
7a7921e8 2775
852ee16a 2776 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
2777
2778 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2779 return rc;
7a7921e8 2780
d7bb4cc7
TH
2781 /* Some PHYs react badly if SStatus is pounded immediately
2782 * after resuming. Delay 200ms before debouncing.
2783 */
2784 msleep(200);
7a7921e8 2785
d7bb4cc7 2786 return sata_phy_debounce(ap, params);
7a7921e8
TH
2787}
2788
f5914a46
TH
2789static void ata_wait_spinup(struct ata_port *ap)
2790{
2791 struct ata_eh_context *ehc = &ap->eh_context;
2792 unsigned long end, secs;
2793 int rc;
2794
2795 /* first, debounce phy if SATA */
2796 if (ap->cbl == ATA_CBL_SATA) {
e9c83914 2797 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
f5914a46
TH
2798
2799 /* if debounced successfully and offline, no need to wait */
2800 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2801 return;
2802 }
2803
2804 /* okay, let's give the drive time to spin up */
2805 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2806 secs = ((end - jiffies) + HZ - 1) / HZ;
2807
2808 if (time_after(jiffies, end))
2809 return;
2810
2811 if (secs > 5)
2812 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2813 "(%lu secs)\n", secs);
2814
2815 schedule_timeout_uninterruptible(end - jiffies);
2816}
2817
2818/**
2819 * ata_std_prereset - prepare for reset
2820 * @ap: ATA port to be reset
2821 *
2822 * @ap is about to be reset. Initialize it.
2823 *
2824 * LOCKING:
2825 * Kernel thread context (may sleep)
2826 *
2827 * RETURNS:
2828 * 0 on success, -errno otherwise.
2829 */
2830int ata_std_prereset(struct ata_port *ap)
2831{
2832 struct ata_eh_context *ehc = &ap->eh_context;
e9c83914 2833 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
2834 int rc;
2835
28324304
TH
2836 /* handle link resume & hotplug spinup */
2837 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2838 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2839 ehc->i.action |= ATA_EH_HARDRESET;
2840
2841 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2842 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2843 ata_wait_spinup(ap);
f5914a46
TH
2844
2845 /* if we're about to do hardreset, nothing more to do */
2846 if (ehc->i.action & ATA_EH_HARDRESET)
2847 return 0;
2848
2849 /* if SATA, resume phy */
2850 if (ap->cbl == ATA_CBL_SATA) {
f5914a46
TH
2851 rc = sata_phy_resume(ap, timing);
2852 if (rc && rc != -EOPNOTSUPP) {
2853 /* phy resume failed */
2854 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2855 "link for reset (errno=%d)\n", rc);
2856 return rc;
2857 }
2858 }
2859
2860 /* Wait for !BSY if the controller can wait for the first D2H
2861 * Reg FIS and we don't know that no device is attached.
2862 */
2863 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2864 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2865
2866 return 0;
2867}
2868
c2bd5804
TH
2869/**
2870 * ata_std_softreset - reset host port via ATA SRST
2871 * @ap: port to reset
c2bd5804
TH
2872 * @classes: resulting classes of attached devices
2873 *
52783c5d 2874 * Reset host port using ATA SRST.
c2bd5804
TH
2875 *
2876 * LOCKING:
2877 * Kernel thread context (may sleep)
2878 *
2879 * RETURNS:
2880 * 0 on success, -errno otherwise.
2881 */
2bf2cb26 2882int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
c2bd5804
TH
2883{
2884 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2885 unsigned int devmask = 0, err_mask;
2886 u8 err;
2887
2888 DPRINTK("ENTER\n");
2889
81952c54 2890 if (ata_port_offline(ap)) {
3a39746a
TH
2891 classes[0] = ATA_DEV_NONE;
2892 goto out;
2893 }
2894
c2bd5804
TH
2895 /* determine if device 0/1 are present */
2896 if (ata_devchk(ap, 0))
2897 devmask |= (1 << 0);
2898 if (slave_possible && ata_devchk(ap, 1))
2899 devmask |= (1 << 1);
2900
c2bd5804
TH
2901 /* select device 0 again */
2902 ap->ops->dev_select(ap, 0);
2903
2904 /* issue bus reset */
2905 DPRINTK("about to softreset, devmask=%x\n", devmask);
2906 err_mask = ata_bus_softreset(ap, devmask);
2907 if (err_mask) {
f15a1daf
TH
2908 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2909 err_mask);
c2bd5804
TH
2910 return -EIO;
2911 }
2912
2913 /* determine by signature whether we have ATA or ATAPI devices */
2914 classes[0] = ata_dev_try_classify(ap, 0, &err);
2915 if (slave_possible && err != 0x81)
2916 classes[1] = ata_dev_try_classify(ap, 1, &err);
2917
3a39746a 2918 out:
c2bd5804
TH
2919 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2920 return 0;
2921}
2922
2923/**
b6103f6d 2924 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 2925 * @ap: port to reset
b6103f6d 2926 * @timing: timing parameters { interval, duratinon, timeout } in msec
c2bd5804
TH
2927 *
2928 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
2929 *
2930 * LOCKING:
2931 * Kernel thread context (may sleep)
2932 *
2933 * RETURNS:
2934 * 0 on success, -errno otherwise.
2935 */
b6103f6d 2936int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
c2bd5804 2937{
852ee16a 2938 u32 scontrol;
81952c54 2939 int rc;
852ee16a 2940
c2bd5804
TH
2941 DPRINTK("ENTER\n");
2942
3c567b7d 2943 if (sata_set_spd_needed(ap)) {
1c3fae4d
TH
2944 /* SATA spec says nothing about how to reconfigure
2945 * spd. To be on the safe side, turn off phy during
2946 * reconfiguration. This works for at least ICH7 AHCI
2947 * and Sil3124.
2948 */
81952c54 2949 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 2950 goto out;
81952c54 2951
a34b6fc0 2952 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54
TH
2953
2954 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
b6103f6d 2955 goto out;
1c3fae4d 2956
3c567b7d 2957 sata_set_spd(ap);
1c3fae4d
TH
2958 }
2959
2960 /* issue phy wake/reset */
81952c54 2961 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 2962 goto out;
81952c54 2963
852ee16a 2964 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54
TH
2965
2966 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
b6103f6d 2967 goto out;
c2bd5804 2968
1c3fae4d 2969 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
2970 * 10.4.2 says at least 1 ms.
2971 */
2972 msleep(1);
2973
1c3fae4d 2974 /* bring phy back */
b6103f6d
TH
2975 rc = sata_phy_resume(ap, timing);
2976 out:
2977 DPRINTK("EXIT, rc=%d\n", rc);
2978 return rc;
2979}
2980
2981/**
2982 * sata_std_hardreset - reset host port via SATA phy reset
2983 * @ap: port to reset
2984 * @class: resulting class of attached device
2985 *
2986 * SATA phy-reset host port using DET bits of SControl register,
2987 * wait for !BSY and classify the attached device.
2988 *
2989 * LOCKING:
2990 * Kernel thread context (may sleep)
2991 *
2992 * RETURNS:
2993 * 0 on success, -errno otherwise.
2994 */
2995int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2996{
2997 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
2998 int rc;
2999
3000 DPRINTK("ENTER\n");
3001
3002 /* do hardreset */
3003 rc = sata_port_hardreset(ap, timing);
3004 if (rc) {
3005 ata_port_printk(ap, KERN_ERR,
3006 "COMRESET failed (errno=%d)\n", rc);
3007 return rc;
3008 }
c2bd5804 3009
c2bd5804 3010 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 3011 if (ata_port_offline(ap)) {
c2bd5804
TH
3012 *class = ATA_DEV_NONE;
3013 DPRINTK("EXIT, link offline\n");
3014 return 0;
3015 }
3016
3017 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
f15a1daf
TH
3018 ata_port_printk(ap, KERN_ERR,
3019 "COMRESET failed (device not ready)\n");
c2bd5804
TH
3020 return -EIO;
3021 }
3022
3a39746a
TH
3023 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3024
c2bd5804
TH
3025 *class = ata_dev_try_classify(ap, 0, NULL);
3026
3027 DPRINTK("EXIT, class=%u\n", *class);
3028 return 0;
3029}
3030
3031/**
3032 * ata_std_postreset - standard postreset callback
3033 * @ap: the target ata_port
3034 * @classes: classes of attached devices
3035 *
3036 * This function is invoked after a successful reset. Note that
3037 * the device might have been reset more than once using
3038 * different reset methods before postreset is invoked.
c2bd5804 3039 *
c2bd5804
TH
3040 * LOCKING:
3041 * Kernel thread context (may sleep)
3042 */
3043void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3044{
dc2b3515
TH
3045 u32 serror;
3046
c2bd5804
TH
3047 DPRINTK("ENTER\n");
3048
c2bd5804 3049 /* print link status */
81952c54 3050 sata_print_link_status(ap);
c2bd5804 3051
dc2b3515
TH
3052 /* clear SError */
3053 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3054 sata_scr_write(ap, SCR_ERROR, serror);
3055
3a39746a 3056 /* re-enable interrupts */
e3180499
TH
3057 if (!ap->ops->error_handler) {
3058 /* FIXME: hack. create a hook instead */
3059 if (ap->ioaddr.ctl_addr)
3060 ata_irq_on(ap);
3061 }
c2bd5804
TH
3062
3063 /* is double-select really necessary? */
3064 if (classes[0] != ATA_DEV_NONE)
3065 ap->ops->dev_select(ap, 1);
3066 if (classes[1] != ATA_DEV_NONE)
3067 ap->ops->dev_select(ap, 0);
3068
3a39746a
TH
3069 /* bail out if no device is present */
3070 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3071 DPRINTK("EXIT, no device\n");
3072 return;
3073 }
3074
3075 /* set up device control */
0d5ff566
TH
3076 if (ap->ioaddr.ctl_addr)
3077 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3078
3079 DPRINTK("EXIT\n");
3080}
3081
623a3128
TH
3082/**
3083 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3084 * @dev: device to compare against
3085 * @new_class: class of the new device
3086 * @new_id: IDENTIFY page of the new device
3087 *
3088 * Compare @new_class and @new_id against @dev and determine
3089 * whether @dev is the device indicated by @new_class and
3090 * @new_id.
3091 *
3092 * LOCKING:
3093 * None.
3094 *
3095 * RETURNS:
3096 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3097 */
3373efd8
TH
3098static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3099 const u16 *new_id)
623a3128
TH
3100{
3101 const u16 *old_id = dev->id;
a0cf733b
TH
3102 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3103 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3104 u64 new_n_sectors;
3105
3106 if (dev->class != new_class) {
f15a1daf
TH
3107 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3108 dev->class, new_class);
623a3128
TH
3109 return 0;
3110 }
3111
a0cf733b
TH
3112 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3113 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3114 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3115 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3116 new_n_sectors = ata_id_n_sectors(new_id);
3117
3118 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3119 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3120 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3121 return 0;
3122 }
3123
3124 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3125 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3126 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3127 return 0;
3128 }
3129
3130 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3131 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3132 "%llu != %llu\n",
3133 (unsigned long long)dev->n_sectors,
3134 (unsigned long long)new_n_sectors);
623a3128
TH
3135 return 0;
3136 }
3137
3138 return 1;
3139}
3140
3141/**
3142 * ata_dev_revalidate - Revalidate ATA device
623a3128 3143 * @dev: device to revalidate
bff04647 3144 * @readid_flags: read ID flags
623a3128
TH
3145 *
3146 * Re-read IDENTIFY page and make sure @dev is still attached to
3147 * the port.
3148 *
3149 * LOCKING:
3150 * Kernel thread context (may sleep)
3151 *
3152 * RETURNS:
3153 * 0 on success, negative errno otherwise
3154 */
bff04647 3155int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
623a3128 3156{
5eb45c02 3157 unsigned int class = dev->class;
f15a1daf 3158 u16 *id = (void *)dev->ap->sector_buf;
623a3128
TH
3159 int rc;
3160
5eb45c02
TH
3161 if (!ata_dev_enabled(dev)) {
3162 rc = -ENODEV;
3163 goto fail;
3164 }
623a3128 3165
fe635c7e 3166 /* read ID data */
bff04647 3167 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128
TH
3168 if (rc)
3169 goto fail;
3170
3171 /* is the device still there? */
3373efd8 3172 if (!ata_dev_same_device(dev, class, id)) {
623a3128
TH
3173 rc = -ENODEV;
3174 goto fail;
3175 }
3176
fe635c7e 3177 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
623a3128
TH
3178
3179 /* configure device according to the new ID */
efdaedc4 3180 rc = ata_dev_configure(dev);
5eb45c02
TH
3181 if (rc == 0)
3182 return 0;
623a3128
TH
3183
3184 fail:
f15a1daf 3185 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3186 return rc;
3187}
3188
6919a0a6
AC
3189struct ata_blacklist_entry {
3190 const char *model_num;
3191 const char *model_rev;
3192 unsigned long horkage;
3193};
3194
3195static const struct ata_blacklist_entry ata_device_blacklist [] = {
3196 /* Devices with DMA related problems under Linux */
3197 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3198 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3199 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3200 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3201 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3202 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3203 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3204 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3205 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3206 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3207 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3208 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3209 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3210 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3211 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3212 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3213 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3214 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3215 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3216 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3217 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3218 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3219 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3220 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3221 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3222 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3223 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3224 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3225 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3226 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3227
3228 /* Devices we expect to fail diagnostics */
3229
3230 /* Devices where NCQ should be avoided */
3231 /* NCQ is slow */
3232 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3233
3234 /* Devices with NCQ limits */
3235
3236 /* End Marker */
3237 { }
1da177e4 3238};
2e9edbf8 3239
6919a0a6 3240unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3241{
8bfa79fc
TH
3242 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3243 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3244 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3245
8bfa79fc
TH
3246 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3247 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3248
6919a0a6 3249 while (ad->model_num) {
8bfa79fc 3250 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3251 if (ad->model_rev == NULL)
3252 return ad->horkage;
8bfa79fc 3253 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3254 return ad->horkage;
f4b15fef 3255 }
6919a0a6 3256 ad++;
f4b15fef 3257 }
1da177e4
LT
3258 return 0;
3259}
3260
6919a0a6
AC
3261static int ata_dma_blacklisted(const struct ata_device *dev)
3262{
3263 /* We don't support polling DMA.
3264 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3265 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3266 */
3267 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3268 (dev->flags & ATA_DFLAG_CDB_INTR))
3269 return 1;
3270 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3271}
3272
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 */
	if (ap->cbl == ATA_CBL_PATA40)
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);	/* 40-wire: cap at UDMA/33 */
	/* Apply drive side cable rule. Unknown or 80 pin cables reported
	 * host side are checked drive side as well. Cases where we know a
	 * 40wire cable is used safely for 80 are not checked here.
	 */
	if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);


	/* narrow by what the device itself reports in its ID page */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	/* blacklist check must come after the ID-based narrowing above */
	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* simplex hosts allow DMA on only one device at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	/* give the LLDD a final veto over the computed mask */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
3341
1da177e4
LT
3342/**
3343 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3344 * @dev: Device to which command will be sent
3345 *
780a87f7
JG
3346 * Issue SET FEATURES - XFER MODE command to device @dev
3347 * on port @ap.
3348 *
1da177e4 3349 * LOCKING:
0cba632b 3350 * PCI/etc. bus probe sem.
83206a29
TH
3351 *
3352 * RETURNS:
3353 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3354 */
3355
3373efd8 3356static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3357{
a0123703 3358 struct ata_taskfile tf;
83206a29 3359 unsigned int err_mask;
1da177e4
LT
3360
3361 /* set up set-features taskfile */
3362 DPRINTK("set features - xfer mode\n");
3363
3373efd8 3364 ata_tf_init(dev, &tf);
a0123703
TH
3365 tf.command = ATA_CMD_SET_FEATURES;
3366 tf.feature = SETFEATURES_XFER;
3367 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3368 tf.protocol = ATA_PROT_NODATA;
3369 tf.nsect = dev->xfer_mode;
1da177e4 3370
3373efd8 3371 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3372
83206a29
TH
3373 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3374 return err_mask;
1da177e4
LT
3375}
3376
8bf62ece
AL
3377/**
3378 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3379 * @dev: Device to which command will be sent
e2a7f77a
RD
3380 * @heads: Number of heads (taskfile parameter)
3381 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3382 *
3383 * LOCKING:
6aff8f1f
TH
3384 * Kernel thread context (may sleep)
3385 *
3386 * RETURNS:
3387 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3388 */
3373efd8
TH
3389static unsigned int ata_dev_init_params(struct ata_device *dev,
3390 u16 heads, u16 sectors)
8bf62ece 3391{
a0123703 3392 struct ata_taskfile tf;
6aff8f1f 3393 unsigned int err_mask;
8bf62ece
AL
3394
3395 /* Number of sectors per track 1-255. Number of heads 1-16 */
3396 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3397 return AC_ERR_INVALID;
8bf62ece
AL
3398
3399 /* set up init dev params taskfile */
3400 DPRINTK("init dev params \n");
3401
3373efd8 3402 ata_tf_init(dev, &tf);
a0123703
TH
3403 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3404 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3405 tf.protocol = ATA_PROT_NODATA;
3406 tf.nsect = sectors;
3407 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3408
3373efd8 3409 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3410
6aff8f1f
TH
3411 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3412 return err_mask;
8bf62ece
AL
3413}
3414
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		/* scatter-gather case: n_elem may be 0 if the whole
		 * transfer was absorbed by the pad buffer */
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			/* copy read data back from the pad area */
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		/* single-buffer case */
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
3472
3473/**
3474 * ata_fill_sg - Fill PCI IDE PRD table
3475 * @qc: Metadata associated with taskfile to be transferred
3476 *
780a87f7
JG
3477 * Fill PCI IDE PRD (scatter-gather) table with segments
3478 * associated with the current disk command.
3479 *
1da177e4 3480 * LOCKING:
cca3974e 3481 * spin_lock_irqsave(host lock)
1da177e4
LT
3482 *
3483 */
3484static void ata_fill_sg(struct ata_queued_cmd *qc)
3485{
1da177e4 3486 struct ata_port *ap = qc->ap;
cedc9a47
JG
3487 struct scatterlist *sg;
3488 unsigned int idx;
1da177e4 3489
a4631474 3490 WARN_ON(qc->__sg == NULL);
f131883e 3491 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
3492
3493 idx = 0;
cedc9a47 3494 ata_for_each_sg(sg, qc) {
1da177e4
LT
3495 u32 addr, offset;
3496 u32 sg_len, len;
3497
3498 /* determine if physical DMA addr spans 64K boundary.
3499 * Note h/w doesn't support 64-bit, so we unconditionally
3500 * truncate dma_addr_t to u32.
3501 */
3502 addr = (u32) sg_dma_address(sg);
3503 sg_len = sg_dma_len(sg);
3504
3505 while (sg_len) {
3506 offset = addr & 0xffff;
3507 len = sg_len;
3508 if ((offset + sg_len) > 0x10000)
3509 len = 0x10000 - offset;
3510
3511 ap->prd[idx].addr = cpu_to_le32(addr);
3512 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3513 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3514
3515 idx++;
3516 sg_len -= len;
3517 addr += len;
3518 }
3519 }
3520
3521 if (idx)
3522 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3523}
3524/**
3525 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3526 * @qc: Metadata associated with taskfile to check
3527 *
780a87f7
JG
3528 * Allow low-level driver to filter ATA PACKET commands, returning
3529 * a status indicating whether or not it is OK to use DMA for the
3530 * supplied PACKET command.
3531 *
1da177e4 3532 * LOCKING:
cca3974e 3533 * spin_lock_irqsave(host lock)
0cba632b 3534 *
1da177e4
LT
3535 * RETURNS: 0 when ATAPI DMA can be used
3536 * nonzero otherwise
3537 */
3538int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3539{
3540 struct ata_port *ap = qc->ap;
3541 int rc = 0; /* Assume ATAPI DMA is OK by default */
3542
3543 if (ap->ops->check_atapi_dma)
3544 rc = ap->ops->check_atapi_dma(qc);
3545
3546 return rc;
3547}
3548/**
3549 * ata_qc_prep - Prepare taskfile for submission
3550 * @qc: Metadata associated with taskfile to be prepared
3551 *
780a87f7
JG
3552 * Prepare ATA taskfile for submission.
3553 *
1da177e4 3554 * LOCKING:
cca3974e 3555 * spin_lock_irqsave(host lock)
1da177e4
LT
3556 */
3557void ata_qc_prep(struct ata_queued_cmd *qc)
3558{
3559 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3560 return;
3561
3562 ata_fill_sg(qc);
3563}
3564
/* No-op ->qc_prep for controllers that need no PRD/SG preparation. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3566
0cba632b
JG
3567/**
3568 * ata_sg_init_one - Associate command with memory buffer
3569 * @qc: Command to be associated
3570 * @buf: Memory buffer
3571 * @buflen: Length of memory buffer, in bytes.
3572 *
3573 * Initialize the data-related elements of queued_cmd @qc
3574 * to point to a single memory buffer, @buf of byte length @buflen.
3575 *
3576 * LOCKING:
cca3974e 3577 * spin_lock_irqsave(host lock)
0cba632b
JG
3578 */
3579
1da177e4
LT
3580void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3581{
1da177e4
LT
3582 qc->flags |= ATA_QCFLAG_SINGLE;
3583
cedc9a47 3584 qc->__sg = &qc->sgent;
1da177e4 3585 qc->n_elem = 1;
cedc9a47 3586 qc->orig_n_elem = 1;
1da177e4 3587 qc->buf_virt = buf;
233277ca 3588 qc->nbytes = buflen;
1da177e4 3589
61c0596c 3590 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
3591}
3592
0cba632b
JG
3593/**
3594 * ata_sg_init - Associate command with scatter-gather table.
3595 * @qc: Command to be associated
3596 * @sg: Scatter-gather table.
3597 * @n_elem: Number of elements in s/g table.
3598 *
3599 * Initialize the data-related elements of queued_cmd @qc
3600 * to point to a scatter-gather table @sg, containing @n_elem
3601 * elements.
3602 *
3603 * LOCKING:
cca3974e 3604 * spin_lock_irqsave(host lock)
0cba632b
JG
3605 */
3606
1da177e4
LT
3607void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3608 unsigned int n_elem)
3609{
3610 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3611 qc->__sg = sg;
1da177e4 3612 qc->n_elem = n_elem;
cedc9a47 3613 qc->orig_n_elem = n_elem;
1da177e4
LT
3614}
3615
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		/* each qc gets its own ATA_DMA_PAD_SZ slot in the
		 * port-wide pad buffer, indexed by tag */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* only ATAPI commands are expected to need padding */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, pre-fill the pad area with the trailing
		 * bytes of the caller's buffer */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		/* if the buffer was shorter than 4 bytes the pad sgent
		 * covers the whole transfer and the sg entry is empty */
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
3684
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the port-wide pad buffer */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI commands are expected to need padding */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		/* if the last entry was fully absorbed by the pad
		 * buffer, it must be excluded from the mapping */
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	/* nothing left to map - the pad sgent carries the transfer */
	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
3769
/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf: Buffer to swap
 *	@buf_words: Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	/* le16_to_cpu is its own inverse, so this converts in either
	 * direction; on little-endian builds the function is a no-op */
	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
3791
6ae4cfb5 3792/**
0d5ff566 3793 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 3794 * @adev: device to target
6ae4cfb5
AL
3795 * @buf: data buffer
3796 * @buflen: buffer length
344babaa 3797 * @write_data: read/write
6ae4cfb5
AL
3798 *
3799 * Transfer data from/to the device data register by PIO.
3800 *
3801 * LOCKING:
3802 * Inherited from caller.
6ae4cfb5 3803 */
0d5ff566
TH
3804void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
3805 unsigned int buflen, int write_data)
1da177e4 3806{
a6b2c5d4 3807 struct ata_port *ap = adev->ap;
6ae4cfb5 3808 unsigned int words = buflen >> 1;
1da177e4 3809
6ae4cfb5 3810 /* Transfer multiple of 2 bytes */
1da177e4 3811 if (write_data)
0d5ff566 3812 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 3813 else
0d5ff566 3814 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
3815
3816 /* Transfer trailing 1 byte, if any. */
3817 if (unlikely(buflen & 0x01)) {
3818 u16 align_buf[1] = { 0 };
3819 unsigned char *trailing_buf = buf + buflen - 1;
3820
3821 if (write_data) {
3822 memcpy(align_buf, trailing_buf, 1);
0d5ff566 3823 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 3824 } else {
0d5ff566 3825 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
3826 memcpy(trailing_buf, align_buf, 1);
3827 }
3828 }
1da177e4
LT
3829}
3830
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long irq_flags;

	/* wrap the plain PIO transfer with local interrupts masked */
	local_irq_save(irq_flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(irq_flags);
}
3852
3853
/**
 *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
 *	@qc: Command on going
 *
 *	Transfer ATA_SECT_SIZE of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* advance the HSM when this is the final sector */
	if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		/* highmem pages must be temporarily mapped; interrupts
		 * are disabled around the KM_IRQ0 atomic kmap */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
	}

	/* book-keeping: advance transfer position */
	qc->curbytes += ATA_SECT_SIZE;
	qc->cursg_ofs += ATA_SECT_SIZE;

	/* move on to the next sg entry when this one is exhausted */
	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
1da177e4 3910
07f6f7d0
AL
3911/**
3912 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3913 * @qc: Command on going
3914 *
c81e29b4 3915 * Transfer one or many ATA_SECT_SIZE of data from/to the
07f6f7d0
AL
3916 * ATA device for the DRQ request.
3917 *
3918 * LOCKING:
3919 * Inherited from caller.
3920 */
1da177e4 3921
07f6f7d0
AL
3922static void ata_pio_sectors(struct ata_queued_cmd *qc)
3923{
3924 if (is_multi_taskfile(&qc->tf)) {
3925 /* READ/WRITE MULTIPLE */
3926 unsigned int nsect;
3927
587005de 3928 WARN_ON(qc->dev->multi_count == 0);
1da177e4 3929
726f0785
TH
3930 nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
3931 qc->dev->multi_count);
07f6f7d0
AL
3932 while (nsect--)
3933 ata_pio_sector(qc);
3934 } else
3935 ata_pio_sector(qc);
3936}
3937
c71c1857
AL
3938/**
3939 * atapi_send_cdb - Write CDB bytes to hardware
3940 * @ap: Port to which ATAPI device is attached.
3941 * @qc: Taskfile currently active
3942 *
3943 * When device has indicated its readiness to accept
3944 * a CDB, this function is called. Send the CDB.
3945 *
3946 * LOCKING:
3947 * caller.
3948 */
3949
3950static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3951{
3952 /* send SCSI cdb */
3953 DPRINTK("send cdb\n");
db024d53 3954 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 3955
a6b2c5d4 3956 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
3957 ata_altstatus(ap); /* flush */
3958
3959 switch (qc->tf.protocol) {
3960 case ATA_PROT_ATAPI:
3961 ap->hsm_task_state = HSM_ST;
3962 break;
3963 case ATA_PROT_ATAPI_NODATA:
3964 ap->hsm_task_state = HSM_ST_LAST;
3965 break;
3966 case ATA_PROT_ATAPI_DMA:
3967 ap->hsm_task_state = HSM_ST_LAST;
3968 /* initiate bmdma */
3969 ap->ops->bmdma_start(qc);
3970 break;
3971 }
1da177e4
LT
3972}
3973
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* this DRQ completes the command - advance the HSM */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 *    - for read case, discard trailing data from the device
		 *    - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		/* clock the excess words through the scratch buffer */
		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		/* highmem: transfer under an atomic kmap with local
		 * interrupts disabled */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	/* book-keeping: advance transfer position */
	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	/* loop until the device's byte count is satisfied */
	if (bytes)
		goto next_sg;
}
4068
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* byte count the device wants for this DRQ, from lbam/lbah */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	/* protocol violation by the device - flag an HSM error */
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
4118
4119/**
c234fb00
AL
4120 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4121 * @ap: the target ata_port
4122 * @qc: qc on going
1da177e4 4123 *
c234fb00
AL
4124 * RETURNS:
4125 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4126 */
c234fb00
AL
4127
4128static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4129{
c234fb00
AL
4130 if (qc->tf.flags & ATA_TFLAG_POLLING)
4131 return 1;
1da177e4 4132
c234fb00
AL
4133 if (ap->hsm_task_state == HSM_ST_FIRST) {
4134 if (qc->tf.protocol == ATA_PROT_PIO &&
4135 (qc->tf.flags & ATA_TFLAG_WRITE))
4136 return 1;
1da177e4 4137
c234fb00
AL
4138 if (is_atapi_taskfile(&qc->tf) &&
4139 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4140 return 1;
fe79e683
AL
4141 }
4142
c234fb00
AL
4143 return 0;
4144}
1da177e4 4145
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		/* new-style EH */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.  Re-fetch the qc by tag; NULL means
			 * EH already claimed it.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ata_irq_on(ap);
					ata_qc_complete(qc);
				} else
					/* HSM violation — freeze the port
					 * and let EH recover it.
					 */
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-style EH: complete directly, re-enabling the IRQ
		 * first when running from the workqueue.
		 */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ata_irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}

	ata_altstatus(ap); /* flush */
}
4197
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Advance the PIO/ATAPI host state machine for @qc based on the
 *	device @status, completing or failing the command as needed.
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
			       ap->id, status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
			ata_altstatus(ap); /* flush */
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
				       ap->id, status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					ata_altstatus(ap);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				ata_altstatus(ap);
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		ata_altstatus(ap); /* flush */
		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
4434
/* ata_pio_task - workqueue entry that drives the PIO HSM for the
 * port's current command, polling the status register and snoozing
 * or re-queueing itself while the device is busy.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy — come back later instead of spinning */
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
4472
1da177e4
LT
4473/**
4474 * ata_qc_new - Request an available ATA command, for queueing
4475 * @ap: Port associated with device @dev
4476 * @dev: Device from whom we request an available command structure
4477 *
4478 * LOCKING:
0cba632b 4479 * None.
1da177e4
LT
4480 */
4481
4482static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4483{
4484 struct ata_queued_cmd *qc = NULL;
4485 unsigned int i;
4486
e3180499 4487 /* no command while frozen */
b51e9e5d 4488 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4489 return NULL;
4490
2ab7db1f
TH
4491 /* the last tag is reserved for internal command. */
4492 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4493 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4494 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4495 break;
4496 }
4497
4498 if (qc)
4499 qc->tag = i;
4500
4501 return qc;
4502}
4503
4504/**
4505 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4506 * @dev: Device from whom we request an available command structure
4507 *
4508 * LOCKING:
0cba632b 4509 * None.
1da177e4
LT
4510 */
4511
3373efd8 4512struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4513{
3373efd8 4514 struct ata_port *ap = dev->ap;
1da177e4
LT
4515 struct ata_queued_cmd *qc;
4516
4517 qc = ata_qc_new(ap);
4518 if (qc) {
1da177e4
LT
4519 qc->scsicmd = NULL;
4520 qc->ap = ap;
4521 qc->dev = dev;
1da177e4 4522
2c13b7ce 4523 ata_qc_reinit(qc);
1da177e4
LT
4524 }
4525
4526 return qc;
4527}
4528
1da177e4
LT
4529/**
4530 * ata_qc_free - free unused ata_queued_cmd
4531 * @qc: Command to complete
4532 *
4533 * Designed to free unused ata_queued_cmd object
4534 * in case something prevents using it.
4535 *
4536 * LOCKING:
cca3974e 4537 * spin_lock_irqsave(host lock)
1da177e4
LT
4538 */
4539void ata_qc_free(struct ata_queued_cmd *qc)
4540{
4ba946e9
TH
4541 struct ata_port *ap = qc->ap;
4542 unsigned int tag;
4543
a4631474 4544 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4545
4ba946e9
TH
4546 qc->flags = 0;
4547 tag = qc->tag;
4548 if (likely(ata_tag_valid(tag))) {
4ba946e9 4549 qc->tag = ATA_TAG_POISON;
6cec4a39 4550 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4551 }
1da177e4
LT
4552}
4553
/* __ata_qc_complete - low-level completion: unmap DMA, clear the
 * active-tag bookkeeping, mark the qc inactive, then invoke the
 * completion callback.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ap->sactive &= ~(1 << qc->tag);
	else
		ap->active_tag = ATA_TAG_POISON;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4580
39599a53
TH
4581static void fill_result_tf(struct ata_queued_cmd *qc)
4582{
4583 struct ata_port *ap = qc->ap;
4584
4585 ap->ops->tf_read(ap, &qc->result_tf);
4586 qc->result_tf.flags = qc->tf.flags;
4587}
4588
f686bcb8
TH
4589/**
4590 * ata_qc_complete - Complete an active ATA command
4591 * @qc: Command to complete
4592 * @err_mask: ATA Status register contents
4593 *
4594 * Indicate to the mid and upper layers that an ATA
4595 * command has completed, with either an ok or not-ok status.
4596 *
4597 * LOCKING:
cca3974e 4598 * spin_lock_irqsave(host lock)
f686bcb8
TH
4599 */
4600void ata_qc_complete(struct ata_queued_cmd *qc)
4601{
4602 struct ata_port *ap = qc->ap;
4603
4604 /* XXX: New EH and old EH use different mechanisms to
4605 * synchronize EH with regular execution path.
4606 *
4607 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4608 * Normal execution path is responsible for not accessing a
4609 * failed qc. libata core enforces the rule by returning NULL
4610 * from ata_qc_from_tag() for failed qcs.
4611 *
4612 * Old EH depends on ata_qc_complete() nullifying completion
4613 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4614 * not synchronize with interrupt handler. Only PIO task is
4615 * taken care of.
4616 */
4617 if (ap->ops->error_handler) {
b51e9e5d 4618 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
4619
4620 if (unlikely(qc->err_mask))
4621 qc->flags |= ATA_QCFLAG_FAILED;
4622
4623 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4624 if (!ata_tag_internal(qc->tag)) {
4625 /* always fill result TF for failed qc */
39599a53 4626 fill_result_tf(qc);
f686bcb8
TH
4627 ata_qc_schedule_eh(qc);
4628 return;
4629 }
4630 }
4631
4632 /* read result TF if requested */
4633 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4634 fill_result_tf(qc);
f686bcb8
TH
4635
4636 __ata_qc_complete(qc);
4637 } else {
4638 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4639 return;
4640
4641 /* read result TF if failed or requested */
4642 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4643 fill_result_tf(qc);
f686bcb8
TH
4644
4645 __ata_qc_complete(qc);
4646 }
4647}
4648
dedaf2b0
TH
4649/**
4650 * ata_qc_complete_multiple - Complete multiple qcs successfully
4651 * @ap: port in question
4652 * @qc_active: new qc_active mask
4653 * @finish_qc: LLDD callback invoked before completing a qc
4654 *
4655 * Complete in-flight commands. This functions is meant to be
4656 * called from low-level driver's interrupt routine to complete
4657 * requests normally. ap->qc_active and @qc_active is compared
4658 * and commands are completed accordingly.
4659 *
4660 * LOCKING:
cca3974e 4661 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4662 *
4663 * RETURNS:
4664 * Number of completed commands on success, -errno otherwise.
4665 */
4666int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4667 void (*finish_qc)(struct ata_queued_cmd *))
4668{
4669 int nr_done = 0;
4670 u32 done_mask;
4671 int i;
4672
4673 done_mask = ap->qc_active ^ qc_active;
4674
4675 if (unlikely(done_mask & qc_active)) {
4676 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4677 "(%08x->%08x)\n", ap->qc_active, qc_active);
4678 return -EINVAL;
4679 }
4680
4681 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4682 struct ata_queued_cmd *qc;
4683
4684 if (!(done_mask & (1 << i)))
4685 continue;
4686
4687 if ((qc = ata_qc_from_tag(ap, i))) {
4688 if (finish_qc)
4689 finish_qc(qc);
4690 ata_qc_complete(qc);
4691 nr_done++;
4692 }
4693 }
4694
4695 return nr_done;
4696}
4697
1da177e4
LT
4698static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4699{
4700 struct ata_port *ap = qc->ap;
4701
4702 switch (qc->tf.protocol) {
3dc1d881 4703 case ATA_PROT_NCQ:
1da177e4
LT
4704 case ATA_PROT_DMA:
4705 case ATA_PROT_ATAPI_DMA:
4706 return 1;
4707
4708 case ATA_PROT_ATAPI:
4709 case ATA_PROT_PIO:
1da177e4
LT
4710 if (ap->flags & ATA_FLAG_PIO_DMA)
4711 return 1;
4712
4713 /* fall through */
4714
4715 default:
4716 return 0;
4717 }
4718
4719 /* never reached */
4720}
4721
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));

	/* record the command as active before touching the hardware */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(ap->sactive & (1 << qc->tag));
		ap->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(ap->sactive);
		ap->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	/* S/G setup failed — report it as a host-side error */
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
4780
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* Some controllers show flaky interrupt behavior after
	 * setting xfer mode.  Use polling instead.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
		     qc->tf.feature == SETFEATURES_XFER) &&
	    (ap->flags & ATA_FLAG_SETXFER_POLLING))
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
4920
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	/* preserve BMDMA status for EH diagnostics on DMA errors */
	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_irq_ack(ap, 0); /* debug trap */
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
5020
5021/**
5022 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5023 * @irq: irq line (unused)
cca3974e 5024 * @dev_instance: pointer to our ata_host information structure
1da177e4 5025 *
0cba632b
JG
5026 * Default interrupt handler for PCI IDE devices. Calls
5027 * ata_host_intr() for each port that is not disabled.
5028 *
1da177e4 5029 * LOCKING:
cca3974e 5030 * Obtains host lock during operation.
1da177e4
LT
5031 *
5032 * RETURNS:
0cba632b 5033 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5034 */
5035
7d12e780 5036irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5037{
cca3974e 5038 struct ata_host *host = dev_instance;
1da177e4
LT
5039 unsigned int i;
5040 unsigned int handled = 0;
5041 unsigned long flags;
5042
5043 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5044 spin_lock_irqsave(&host->lock, flags);
1da177e4 5045
cca3974e 5046 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5047 struct ata_port *ap;
5048
cca3974e 5049 ap = host->ports[i];
c1389503 5050 if (ap &&
029f5468 5051 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5052 struct ata_queued_cmd *qc;
5053
5054 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 5055 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5056 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5057 handled |= ata_host_intr(ap, qc);
5058 }
5059 }
5060
cca3974e 5061 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5062
5063 return IRQ_RETVAL(handled);
5064}
5065
34bf2170
TH
5066/**
5067 * sata_scr_valid - test whether SCRs are accessible
5068 * @ap: ATA port to test SCR accessibility for
5069 *
5070 * Test whether SCRs are accessible for @ap.
5071 *
5072 * LOCKING:
5073 * None.
5074 *
5075 * RETURNS:
5076 * 1 if SCRs are accessible, 0 otherwise.
5077 */
5078int sata_scr_valid(struct ata_port *ap)
5079{
5080 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5081}
5082
5083/**
5084 * sata_scr_read - read SCR register of the specified port
5085 * @ap: ATA port to read SCR for
5086 * @reg: SCR to read
5087 * @val: Place to store read value
5088 *
5089 * Read SCR register @reg of @ap into *@val. This function is
5090 * guaranteed to succeed if the cable type of the port is SATA
5091 * and the port implements ->scr_read.
5092 *
5093 * LOCKING:
5094 * None.
5095 *
5096 * RETURNS:
5097 * 0 on success, negative errno on failure.
5098 */
5099int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5100{
5101 if (sata_scr_valid(ap)) {
5102 *val = ap->ops->scr_read(ap, reg);
5103 return 0;
5104 }
5105 return -EOPNOTSUPP;
5106}
5107
5108/**
5109 * sata_scr_write - write SCR register of the specified port
5110 * @ap: ATA port to write SCR for
5111 * @reg: SCR to write
5112 * @val: value to write
5113 *
5114 * Write @val to SCR register @reg of @ap. This function is
5115 * guaranteed to succeed if the cable type of the port is SATA
5116 * and the port implements ->scr_read.
5117 *
5118 * LOCKING:
5119 * None.
5120 *
5121 * RETURNS:
5122 * 0 on success, negative errno on failure.
5123 */
5124int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5125{
5126 if (sata_scr_valid(ap)) {
5127 ap->ops->scr_write(ap, reg, val);
5128 return 0;
5129 }
5130 return -EOPNOTSUPP;
5131}
5132
5133/**
5134 * sata_scr_write_flush - write SCR register of the specified port and flush
5135 * @ap: ATA port to write SCR for
5136 * @reg: SCR to write
5137 * @val: value to write
5138 *
5139 * This function is identical to sata_scr_write() except that this
5140 * function performs flush after writing to the register.
5141 *
5142 * LOCKING:
5143 * None.
5144 *
5145 * RETURNS:
5146 * 0 on success, negative errno on failure.
5147 */
5148int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5149{
5150 if (sata_scr_valid(ap)) {
5151 ap->ops->scr_write(ap, reg, val);
5152 ap->ops->scr_read(ap, reg);
5153 return 0;
5154 }
5155 return -EOPNOTSUPP;
5156}
5157
5158/**
5159 * ata_port_online - test whether the given port is online
5160 * @ap: ATA port to test
5161 *
5162 * Test whether @ap is online. Note that this function returns 0
5163 * if online status of @ap cannot be obtained, so
5164 * ata_port_online(ap) != !ata_port_offline(ap).
5165 *
5166 * LOCKING:
5167 * None.
5168 *
5169 * RETURNS:
5170 * 1 if the port online status is available and online.
5171 */
5172int ata_port_online(struct ata_port *ap)
5173{
5174 u32 sstatus;
5175
5176 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5177 return 1;
5178 return 0;
5179}
5180
5181/**
5182 * ata_port_offline - test whether the given port is offline
5183 * @ap: ATA port to test
5184 *
5185 * Test whether @ap is offline. Note that this function returns
5186 * 0 if offline status of @ap cannot be obtained, so
5187 * ata_port_online(ap) != !ata_port_offline(ap).
5188 *
5189 * LOCKING:
5190 * None.
5191 *
5192 * RETURNS:
5193 * 1 if the port offline status is available and offline.
5194 */
5195int ata_port_offline(struct ata_port *ap)
5196{
5197 u32 sstatus;
5198
5199 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5200 return 1;
5201 return 0;
5202}
0baab86b 5203
77b08fb5 5204int ata_flush_cache(struct ata_device *dev)
9b847548 5205{
977e6b9f 5206 unsigned int err_mask;
9b847548
JA
5207 u8 cmd;
5208
5209 if (!ata_try_flush_cache(dev))
5210 return 0;
5211
6fc49adb 5212 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
5213 cmd = ATA_CMD_FLUSH_EXT;
5214 else
5215 cmd = ATA_CMD_FLUSH;
5216
977e6b9f
TH
5217 err_mask = ata_do_simple_cmd(dev, cmd);
5218 if (err_mask) {
5219 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5220 return -EIO;
5221 }
5222
5223 return 0;
9b847548
JA
5224}
5225
/**
 *	ata_host_request_pm - request a PM operation from EH for all ports
 *	@host: host whose ports should perform the PM operation
 *	@mesg: PM message stored in each port's ->pm_mesg
 *	@action: EH actions OR'd into each port's eh_info
 *	@ehi_flags: EH info flags OR'd into each port's eh_info
 *	@wait: if non-zero, wait for EH to finish on each port and
 *	       propagate the first non-zero result
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success; when @wait is set, the first non-zero result
 *	reported by EH otherwise.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			/* EH reports its result through ap->pm_result,
			 * which points at our stack variable rc here.
			 */
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
5272
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int i, j, rc;

	/* quietly suspend every port, waiting for each to finish */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

	/* EH is quiescent now.  Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}

	/* record the new power state on the controlling device */
	host->dev->power.power_state = mesg;
	return 0;

 fail:
	/* roll the ports back to the running state before reporting */
	ata_host_resume(host);
	return rc;
}
5323
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	/* wait == 0: resumes run asynchronously in parallel across ports */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
5341
c893a3ae
RD
5342/**
5343 * ata_port_start - Set port up for dma.
5344 * @ap: Port to initialize
5345 *
5346 * Called just after data structures for each port are
5347 * initialized. Allocates space for PRD table.
5348 *
5349 * May be used as the port_start() entry in ata_port_operations.
5350 *
5351 * LOCKING:
5352 * Inherited from caller.
5353 */
f0d36efd 5354int ata_port_start(struct ata_port *ap)
1da177e4 5355{
2f1f610b 5356 struct device *dev = ap->dev;
6037d6bb 5357 int rc;
1da177e4 5358
f0d36efd
TH
5359 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5360 GFP_KERNEL);
1da177e4
LT
5361 if (!ap->prd)
5362 return -ENOMEM;
5363
6037d6bb 5364 rc = ata_pad_alloc(ap, dev);
f0d36efd 5365 if (rc)
6037d6bb 5366 return rc;
1da177e4 5367
f0d36efd
TH
5368 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5369 (unsigned long long)ap->prd_dma);
1da177e4
LT
5370 return 0;
5371}
5372
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	/* Zero only the tail of the structure: fields before
	 * ATA_DEVICE_CLEAR_OFFSET (e.g. ->ap and ->devno, set by the
	 * caller in ata_port_init()) survive re-initialization.
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);

	/* start with all transfer modes allowed; probing narrows them */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5404
/**
 *	ata_port_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: Collection of hosts to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_init(struct ata_port *ap, struct ata_host *host,
		   const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;	/* stays disabled until probed */
	ap->id = ata_unique_id++;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	/* NOTE(review): port 1 takes its settings from ent->pinfo2 when
	 * provided — presumably a second channel with different
	 * capabilities; confirm against LLD usage.
	 */
	if (port_no == 1 && ent->pinfo2) {
		ap->pio_mask = ent->pinfo2->pio_mask;
		ap->mwdma_mask = ent->pinfo2->mwdma_mask;
		ap->udma_mask = ent->pinfo2->udma_mask;
		ap->flags |= ent->pinfo2->flags;
		ap->ops = ent->pinfo2->port_ops;
	} else {
		ap->pio_mask = ent->pio_mask;
		ap->mwdma_mask = ent->mwdma_mask;
		ap->udma_mask = ent->udma_mask;
		ap->flags |= ent->port_flags;
		ap->ops = ent->port_ops;
	}
	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	/* wire each device to its port before ata_dev_init(), which
	 * relies on dev->ap being set */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
5480
/**
 *	ata_port_init_shost - Initialize SCSI host associated with ATA port
 *	@ap: ATA port to initialize SCSI host for
 *	@shost: SCSI host associated with @ap
 *
 *	Initialize SCSI host @shost associated with ATA port @ap.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
{
	ap->scsi_host = shost;

	shost->unique_id = ap->id;
	/* fixed SCSI topology limits for an ATA port */
	shost->max_id = 16;
	shost->max_lun = 1;
	shost->max_channel = 1;
	shost->max_cmd_len = 12;
}
5501
/**
 *	ata_port_add - Attach low-level ATA driver to system
 *	@ent: Information provided by low-level driver
 *	@host: Collections of ports to which we add
 *	@port_no: Port number associated with this host
 *
 *	Attach low-level ATA driver to system.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	New ata_port on success, or NULL on error.
 */
static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
				      struct ata_host *host,
				      unsigned int port_no)
{
	struct Scsi_Host *shost;
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	/* refuse ports that have neither new-style EH nor any reset flag */
	if (!ent->port_ops->error_handler &&
	    !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	/* the ata_port lives in the SCSI host's hostdata area */
	shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!shost)
		return NULL;

	shost->transportt = &ata_scsi_transport_template;

	ap = ata_shost_to_port(shost);

	ata_port_init(ap, host, ent, port_no);
	ata_port_init_shost(ap, shost);

	return ap;
}
5545
/**
 *	ata_host_release - devres release callback for an ATA host
 *	@gendev: generic device the host hangs off
 *	@res: devres resource (unused)
 *
 *	Stops every allocated port, drops each port's SCSI host
 *	reference, then invokes the host's ->host_stop, if any.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* slots may be NULL if port allocation failed part-way */
		if (!ap)
			continue;

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);

		scsi_host_put(ap->scsi_host);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
5566
/**
 *	ata_host_init - Initialize a host struct (also used by SAS LLDs)
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */

void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
5587
/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the SCSI bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int i;
	struct device *dev = ent->dev;
	struct ata_host *host;
	int rc;

	DPRINTK("ENTER\n");

	if (ent->irq == 0) {
		dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
		return 0;
	}

	/* collect all allocations below in one devres group so they can
	 * be released together on any failure (see err_out)
	 */
	if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
		return 0;

	/* alloc a container for our list of ATA ports (buses) */
	host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
			    (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host)
		goto err_out;
	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
	host->n_ports = ent->n_ports;
	host->irq = ent->irq;
	host->irq2 = ent->irq2;
	host->iomap = ent->iomap;
	host->private_data = ent->private_data;

	/* register each port bound to this device */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;
		int irq_line = ent->irq;

		ap = ata_port_add(ent, host, i);
		host->ports[i] = ap;
		if (!ap)
			goto err_out;

		/* dummy? */
		if (ent->dummy_port_mask & (1 << i)) {
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		/* start port */
		rc = ap->ops->port_start(ap);
		if (rc) {
			/* drop this port's SCSI host reference ourselves;
			 * ata_host_release skips NULL slots */
			host->ports[i] = NULL;
			scsi_host_put(ap->scsi_host);
			goto err_out;
		}

		/* Report the secondary IRQ for second channel legacy */
		if (i == 1 && ent->irq2)
			irq_line = ent->irq2;

		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
				 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				 (ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
				"ctl 0x%p bmdma 0x%p irq %d\n",
				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
				ata_mode_string(xfer_mode_mask),
				ap->ioaddr.cmd_addr,
				ap->ioaddr.ctl_addr,
				ap->ioaddr.bmdma_addr,
				irq_line);

		/* freeze port before requesting IRQ */
		ata_eh_freeze_port(ap);
	}

	/* obtain irq, that may be shared between channels */
	rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
			      ent->irq_flags, DRV_NAME, host);
	if (rc) {
		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
			   ent->irq, rc);
		goto err_out;
	}

	/* do we have a second IRQ for the other channel, eg legacy mode */
	if (ent->irq2) {
		/* We will get weird core code crashes later if this is true
		   so trap it now */
		BUG_ON(ent->irq == ent->irq2);

		rc = devm_request_irq(dev, ent->irq2,
				      ent->port_ops->irq_handler, ent->irq_flags,
				      DRV_NAME, host);
		if (rc) {
			dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
				   ent->irq2, rc);
			goto err_out;
		}
	}

	/* resource acquisition complete */
	devres_remove_group(dev, ata_device_add);

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 scontrol;
		int rc;

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		rc = scsi_add_host(ap->scsi_host, dev);
		if (rc) {
			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}

		if (ap->ops->error_handler) {
			/* new-style EH: probing is driven by the EH thread */
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			/* old-style drivers: probe the bus directly */
			DPRINTK("ata%u: bus probe begin\n", ap->id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

 err_out:
	/* releasing the devres group frees everything acquired above */
	devres_release_group(dev, ata_device_add);
	dev_set_drvdata(dev, NULL);
	VPRINTK("EXIT, returning %d\n", rc);
	return 0;
}
5791
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	/* old-style drivers have no EH to quiesce; go straight to
	 * SCSI host removal */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
5849
0529c159
TH
5850/**
5851 * ata_host_detach - Detach all ports of an ATA host
5852 * @host: Host to detach
5853 *
5854 * Detach all ports of @host.
5855 *
5856 * LOCKING:
5857 * Kernel thread context (may sleep).
5858 */
5859void ata_host_detach(struct ata_host *host)
5860{
5861 int i;
5862
5863 for (i = 0; i < host->n_ports; i++)
5864 ata_port_detach(host->ports[i]);
5865}
5866
/**
 *	ata_probe_ent_alloc - allocate and pre-fill an ata_probe_ent
 *	@dev: device the probe entry describes
 *	@port: port info whose masks, flags and ops are copied in
 *
 *	Allocates a zeroed probe entry — device-managed when @dev
 *	already uses devres, plain kzalloc otherwise — and copies the
 *	transfer-mode masks, flags and operations from @port.
 *
 *	RETURNS:
 *	New probe entry, or NULL on allocation failure.
 */
struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
	struct ata_probe_ent *probe_ent;

	/* XXX - the following if can go away once all LLDs are managed */
	if (!list_empty(&dev->devres_head))
		probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
	else
		probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       kobject_name(&(dev->kobj)));
		return NULL;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;

	probe_ent->sht = port->sht;
	probe_ent->port_flags = port->flags;
	probe_ent->pio_mask = port->pio_mask;
	probe_ent->mwdma_mask = port->mwdma_mask;
	probe_ent->udma_mask = port->udma_mask;
	probe_ent->port_ops = port->port_ops;
	probe_ent->private_data = port->private_data;

	return probe_ent;
}
5896
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	/* the taskfile register block lives at fixed ATA_REG_* offsets
	 * from the command block base */
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
5922
0baab86b 5923
374b1873
JG
5924#ifdef CONFIG_PCI
5925
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *gendev = pci_dev_to_dev(pdev);

	/* the ata_host was stashed in drvdata at probe time */
	ata_host_detach(dev_get_drvdata(gendev));
}
5944
5945/* move to PCI subsystem */
057ace5e 5946int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
5947{
5948 unsigned long tmp = 0;
5949
5950 switch (bits->width) {
5951 case 1: {
5952 u8 tmp8 = 0;
5953 pci_read_config_byte(pdev, bits->reg, &tmp8);
5954 tmp = tmp8;
5955 break;
5956 }
5957 case 2: {
5958 u16 tmp16 = 0;
5959 pci_read_config_word(pdev, bits->reg, &tmp16);
5960 tmp = tmp16;
5961 break;
5962 }
5963 case 4: {
5964 u32 tmp32 = 0;
5965 pci_read_config_dword(pdev, bits->reg, &tmp32);
5966 tmp = tmp32;
5967 break;
5968 }
5969
5970 default:
5971 return -EINVAL;
5972 }
5973
5974 tmp &= bits->mask;
5975
5976 return (tmp == bits->val) ? 1 : 0;
5977}
9b847548 5978
3c5100c1 5979void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
5980{
5981 pci_save_state(pdev);
500530f6 5982
3c5100c1 5983 if (mesg.event == PM_EVENT_SUSPEND) {
500530f6
TH
5984 pci_disable_device(pdev);
5985 pci_set_power_state(pdev, PCI_D3hot);
5986 }
9b847548
JA
5987}
5988
553c4aa6 5989int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 5990{
553c4aa6
TH
5991 int rc;
5992
9b847548
JA
5993 pci_set_power_state(pdev, PCI_D0);
5994 pci_restore_state(pdev);
553c4aa6 5995
b878ca5d 5996 rc = pcim_enable_device(pdev);
553c4aa6
TH
5997 if (rc) {
5998 dev_printk(KERN_ERR, &pdev->dev,
5999 "failed to enable device after resume (%d)\n", rc);
6000 return rc;
6001 }
6002
9b847548 6003 pci_set_master(pdev);
553c4aa6 6004 return 0;
500530f6
TH
6005}
6006
3c5100c1 6007int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6008{
cca3974e 6009 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6010 int rc = 0;
6011
cca3974e 6012 rc = ata_host_suspend(host, mesg);
500530f6
TH
6013 if (rc)
6014 return rc;
6015
3c5100c1 6016 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6017
6018 return 0;
6019}
6020
6021int ata_pci_device_resume(struct pci_dev *pdev)
6022{
cca3974e 6023 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6024 int rc;
500530f6 6025
553c4aa6
TH
6026 rc = ata_pci_device_do_resume(pdev);
6027 if (rc == 0)
6028 ata_host_resume(host);
6029 return rc;
9b847548 6030}
1da177e4
LT
6031#endif /* CONFIG_PCI */
6032
6033
1da177e4
LT
6034static int __init ata_init(void)
6035{
a8601e5f 6036 ata_probe_timeout *= HZ;
1da177e4
LT
6037 ata_wq = create_workqueue("ata");
6038 if (!ata_wq)
6039 return -ENOMEM;
6040
453b07ac
TH
6041 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6042 if (!ata_aux_wq) {
6043 destroy_workqueue(ata_wq);
6044 return -ENOMEM;
6045 }
6046
1da177e4
LT
6047 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6048 return 0;
6049}
6050
/* Module unload: tear down both libata workqueues. */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
6056
a4625085 6057subsys_initcall(ata_init);
1da177e4
LT
6058module_exit(ata_exit);
6059
67846b30 6060static unsigned long ratelimit_time;
34af946a 6061static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6062
6063int ata_ratelimit(void)
6064{
6065 int rc;
6066 unsigned long flags;
6067
6068 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6069
6070 if (time_after(jiffies, ratelimit_time)) {
6071 rc = 1;
6072 ratelimit_time = jiffies + (HZ/5);
6073 } else
6074 rc = 0;
6075
6076 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6077
6078 return rc;
6079}
6080
c22daff4
TH
6081/**
6082 * ata_wait_register - wait until register value changes
6083 * @reg: IO-mapped register
6084 * @mask: Mask to apply to read register value
6085 * @val: Wait condition
6086 * @interval_msec: polling interval in milliseconds
6087 * @timeout_msec: timeout in milliseconds
6088 *
6089 * Waiting for some bits of register to change is a common
6090 * operation for ATA controllers. This function reads 32bit LE
6091 * IO-mapped register @reg and tests for the following condition.
6092 *
6093 * (*@reg & mask) != val
6094 *
6095 * If the condition is met, it returns; otherwise, the process is
6096 * repeated after @interval_msec until timeout.
6097 *
6098 * LOCKING:
6099 * Kernel thread context (may sleep)
6100 *
6101 * RETURNS:
6102 * The final register value.
6103 */
6104u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6105 unsigned long interval_msec,
6106 unsigned long timeout_msec)
6107{
6108 unsigned long timeout;
6109 u32 tmp;
6110
6111 tmp = ioread32(reg);
6112
6113 /* Calculate timeout _after_ the first read to make sure
6114 * preceding writes reach the controller before starting to
6115 * eat away the timeout.
6116 */
6117 timeout = jiffies + (timeout_msec * HZ) / 1000;
6118
6119 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6120 msleep(interval_msec);
6121 tmp = ioread32(reg);
6122 }
6123
6124 return tmp;
6125}
6126
/*
 * Dummy port_ops - installed on ports flagged as dummies in
 * ata_device_add().  Every hook is a no-op; check_status always
 * reports ATA_DRDY and qc_issue fails every command with
 * AC_ERR_SYSTEM.
 */
static void ata_dummy_noret(struct ata_port *ap)	{ }
static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* Always claim the (non-existent) device is ready. */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* Reject every queued command outright. */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
6159
1da177e4
LT
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

/* SATA debounce timing tables and the dummy ops table above */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);

/* host/port lifecycle */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_detach);

/* command construction and completion */
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);

/* taskfile / register access helpers */
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);

/* BMDMA helpers */
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);

/* probing, PHY handling and resets */
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_port_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);

/* misc utilities (ata_wait_register is defined above) */
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);

/* SCSI layer glue */
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);

/* SATA status/control register access and link state */
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);

/* power management and identify-data helpers */
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_device_blacklisted);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

/* transfer-mode timing computation */
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

/* PCI glue — only built when PCI support is configured */
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);

/* error handling entry points */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
This page took 0.897332 seconds and 5 git commands to generate.