libata: clean up xfermode / PATA timing related stuff
[deliverable/linux.git] / drivers / ata / libata-core.c
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
41 */
42
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/highmem.h>
50 #include <linux/spinlock.h>
51 #include <linux/blkdev.h>
52 #include <linux/delay.h>
53 #include <linux/timer.h>
54 #include <linux/interrupt.h>
55 #include <linux/completion.h>
56 #include <linux/suspend.h>
57 #include <linux/workqueue.h>
58 #include <linux/jiffies.h>
59 #include <linux/scatterlist.h>
60 #include <linux/io.h>
61 #include <scsi/scsi.h>
62 #include <scsi/scsi_cmnd.h>
63 #include <scsi/scsi_host.h>
64 #include <linux/libata.h>
65 #include <asm/semaphore.h>
66 #include <asm/byteorder.h>
67 #include <linux/cdrom.h>
68
69 #include "libata.h"
70
71
72 /* debounce timing parameters in msecs { interval, duration, timeout } */
73 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
74 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
75 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
76
77 static unsigned int ata_dev_init_params(struct ata_device *dev,
78 u16 heads, u16 sectors);
79 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
80 static unsigned int ata_dev_set_feature(struct ata_device *dev,
81 u8 enable, u8 feature);
82 static void ata_dev_xfermask(struct ata_device *dev);
83 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
84
85 unsigned int ata_print_id = 1;
86 static struct workqueue_struct *ata_wq;
87
88 struct workqueue_struct *ata_aux_wq;
89
90 int atapi_enabled = 1;
91 module_param(atapi_enabled, int, 0444);
92 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
93
94 int atapi_dmadir = 0;
95 module_param(atapi_dmadir, int, 0444);
96 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
97
98 int atapi_passthru16 = 1;
99 module_param(atapi_passthru16, int, 0444);
100 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
101
102 int libata_fua = 0;
103 module_param_named(fua, libata_fua, int, 0444);
104 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
105
106 static int ata_ignore_hpa;
107 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
108 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
109
110 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
111 module_param_named(dma, libata_dma_mask, int, 0444);
112 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
113
114 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
115 module_param(ata_probe_timeout, int, 0444);
116 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
117
118 int libata_noacpi = 0;
119 module_param_named(noacpi, libata_noacpi, int, 0444);
120 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
121
122 int libata_allow_tpm = 0;
123 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
124 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
125
126 MODULE_AUTHOR("Jeff Garzik");
127 MODULE_DESCRIPTION("Library module for ATA devices");
128 MODULE_LICENSE("GPL");
129 MODULE_VERSION(DRV_VERSION);
130
131
132 /**
133 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
134 * @tf: Taskfile to convert
135 * @pmp: Port multiplier port
136 * @is_cmd: This FIS is for command
137 * @fis: Buffer into which data will output
138 *
139 * Converts a standard ATA taskfile to a Serial ATA
140 * FIS structure (Register - Host to Device).
141 *
142 * LOCKING:
143 * Inherited from caller.
144 */
145 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
146 {
147 fis[0] = 0x27; /* Register - Host to Device FIS */
148 fis[1] = pmp & 0xf; /* Port multiplier number*/
149 if (is_cmd)
150 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
151
152 fis[2] = tf->command;
153 fis[3] = tf->feature;
154
155 fis[4] = tf->lbal;
156 fis[5] = tf->lbam;
157 fis[6] = tf->lbah;
158 fis[7] = tf->device;
159
160 fis[8] = tf->hob_lbal;
161 fis[9] = tf->hob_lbam;
162 fis[10] = tf->hob_lbah;
163 fis[11] = tf->hob_feature;
164
165 fis[12] = tf->nsect;
166 fis[13] = tf->hob_nsect;
167 fis[14] = 0;
168 fis[15] = tf->ctl;
169
170 fis[16] = 0;
171 fis[17] = 0;
172 fis[18] = 0;
173 fis[19] = 0;
174 }
175
176 /**
177 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
178 * @fis: Buffer from which data will be input
179 * @tf: Taskfile to output
180 *
181 * Converts a serial ATA FIS structure to a standard ATA taskfile.
182 *
183 * LOCKING:
184 * Inherited from caller.
185 */
186
187 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
188 {
189 tf->command = fis[2]; /* status */
190 tf->feature = fis[3]; /* error */
191
192 tf->lbal = fis[4];
193 tf->lbam = fis[5];
194 tf->lbah = fis[6];
195 tf->device = fis[7];
196
197 tf->hob_lbal = fis[8];
198 tf->hob_lbam = fis[9];
199 tf->hob_lbah = fis[10];
200
201 tf->nsect = fis[12];
202 tf->hob_nsect = fis[13];
203 }
204
205 static const u8 ata_rw_cmds[] = {
206 /* pio multi */
207 ATA_CMD_READ_MULTI,
208 ATA_CMD_WRITE_MULTI,
209 ATA_CMD_READ_MULTI_EXT,
210 ATA_CMD_WRITE_MULTI_EXT,
211 0,
212 0,
213 0,
214 ATA_CMD_WRITE_MULTI_FUA_EXT,
215 /* pio */
216 ATA_CMD_PIO_READ,
217 ATA_CMD_PIO_WRITE,
218 ATA_CMD_PIO_READ_EXT,
219 ATA_CMD_PIO_WRITE_EXT,
220 0,
221 0,
222 0,
223 0,
224 /* dma */
225 ATA_CMD_READ,
226 ATA_CMD_WRITE,
227 ATA_CMD_READ_EXT,
228 ATA_CMD_WRITE_EXT,
229 0,
230 0,
231 0,
232 ATA_CMD_WRITE_FUA_EXT
233 };
234
235 /**
236 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
237 * @tf: command to examine and configure
238 * @dev: device tf belongs to
239 *
240 * Examine the device configuration and tf->flags to calculate
241 * the proper read/write commands and protocol to use.
242 *
243 * LOCKING:
244 * caller.
245 */
246 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
247 {
248 u8 cmd;
249
250 int index, fua, lba48, write;
251
252 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
253 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
254 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
255
256 if (dev->flags & ATA_DFLAG_PIO) {
257 tf->protocol = ATA_PROT_PIO;
258 index = dev->multi_count ? 0 : 8;
259 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
260 /* Unable to use DMA due to host limitation */
261 tf->protocol = ATA_PROT_PIO;
262 index = dev->multi_count ? 0 : 8;
263 } else {
264 tf->protocol = ATA_PROT_DMA;
265 index = 16;
266 }
267
268 cmd = ata_rw_cmds[index + fua + lba48 + write];
269 if (cmd) {
270 tf->command = cmd;
271 return 0;
272 }
273 return -1;
274 }
275
276 /**
277 * ata_tf_read_block - Read block address from ATA taskfile
278 * @tf: ATA taskfile of interest
279 * @dev: ATA device @tf belongs to
280 *
281 * LOCKING:
282 * None.
283 *
284 * Read block address from @tf. This function can handle all
285 * three address formats - LBA, LBA48 and CHS. tf->protocol and
286 * flags select the address format to use.
287 *
288 * RETURNS:
289 * Block address read from @tf.
290 */
291 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
292 {
293 u64 block = 0;
294
295 if (tf->flags & ATA_TFLAG_LBA) {
296 if (tf->flags & ATA_TFLAG_LBA48) {
297 block |= (u64)tf->hob_lbah << 40;
298 block |= (u64)tf->hob_lbam << 32;
299 block |= tf->hob_lbal << 24;
300 } else
301 block |= (tf->device & 0xf) << 24;
302
303 block |= tf->lbah << 16;
304 block |= tf->lbam << 8;
305 block |= tf->lbal;
306 } else {
307 u32 cyl, head, sect;
308
309 cyl = tf->lbam | (tf->lbah << 8);
310 head = tf->device & 0xf;
311 sect = tf->lbal;
312
313 block = (cyl * dev->heads + head) * dev->sectors + sect;
314 }
315
316 return block;
317 }
318
319 /**
320 * ata_build_rw_tf - Build ATA taskfile for given read/write request
321 * @tf: Target ATA taskfile
322 * @dev: ATA device @tf belongs to
323 * @block: Block address
324 * @n_block: Number of blocks
325 * @tf_flags: RW/FUA etc...
326 * @tag: tag
327 *
328 * LOCKING:
329 * None.
330 *
331 * Build ATA taskfile @tf for read/write request described by
332 * @block, @n_block, @tf_flags and @tag on @dev.
333 *
334 * RETURNS:
335 *
336 * 0 on success, -ERANGE if the request is too large for @dev,
337 * -EINVAL if the request is invalid.
338 */
339 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
340 u64 block, u32 n_block, unsigned int tf_flags,
341 unsigned int tag)
342 {
343 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
344 tf->flags |= tf_flags;
345
346 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
347 /* yay, NCQ */
348 if (!lba_48_ok(block, n_block))
349 return -ERANGE;
350
351 tf->protocol = ATA_PROT_NCQ;
352 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
353
354 if (tf->flags & ATA_TFLAG_WRITE)
355 tf->command = ATA_CMD_FPDMA_WRITE;
356 else
357 tf->command = ATA_CMD_FPDMA_READ;
358
359 tf->nsect = tag << 3;
360 tf->hob_feature = (n_block >> 8) & 0xff;
361 tf->feature = n_block & 0xff;
362
363 tf->hob_lbah = (block >> 40) & 0xff;
364 tf->hob_lbam = (block >> 32) & 0xff;
365 tf->hob_lbal = (block >> 24) & 0xff;
366 tf->lbah = (block >> 16) & 0xff;
367 tf->lbam = (block >> 8) & 0xff;
368 tf->lbal = block & 0xff;
369
370 tf->device = 1 << 6;
371 if (tf->flags & ATA_TFLAG_FUA)
372 tf->device |= 1 << 7;
373 } else if (dev->flags & ATA_DFLAG_LBA) {
374 tf->flags |= ATA_TFLAG_LBA;
375
376 if (lba_28_ok(block, n_block)) {
377 /* use LBA28 */
378 tf->device |= (block >> 24) & 0xf;
379 } else if (lba_48_ok(block, n_block)) {
380 if (!(dev->flags & ATA_DFLAG_LBA48))
381 return -ERANGE;
382
383 /* use LBA48 */
384 tf->flags |= ATA_TFLAG_LBA48;
385
386 tf->hob_nsect = (n_block >> 8) & 0xff;
387
388 tf->hob_lbah = (block >> 40) & 0xff;
389 tf->hob_lbam = (block >> 32) & 0xff;
390 tf->hob_lbal = (block >> 24) & 0xff;
391 } else
392 /* request too large even for LBA48 */
393 return -ERANGE;
394
395 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
396 return -EINVAL;
397
398 tf->nsect = n_block & 0xff;
399
400 tf->lbah = (block >> 16) & 0xff;
401 tf->lbam = (block >> 8) & 0xff;
402 tf->lbal = block & 0xff;
403
404 tf->device |= ATA_LBA;
405 } else {
406 /* CHS */
407 u32 sect, head, cyl, track;
408
409 /* The request -may- be too large for CHS addressing. */
410 if (!lba_28_ok(block, n_block))
411 return -ERANGE;
412
413 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
414 return -EINVAL;
415
416 /* Convert LBA to CHS */
417 track = (u32)block / dev->sectors;
418 cyl = track / dev->heads;
419 head = track % dev->heads;
420 sect = (u32)block % dev->sectors + 1;
421
422 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
423 (u32)block, track, cyl, head, sect);
424
425 /* Check whether the converted CHS can fit.
426 Cylinder: 0-65535
427 Head: 0-15
428 Sector: 1-255*/
429 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
430 return -ERANGE;
431
432 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
433 tf->lbal = sect;
434 tf->lbam = cyl;
435 tf->lbah = cyl >> 8;
436 tf->device |= head;
437 }
438
439 return 0;
440 }
441
442 /**
443 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
444 * @pio_mask: pio_mask
445 * @mwdma_mask: mwdma_mask
446 * @udma_mask: udma_mask
447 *
448 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
449 * unsigned int xfer_mask.
450 *
451 * LOCKING:
452 * None.
453 *
454 * RETURNS:
455 * Packed xfer_mask.
456 */
457 unsigned int ata_pack_xfermask(unsigned int pio_mask,
458 unsigned int mwdma_mask, unsigned int udma_mask)
459 {
460 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
461 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
462 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
463 }
464
465 /**
466 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
467 * @xfer_mask: xfer_mask to unpack
468 * @pio_mask: resulting pio_mask
469 * @mwdma_mask: resulting mwdma_mask
470 * @udma_mask: resulting udma_mask
471 *
472 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
473 * Any NULL distination masks will be ignored.
474 */
475 void ata_unpack_xfermask(unsigned int xfer_mask, unsigned int *pio_mask,
476 unsigned int *mwdma_mask, unsigned int *udma_mask)
477 {
478 if (pio_mask)
479 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
480 if (mwdma_mask)
481 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
482 if (udma_mask)
483 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
484 }
485
486 static const struct ata_xfer_ent {
487 int shift, bits;
488 u8 base;
489 } ata_xfer_tbl[] = {
490 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
491 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
492 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
493 { -1, },
494 };
495
496 /**
497 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
498 * @xfer_mask: xfer_mask of interest
499 *
500 * Return matching XFER_* value for @xfer_mask. Only the highest
501 * bit of @xfer_mask is considered.
502 *
503 * LOCKING:
504 * None.
505 *
506 * RETURNS:
507 * Matching XFER_* value, 0xff if no match found.
508 */
509 u8 ata_xfer_mask2mode(unsigned int xfer_mask)
510 {
511 int highbit = fls(xfer_mask) - 1;
512 const struct ata_xfer_ent *ent;
513
514 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
515 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
516 return ent->base + highbit - ent->shift;
517 return 0xff;
518 }
519
520 /**
521 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
522 * @xfer_mode: XFER_* of interest
523 *
524 * Return matching xfer_mask for @xfer_mode.
525 *
526 * LOCKING:
527 * None.
528 *
529 * RETURNS:
530 * Matching xfer_mask, 0 if no match found.
531 */
532 unsigned int ata_xfer_mode2mask(u8 xfer_mode)
533 {
534 const struct ata_xfer_ent *ent;
535
536 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
537 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
538 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
539 & ~((1 << ent->shift) - 1);
540 return 0;
541 }
542
543 /**
544 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
545 * @xfer_mode: XFER_* of interest
546 *
547 * Return matching xfer_shift for @xfer_mode.
548 *
549 * LOCKING:
550 * None.
551 *
552 * RETURNS:
553 * Matching xfer_shift, -1 if no match found.
554 */
555 int ata_xfer_mode2shift(unsigned int xfer_mode)
556 {
557 const struct ata_xfer_ent *ent;
558
559 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
560 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
561 return ent->shift;
562 return -1;
563 }
564
565 /**
566 * ata_mode_string - convert xfer_mask to string
567 * @xfer_mask: mask of bits supported; only highest bit counts.
568 *
569 * Determine string which represents the highest speed
570 * (highest bit in @modemask).
571 *
572 * LOCKING:
573 * None.
574 *
575 * RETURNS:
576 * Constant C string representing highest speed listed in
577 * @mode_mask, or the constant C string "<n/a>".
578 */
579 const char *ata_mode_string(unsigned int xfer_mask)
580 {
581 static const char * const xfer_mode_str[] = {
582 "PIO0",
583 "PIO1",
584 "PIO2",
585 "PIO3",
586 "PIO4",
587 "PIO5",
588 "PIO6",
589 "MWDMA0",
590 "MWDMA1",
591 "MWDMA2",
592 "MWDMA3",
593 "MWDMA4",
594 "UDMA/16",
595 "UDMA/25",
596 "UDMA/33",
597 "UDMA/44",
598 "UDMA/66",
599 "UDMA/100",
600 "UDMA/133",
601 "UDMA7",
602 };
603 int highbit;
604
605 highbit = fls(xfer_mask) - 1;
606 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
607 return xfer_mode_str[highbit];
608 return "<n/a>";
609 }
610
611 static const char *sata_spd_string(unsigned int spd)
612 {
613 static const char * const spd_str[] = {
614 "1.5 Gbps",
615 "3.0 Gbps",
616 };
617
618 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
619 return "<unknown>";
620 return spd_str[spd - 1];
621 }
622
623 void ata_dev_disable(struct ata_device *dev)
624 {
625 if (ata_dev_enabled(dev)) {
626 if (ata_msg_drv(dev->link->ap))
627 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
628 ata_acpi_on_disable(dev);
629 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
630 ATA_DNXFER_QUIET);
631 dev->class++;
632 }
633 }
634
635 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
636 {
637 struct ata_link *link = dev->link;
638 struct ata_port *ap = link->ap;
639 u32 scontrol;
640 unsigned int err_mask;
641 int rc;
642
643 /*
644 * disallow DIPM for drivers which haven't set
645 * ATA_FLAG_IPM. This is because when DIPM is enabled,
646 * phy ready will be set in the interrupt status on
647 * state changes, which will cause some drivers to
648 * think there are errors - additionally drivers will
649 * need to disable hot plug.
650 */
651 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
652 ap->pm_policy = NOT_AVAILABLE;
653 return -EINVAL;
654 }
655
656 /*
657 * For DIPM, we will only enable it for the
658 * min_power setting.
659 *
660 * Why? Because Disks are too stupid to know that
661 * If the host rejects a request to go to SLUMBER
662 * they should retry at PARTIAL, and instead it
663 * just would give up. So, for medium_power to
664 * work at all, we need to only allow HIPM.
665 */
666 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
667 if (rc)
668 return rc;
669
670 switch (policy) {
671 case MIN_POWER:
672 /* no restrictions on IPM transitions */
673 scontrol &= ~(0x3 << 8);
674 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
675 if (rc)
676 return rc;
677
678 /* enable DIPM */
679 if (dev->flags & ATA_DFLAG_DIPM)
680 err_mask = ata_dev_set_feature(dev,
681 SETFEATURES_SATA_ENABLE, SATA_DIPM);
682 break;
683 case MEDIUM_POWER:
684 /* allow IPM to PARTIAL */
685 scontrol &= ~(0x1 << 8);
686 scontrol |= (0x2 << 8);
687 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
688 if (rc)
689 return rc;
690
691 /*
692 * we don't have to disable DIPM since IPM flags
693 * disallow transitions to SLUMBER, which effectively
694 * disable DIPM if it does not support PARTIAL
695 */
696 break;
697 case NOT_AVAILABLE:
698 case MAX_PERFORMANCE:
699 /* disable all IPM transitions */
700 scontrol |= (0x3 << 8);
701 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
702 if (rc)
703 return rc;
704
705 /*
706 * we don't have to disable DIPM since IPM flags
707 * disallow all transitions which effectively
708 * disable DIPM anyway.
709 */
710 break;
711 }
712
713 /* FIXME: handle SET FEATURES failure */
714 (void) err_mask;
715
716 return 0;
717 }
718
719 /**
720 * ata_dev_enable_pm - enable SATA interface power management
721 * @dev: device to enable power management
722 * @policy: the link power management policy
723 *
724 * Enable SATA Interface power management. This will enable
725 * Device Interface Power Management (DIPM) for min_power
726 * policy, and then call driver specific callbacks for
727 * enabling Host Initiated Power management.
728 *
729 * Locking: Caller.
730 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
731 */
732 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
733 {
734 int rc = 0;
735 struct ata_port *ap = dev->link->ap;
736
737 /* set HIPM first, then DIPM */
738 if (ap->ops->enable_pm)
739 rc = ap->ops->enable_pm(ap, policy);
740 if (rc)
741 goto enable_pm_out;
742 rc = ata_dev_set_dipm(dev, policy);
743
744 enable_pm_out:
745 if (rc)
746 ap->pm_policy = MAX_PERFORMANCE;
747 else
748 ap->pm_policy = policy;
749 return /* rc */; /* hopefully we can use 'rc' eventually */
750 }
751
752 #ifdef CONFIG_PM
753 /**
754 * ata_dev_disable_pm - disable SATA interface power management
755 * @dev: device to disable power management
756 *
757 * Disable SATA Interface power management. This will disable
758 * Device Interface Power Management (DIPM) without changing
759 * policy, call driver specific callbacks for disabling Host
760 * Initiated Power management.
761 *
762 * Locking: Caller.
763 * Returns: void
764 */
765 static void ata_dev_disable_pm(struct ata_device *dev)
766 {
767 struct ata_port *ap = dev->link->ap;
768
769 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
770 if (ap->ops->disable_pm)
771 ap->ops->disable_pm(ap);
772 }
773 #endif /* CONFIG_PM */
774
775 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
776 {
777 ap->pm_policy = policy;
778 ap->link.eh_info.action |= ATA_EHI_LPM;
779 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
780 ata_port_schedule_eh(ap);
781 }
782
783 #ifdef CONFIG_PM
784 static void ata_lpm_enable(struct ata_host *host)
785 {
786 struct ata_link *link;
787 struct ata_port *ap;
788 struct ata_device *dev;
789 int i;
790
791 for (i = 0; i < host->n_ports; i++) {
792 ap = host->ports[i];
793 ata_port_for_each_link(link, ap) {
794 ata_link_for_each_dev(dev, link)
795 ata_dev_disable_pm(dev);
796 }
797 }
798 }
799
800 static void ata_lpm_disable(struct ata_host *host)
801 {
802 int i;
803
804 for (i = 0; i < host->n_ports; i++) {
805 struct ata_port *ap = host->ports[i];
806 ata_lpm_schedule(ap, ap->pm_policy);
807 }
808 }
809 #endif /* CONFIG_PM */
810
811
812 /**
813 * ata_devchk - PATA device presence detection
814 * @ap: ATA channel to examine
815 * @device: Device to examine (starting at zero)
816 *
817 * This technique was originally described in
818 * Hale Landis's ATADRVR (www.ata-atapi.com), and
819 * later found its way into the ATA/ATAPI spec.
820 *
821 * Write a pattern to the ATA shadow registers,
822 * and if a device is present, it will respond by
823 * correctly storing and echoing back the
824 * ATA shadow register contents.
825 *
826 * LOCKING:
827 * caller.
828 */
829
830 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
831 {
832 struct ata_ioports *ioaddr = &ap->ioaddr;
833 u8 nsect, lbal;
834
835 ap->ops->dev_select(ap, device);
836
837 iowrite8(0x55, ioaddr->nsect_addr);
838 iowrite8(0xaa, ioaddr->lbal_addr);
839
840 iowrite8(0xaa, ioaddr->nsect_addr);
841 iowrite8(0x55, ioaddr->lbal_addr);
842
843 iowrite8(0x55, ioaddr->nsect_addr);
844 iowrite8(0xaa, ioaddr->lbal_addr);
845
846 nsect = ioread8(ioaddr->nsect_addr);
847 lbal = ioread8(ioaddr->lbal_addr);
848
849 if ((nsect == 0x55) && (lbal == 0xaa))
850 return 1; /* we found a device */
851
852 return 0; /* nothing found */
853 }
854
855 /**
856 * ata_dev_classify - determine device type based on ATA-spec signature
857 * @tf: ATA taskfile register set for device to be identified
858 *
859 * Determine from taskfile register contents whether a device is
860 * ATA or ATAPI, as per "Signature and persistence" section
861 * of ATA/PI spec (volume 1, sect 5.14).
862 *
863 * LOCKING:
864 * None.
865 *
866 * RETURNS:
867 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
868 * %ATA_DEV_UNKNOWN the event of failure.
869 */
870 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
871 {
872 /* Apple's open source Darwin code hints that some devices only
873 * put a proper signature into the LBA mid/high registers,
874 * So, we only check those. It's sufficient for uniqueness.
875 *
876 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
877 * signatures for ATA and ATAPI devices attached on SerialATA,
878 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
879 * spec has never mentioned about using different signatures
880 * for ATA/ATAPI devices. Then, Serial ATA II: Port
881 * Multiplier specification began to use 0x69/0x96 to identify
882 * port multpliers and 0x3c/0xc3 to identify SEMB device.
883 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
884 * 0x69/0x96 shortly and described them as reserved for
885 * SerialATA.
886 *
887 * We follow the current spec and consider that 0x69/0x96
888 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
889 */
890 if ((tf->lbam == 0) && (tf->lbah == 0)) {
891 DPRINTK("found ATA device by sig\n");
892 return ATA_DEV_ATA;
893 }
894
895 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
896 DPRINTK("found ATAPI device by sig\n");
897 return ATA_DEV_ATAPI;
898 }
899
900 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
901 DPRINTK("found PMP device by sig\n");
902 return ATA_DEV_PMP;
903 }
904
905 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
906 printk(KERN_INFO "ata: SEMB device ignored\n");
907 return ATA_DEV_SEMB_UNSUP; /* not yet */
908 }
909
910 DPRINTK("unknown device\n");
911 return ATA_DEV_UNKNOWN;
912 }
913
914 /**
915 * ata_dev_try_classify - Parse returned ATA device signature
916 * @dev: ATA device to classify (starting at zero)
917 * @present: device seems present
918 * @r_err: Value of error register on completion
919 *
920 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
921 * an ATA/ATAPI-defined set of values is placed in the ATA
922 * shadow registers, indicating the results of device detection
923 * and diagnostics.
924 *
925 * Select the ATA device, and read the values from the ATA shadow
926 * registers. Then parse according to the Error register value,
927 * and the spec-defined values examined by ata_dev_classify().
928 *
929 * LOCKING:
930 * caller.
931 *
932 * RETURNS:
933 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
934 */
935 unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
936 u8 *r_err)
937 {
938 struct ata_port *ap = dev->link->ap;
939 struct ata_taskfile tf;
940 unsigned int class;
941 u8 err;
942
943 ap->ops->dev_select(ap, dev->devno);
944
945 memset(&tf, 0, sizeof(tf));
946
947 ap->ops->tf_read(ap, &tf);
948 err = tf.feature;
949 if (r_err)
950 *r_err = err;
951
952 /* see if device passed diags: if master then continue and warn later */
953 if (err == 0 && dev->devno == 0)
954 /* diagnostic fail : do nothing _YET_ */
955 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
956 else if (err == 1)
957 /* do nothing */ ;
958 else if ((dev->devno == 0) && (err == 0x81))
959 /* do nothing */ ;
960 else
961 return ATA_DEV_NONE;
962
963 /* determine if device is ATA or ATAPI */
964 class = ata_dev_classify(&tf);
965
966 if (class == ATA_DEV_UNKNOWN) {
967 /* If the device failed diagnostic, it's likely to
968 * have reported incorrect device signature too.
969 * Assume ATA device if the device seems present but
970 * device signature is invalid with diagnostic
971 * failure.
972 */
973 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
974 class = ATA_DEV_ATA;
975 else
976 class = ATA_DEV_NONE;
977 } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
978 class = ATA_DEV_NONE;
979
980 return class;
981 }
982
983 /**
984 * ata_id_string - Convert IDENTIFY DEVICE page into string
985 * @id: IDENTIFY DEVICE results we will examine
986 * @s: string into which data is output
987 * @ofs: offset into identify device page
988 * @len: length of string to return. must be an even number.
989 *
990 * The strings in the IDENTIFY DEVICE page are broken up into
991 * 16-bit chunks. Run through the string, and output each
992 * 8-bit chunk linearly, regardless of platform.
993 *
994 * LOCKING:
995 * caller.
996 */
997
998 void ata_id_string(const u16 *id, unsigned char *s,
999 unsigned int ofs, unsigned int len)
1000 {
1001 unsigned int c;
1002
1003 while (len > 0) {
1004 c = id[ofs] >> 8;
1005 *s = c;
1006 s++;
1007
1008 c = id[ofs] & 0xff;
1009 *s = c;
1010 s++;
1011
1012 ofs++;
1013 len -= 2;
1014 }
1015 }
1016
1017 /**
1018 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1019 * @id: IDENTIFY DEVICE results we will examine
1020 * @s: string into which data is output
1021 * @ofs: offset into identify device page
1022 * @len: length of string to return. must be an odd number.
1023 *
1024 * This function is identical to ata_id_string except that it
1025 * trims trailing spaces and terminates the resulting string with
1026 * null. @len must be actual maximum length (even number) + 1.
1027 *
1028 * LOCKING:
1029 * caller.
1030 */
1031 void ata_id_c_string(const u16 *id, unsigned char *s,
1032 unsigned int ofs, unsigned int len)
1033 {
1034 unsigned char *p;
1035
1036 WARN_ON(!(len & 1));
1037
1038 ata_id_string(id, s, ofs, len - 1);
1039
1040 p = s + strnlen(s, len - 1);
1041 while (p > s && p[-1] == ' ')
1042 p--;
1043 *p = '\0';
1044 }
1045
1046 static u64 ata_id_n_sectors(const u16 *id)
1047 {
1048 if (ata_id_has_lba(id)) {
1049 if (ata_id_has_lba48(id))
1050 return ata_id_u64(id, 100);
1051 else
1052 return ata_id_u32(id, 60);
1053 } else {
1054 if (ata_id_current_chs_valid(id))
1055 return ata_id_u32(id, 57);
1056 else
1057 return id[1] * id[3] * id[6];
1058 }
1059 }
1060
1061 static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
1062 {
1063 u64 sectors = 0;
1064
1065 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1066 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1067 sectors |= (tf->hob_lbal & 0xff) << 24;
1068 sectors |= (tf->lbah & 0xff) << 16;
1069 sectors |= (tf->lbam & 0xff) << 8;
1070 sectors |= (tf->lbal & 0xff);
1071
1072 return ++sectors;
1073 }
1074
1075 static u64 ata_tf_to_lba(struct ata_taskfile *tf)
1076 {
1077 u64 sectors = 0;
1078
1079 sectors |= (tf->device & 0x0f) << 24;
1080 sectors |= (tf->lbah & 0xff) << 16;
1081 sectors |= (tf->lbam & 0xff) << 8;
1082 sectors |= (tf->lbal & 0xff);
1083
1084 return ++sectors;
1085 }
1086
1087 /**
1088 * ata_read_native_max_address - Read native max address
1089 * @dev: target device
1090 * @max_sectors: out parameter for the result native max address
1091 *
1092 * Perform an LBA48 or LBA28 native size query upon the device in
1093 * question.
1094 *
1095 * RETURNS:
1096 * 0 on success, -EACCES if command is aborted by the drive.
1097 * -EIO on other errors.
1098 */
1099 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1100 {
1101 unsigned int err_mask;
1102 struct ata_taskfile tf;
1103 int lba48 = ata_id_has_lba48(dev->id);
1104
1105 ata_tf_init(dev, &tf);
1106
1107 /* always clear all address registers */
1108 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1109
1110 if (lba48) {
1111 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1112 tf.flags |= ATA_TFLAG_LBA48;
1113 } else
1114 tf.command = ATA_CMD_READ_NATIVE_MAX;
1115
1116 tf.protocol |= ATA_PROT_NODATA;
1117 tf.device |= ATA_LBA;
1118
1119 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1120 if (err_mask) {
1121 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1122 "max address (err_mask=0x%x)\n", err_mask);
1123 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1124 return -EACCES;
1125 return -EIO;
1126 }
1127
1128 if (lba48)
1129 *max_sectors = ata_tf_to_lba48(&tf);
1130 else
1131 *max_sectors = ata_tf_to_lba(&tf);
1132 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1133 (*max_sectors)--;
1134 return 0;
1135 }
1136
1137 /**
1138 * ata_set_max_sectors - Set max sectors
1139 * @dev: target device
1140 * @new_sectors: new max sectors value to set for the device
1141 *
1142 * Set max sectors of @dev to @new_sectors.
1143 *
1144 * RETURNS:
1145 * 0 on success, -EACCES if command is aborted or denied (due to
1146 * previous non-volatile SET_MAX) by the drive. -EIO on other
1147 * errors.
1148 */
1149 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1150 {
1151 unsigned int err_mask;
1152 struct ata_taskfile tf;
1153 int lba48 = ata_id_has_lba48(dev->id);
1154
1155 new_sectors--;
1156
1157 ata_tf_init(dev, &tf);
1158
1159 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1160
1161 if (lba48) {
1162 tf.command = ATA_CMD_SET_MAX_EXT;
1163 tf.flags |= ATA_TFLAG_LBA48;
1164
1165 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1166 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1167 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1168 } else {
1169 tf.command = ATA_CMD_SET_MAX;
1170
1171 tf.device |= (new_sectors >> 24) & 0xf;
1172 }
1173
1174 tf.protocol |= ATA_PROT_NODATA;
1175 tf.device |= ATA_LBA;
1176
1177 tf.lbal = (new_sectors >> 0) & 0xff;
1178 tf.lbam = (new_sectors >> 8) & 0xff;
1179 tf.lbah = (new_sectors >> 16) & 0xff;
1180
1181 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1182 if (err_mask) {
1183 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1184 "max address (err_mask=0x%x)\n", err_mask);
1185 if (err_mask == AC_ERR_DEV &&
1186 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1187 return -EACCES;
1188 return -EIO;
1189 }
1190
1191 return 0;
1192 }
1193
1194 /**
1195 * ata_hpa_resize - Resize a device with an HPA set
1196 * @dev: Device to resize
1197 *
1198 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1199 * it if required to the full size of the media. The caller must check
1200 * the drive has the HPA feature set enabled.
1201 *
1202 * RETURNS:
1203 * 0 on success, -errno on failure.
1204 */
1205 static int ata_hpa_resize(struct ata_device *dev)
1206 {
1207 struct ata_eh_context *ehc = &dev->link->eh_context;
1208 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1209 u64 sectors = ata_id_n_sectors(dev->id);
1210 u64 native_sectors;
1211 int rc;
1212
1213 /* do we need to do it? */
1214 if (dev->class != ATA_DEV_ATA ||
1215 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1216 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1217 return 0;
1218
1219 /* read native max address */
1220 rc = ata_read_native_max_address(dev, &native_sectors);
1221 if (rc) {
1222 /* If HPA isn't going to be unlocked, skip HPA
1223 * resizing from the next try.
1224 */
1225 if (!ata_ignore_hpa) {
1226 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1227 "broken, will skip HPA handling\n");
1228 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1229
1230 /* we can continue if device aborted the command */
1231 if (rc == -EACCES)
1232 rc = 0;
1233 }
1234
1235 return rc;
1236 }
1237
1238 /* nothing to do? */
1239 if (native_sectors <= sectors || !ata_ignore_hpa) {
1240 if (!print_info || native_sectors == sectors)
1241 return 0;
1242
1243 if (native_sectors > sectors)
1244 ata_dev_printk(dev, KERN_INFO,
1245 "HPA detected: current %llu, native %llu\n",
1246 (unsigned long long)sectors,
1247 (unsigned long long)native_sectors);
1248 else if (native_sectors < sectors)
1249 ata_dev_printk(dev, KERN_WARNING,
1250 "native sectors (%llu) is smaller than "
1251 "sectors (%llu)\n",
1252 (unsigned long long)native_sectors,
1253 (unsigned long long)sectors);
1254 return 0;
1255 }
1256
1257 /* let's unlock HPA */
1258 rc = ata_set_max_sectors(dev, native_sectors);
1259 if (rc == -EACCES) {
1260 /* if device aborted the command, skip HPA resizing */
1261 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1262 "(%llu -> %llu), skipping HPA handling\n",
1263 (unsigned long long)sectors,
1264 (unsigned long long)native_sectors);
1265 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1266 return 0;
1267 } else if (rc)
1268 return rc;
1269
1270 /* re-read IDENTIFY data */
1271 rc = ata_dev_reread_id(dev, 0);
1272 if (rc) {
1273 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1274 "data after HPA resizing\n");
1275 return rc;
1276 }
1277
1278 if (print_info) {
1279 u64 new_sectors = ata_id_n_sectors(dev->id);
1280 ata_dev_printk(dev, KERN_INFO,
1281 "HPA unlocked: %llu -> %llu, native %llu\n",
1282 (unsigned long long)sectors,
1283 (unsigned long long)new_sectors,
1284 (unsigned long long)native_sectors);
1285 }
1286
1287 return 0;
1288 }
1289
1290 /**
1291 * ata_id_to_dma_mode - Identify DMA mode from id block
1292 * @dev: device to identify
1293 * @unknown: mode to assume if we cannot tell
1294 *
1295 * Set up the timing values for the device based upon the identify
1296 * reported values for the DMA mode. This function is used by drivers
1297 * which rely upon firmware configured modes, but wish to report the
1298 * mode correctly when possible.
1299 *
1300 * In addition we emit similarly formatted messages to the default
1301 * ata_dev_set_mode handler, in order to provide consistency of
1302 * presentation.
1303 */
1304
1305 void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1306 {
1307 unsigned int mask;
1308 u8 mode;
1309
1310 /* Pack the DMA modes */
1311 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1312 if (dev->id[53] & 0x04)
1313 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1314
1315 /* Select the mode in use */
1316 mode = ata_xfer_mask2mode(mask);
1317
1318 if (mode != 0xff) {
1319 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1320 ata_mode_string(mask));
1321 } else {
1322 /* SWDMA perhaps ? */
1323 mode = unknown;
1324 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1325 }
1326
1327 /* Configure the device reporting */
1328 dev->xfer_mode = mode;
1329 dev->xfer_shift = ata_xfer_mode2shift(mode);
1330 }
1331
1332 /**
1333 * ata_noop_dev_select - Select device 0/1 on ATA bus
1334 * @ap: ATA channel to manipulate
1335 * @device: ATA device (numbered from zero) to select
1336 *
1337 * This function performs no actual function.
1338 *
1339 * May be used as the dev_select() entry in ata_port_operations.
1340 *
1341 * LOCKING:
1342 * caller.
1343 */
1344 void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
1345 {
1346 }
1347
1348
1349 /**
1350 * ata_std_dev_select - Select device 0/1 on ATA bus
1351 * @ap: ATA channel to manipulate
1352 * @device: ATA device (numbered from zero) to select
1353 *
1354 * Use the method defined in the ATA specification to
1355 * make either device 0, or device 1, active on the
1356 * ATA channel. Works with both PIO and MMIO.
1357 *
1358 * May be used as the dev_select() entry in ata_port_operations.
1359 *
1360 * LOCKING:
1361 * caller.
1362 */
1363
1364 void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1365 {
1366 u8 tmp;
1367
1368 if (device == 0)
1369 tmp = ATA_DEVICE_OBS;
1370 else
1371 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1372
1373 iowrite8(tmp, ap->ioaddr.device_addr);
1374 ata_pause(ap); /* needed; also flushes, for mmio */
1375 }
1376
1377 /**
1378 * ata_dev_select - Select device 0/1 on ATA bus
1379 * @ap: ATA channel to manipulate
1380 * @device: ATA device (numbered from zero) to select
1381 * @wait: non-zero to wait for Status register BSY bit to clear
1382 * @can_sleep: non-zero if context allows sleeping
1383 *
1384 * Use the method defined in the ATA specification to
1385 * make either device 0, or device 1, active on the
1386 * ATA channel.
1387 *
1388 * This is a high-level version of ata_std_dev_select(),
1389 * which additionally provides the services of inserting
1390 * the proper pauses and status polling, where needed.
1391 *
1392 * LOCKING:
1393 * caller.
1394 */
1395
1396 void ata_dev_select(struct ata_port *ap, unsigned int device,
1397 unsigned int wait, unsigned int can_sleep)
1398 {
1399 if (ata_msg_probe(ap))
1400 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1401 "device %u, wait %u\n", device, wait);
1402
1403 if (wait)
1404 ata_wait_idle(ap);
1405
1406 ap->ops->dev_select(ap, device);
1407
1408 if (wait) {
1409 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1410 msleep(150);
1411 ata_wait_idle(ap);
1412 }
1413 }
1414
1415 /**
1416 * ata_dump_id - IDENTIFY DEVICE info debugging output
1417 * @id: IDENTIFY DEVICE page to dump
1418 *
1419 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1420 * page.
1421 *
1422 * LOCKING:
1423 * caller.
1424 */
1425
1426 static inline void ata_dump_id(const u16 *id)
1427 {
1428 DPRINTK("49==0x%04x "
1429 "53==0x%04x "
1430 "63==0x%04x "
1431 "64==0x%04x "
1432 "75==0x%04x \n",
1433 id[49],
1434 id[53],
1435 id[63],
1436 id[64],
1437 id[75]);
1438 DPRINTK("80==0x%04x "
1439 "81==0x%04x "
1440 "82==0x%04x "
1441 "83==0x%04x "
1442 "84==0x%04x \n",
1443 id[80],
1444 id[81],
1445 id[82],
1446 id[83],
1447 id[84]);
1448 DPRINTK("88==0x%04x "
1449 "93==0x%04x\n",
1450 id[88],
1451 id[93]);
1452 }
1453
1454 /**
1455 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1456 * @id: IDENTIFY data to compute xfer mask from
1457 *
1458 * Compute the xfermask for this device. This is not as trivial
1459 * as it seems if we must consider early devices correctly.
1460 *
1461 * FIXME: pre IDE drive timing (do we care ?).
1462 *
1463 * LOCKING:
1464 * None.
1465 *
1466 * RETURNS:
1467 * Computed xfermask
1468 */
1469 unsigned int ata_id_xfermask(const u16 *id)
1470 {
1471 unsigned int pio_mask, mwdma_mask, udma_mask;
1472
1473 /* Usual case. Word 53 indicates word 64 is valid */
1474 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1475 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1476 pio_mask <<= 3;
1477 pio_mask |= 0x7;
1478 } else {
1479 /* If word 64 isn't valid then Word 51 high byte holds
1480 * the PIO timing number for the maximum. Turn it into
1481 * a mask.
1482 */
1483 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1484 if (mode < 5) /* Valid PIO range */
1485 pio_mask = (2 << mode) - 1;
1486 else
1487 pio_mask = 1;
1488
1489 /* But wait.. there's more. Design your standards by
1490 * committee and you too can get a free iordy field to
1491 * process. However its the speeds not the modes that
1492 * are supported... Note drivers using the timing API
1493 * will get this right anyway
1494 */
1495 }
1496
1497 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1498
1499 if (ata_id_is_cfa(id)) {
1500 /*
1501 * Process compact flash extended modes
1502 */
1503 int pio = id[163] & 0x7;
1504 int dma = (id[163] >> 3) & 7;
1505
1506 if (pio)
1507 pio_mask |= (1 << 5);
1508 if (pio > 1)
1509 pio_mask |= (1 << 6);
1510 if (dma)
1511 mwdma_mask |= (1 << 3);
1512 if (dma > 1)
1513 mwdma_mask |= (1 << 4);
1514 }
1515
1516 udma_mask = 0;
1517 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1518 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1519
1520 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1521 }
1522
1523 /**
1524 * ata_port_queue_task - Queue port_task
1525 * @ap: The ata_port to queue port_task for
1526 * @fn: workqueue function to be scheduled
1527 * @data: data for @fn to use
1528 * @delay: delay time for workqueue function
1529 *
1530 * Schedule @fn(@data) for execution after @delay jiffies using
1531 * port_task. There is one port_task per port and it's the
1532 * user(low level driver)'s responsibility to make sure that only
1533 * one task is active at any given time.
1534 *
1535 * libata core layer takes care of synchronization between
1536 * port_task and EH. ata_port_queue_task() may be ignored for EH
1537 * synchronization.
1538 *
1539 * LOCKING:
1540 * Inherited from caller.
1541 */
1542 void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1543 unsigned long delay)
1544 {
1545 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1546 ap->port_task_data = data;
1547
1548 /* may fail if ata_port_flush_task() in progress */
1549 queue_delayed_work(ata_wq, &ap->port_task, delay);
1550 }
1551
1552 /**
1553 * ata_port_flush_task - Flush port_task
1554 * @ap: The ata_port to flush port_task for
1555 *
1556 * After this function completes, port_task is guranteed not to
1557 * be running or scheduled.
1558 *
1559 * LOCKING:
1560 * Kernel thread context (may sleep)
1561 */
1562 void ata_port_flush_task(struct ata_port *ap)
1563 {
1564 DPRINTK("ENTER\n");
1565
1566 cancel_rearming_delayed_work(&ap->port_task);
1567
1568 if (ata_msg_ctl(ap))
1569 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1570 }
1571
1572 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1573 {
1574 struct completion *waiting = qc->private_data;
1575
1576 complete(waiting);
1577 }
1578
1579 /**
1580 * ata_exec_internal_sg - execute libata internal command
1581 * @dev: Device to which the command is sent
1582 * @tf: Taskfile registers for the command and the result
1583 * @cdb: CDB for packet command
1584 * @dma_dir: Data tranfer direction of the command
1585 * @sgl: sg list for the data buffer of the command
1586 * @n_elem: Number of sg entries
1587 * @timeout: Timeout in msecs (0 for default)
1588 *
1589 * Executes libata internal command with timeout. @tf contains
1590 * command on entry and result on return. Timeout and error
1591 * conditions are reported via return value. No recovery action
1592 * is taken after a command times out. It's caller's duty to
1593 * clean up after timeout.
1594 *
1595 * LOCKING:
1596 * None. Should be called with kernel context, might sleep.
1597 *
1598 * RETURNS:
1599 * Zero on success, AC_ERR_* mask on failure
1600 */
1601 unsigned ata_exec_internal_sg(struct ata_device *dev,
1602 struct ata_taskfile *tf, const u8 *cdb,
1603 int dma_dir, struct scatterlist *sgl,
1604 unsigned int n_elem, unsigned long timeout)
1605 {
1606 struct ata_link *link = dev->link;
1607 struct ata_port *ap = link->ap;
1608 u8 command = tf->command;
1609 struct ata_queued_cmd *qc;
1610 unsigned int tag, preempted_tag;
1611 u32 preempted_sactive, preempted_qc_active;
1612 int preempted_nr_active_links;
1613 DECLARE_COMPLETION_ONSTACK(wait);
1614 unsigned long flags;
1615 unsigned int err_mask;
1616 int rc;
1617
1618 spin_lock_irqsave(ap->lock, flags);
1619
1620 /* no internal command while frozen */
1621 if (ap->pflags & ATA_PFLAG_FROZEN) {
1622 spin_unlock_irqrestore(ap->lock, flags);
1623 return AC_ERR_SYSTEM;
1624 }
1625
1626 /* initialize internal qc */
1627
1628 /* XXX: Tag 0 is used for drivers with legacy EH as some
1629 * drivers choke if any other tag is given. This breaks
1630 * ata_tag_internal() test for those drivers. Don't use new
1631 * EH stuff without converting to it.
1632 */
1633 if (ap->ops->error_handler)
1634 tag = ATA_TAG_INTERNAL;
1635 else
1636 tag = 0;
1637
1638 if (test_and_set_bit(tag, &ap->qc_allocated))
1639 BUG();
1640 qc = __ata_qc_from_tag(ap, tag);
1641
1642 qc->tag = tag;
1643 qc->scsicmd = NULL;
1644 qc->ap = ap;
1645 qc->dev = dev;
1646 ata_qc_reinit(qc);
1647
1648 preempted_tag = link->active_tag;
1649 preempted_sactive = link->sactive;
1650 preempted_qc_active = ap->qc_active;
1651 preempted_nr_active_links = ap->nr_active_links;
1652 link->active_tag = ATA_TAG_POISON;
1653 link->sactive = 0;
1654 ap->qc_active = 0;
1655 ap->nr_active_links = 0;
1656
1657 /* prepare & issue qc */
1658 qc->tf = *tf;
1659 if (cdb)
1660 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1661 qc->flags |= ATA_QCFLAG_RESULT_TF;
1662 qc->dma_dir = dma_dir;
1663 if (dma_dir != DMA_NONE) {
1664 unsigned int i, buflen = 0;
1665 struct scatterlist *sg;
1666
1667 for_each_sg(sgl, sg, n_elem, i)
1668 buflen += sg->length;
1669
1670 ata_sg_init(qc, sgl, n_elem);
1671 qc->nbytes = buflen;
1672 }
1673
1674 qc->private_data = &wait;
1675 qc->complete_fn = ata_qc_complete_internal;
1676
1677 ata_qc_issue(qc);
1678
1679 spin_unlock_irqrestore(ap->lock, flags);
1680
1681 if (!timeout)
1682 timeout = ata_probe_timeout * 1000 / HZ;
1683
1684 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1685
1686 ata_port_flush_task(ap);
1687
1688 if (!rc) {
1689 spin_lock_irqsave(ap->lock, flags);
1690
1691 /* We're racing with irq here. If we lose, the
1692 * following test prevents us from completing the qc
1693 * twice. If we win, the port is frozen and will be
1694 * cleaned up by ->post_internal_cmd().
1695 */
1696 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1697 qc->err_mask |= AC_ERR_TIMEOUT;
1698
1699 if (ap->ops->error_handler)
1700 ata_port_freeze(ap);
1701 else
1702 ata_qc_complete(qc);
1703
1704 if (ata_msg_warn(ap))
1705 ata_dev_printk(dev, KERN_WARNING,
1706 "qc timeout (cmd 0x%x)\n", command);
1707 }
1708
1709 spin_unlock_irqrestore(ap->lock, flags);
1710 }
1711
1712 /* do post_internal_cmd */
1713 if (ap->ops->post_internal_cmd)
1714 ap->ops->post_internal_cmd(qc);
1715
1716 /* perform minimal error analysis */
1717 if (qc->flags & ATA_QCFLAG_FAILED) {
1718 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1719 qc->err_mask |= AC_ERR_DEV;
1720
1721 if (!qc->err_mask)
1722 qc->err_mask |= AC_ERR_OTHER;
1723
1724 if (qc->err_mask & ~AC_ERR_OTHER)
1725 qc->err_mask &= ~AC_ERR_OTHER;
1726 }
1727
1728 /* finish up */
1729 spin_lock_irqsave(ap->lock, flags);
1730
1731 *tf = qc->result_tf;
1732 err_mask = qc->err_mask;
1733
1734 ata_qc_free(qc);
1735 link->active_tag = preempted_tag;
1736 link->sactive = preempted_sactive;
1737 ap->qc_active = preempted_qc_active;
1738 ap->nr_active_links = preempted_nr_active_links;
1739
1740 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1741 * Until those drivers are fixed, we detect the condition
1742 * here, fail the command with AC_ERR_SYSTEM and reenable the
1743 * port.
1744 *
1745 * Note that this doesn't change any behavior as internal
1746 * command failure results in disabling the device in the
1747 * higher layer for LLDDs without new reset/EH callbacks.
1748 *
1749 * Kill the following code as soon as those drivers are fixed.
1750 */
1751 if (ap->flags & ATA_FLAG_DISABLED) {
1752 err_mask |= AC_ERR_SYSTEM;
1753 ata_port_probe(ap);
1754 }
1755
1756 spin_unlock_irqrestore(ap->lock, flags);
1757
1758 return err_mask;
1759 }
1760
1761 /**
1762 * ata_exec_internal - execute libata internal command
1763 * @dev: Device to which the command is sent
1764 * @tf: Taskfile registers for the command and the result
1765 * @cdb: CDB for packet command
1766 * @dma_dir: Data tranfer direction of the command
1767 * @buf: Data buffer of the command
1768 * @buflen: Length of data buffer
1769 * @timeout: Timeout in msecs (0 for default)
1770 *
1771 * Wrapper around ata_exec_internal_sg() which takes simple
1772 * buffer instead of sg list.
1773 *
1774 * LOCKING:
1775 * None. Should be called with kernel context, might sleep.
1776 *
1777 * RETURNS:
1778 * Zero on success, AC_ERR_* mask on failure
1779 */
1780 unsigned ata_exec_internal(struct ata_device *dev,
1781 struct ata_taskfile *tf, const u8 *cdb,
1782 int dma_dir, void *buf, unsigned int buflen,
1783 unsigned long timeout)
1784 {
1785 struct scatterlist *psg = NULL, sg;
1786 unsigned int n_elem = 0;
1787
1788 if (dma_dir != DMA_NONE) {
1789 WARN_ON(!buf);
1790 sg_init_one(&sg, buf, buflen);
1791 psg = &sg;
1792 n_elem++;
1793 }
1794
1795 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1796 timeout);
1797 }
1798
1799 /**
1800 * ata_do_simple_cmd - execute simple internal command
1801 * @dev: Device to which the command is sent
1802 * @cmd: Opcode to execute
1803 *
1804 * Execute a 'simple' command, that only consists of the opcode
1805 * 'cmd' itself, without filling any other registers
1806 *
1807 * LOCKING:
1808 * Kernel thread context (may sleep).
1809 *
1810 * RETURNS:
1811 * Zero on success, AC_ERR_* mask on failure
1812 */
1813 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1814 {
1815 struct ata_taskfile tf;
1816
1817 ata_tf_init(dev, &tf);
1818
1819 tf.command = cmd;
1820 tf.flags |= ATA_TFLAG_DEVICE;
1821 tf.protocol = ATA_PROT_NODATA;
1822
1823 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1824 }
1825
1826 /**
1827 * ata_pio_need_iordy - check if iordy needed
1828 * @adev: ATA device
1829 *
1830 * Check if the current speed of the device requires IORDY. Used
1831 * by various controllers for chip configuration.
1832 */
1833
1834 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1835 {
1836 /* Controller doesn't support IORDY. Probably a pointless check
1837 as the caller should know this */
1838 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1839 return 0;
1840 /* PIO3 and higher it is mandatory */
1841 if (adev->pio_mode > XFER_PIO_2)
1842 return 1;
1843 /* We turn it on when possible */
1844 if (ata_id_has_iordy(adev->id))
1845 return 1;
1846 return 0;
1847 }
1848
1849 /**
1850 * ata_pio_mask_no_iordy - Return the non IORDY mask
1851 * @adev: ATA device
1852 *
1853 * Compute the highest mode possible if we are not using iordy. Return
1854 * -1 if no iordy mode is available.
1855 */
1856
1857 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1858 {
1859 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1860 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1861 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1862 /* Is the speed faster than the drive allows non IORDY ? */
1863 if (pio) {
1864 /* This is cycle times not frequency - watch the logic! */
1865 if (pio > 240) /* PIO2 is 240nS per cycle */
1866 return 3 << ATA_SHIFT_PIO;
1867 return 7 << ATA_SHIFT_PIO;
1868 }
1869 }
1870 return 3 << ATA_SHIFT_PIO;
1871 }
1872
1873 /**
1874 * ata_dev_read_id - Read ID data from the specified device
1875 * @dev: target device
1876 * @p_class: pointer to class of the target device (may be changed)
1877 * @flags: ATA_READID_* flags
1878 * @id: buffer to read IDENTIFY data into
1879 *
1880 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1881 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1882 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1883 * for pre-ATA4 drives.
1884 *
1885 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1886 * now we abort if we hit that case.
1887 *
1888 * LOCKING:
1889 * Kernel thread context (may sleep)
1890 *
1891 * RETURNS:
1892 * 0 on success, -errno otherwise.
1893 */
1894 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1895 unsigned int flags, u16 *id)
1896 {
1897 struct ata_port *ap = dev->link->ap;
1898 unsigned int class = *p_class;
1899 struct ata_taskfile tf;
1900 unsigned int err_mask = 0;
1901 const char *reason;
1902 int may_fallback = 1, tried_spinup = 0;
1903 int rc;
1904
1905 if (ata_msg_ctl(ap))
1906 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1907
1908 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1909 retry:
1910 ata_tf_init(dev, &tf);
1911
1912 switch (class) {
1913 case ATA_DEV_ATA:
1914 tf.command = ATA_CMD_ID_ATA;
1915 break;
1916 case ATA_DEV_ATAPI:
1917 tf.command = ATA_CMD_ID_ATAPI;
1918 break;
1919 default:
1920 rc = -ENODEV;
1921 reason = "unsupported class";
1922 goto err_out;
1923 }
1924
1925 tf.protocol = ATA_PROT_PIO;
1926
1927 /* Some devices choke if TF registers contain garbage. Make
1928 * sure those are properly initialized.
1929 */
1930 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1931
1932 /* Device presence detection is unreliable on some
1933 * controllers. Always poll IDENTIFY if available.
1934 */
1935 tf.flags |= ATA_TFLAG_POLLING;
1936
1937 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1938 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1939 if (err_mask) {
1940 if (err_mask & AC_ERR_NODEV_HINT) {
1941 DPRINTK("ata%u.%d: NODEV after polling detection\n",
1942 ap->print_id, dev->devno);
1943 return -ENOENT;
1944 }
1945
1946 /* Device or controller might have reported the wrong
1947 * device class. Give a shot at the other IDENTIFY if
1948 * the current one is aborted by the device.
1949 */
1950 if (may_fallback &&
1951 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1952 may_fallback = 0;
1953
1954 if (class == ATA_DEV_ATA)
1955 class = ATA_DEV_ATAPI;
1956 else
1957 class = ATA_DEV_ATA;
1958 goto retry;
1959 }
1960
1961 rc = -EIO;
1962 reason = "I/O error";
1963 goto err_out;
1964 }
1965
1966 /* Falling back doesn't make sense if ID data was read
1967 * successfully at least once.
1968 */
1969 may_fallback = 0;
1970
1971 swap_buf_le16(id, ATA_ID_WORDS);
1972
1973 /* sanity check */
1974 rc = -EINVAL;
1975 reason = "device reports invalid type";
1976
1977 if (class == ATA_DEV_ATA) {
1978 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1979 goto err_out;
1980 } else {
1981 if (ata_id_is_ata(id))
1982 goto err_out;
1983 }
1984
1985 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1986 tried_spinup = 1;
1987 /*
1988 * Drive powered-up in standby mode, and requires a specific
1989 * SET_FEATURES spin-up subcommand before it will accept
1990 * anything other than the original IDENTIFY command.
1991 */
1992 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1993 if (err_mask && id[2] != 0x738c) {
1994 rc = -EIO;
1995 reason = "SPINUP failed";
1996 goto err_out;
1997 }
1998 /*
1999 * If the drive initially returned incomplete IDENTIFY info,
2000 * we now must reissue the IDENTIFY command.
2001 */
2002 if (id[2] == 0x37c8)
2003 goto retry;
2004 }
2005
2006 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2007 /*
2008 * The exact sequence expected by certain pre-ATA4 drives is:
2009 * SRST RESET
2010 * IDENTIFY (optional in early ATA)
2011 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2012 * anything else..
2013 * Some drives were very specific about that exact sequence.
2014 *
2015 * Note that ATA4 says lba is mandatory so the second check
2016 * shoud never trigger.
2017 */
2018 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2019 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2020 if (err_mask) {
2021 rc = -EIO;
2022 reason = "INIT_DEV_PARAMS failed";
2023 goto err_out;
2024 }
2025
2026 /* current CHS translation info (id[53-58]) might be
2027 * changed. reread the identify device info.
2028 */
2029 flags &= ~ATA_READID_POSTRESET;
2030 goto retry;
2031 }
2032 }
2033
2034 *p_class = class;
2035
2036 return 0;
2037
2038 err_out:
2039 if (ata_msg_warn(ap))
2040 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2041 "(%s, err_mask=0x%x)\n", reason, err_mask);
2042 return rc;
2043 }
2044
2045 static inline u8 ata_dev_knobble(struct ata_device *dev)
2046 {
2047 struct ata_port *ap = dev->link->ap;
2048 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2049 }
2050
2051 static void ata_dev_config_ncq(struct ata_device *dev,
2052 char *desc, size_t desc_sz)
2053 {
2054 struct ata_port *ap = dev->link->ap;
2055 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2056
2057 if (!ata_id_has_ncq(dev->id)) {
2058 desc[0] = '\0';
2059 return;
2060 }
2061 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2062 snprintf(desc, desc_sz, "NCQ (not used)");
2063 return;
2064 }
2065 if (ap->flags & ATA_FLAG_NCQ) {
2066 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2067 dev->flags |= ATA_DFLAG_NCQ;
2068 }
2069
2070 if (hdepth >= ddepth)
2071 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2072 else
2073 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2074 }
2075
2076 /**
2077 * ata_dev_configure - Configure the specified ATA/ATAPI device
2078 * @dev: Target device to configure
2079 *
2080 * Configure @dev according to @dev->id. Generic and low-level
2081 * driver specific fixups are also applied.
2082 *
2083 * LOCKING:
2084 * Kernel thread context (may sleep)
2085 *
2086 * RETURNS:
2087 * 0 on success, -errno otherwise
2088 */
2089 int ata_dev_configure(struct ata_device *dev)
2090 {
2091 struct ata_port *ap = dev->link->ap;
2092 struct ata_eh_context *ehc = &dev->link->eh_context;
2093 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2094 const u16 *id = dev->id;
2095 unsigned int xfer_mask;
2096 char revbuf[7]; /* XYZ-99\0 */
2097 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2098 char modelbuf[ATA_ID_PROD_LEN+1];
2099 int rc;
2100
2101 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2102 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2103 __FUNCTION__);
2104 return 0;
2105 }
2106
2107 if (ata_msg_probe(ap))
2108 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
2109
2110 /* set horkage */
2111 dev->horkage |= ata_dev_blacklisted(dev);
2112
2113 /* let ACPI work its magic */
2114 rc = ata_acpi_on_devcfg(dev);
2115 if (rc)
2116 return rc;
2117
2118 /* massage HPA, do it early as it might change IDENTIFY data */
2119 rc = ata_hpa_resize(dev);
2120 if (rc)
2121 return rc;
2122
2123 /* print device capabilities */
2124 if (ata_msg_probe(ap))
2125 ata_dev_printk(dev, KERN_DEBUG,
2126 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2127 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2128 __FUNCTION__,
2129 id[49], id[82], id[83], id[84],
2130 id[85], id[86], id[87], id[88]);
2131
2132 /* initialize to-be-configured parameters */
2133 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2134 dev->max_sectors = 0;
2135 dev->cdb_len = 0;
2136 dev->n_sectors = 0;
2137 dev->cylinders = 0;
2138 dev->heads = 0;
2139 dev->sectors = 0;
2140
2141 /*
2142 * common ATA, ATAPI feature tests
2143 */
2144
2145 /* find max transfer mode; for printk only */
2146 xfer_mask = ata_id_xfermask(id);
2147
2148 if (ata_msg_probe(ap))
2149 ata_dump_id(id);
2150
2151 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2152 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2153 sizeof(fwrevbuf));
2154
2155 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2156 sizeof(modelbuf));
2157
2158 /* ATA-specific feature tests */
2159 if (dev->class == ATA_DEV_ATA) {
2160 if (ata_id_is_cfa(id)) {
2161 if (id[162] & 1) /* CPRM may make this media unusable */
2162 ata_dev_printk(dev, KERN_WARNING,
2163 "supports DRM functions and may "
2164 "not be fully accessable.\n");
2165 snprintf(revbuf, 7, "CFA");
2166 } else {
2167 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2168 /* Warn the user if the device has TPM extensions */
2169 if (ata_id_has_tpm(id))
2170 ata_dev_printk(dev, KERN_WARNING,
2171 "supports DRM functions and may "
2172 "not be fully accessable.\n");
2173 }
2174
2175 dev->n_sectors = ata_id_n_sectors(id);
2176
2177 if (dev->id[59] & 0x100)
2178 dev->multi_count = dev->id[59] & 0xff;
2179
2180 if (ata_id_has_lba(id)) {
2181 const char *lba_desc;
2182 char ncq_desc[20];
2183
2184 lba_desc = "LBA";
2185 dev->flags |= ATA_DFLAG_LBA;
2186 if (ata_id_has_lba48(id)) {
2187 dev->flags |= ATA_DFLAG_LBA48;
2188 lba_desc = "LBA48";
2189
2190 if (dev->n_sectors >= (1UL << 28) &&
2191 ata_id_has_flush_ext(id))
2192 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2193 }
2194
2195 /* config NCQ */
2196 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2197
2198 /* print device info to dmesg */
2199 if (ata_msg_drv(ap) && print_info) {
2200 ata_dev_printk(dev, KERN_INFO,
2201 "%s: %s, %s, max %s\n",
2202 revbuf, modelbuf, fwrevbuf,
2203 ata_mode_string(xfer_mask));
2204 ata_dev_printk(dev, KERN_INFO,
2205 "%Lu sectors, multi %u: %s %s\n",
2206 (unsigned long long)dev->n_sectors,
2207 dev->multi_count, lba_desc, ncq_desc);
2208 }
2209 } else {
2210 /* CHS */
2211
2212 /* Default translation */
2213 dev->cylinders = id[1];
2214 dev->heads = id[3];
2215 dev->sectors = id[6];
2216
2217 if (ata_id_current_chs_valid(id)) {
2218 /* Current CHS translation is valid. */
2219 dev->cylinders = id[54];
2220 dev->heads = id[55];
2221 dev->sectors = id[56];
2222 }
2223
2224 /* print device info to dmesg */
2225 if (ata_msg_drv(ap) && print_info) {
2226 ata_dev_printk(dev, KERN_INFO,
2227 "%s: %s, %s, max %s\n",
2228 revbuf, modelbuf, fwrevbuf,
2229 ata_mode_string(xfer_mask));
2230 ata_dev_printk(dev, KERN_INFO,
2231 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2232 (unsigned long long)dev->n_sectors,
2233 dev->multi_count, dev->cylinders,
2234 dev->heads, dev->sectors);
2235 }
2236 }
2237
2238 dev->cdb_len = 16;
2239 }
2240
2241 /* ATAPI-specific feature tests */
2242 else if (dev->class == ATA_DEV_ATAPI) {
2243 const char *cdb_intr_string = "";
2244 const char *atapi_an_string = "";
2245 u32 sntf;
2246
2247 rc = atapi_cdb_len(id);
2248 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2249 if (ata_msg_warn(ap))
2250 ata_dev_printk(dev, KERN_WARNING,
2251 "unsupported CDB len\n");
2252 rc = -EINVAL;
2253 goto err_out_nosup;
2254 }
2255 dev->cdb_len = (unsigned int) rc;
2256
2257 /* Enable ATAPI AN if both the host and device have
2258 * the support. If PMP is attached, SNTF is required
2259 * to enable ATAPI AN to discern between PHY status
2260 * changed notifications and ATAPI ANs.
2261 */
2262 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2263 (!ap->nr_pmp_links ||
2264 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2265 unsigned int err_mask;
2266
2267 /* issue SET feature command to turn this on */
2268 err_mask = ata_dev_set_feature(dev,
2269 SETFEATURES_SATA_ENABLE, SATA_AN);
2270 if (err_mask)
2271 ata_dev_printk(dev, KERN_ERR,
2272 "failed to enable ATAPI AN "
2273 "(err_mask=0x%x)\n", err_mask);
2274 else {
2275 dev->flags |= ATA_DFLAG_AN;
2276 atapi_an_string = ", ATAPI AN";
2277 }
2278 }
2279
2280 if (ata_id_cdb_intr(dev->id)) {
2281 dev->flags |= ATA_DFLAG_CDB_INTR;
2282 cdb_intr_string = ", CDB intr";
2283 }
2284
2285 /* print device info to dmesg */
2286 if (ata_msg_drv(ap) && print_info)
2287 ata_dev_printk(dev, KERN_INFO,
2288 "ATAPI: %s, %s, max %s%s%s\n",
2289 modelbuf, fwrevbuf,
2290 ata_mode_string(xfer_mask),
2291 cdb_intr_string, atapi_an_string);
2292 }
2293
2294 /* determine max_sectors */
2295 dev->max_sectors = ATA_MAX_SECTORS;
2296 if (dev->flags & ATA_DFLAG_LBA48)
2297 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2298
2299 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2300 if (ata_id_has_hipm(dev->id))
2301 dev->flags |= ATA_DFLAG_HIPM;
2302 if (ata_id_has_dipm(dev->id))
2303 dev->flags |= ATA_DFLAG_DIPM;
2304 }
2305
2306 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2307 /* Let the user know. We don't want to disallow opens for
2308 rescue purposes, or in case the vendor is just a blithering
2309 idiot */
2310 if (print_info) {
2311 ata_dev_printk(dev, KERN_WARNING,
2312 "Drive reports diagnostics failure. This may indicate a drive\n");
2313 ata_dev_printk(dev, KERN_WARNING,
2314 "fault or invalid emulation. Contact drive vendor for information.\n");
2315 }
2316 }
2317
2318 /* limit bridge transfers to udma5, 200 sectors */
2319 if (ata_dev_knobble(dev)) {
2320 if (ata_msg_drv(ap) && print_info)
2321 ata_dev_printk(dev, KERN_INFO,
2322 "applying bridge limits\n");
2323 dev->udma_mask &= ATA_UDMA5;
2324 dev->max_sectors = ATA_MAX_SECTORS;
2325 }
2326
2327 if ((dev->class == ATA_DEV_ATAPI) &&
2328 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2329 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2330 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2331 }
2332
2333 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2334 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2335 dev->max_sectors);
2336
2337 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2338 dev->horkage |= ATA_HORKAGE_IPM;
2339
2340 /* reset link pm_policy for this port to no pm */
2341 ap->pm_policy = MAX_PERFORMANCE;
2342 }
2343
2344 if (ap->ops->dev_config)
2345 ap->ops->dev_config(dev);
2346
2347 if (ata_msg_probe(ap))
2348 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2349 __FUNCTION__, ata_chk_status(ap));
2350 return 0;
2351
2352 err_out_nosup:
2353 if (ata_msg_probe(ap))
2354 ata_dev_printk(dev, KERN_DEBUG,
2355 "%s: EXIT, err\n", __FUNCTION__);
2356 return rc;
2357 }
2358
2359 /**
2360 * ata_cable_40wire - return 40 wire cable type
2361 * @ap: port
2362 *
2363 * Helper method for drivers which want to hardwire 40 wire cable
2364 * detection.
2365 */
2366
2367 int ata_cable_40wire(struct ata_port *ap)
2368 {
2369 return ATA_CBL_PATA40;
2370 }
2371
2372 /**
2373 * ata_cable_80wire - return 80 wire cable type
2374 * @ap: port
2375 *
2376 * Helper method for drivers which want to hardwire 80 wire cable
2377 * detection.
2378 */
2379
2380 int ata_cable_80wire(struct ata_port *ap)
2381 {
2382 return ATA_CBL_PATA80;
2383 }
2384
2385 /**
2386 * ata_cable_unknown - return unknown PATA cable.
2387 * @ap: port
2388 *
2389 * Helper method for drivers which have no PATA cable detection.
2390 */
2391
2392 int ata_cable_unknown(struct ata_port *ap)
2393 {
2394 return ATA_CBL_PATA_UNK;
2395 }
2396
2397 /**
2398 * ata_cable_sata - return SATA cable type
2399 * @ap: port
2400 *
2401 * Helper method for drivers which have SATA cables
2402 */
2403
2404 int ata_cable_sata(struct ata_port *ap)
2405 {
2406 return ATA_CBL_SATA;
2407 }
2408
2409 /**
2410 * ata_bus_probe - Reset and probe ATA bus
2411 * @ap: Bus to probe
2412 *
2413 * Master ATA bus probing function. Initiates a hardware-dependent
2414 * bus reset, then attempts to identify any devices found on
2415 * the bus.
2416 *
2417 * LOCKING:
2418 * PCI/etc. bus probe sem.
2419 *
2420 * RETURNS:
2421 * Zero on success, negative errno otherwise.
2422 */
2423
2424 int ata_bus_probe(struct ata_port *ap)
2425 {
2426 unsigned int classes[ATA_MAX_DEVICES];
2427 int tries[ATA_MAX_DEVICES];
2428 int rc;
2429 struct ata_device *dev;
2430
2431 ata_port_probe(ap);
2432
2433 ata_link_for_each_dev(dev, &ap->link)
2434 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2435
2436 retry:
2437 ata_link_for_each_dev(dev, &ap->link) {
2438 /* If we issue an SRST then an ATA drive (not ATAPI)
2439 * may change configuration and be in PIO0 timing. If
2440 * we do a hard reset (or are coming from power on)
2441 * this is true for ATA or ATAPI. Until we've set a
2442 * suitable controller mode we should not touch the
2443 * bus as we may be talking too fast.
2444 */
2445 dev->pio_mode = XFER_PIO_0;
2446
2447 /* If the controller has a pio mode setup function
2448 * then use it to set the chipset to rights. Don't
2449 * touch the DMA setup as that will be dealt with when
2450 * configuring devices.
2451 */
2452 if (ap->ops->set_piomode)
2453 ap->ops->set_piomode(ap, dev);
2454 }
2455
2456 /* reset and determine device classes */
2457 ap->ops->phy_reset(ap);
2458
2459 ata_link_for_each_dev(dev, &ap->link) {
2460 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2461 dev->class != ATA_DEV_UNKNOWN)
2462 classes[dev->devno] = dev->class;
2463 else
2464 classes[dev->devno] = ATA_DEV_NONE;
2465
2466 dev->class = ATA_DEV_UNKNOWN;
2467 }
2468
2469 ata_port_probe(ap);
2470
2471 /* read IDENTIFY page and configure devices. We have to do the identify
2472 specific sequence bass-ackwards so that PDIAG- is released by
2473 the slave device */
2474
2475 ata_link_for_each_dev(dev, &ap->link) {
2476 if (tries[dev->devno])
2477 dev->class = classes[dev->devno];
2478
2479 if (!ata_dev_enabled(dev))
2480 continue;
2481
2482 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2483 dev->id);
2484 if (rc)
2485 goto fail;
2486 }
2487
2488 /* Now ask for the cable type as PDIAG- should have been released */
2489 if (ap->ops->cable_detect)
2490 ap->cbl = ap->ops->cable_detect(ap);
2491
2492 /* We may have SATA bridge glue hiding here irrespective of the
2493 reported cable types and sensed types */
2494 ata_link_for_each_dev(dev, &ap->link) {
2495 if (!ata_dev_enabled(dev))
2496 continue;
2497 /* SATA drives indicate we have a bridge. We don't know which
2498 end of the link the bridge is which is a problem */
2499 if (ata_id_is_sata(dev->id))
2500 ap->cbl = ATA_CBL_SATA;
2501 }
2502
2503 /* After the identify sequence we can now set up the devices. We do
2504 this in the normal order so that the user doesn't get confused */
2505
2506 ata_link_for_each_dev(dev, &ap->link) {
2507 if (!ata_dev_enabled(dev))
2508 continue;
2509
2510 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2511 rc = ata_dev_configure(dev);
2512 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2513 if (rc)
2514 goto fail;
2515 }
2516
2517 /* configure transfer mode */
2518 rc = ata_set_mode(&ap->link, &dev);
2519 if (rc)
2520 goto fail;
2521
2522 ata_link_for_each_dev(dev, &ap->link)
2523 if (ata_dev_enabled(dev))
2524 return 0;
2525
2526 /* no device present, disable port */
2527 ata_port_disable(ap);
2528 return -ENODEV;
2529
2530 fail:
2531 tries[dev->devno]--;
2532
2533 switch (rc) {
2534 case -EINVAL:
2535 /* eeek, something went very wrong, give up */
2536 tries[dev->devno] = 0;
2537 break;
2538
2539 case -ENODEV:
2540 /* give it just one more chance */
2541 tries[dev->devno] = min(tries[dev->devno], 1);
2542 case -EIO:
2543 if (tries[dev->devno] == 1) {
2544 /* This is the last chance, better to slow
2545 * down than lose it.
2546 */
2547 sata_down_spd_limit(&ap->link);
2548 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2549 }
2550 }
2551
2552 if (!tries[dev->devno])
2553 ata_dev_disable(dev);
2554
2555 goto retry;
2556 }
2557
2558 /**
2559 * ata_port_probe - Mark port as enabled
2560 * @ap: Port for which we indicate enablement
2561 *
2562 * Modify @ap data structure such that the system
2563 * thinks that the entire port is enabled.
2564 *
2565 * LOCKING: host lock, or some other form of
2566 * serialization.
2567 */
2568
2569 void ata_port_probe(struct ata_port *ap)
2570 {
2571 ap->flags &= ~ATA_FLAG_DISABLED;
2572 }
2573
2574 /**
2575 * sata_print_link_status - Print SATA link status
2576 * @link: SATA link to printk link status about
2577 *
2578 * This function prints link speed and status of a SATA link.
2579 *
2580 * LOCKING:
2581 * None.
2582 */
2583 void sata_print_link_status(struct ata_link *link)
2584 {
2585 u32 sstatus, scontrol, tmp;
2586
2587 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2588 return;
2589 sata_scr_read(link, SCR_CONTROL, &scontrol);
2590
2591 if (ata_link_online(link)) {
2592 tmp = (sstatus >> 4) & 0xf;
2593 ata_link_printk(link, KERN_INFO,
2594 "SATA link up %s (SStatus %X SControl %X)\n",
2595 sata_spd_string(tmp), sstatus, scontrol);
2596 } else {
2597 ata_link_printk(link, KERN_INFO,
2598 "SATA link down (SStatus %X SControl %X)\n",
2599 sstatus, scontrol);
2600 }
2601 }
2602
2603 /**
2604 * ata_dev_pair - return other device on cable
2605 * @adev: device
2606 *
2607 * Obtain the other device on the same cable, or if none is
2608 * present NULL is returned
2609 */
2610
2611 struct ata_device *ata_dev_pair(struct ata_device *adev)
2612 {
2613 struct ata_link *link = adev->link;
2614 struct ata_device *pair = &link->device[1 - adev->devno];
2615 if (!ata_dev_enabled(pair))
2616 return NULL;
2617 return pair;
2618 }
2619
2620 /**
2621 * ata_port_disable - Disable port.
2622 * @ap: Port to be disabled.
2623 *
2624 * Modify @ap data structure such that the system
2625 * thinks that the entire port is disabled, and should
2626 * never attempt to probe or communicate with devices
2627 * on this port.
2628 *
2629 * LOCKING: host lock, or some other form of
2630 * serialization.
2631 */
2632
2633 void ata_port_disable(struct ata_port *ap)
2634 {
2635 ap->link.device[0].class = ATA_DEV_NONE;
2636 ap->link.device[1].class = ATA_DEV_NONE;
2637 ap->flags |= ATA_FLAG_DISABLED;
2638 }
2639
2640 /**
2641 * sata_down_spd_limit - adjust SATA spd limit downward
2642 * @link: Link to adjust SATA spd limit for
2643 *
2644 * Adjust SATA spd limit of @link downward. Note that this
2645 * function only adjusts the limit. The change must be applied
2646 * using sata_set_spd().
2647 *
2648 * LOCKING:
2649 * Inherited from caller.
2650 *
2651 * RETURNS:
2652 * 0 on success, negative errno on failure
2653 */
2654 int sata_down_spd_limit(struct ata_link *link)
2655 {
2656 u32 sstatus, spd, mask;
2657 int rc, highbit;
2658
2659 if (!sata_scr_valid(link))
2660 return -EOPNOTSUPP;
2661
2662 /* If SCR can be read, use it to determine the current SPD.
2663 * If not, use cached value in link->sata_spd.
2664 */
2665 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2666 if (rc == 0)
2667 spd = (sstatus >> 4) & 0xf;
2668 else
2669 spd = link->sata_spd;
2670
2671 mask = link->sata_spd_limit;
2672 if (mask <= 1)
2673 return -EINVAL;
2674
2675 /* unconditionally mask off the highest bit */
2676 highbit = fls(mask) - 1;
2677 mask &= ~(1 << highbit);
2678
2679 /* Mask off all speeds higher than or equal to the current
2680 * one. Force 1.5Gbps if current SPD is not available.
2681 */
2682 if (spd > 1)
2683 mask &= (1 << (spd - 1)) - 1;
2684 else
2685 mask &= 1;
2686
2687 /* were we already at the bottom? */
2688 if (!mask)
2689 return -EINVAL;
2690
2691 link->sata_spd_limit = mask;
2692
2693 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2694 sata_spd_string(fls(mask)));
2695
2696 return 0;
2697 }
2698
2699 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2700 {
2701 struct ata_link *host_link = &link->ap->link;
2702 u32 limit, target, spd;
2703
2704 limit = link->sata_spd_limit;
2705
2706 /* Don't configure downstream link faster than upstream link.
2707 * It doesn't speed up anything and some PMPs choke on such
2708 * configuration.
2709 */
2710 if (!ata_is_host_link(link) && host_link->sata_spd)
2711 limit &= (1 << host_link->sata_spd) - 1;
2712
2713 if (limit == UINT_MAX)
2714 target = 0;
2715 else
2716 target = fls(limit);
2717
2718 spd = (*scontrol >> 4) & 0xf;
2719 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2720
2721 return spd != target;
2722 }
2723
2724 /**
2725 * sata_set_spd_needed - is SATA spd configuration needed
2726 * @link: Link in question
2727 *
2728 * Test whether the spd limit in SControl matches
2729 * @link->sata_spd_limit. This function is used to determine
2730 * whether hardreset is necessary to apply SATA spd
2731 * configuration.
2732 *
2733 * LOCKING:
2734 * Inherited from caller.
2735 *
2736 * RETURNS:
2737 * 1 if SATA spd configuration is needed, 0 otherwise.
2738 */
2739 int sata_set_spd_needed(struct ata_link *link)
2740 {
2741 u32 scontrol;
2742
2743 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2744 return 1;
2745
2746 return __sata_set_spd_needed(link, &scontrol);
2747 }
2748
2749 /**
2750 * sata_set_spd - set SATA spd according to spd limit
2751 * @link: Link to set SATA spd for
2752 *
2753 * Set SATA spd of @link according to sata_spd_limit.
2754 *
2755 * LOCKING:
2756 * Inherited from caller.
2757 *
2758 * RETURNS:
2759 * 0 if spd doesn't need to be changed, 1 if spd has been
2760 * changed. Negative errno if SCR registers are inaccessible.
2761 */
2762 int sata_set_spd(struct ata_link *link)
2763 {
2764 u32 scontrol;
2765 int rc;
2766
2767 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2768 return rc;
2769
2770 if (!__sata_set_spd_needed(link, &scontrol))
2771 return 0;
2772
2773 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2774 return rc;
2775
2776 return 1;
2777 }
2778
2779 /*
2780 * This mode timing computation functionality is ported over from
2781 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2782 */
2783 /*
2784 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2785 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2786 * for UDMA6, which is currently supported only by Maxtor drives.
2787 *
2788 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2789 */
2790
2791 static const struct ata_timing ata_timing[] = {
2792 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2793 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2794 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2795 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2796 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2797 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2798 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2799 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2800
2801 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2802 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2803 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2804
2805 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2806 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2807 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2808 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2809 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2810
2811 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2812 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2813 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2814 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2815 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2816 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2817 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2818 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2819
2820 { 0xFF }
2821 };
2822
2823 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2824 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
2825
2826 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2827 {
2828 q->setup = EZ(t->setup * 1000, T);
2829 q->act8b = EZ(t->act8b * 1000, T);
2830 q->rec8b = EZ(t->rec8b * 1000, T);
2831 q->cyc8b = EZ(t->cyc8b * 1000, T);
2832 q->active = EZ(t->active * 1000, T);
2833 q->recover = EZ(t->recover * 1000, T);
2834 q->cycle = EZ(t->cycle * 1000, T);
2835 q->udma = EZ(t->udma * 1000, UT);
2836 }
2837
2838 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2839 struct ata_timing *m, unsigned int what)
2840 {
2841 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2842 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2843 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2844 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2845 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2846 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2847 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2848 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2849 }
2850
2851 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2852 {
2853 const struct ata_timing *t = ata_timing;
2854
2855 while (xfer_mode > t->mode)
2856 t++;
2857
2858 if (xfer_mode == t->mode)
2859 return t;
2860 return NULL;
2861 }
2862
2863 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2864 struct ata_timing *t, int T, int UT)
2865 {
2866 const struct ata_timing *s;
2867 struct ata_timing p;
2868
2869 /*
2870 * Find the mode.
2871 */
2872
2873 if (!(s = ata_timing_find_mode(speed)))
2874 return -EINVAL;
2875
2876 memcpy(t, s, sizeof(*s));
2877
2878 /*
2879 * If the drive is an EIDE drive, it can tell us it needs extended
2880 * PIO/MW_DMA cycle timing.
2881 */
2882
2883 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2884 memset(&p, 0, sizeof(p));
2885 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2886 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2887 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2888 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2889 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2890 }
2891 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2892 }
2893
2894 /*
2895 * Convert the timing to bus clock counts.
2896 */
2897
2898 ata_timing_quantize(t, t, T, UT);
2899
2900 /*
2901 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2902 * S.M.A.R.T * and some other commands. We have to ensure that the
2903 * DMA cycle timing is slower/equal than the fastest PIO timing.
2904 */
2905
2906 if (speed > XFER_PIO_6) {
2907 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2908 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2909 }
2910
2911 /*
2912 * Lengthen active & recovery time so that cycle time is correct.
2913 */
2914
2915 if (t->act8b + t->rec8b < t->cyc8b) {
2916 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2917 t->rec8b = t->cyc8b - t->act8b;
2918 }
2919
2920 if (t->active + t->recover < t->cycle) {
2921 t->active += (t->cycle - (t->active + t->recover)) / 2;
2922 t->recover = t->cycle - t->active;
2923 }
2924
2925 /* In a few cases quantisation may produce enough errors to
2926 leave t->cycle too low for the sum of active and recovery
2927 if so we must correct this */
2928 if (t->active + t->recover > t->cycle)
2929 t->cycle = t->active + t->recover;
2930
2931 return 0;
2932 }
2933
2934 /**
2935 * ata_down_xfermask_limit - adjust dev xfer masks downward
2936 * @dev: Device to adjust xfer masks
2937 * @sel: ATA_DNXFER_* selector
2938 *
2939 * Adjust xfer masks of @dev downward. Note that this function
2940 * does not apply the change. Invoking ata_set_mode() afterwards
2941 * will apply the limit.
2942 *
2943 * LOCKING:
2944 * Inherited from caller.
2945 *
2946 * RETURNS:
2947 * 0 on success, negative errno on failure
2948 */
2949 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2950 {
2951 char buf[32];
2952 unsigned int orig_mask, xfer_mask;
2953 unsigned int pio_mask, mwdma_mask, udma_mask;
2954 int quiet, highbit;
2955
2956 quiet = !!(sel & ATA_DNXFER_QUIET);
2957 sel &= ~ATA_DNXFER_QUIET;
2958
2959 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2960 dev->mwdma_mask,
2961 dev->udma_mask);
2962 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2963
2964 switch (sel) {
2965 case ATA_DNXFER_PIO:
2966 highbit = fls(pio_mask) - 1;
2967 pio_mask &= ~(1 << highbit);
2968 break;
2969
2970 case ATA_DNXFER_DMA:
2971 if (udma_mask) {
2972 highbit = fls(udma_mask) - 1;
2973 udma_mask &= ~(1 << highbit);
2974 if (!udma_mask)
2975 return -ENOENT;
2976 } else if (mwdma_mask) {
2977 highbit = fls(mwdma_mask) - 1;
2978 mwdma_mask &= ~(1 << highbit);
2979 if (!mwdma_mask)
2980 return -ENOENT;
2981 }
2982 break;
2983
2984 case ATA_DNXFER_40C:
2985 udma_mask &= ATA_UDMA_MASK_40C;
2986 break;
2987
2988 case ATA_DNXFER_FORCE_PIO0:
2989 pio_mask &= 1;
2990 case ATA_DNXFER_FORCE_PIO:
2991 mwdma_mask = 0;
2992 udma_mask = 0;
2993 break;
2994
2995 default:
2996 BUG();
2997 }
2998
2999 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3000
3001 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3002 return -ENOENT;
3003
3004 if (!quiet) {
3005 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3006 snprintf(buf, sizeof(buf), "%s:%s",
3007 ata_mode_string(xfer_mask),
3008 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3009 else
3010 snprintf(buf, sizeof(buf), "%s",
3011 ata_mode_string(xfer_mask));
3012
3013 ata_dev_printk(dev, KERN_WARNING,
3014 "limiting speed to %s\n", buf);
3015 }
3016
3017 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3018 &dev->udma_mask);
3019
3020 return 0;
3021 }
3022
3023 static int ata_dev_set_mode(struct ata_device *dev)
3024 {
3025 struct ata_eh_context *ehc = &dev->link->eh_context;
3026 unsigned int err_mask;
3027 int rc;
3028
3029 dev->flags &= ~ATA_DFLAG_PIO;
3030 if (dev->xfer_shift == ATA_SHIFT_PIO)
3031 dev->flags |= ATA_DFLAG_PIO;
3032
3033 err_mask = ata_dev_set_xfermode(dev);
3034
3035 /* Old CFA may refuse this command, which is just fine */
3036 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
3037 err_mask &= ~AC_ERR_DEV;
3038
3039 /* Some very old devices and some bad newer ones fail any kind of
3040 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
3041 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
3042 dev->pio_mode <= XFER_PIO_2)
3043 err_mask &= ~AC_ERR_DEV;
3044
3045 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3046 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3047 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3048 dev->dma_mode == XFER_MW_DMA_0 &&
3049 (dev->id[63] >> 8) & 1)
3050 err_mask &= ~AC_ERR_DEV;
3051
3052 if (err_mask) {
3053 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3054 "(err_mask=0x%x)\n", err_mask);
3055 return -EIO;
3056 }
3057
3058 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3059 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3060 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3061 if (rc)
3062 return rc;
3063
3064 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3065 dev->xfer_shift, (int)dev->xfer_mode);
3066
3067 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
3068 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
3069 return 0;
3070 }
3071
3072 /**
3073 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3074 * @link: link on which timings will be programmed
3075 * @r_failed_dev: out paramter for failed device
3076 *
3077 * Standard implementation of the function used to tune and set
3078 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3079 * ata_dev_set_mode() fails, pointer to the failing device is
3080 * returned in @r_failed_dev.
3081 *
3082 * LOCKING:
3083 * PCI/etc. bus probe sem.
3084 *
3085 * RETURNS:
3086 * 0 on success, negative errno otherwise
3087 */
3088
3089 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3090 {
3091 struct ata_port *ap = link->ap;
3092 struct ata_device *dev;
3093 int rc = 0, used_dma = 0, found = 0;
3094
3095 /* step 1: calculate xfer_mask */
3096 ata_link_for_each_dev(dev, link) {
3097 unsigned int pio_mask, dma_mask;
3098 unsigned int mode_mask;
3099
3100 if (!ata_dev_enabled(dev))
3101 continue;
3102
3103 mode_mask = ATA_DMA_MASK_ATA;
3104 if (dev->class == ATA_DEV_ATAPI)
3105 mode_mask = ATA_DMA_MASK_ATAPI;
3106 else if (ata_id_is_cfa(dev->id))
3107 mode_mask = ATA_DMA_MASK_CFA;
3108
3109 ata_dev_xfermask(dev);
3110
3111 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3112 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3113
3114 if (libata_dma_mask & mode_mask)
3115 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3116 else
3117 dma_mask = 0;
3118
3119 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3120 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3121
3122 found = 1;
3123 if (dev->dma_mode != 0xff)
3124 used_dma = 1;
3125 }
3126 if (!found)
3127 goto out;
3128
3129 /* step 2: always set host PIO timings */
3130 ata_link_for_each_dev(dev, link) {
3131 if (!ata_dev_enabled(dev))
3132 continue;
3133
3134 if (dev->pio_mode == 0xff) {
3135 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3136 rc = -EINVAL;
3137 goto out;
3138 }
3139
3140 dev->xfer_mode = dev->pio_mode;
3141 dev->xfer_shift = ATA_SHIFT_PIO;
3142 if (ap->ops->set_piomode)
3143 ap->ops->set_piomode(ap, dev);
3144 }
3145
3146 /* step 3: set host DMA timings */
3147 ata_link_for_each_dev(dev, link) {
3148 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3149 continue;
3150
3151 dev->xfer_mode = dev->dma_mode;
3152 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3153 if (ap->ops->set_dmamode)
3154 ap->ops->set_dmamode(ap, dev);
3155 }
3156
3157 /* step 4: update devices' xfer mode */
3158 ata_link_for_each_dev(dev, link) {
3159 /* don't update suspended devices' xfer mode */
3160 if (!ata_dev_enabled(dev))
3161 continue;
3162
3163 rc = ata_dev_set_mode(dev);
3164 if (rc)
3165 goto out;
3166 }
3167
3168 /* Record simplex status. If we selected DMA then the other
3169 * host channels are not permitted to do so.
3170 */
3171 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3172 ap->host->simplex_claimed = ap;
3173
3174 out:
3175 if (rc)
3176 *r_failed_dev = dev;
3177 return rc;
3178 }
3179
3180 /**
3181 * ata_tf_to_host - issue ATA taskfile to host controller
3182 * @ap: port to which command is being issued
3183 * @tf: ATA taskfile register set
3184 *
3185 * Issues ATA taskfile register set to ATA host controller,
3186 * with proper synchronization with interrupt handler and
3187 * other threads.
3188 *
3189 * LOCKING:
3190 * spin_lock_irqsave(host lock)
3191 */
3192
3193 static inline void ata_tf_to_host(struct ata_port *ap,
3194 const struct ata_taskfile *tf)
3195 {
3196 ap->ops->tf_load(ap, tf);
3197 ap->ops->exec_command(ap, tf);
3198 }
3199
3200 /**
3201 * ata_busy_sleep - sleep until BSY clears, or timeout
3202 * @ap: port containing status register to be polled
3203 * @tmout_pat: impatience timeout
3204 * @tmout: overall timeout
3205 *
3206 * Sleep until ATA Status register bit BSY clears,
3207 * or a timeout occurs.
3208 *
3209 * LOCKING:
3210 * Kernel thread context (may sleep).
3211 *
3212 * RETURNS:
3213 * 0 on success, -errno otherwise.
3214 */
3215 int ata_busy_sleep(struct ata_port *ap,
3216 unsigned long tmout_pat, unsigned long tmout)
3217 {
3218 unsigned long timer_start, timeout;
3219 u8 status;
3220
3221 status = ata_busy_wait(ap, ATA_BUSY, 300);
3222 timer_start = jiffies;
3223 timeout = timer_start + tmout_pat;
3224 while (status != 0xff && (status & ATA_BUSY) &&
3225 time_before(jiffies, timeout)) {
3226 msleep(50);
3227 status = ata_busy_wait(ap, ATA_BUSY, 3);
3228 }
3229
3230 if (status != 0xff && (status & ATA_BUSY))
3231 ata_port_printk(ap, KERN_WARNING,
3232 "port is slow to respond, please be patient "
3233 "(Status 0x%x)\n", status);
3234
3235 timeout = timer_start + tmout;
3236 while (status != 0xff && (status & ATA_BUSY) &&
3237 time_before(jiffies, timeout)) {
3238 msleep(50);
3239 status = ata_chk_status(ap);
3240 }
3241
3242 if (status == 0xff)
3243 return -ENODEV;
3244
3245 if (status & ATA_BUSY) {
3246 ata_port_printk(ap, KERN_ERR, "port failed to respond "
3247 "(%lu secs, Status 0x%x)\n",
3248 tmout / HZ, status);
3249 return -EBUSY;
3250 }
3251
3252 return 0;
3253 }
3254
3255 /**
3256 * ata_wait_after_reset - wait before checking status after reset
3257 * @ap: port containing status register to be polled
3258 * @deadline: deadline jiffies for the operation
3259 *
3260 * After reset, we need to pause a while before reading status.
3261 * Also, certain combination of controller and device report 0xff
3262 * for some duration (e.g. until SATA PHY is up and running)
3263 * which is interpreted as empty port in ATA world. This
3264 * function also waits for such devices to get out of 0xff
3265 * status.
3266 *
3267 * LOCKING:
3268 * Kernel thread context (may sleep).
3269 */
3270 void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
3271 {
3272 unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
3273
3274 if (time_before(until, deadline))
3275 deadline = until;
3276
3277 /* Spec mandates ">= 2ms" before checking status. We wait
3278 * 150ms, because that was the magic delay used for ATAPI
3279 * devices in Hale Landis's ATADRVR, for the period of time
3280 * between when the ATA command register is written, and then
3281 * status is checked. Because waiting for "a while" before
3282 * checking status is fine, post SRST, we perform this magic
3283 * delay here as well.
3284 *
3285 * Old drivers/ide uses the 2mS rule and then waits for ready.
3286 */
3287 msleep(150);
3288
3289 /* Wait for 0xff to clear. Some SATA devices take a long time
3290 * to clear 0xff after reset. For example, HHD424020F7SV00
3291 * iVDR needs >= 800ms while. Quantum GoVault needs even more
3292 * than that.
3293 *
3294 * Note that some PATA controllers (pata_ali) explode if
3295 * status register is read more than once when there's no
3296 * device attached.
3297 */
3298 if (ap->flags & ATA_FLAG_SATA) {
3299 while (1) {
3300 u8 status = ata_chk_status(ap);
3301
3302 if (status != 0xff || time_after(jiffies, deadline))
3303 return;
3304
3305 msleep(50);
3306 }
3307 }
3308 }
3309
3310 /**
3311 * ata_wait_ready - sleep until BSY clears, or timeout
3312 * @ap: port containing status register to be polled
3313 * @deadline: deadline jiffies for the operation
3314 *
3315 * Sleep until ATA Status register bit BSY clears, or timeout
3316 * occurs.
3317 *
3318 * LOCKING:
3319 * Kernel thread context (may sleep).
3320 *
3321 * RETURNS:
3322 * 0 on success, -errno otherwise.
3323 */
3324 int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3325 {
3326 unsigned long start = jiffies;
3327 int warned = 0;
3328
3329 while (1) {
3330 u8 status = ata_chk_status(ap);
3331 unsigned long now = jiffies;
3332
3333 if (!(status & ATA_BUSY))
3334 return 0;
3335 if (!ata_link_online(&ap->link) && status == 0xff)
3336 return -ENODEV;
3337 if (time_after(now, deadline))
3338 return -EBUSY;
3339
3340 if (!warned && time_after(now, start + 5 * HZ) &&
3341 (deadline - now > 3 * HZ)) {
3342 ata_port_printk(ap, KERN_WARNING,
3343 "port is slow to respond, please be patient "
3344 "(Status 0x%x)\n", status);
3345 warned = 1;
3346 }
3347
3348 msleep(50);
3349 }
3350 }
3351
3352 static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3353 unsigned long deadline)
3354 {
3355 struct ata_ioports *ioaddr = &ap->ioaddr;
3356 unsigned int dev0 = devmask & (1 << 0);
3357 unsigned int dev1 = devmask & (1 << 1);
3358 int rc, ret = 0;
3359
3360 /* if device 0 was found in ata_devchk, wait for its
3361 * BSY bit to clear
3362 */
3363 if (dev0) {
3364 rc = ata_wait_ready(ap, deadline);
3365 if (rc) {
3366 if (rc != -ENODEV)
3367 return rc;
3368 ret = rc;
3369 }
3370 }
3371
3372 /* if device 1 was found in ata_devchk, wait for register
3373 * access briefly, then wait for BSY to clear.
3374 */
3375 if (dev1) {
3376 int i;
3377
3378 ap->ops->dev_select(ap, 1);
3379
3380 /* Wait for register access. Some ATAPI devices fail
3381 * to set nsect/lbal after reset, so don't waste too
3382 * much time on it. We're gonna wait for !BSY anyway.
3383 */
3384 for (i = 0; i < 2; i++) {
3385 u8 nsect, lbal;
3386
3387 nsect = ioread8(ioaddr->nsect_addr);
3388 lbal = ioread8(ioaddr->lbal_addr);
3389 if ((nsect == 1) && (lbal == 1))
3390 break;
3391 msleep(50); /* give drive a breather */
3392 }
3393
3394 rc = ata_wait_ready(ap, deadline);
3395 if (rc) {
3396 if (rc != -ENODEV)
3397 return rc;
3398 ret = rc;
3399 }
3400 }
3401
3402 /* is all this really necessary? */
3403 ap->ops->dev_select(ap, 0);
3404 if (dev1)
3405 ap->ops->dev_select(ap, 1);
3406 if (dev0)
3407 ap->ops->dev_select(ap, 0);
3408
3409 return ret;
3410 }
3411
3412 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3413 unsigned long deadline)
3414 {
3415 struct ata_ioports *ioaddr = &ap->ioaddr;
3416
3417 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3418
3419 /* software reset. causes dev0 to be selected */
3420 iowrite8(ap->ctl, ioaddr->ctl_addr);
3421 udelay(20); /* FIXME: flush */
3422 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3423 udelay(20); /* FIXME: flush */
3424 iowrite8(ap->ctl, ioaddr->ctl_addr);
3425
3426 /* wait a while before checking status */
3427 ata_wait_after_reset(ap, deadline);
3428
3429 /* Before we perform post reset processing we want to see if
3430 * the bus shows 0xFF because the odd clown forgets the D7
3431 * pulldown resistor.
3432 */
3433 if (ata_chk_status(ap) == 0xFF)
3434 return -ENODEV;
3435
3436 return ata_bus_post_reset(ap, devmask, deadline);
3437 }
3438
3439 /**
3440 * ata_bus_reset - reset host port and associated ATA channel
3441 * @ap: port to reset
3442 *
3443 * This is typically the first time we actually start issuing
3444 * commands to the ATA channel. We wait for BSY to clear, then
3445 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3446 * result. Determine what devices, if any, are on the channel
3447 * by looking at the device 0/1 error register. Look at the signature
3448 * stored in each device's taskfile registers, to determine if
3449 * the device is ATA or ATAPI.
3450 *
3451 * LOCKING:
3452 * PCI/etc. bus probe sem.
3453 * Obtains host lock.
3454 *
3455 * SIDE EFFECTS:
3456 * Sets ATA_FLAG_DISABLED if bus reset fails.
3457 */
3458
3459 void ata_bus_reset(struct ata_port *ap)
3460 {
3461 struct ata_device *device = ap->link.device;
3462 struct ata_ioports *ioaddr = &ap->ioaddr;
3463 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3464 u8 err;
3465 unsigned int dev0, dev1 = 0, devmask = 0;
3466 int rc;
3467
3468 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3469
3470 /* determine if device 0/1 are present */
3471 if (ap->flags & ATA_FLAG_SATA_RESET)
3472 dev0 = 1;
3473 else {
3474 dev0 = ata_devchk(ap, 0);
3475 if (slave_possible)
3476 dev1 = ata_devchk(ap, 1);
3477 }
3478
3479 if (dev0)
3480 devmask |= (1 << 0);
3481 if (dev1)
3482 devmask |= (1 << 1);
3483
3484 /* select device 0 again */
3485 ap->ops->dev_select(ap, 0);
3486
3487 /* issue bus reset */
3488 if (ap->flags & ATA_FLAG_SRST) {
3489 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3490 if (rc && rc != -ENODEV)
3491 goto err_out;
3492 }
3493
3494 /*
3495 * determine by signature whether we have ATA or ATAPI devices
3496 */
3497 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3498 if ((slave_possible) && (err != 0x81))
3499 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3500
3501 /* is double-select really necessary? */
3502 if (device[1].class != ATA_DEV_NONE)
3503 ap->ops->dev_select(ap, 1);
3504 if (device[0].class != ATA_DEV_NONE)
3505 ap->ops->dev_select(ap, 0);
3506
3507 /* if no devices were detected, disable this port */
3508 if ((device[0].class == ATA_DEV_NONE) &&
3509 (device[1].class == ATA_DEV_NONE))
3510 goto err_out;
3511
3512 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3513 /* set up device control for ATA_FLAG_SATA_RESET */
3514 iowrite8(ap->ctl, ioaddr->ctl_addr);
3515 }
3516
3517 DPRINTK("EXIT\n");
3518 return;
3519
3520 err_out:
3521 ata_port_printk(ap, KERN_ERR, "disabling port\n");
3522 ata_port_disable(ap);
3523
3524 DPRINTK("EXIT\n");
3525 }
3526
3527 /**
3528 * sata_link_debounce - debounce SATA phy status
3529 * @link: ATA link to debounce SATA phy status for
3530 * @params: timing parameters { interval, duratinon, timeout } in msec
3531 * @deadline: deadline jiffies for the operation
3532 *
3533 * Make sure SStatus of @link reaches stable state, determined by
3534 * holding the same value where DET is not 1 for @duration polled
3535 * every @interval, before @timeout. Timeout constraints the
3536 * beginning of the stable state. Because DET gets stuck at 1 on
3537 * some controllers after hot unplugging, this functions waits
3538 * until timeout then returns 0 if DET is stable at 1.
3539 *
3540 * @timeout is further limited by @deadline. The sooner of the
3541 * two is used.
3542 *
3543 * LOCKING:
3544 * Kernel thread context (may sleep)
3545 *
3546 * RETURNS:
3547 * 0 on success, -errno on failure.
3548 */
3549 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3550 unsigned long deadline)
3551 {
3552 unsigned long interval_msec = params[0];
3553 unsigned long duration = msecs_to_jiffies(params[1]);
3554 unsigned long last_jiffies, t;
3555 u32 last, cur;
3556 int rc;
3557
3558 t = jiffies + msecs_to_jiffies(params[2]);
3559 if (time_before(t, deadline))
3560 deadline = t;
3561
3562 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3563 return rc;
3564 cur &= 0xf;
3565
3566 last = cur;
3567 last_jiffies = jiffies;
3568
3569 while (1) {
3570 msleep(interval_msec);
3571 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3572 return rc;
3573 cur &= 0xf;
3574
3575 /* DET stable? */
3576 if (cur == last) {
3577 if (cur == 1 && time_before(jiffies, deadline))
3578 continue;
3579 if (time_after(jiffies, last_jiffies + duration))
3580 return 0;
3581 continue;
3582 }
3583
3584 /* unstable, start over */
3585 last = cur;
3586 last_jiffies = jiffies;
3587
3588 /* Check deadline. If debouncing failed, return
3589 * -EPIPE to tell upper layer to lower link speed.
3590 */
3591 if (time_after(jiffies, deadline))
3592 return -EPIPE;
3593 }
3594 }
3595
3596 /**
3597 * sata_link_resume - resume SATA link
3598 * @link: ATA link to resume SATA
3599 * @params: timing parameters { interval, duratinon, timeout } in msec
3600 * @deadline: deadline jiffies for the operation
3601 *
3602 * Resume SATA phy @link and debounce it.
3603 *
3604 * LOCKING:
3605 * Kernel thread context (may sleep)
3606 *
3607 * RETURNS:
3608 * 0 on success, -errno on failure.
3609 */
3610 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3611 unsigned long deadline)
3612 {
3613 u32 scontrol;
3614 int rc;
3615
3616 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3617 return rc;
3618
3619 scontrol = (scontrol & 0x0f0) | 0x300;
3620
3621 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3622 return rc;
3623
3624 /* Some PHYs react badly if SStatus is pounded immediately
3625 * after resuming. Delay 200ms before debouncing.
3626 */
3627 msleep(200);
3628
3629 return sata_link_debounce(link, params, deadline);
3630 }
3631
3632 /**
3633 * ata_std_prereset - prepare for reset
3634 * @link: ATA link to be reset
3635 * @deadline: deadline jiffies for the operation
3636 *
3637 * @link is about to be reset. Initialize it. Failure from
3638 * prereset makes libata abort whole reset sequence and give up
3639 * that port, so prereset should be best-effort. It does its
3640 * best to prepare for reset sequence but if things go wrong, it
3641 * should just whine, not fail.
3642 *
3643 * LOCKING:
3644 * Kernel thread context (may sleep)
3645 *
3646 * RETURNS:
3647 * 0 on success, -errno otherwise.
3648 */
3649 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3650 {
3651 struct ata_port *ap = link->ap;
3652 struct ata_eh_context *ehc = &link->eh_context;
3653 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3654 int rc;
3655
3656 /* handle link resume */
3657 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3658 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3659 ehc->i.action |= ATA_EH_HARDRESET;
3660
3661 /* Some PMPs don't work with only SRST, force hardreset if PMP
3662 * is supported.
3663 */
3664 if (ap->flags & ATA_FLAG_PMP)
3665 ehc->i.action |= ATA_EH_HARDRESET;
3666
3667 /* if we're about to do hardreset, nothing more to do */
3668 if (ehc->i.action & ATA_EH_HARDRESET)
3669 return 0;
3670
3671 /* if SATA, resume link */
3672 if (ap->flags & ATA_FLAG_SATA) {
3673 rc = sata_link_resume(link, timing, deadline);
3674 /* whine about phy resume failure but proceed */
3675 if (rc && rc != -EOPNOTSUPP)
3676 ata_link_printk(link, KERN_WARNING, "failed to resume "
3677 "link for reset (errno=%d)\n", rc);
3678 }
3679
3680 /* Wait for !BSY if the controller can wait for the first D2H
3681 * Reg FIS and we don't know that no device is attached.
3682 */
3683 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3684 rc = ata_wait_ready(ap, deadline);
3685 if (rc && rc != -ENODEV) {
3686 ata_link_printk(link, KERN_WARNING, "device not ready "
3687 "(errno=%d), forcing hardreset\n", rc);
3688 ehc->i.action |= ATA_EH_HARDRESET;
3689 }
3690 }
3691
3692 return 0;
3693 }
3694
3695 /**
3696 * ata_std_softreset - reset host port via ATA SRST
3697 * @link: ATA link to reset
3698 * @classes: resulting classes of attached devices
3699 * @deadline: deadline jiffies for the operation
3700 *
3701 * Reset host port using ATA SRST.
3702 *
3703 * LOCKING:
3704 * Kernel thread context (may sleep)
3705 *
3706 * RETURNS:
3707 * 0 on success, -errno otherwise.
3708 */
3709 int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3710 unsigned long deadline)
3711 {
3712 struct ata_port *ap = link->ap;
3713 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3714 unsigned int devmask = 0;
3715 int rc;
3716 u8 err;
3717
3718 DPRINTK("ENTER\n");
3719
3720 if (ata_link_offline(link)) {
3721 classes[0] = ATA_DEV_NONE;
3722 goto out;
3723 }
3724
3725 /* determine if device 0/1 are present */
3726 if (ata_devchk(ap, 0))
3727 devmask |= (1 << 0);
3728 if (slave_possible && ata_devchk(ap, 1))
3729 devmask |= (1 << 1);
3730
3731 /* select device 0 again */
3732 ap->ops->dev_select(ap, 0);
3733
3734 /* issue bus reset */
3735 DPRINTK("about to softreset, devmask=%x\n", devmask);
3736 rc = ata_bus_softreset(ap, devmask, deadline);
3737 /* if link is occupied, -ENODEV too is an error */
3738 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3739 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3740 return rc;
3741 }
3742
3743 /* determine by signature whether we have ATA or ATAPI devices */
3744 classes[0] = ata_dev_try_classify(&link->device[0],
3745 devmask & (1 << 0), &err);
3746 if (slave_possible && err != 0x81)
3747 classes[1] = ata_dev_try_classify(&link->device[1],
3748 devmask & (1 << 1), &err);
3749
3750 out:
3751 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3752 return 0;
3753 }
3754
3755 /**
3756 * sata_link_hardreset - reset link via SATA phy reset
3757 * @link: link to reset
3758 * @timing: timing parameters { interval, duratinon, timeout } in msec
3759 * @deadline: deadline jiffies for the operation
3760 *
3761 * SATA phy-reset @link using DET bits of SControl register.
3762 *
3763 * LOCKING:
3764 * Kernel thread context (may sleep)
3765 *
3766 * RETURNS:
3767 * 0 on success, -errno otherwise.
3768 */
3769 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3770 unsigned long deadline)
3771 {
3772 u32 scontrol;
3773 int rc;
3774
3775 DPRINTK("ENTER\n");
3776
3777 if (sata_set_spd_needed(link)) {
3778 /* SATA spec says nothing about how to reconfigure
3779 * spd. To be on the safe side, turn off phy during
3780 * reconfiguration. This works for at least ICH7 AHCI
3781 * and Sil3124.
3782 */
3783 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3784 goto out;
3785
3786 scontrol = (scontrol & 0x0f0) | 0x304;
3787
3788 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3789 goto out;
3790
3791 sata_set_spd(link);
3792 }
3793
3794 /* issue phy wake/reset */
3795 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3796 goto out;
3797
3798 scontrol = (scontrol & 0x0f0) | 0x301;
3799
3800 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3801 goto out;
3802
3803 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3804 * 10.4.2 says at least 1 ms.
3805 */
3806 msleep(1);
3807
3808 /* bring link back */
3809 rc = sata_link_resume(link, timing, deadline);
3810 out:
3811 DPRINTK("EXIT, rc=%d\n", rc);
3812 return rc;
3813 }
3814
3815 /**
3816 * sata_std_hardreset - reset host port via SATA phy reset
3817 * @link: link to reset
3818 * @class: resulting class of attached device
3819 * @deadline: deadline jiffies for the operation
3820 *
3821 * SATA phy-reset host port using DET bits of SControl register,
3822 * wait for !BSY and classify the attached device.
3823 *
3824 * LOCKING:
3825 * Kernel thread context (may sleep)
3826 *
3827 * RETURNS:
3828 * 0 on success, -errno otherwise.
3829 */
3830 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3831 unsigned long deadline)
3832 {
3833 struct ata_port *ap = link->ap;
3834 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3835 int rc;
3836
3837 DPRINTK("ENTER\n");
3838
3839 /* do hardreset */
3840 rc = sata_link_hardreset(link, timing, deadline);
3841 if (rc) {
3842 ata_link_printk(link, KERN_ERR,
3843 "COMRESET failed (errno=%d)\n", rc);
3844 return rc;
3845 }
3846
3847 /* TODO: phy layer with polling, timeouts, etc. */
3848 if (ata_link_offline(link)) {
3849 *class = ATA_DEV_NONE;
3850 DPRINTK("EXIT, link offline\n");
3851 return 0;
3852 }
3853
3854 /* wait a while before checking status */
3855 ata_wait_after_reset(ap, deadline);
3856
3857 /* If PMP is supported, we have to do follow-up SRST. Note
3858 * that some PMPs don't send D2H Reg FIS after hardreset at
3859 * all if the first port is empty. Wait for it just for a
3860 * second and request follow-up SRST.
3861 */
3862 if (ap->flags & ATA_FLAG_PMP) {
3863 ata_wait_ready(ap, jiffies + HZ);
3864 return -EAGAIN;
3865 }
3866
3867 rc = ata_wait_ready(ap, deadline);
3868 /* link occupied, -ENODEV too is an error */
3869 if (rc) {
3870 ata_link_printk(link, KERN_ERR,
3871 "COMRESET failed (errno=%d)\n", rc);
3872 return rc;
3873 }
3874
3875 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3876
3877 *class = ata_dev_try_classify(link->device, 1, NULL);
3878
3879 DPRINTK("EXIT, class=%u\n", *class);
3880 return 0;
3881 }
3882
3883 /**
3884 * ata_std_postreset - standard postreset callback
3885 * @link: the target ata_link
3886 * @classes: classes of attached devices
3887 *
3888 * This function is invoked after a successful reset. Note that
3889 * the device might have been reset more than once using
3890 * different reset methods before postreset is invoked.
3891 *
3892 * LOCKING:
3893 * Kernel thread context (may sleep)
3894 */
3895 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3896 {
3897 struct ata_port *ap = link->ap;
3898 u32 serror;
3899
3900 DPRINTK("ENTER\n");
3901
3902 /* print link status */
3903 sata_print_link_status(link);
3904
3905 /* clear SError */
3906 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3907 sata_scr_write(link, SCR_ERROR, serror);
3908 link->eh_info.serror = 0;
3909
3910 /* is double-select really necessary? */
3911 if (classes[0] != ATA_DEV_NONE)
3912 ap->ops->dev_select(ap, 1);
3913 if (classes[1] != ATA_DEV_NONE)
3914 ap->ops->dev_select(ap, 0);
3915
3916 /* bail out if no device is present */
3917 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3918 DPRINTK("EXIT, no device\n");
3919 return;
3920 }
3921
3922 /* set up device control */
3923 if (ap->ioaddr.ctl_addr)
3924 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3925
3926 DPRINTK("EXIT\n");
3927 }
3928
3929 /**
3930 * ata_dev_same_device - Determine whether new ID matches configured device
3931 * @dev: device to compare against
3932 * @new_class: class of the new device
3933 * @new_id: IDENTIFY page of the new device
3934 *
3935 * Compare @new_class and @new_id against @dev and determine
3936 * whether @dev is the device indicated by @new_class and
3937 * @new_id.
3938 *
3939 * LOCKING:
3940 * None.
3941 *
3942 * RETURNS:
3943 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3944 */
3945 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3946 const u16 *new_id)
3947 {
3948 const u16 *old_id = dev->id;
3949 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3950 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3951
3952 if (dev->class != new_class) {
3953 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3954 dev->class, new_class);
3955 return 0;
3956 }
3957
3958 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3959 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3960 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3961 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3962
3963 if (strcmp(model[0], model[1])) {
3964 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3965 "'%s' != '%s'\n", model[0], model[1]);
3966 return 0;
3967 }
3968
3969 if (strcmp(serial[0], serial[1])) {
3970 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3971 "'%s' != '%s'\n", serial[0], serial[1]);
3972 return 0;
3973 }
3974
3975 return 1;
3976 }
3977
3978 /**
3979 * ata_dev_reread_id - Re-read IDENTIFY data
3980 * @dev: target ATA device
3981 * @readid_flags: read ID flags
3982 *
3983 * Re-read IDENTIFY page and make sure @dev is still attached to
3984 * the port.
3985 *
3986 * LOCKING:
3987 * Kernel thread context (may sleep)
3988 *
3989 * RETURNS:
3990 * 0 on success, negative errno otherwise
3991 */
3992 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3993 {
3994 unsigned int class = dev->class;
3995 u16 *id = (void *)dev->link->ap->sector_buf;
3996 int rc;
3997
3998 /* read ID data */
3999 rc = ata_dev_read_id(dev, &class, readid_flags, id);
4000 if (rc)
4001 return rc;
4002
4003 /* is the device still there? */
4004 if (!ata_dev_same_device(dev, class, id))
4005 return -ENODEV;
4006
4007 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4008 return 0;
4009 }
4010
4011 /**
4012 * ata_dev_revalidate - Revalidate ATA device
4013 * @dev: device to revalidate
4014 * @new_class: new class code
4015 * @readid_flags: read ID flags
4016 *
4017 * Re-read IDENTIFY page, make sure @dev is still attached to the
4018 * port and reconfigure it according to the new IDENTIFY page.
4019 *
4020 * LOCKING:
4021 * Kernel thread context (may sleep)
4022 *
4023 * RETURNS:
4024 * 0 on success, negative errno otherwise
4025 */
4026 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4027 unsigned int readid_flags)
4028 {
4029 u64 n_sectors = dev->n_sectors;
4030 int rc;
4031
4032 if (!ata_dev_enabled(dev))
4033 return -ENODEV;
4034
4035 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4036 if (ata_class_enabled(new_class) &&
4037 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4038 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4039 dev->class, new_class);
4040 rc = -ENODEV;
4041 goto fail;
4042 }
4043
4044 /* re-read ID */
4045 rc = ata_dev_reread_id(dev, readid_flags);
4046 if (rc)
4047 goto fail;
4048
4049 /* configure device according to the new ID */
4050 rc = ata_dev_configure(dev);
4051 if (rc)
4052 goto fail;
4053
4054 /* verify n_sectors hasn't changed */
4055 if (dev->class == ATA_DEV_ATA && n_sectors &&
4056 dev->n_sectors != n_sectors) {
4057 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
4058 "%llu != %llu\n",
4059 (unsigned long long)n_sectors,
4060 (unsigned long long)dev->n_sectors);
4061
4062 /* restore original n_sectors */
4063 dev->n_sectors = n_sectors;
4064
4065 rc = -ENODEV;
4066 goto fail;
4067 }
4068
4069 return 0;
4070
4071 fail:
4072 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4073 return rc;
4074 }
4075
4076 struct ata_blacklist_entry {
4077 const char *model_num;
4078 const char *model_rev;
4079 unsigned long horkage;
4080 };
4081
4082 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4083 /* Devices with DMA related problems under Linux */
4084 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4085 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4086 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4087 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4088 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4089 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4090 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4091 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4092 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4093 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
4094 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
4095 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4096 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4097 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4098 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4099 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4100 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
4101 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
4102 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4103 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4104 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4105 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4106 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4107 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4108 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4109 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4110 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4111 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4112 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4113 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4114 /* Odd clown on sil3726/4726 PMPs */
4115 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
4116 ATA_HORKAGE_SKIP_PM },
4117
4118 /* Weird ATAPI devices */
4119 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4120
4121 /* Devices we expect to fail diagnostics */
4122
4123 /* Devices where NCQ should be avoided */
4124 /* NCQ is slow */
4125 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4126 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4127 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4128 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4129 /* NCQ is broken */
4130 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4131 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4132 { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
4133 { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
4134 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4135 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4136
4137 /* Blacklist entries taken from Silicon Image 3124/3132
4138 Windows driver .inf file - also several Linux problem reports */
4139 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4140 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4141 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4142
4143 /* devices which puke on READ_NATIVE_MAX */
4144 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4145 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4146 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4147 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4148
4149 /* Devices which report 1 sector over size HPA */
4150 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4151 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4152
4153 /* Devices which get the IVB wrong */
4154 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4155 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
4156 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
4157 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
4158 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
4159
4160 /* End Marker */
4161 { }
4162 };
4163
4164 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4165 {
4166 const char *p;
4167 int len;
4168
4169 /*
4170 * check for trailing wildcard: *\0
4171 */
4172 p = strchr(patt, wildchar);
4173 if (p && ((*(p + 1)) == 0))
4174 len = p - patt;
4175 else {
4176 len = strlen(name);
4177 if (!len) {
4178 if (!*patt)
4179 return 0;
4180 return -1;
4181 }
4182 }
4183
4184 return strncmp(patt, name, len);
4185 }
4186
4187 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4188 {
4189 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4190 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4191 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4192
4193 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4194 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4195
4196 while (ad->model_num) {
4197 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
4198 if (ad->model_rev == NULL)
4199 return ad->horkage;
4200 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4201 return ad->horkage;
4202 }
4203 ad++;
4204 }
4205 return 0;
4206 }
4207
4208 static int ata_dma_blacklisted(const struct ata_device *dev)
4209 {
4210 /* We don't support polling DMA.
4211 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4212 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4213 */
4214 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4215 (dev->flags & ATA_DFLAG_CDB_INTR))
4216 return 1;
4217 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4218 }
4219
4220 /**
4221 * ata_is_40wire - check drive side detection
4222 * @dev: device
4223 *
4224 * Perform drive side detection decoding, allowing for device vendors
4225 * who can't follow the documentation.
4226 */
4227
4228 static int ata_is_40wire(struct ata_device *dev)
4229 {
4230 if (dev->horkage & ATA_HORKAGE_IVB)
4231 return ata_drive_40wire_relaxed(dev->id);
4232 return ata_drive_40wire(dev->id);
4233 }
4234
4235 /**
4236 * ata_dev_xfermask - Compute supported xfermask of the given device
4237 * @dev: Device to compute xfermask for
4238 *
4239 * Compute supported xfermask of @dev and store it in
4240 * dev->*_mask. This function is responsible for applying all
4241 * known limits including host controller limits, device
4242 * blacklist, etc...
4243 *
4244 * LOCKING:
4245 * None.
4246 */
4247 static void ata_dev_xfermask(struct ata_device *dev)
4248 {
4249 struct ata_link *link = dev->link;
4250 struct ata_port *ap = link->ap;
4251 struct ata_host *host = ap->host;
4252 unsigned long xfer_mask;
4253
4254 /* controller modes available */
4255 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4256 ap->mwdma_mask, ap->udma_mask);
4257
4258 /* drive modes available */
4259 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4260 dev->mwdma_mask, dev->udma_mask);
4261 xfer_mask &= ata_id_xfermask(dev->id);
4262
4263 /*
4264 * CFA Advanced TrueIDE timings are not allowed on a shared
4265 * cable
4266 */
4267 if (ata_dev_pair(dev)) {
4268 /* No PIO5 or PIO6 */
4269 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4270 /* No MWDMA3 or MWDMA 4 */
4271 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4272 }
4273
4274 if (ata_dma_blacklisted(dev)) {
4275 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4276 ata_dev_printk(dev, KERN_WARNING,
4277 "device is on DMA blacklist, disabling DMA\n");
4278 }
4279
4280 if ((host->flags & ATA_HOST_SIMPLEX) &&
4281 host->simplex_claimed && host->simplex_claimed != ap) {
4282 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4283 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4284 "other device, disabling DMA\n");
4285 }
4286
4287 if (ap->flags & ATA_FLAG_NO_IORDY)
4288 xfer_mask &= ata_pio_mask_no_iordy(dev);
4289
4290 if (ap->ops->mode_filter)
4291 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4292
4293 /* Apply cable rule here. Don't apply it early because when
4294 * we handle hot plug the cable type can itself change.
4295 * Check this last so that we know if the transfer rate was
4296 * solely limited by the cable.
4297 * Unknown or 80 wire cables reported host side are checked
4298 * drive side as well. Cases where we know a 40wire cable
4299 * is used safely for 80 are not checked here.
4300 */
4301 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4302 /* UDMA/44 or higher would be available */
4303 if ((ap->cbl == ATA_CBL_PATA40) ||
4304 (ata_is_40wire(dev) &&
4305 (ap->cbl == ATA_CBL_PATA_UNK ||
4306 ap->cbl == ATA_CBL_PATA80))) {
4307 ata_dev_printk(dev, KERN_WARNING,
4308 "limited to UDMA/33 due to 40-wire cable\n");
4309 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4310 }
4311
4312 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4313 &dev->mwdma_mask, &dev->udma_mask);
4314 }
4315
4316 /**
4317 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4318 * @dev: Device to which command will be sent
4319 *
4320 * Issue SET FEATURES - XFER MODE command to device @dev
4321 * on port @ap.
4322 *
4323 * LOCKING:
4324 * PCI/etc. bus probe sem.
4325 *
4326 * RETURNS:
4327 * 0 on success, AC_ERR_* mask otherwise.
4328 */
4329
4330 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4331 {
4332 struct ata_taskfile tf;
4333 unsigned int err_mask;
4334
4335 /* set up set-features taskfile */
4336 DPRINTK("set features - xfer mode\n");
4337
4338 /* Some controllers and ATAPI devices show flaky interrupt
4339 * behavior after setting xfer mode. Use polling instead.
4340 */
4341 ata_tf_init(dev, &tf);
4342 tf.command = ATA_CMD_SET_FEATURES;
4343 tf.feature = SETFEATURES_XFER;
4344 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4345 tf.protocol = ATA_PROT_NODATA;
4346 /* If we are using IORDY we must send the mode setting command */
4347 if (ata_pio_need_iordy(dev))
4348 tf.nsect = dev->xfer_mode;
4349 /* If the device has IORDY and the controller does not - turn it off */
4350 else if (ata_id_has_iordy(dev->id))
4351 tf.nsect = 0x01;
4352 else /* In the ancient relic department - skip all of this */
4353 return 0;
4354
4355 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4356
4357 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4358 return err_mask;
4359 }
4360 /**
4361 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4362 * @dev: Device to which command will be sent
4363 * @enable: Whether to enable or disable the feature
4364 * @feature: The sector count represents the feature to set
4365 *
4366 * Issue SET FEATURES - SATA FEATURES command to device @dev
4367 * on port @ap with sector count
4368 *
4369 * LOCKING:
4370 * PCI/etc. bus probe sem.
4371 *
4372 * RETURNS:
4373 * 0 on success, AC_ERR_* mask otherwise.
4374 */
4375 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4376 u8 feature)
4377 {
4378 struct ata_taskfile tf;
4379 unsigned int err_mask;
4380
4381 /* set up set-features taskfile */
4382 DPRINTK("set features - SATA features\n");
4383
4384 ata_tf_init(dev, &tf);
4385 tf.command = ATA_CMD_SET_FEATURES;
4386 tf.feature = enable;
4387 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4388 tf.protocol = ATA_PROT_NODATA;
4389 tf.nsect = feature;
4390
4391 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4392
4393 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4394 return err_mask;
4395 }
4396
4397 /**
4398 * ata_dev_init_params - Issue INIT DEV PARAMS command
4399 * @dev: Device to which command will be sent
4400 * @heads: Number of heads (taskfile parameter)
4401 * @sectors: Number of sectors (taskfile parameter)
4402 *
4403 * LOCKING:
4404 * Kernel thread context (may sleep)
4405 *
4406 * RETURNS:
4407 * 0 on success, AC_ERR_* mask otherwise.
4408 */
4409 static unsigned int ata_dev_init_params(struct ata_device *dev,
4410 u16 heads, u16 sectors)
4411 {
4412 struct ata_taskfile tf;
4413 unsigned int err_mask;
4414
4415 /* Number of sectors per track 1-255. Number of heads 1-16 */
4416 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4417 return AC_ERR_INVALID;
4418
4419 /* set up init dev params taskfile */
4420 DPRINTK("init dev params \n");
4421
4422 ata_tf_init(dev, &tf);
4423 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4424 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4425 tf.protocol = ATA_PROT_NODATA;
4426 tf.nsect = sectors;
4427 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4428
4429 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4430 /* A clean abort indicates an original or just out of spec drive
4431 and we should continue as we issue the setup based on the
4432 drive reported working geometry */
4433 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4434 err_mask = 0;
4435
4436 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4437 return err_mask;
4438 }
4439
4440 /**
4441 * ata_sg_clean - Unmap DMA memory associated with command
4442 * @qc: Command containing DMA memory to be released
4443 *
4444 * Unmap all mapped DMA memory associated with this command.
4445 *
4446 * LOCKING:
4447 * spin_lock_irqsave(host lock)
4448 */
4449 void ata_sg_clean(struct ata_queued_cmd *qc)
4450 {
4451 struct ata_port *ap = qc->ap;
4452 struct scatterlist *sg = qc->__sg;
4453 int dir = qc->dma_dir;
4454 void *pad_buf = NULL;
4455
4456 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4457 WARN_ON(sg == NULL);
4458
4459 if (qc->flags & ATA_QCFLAG_SINGLE)
4460 WARN_ON(qc->n_elem > 1);
4461
4462 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4463
4464 /* if we padded the buffer out to 32-bit bound, and data
4465 * xfer direction is from-device, we must copy from the
4466 * pad buffer back into the supplied buffer
4467 */
4468 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4469 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4470
4471 if (qc->flags & ATA_QCFLAG_SG) {
4472 if (qc->n_elem)
4473 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4474 /* restore last sg */
4475 sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
4476 if (pad_buf) {
4477 struct scatterlist *psg = &qc->pad_sgent;
4478 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4479 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4480 kunmap_atomic(addr, KM_IRQ0);
4481 }
4482 } else {
4483 if (qc->n_elem)
4484 dma_unmap_single(ap->dev,
4485 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4486 dir);
4487 /* restore sg */
4488 sg->length += qc->pad_len;
4489 if (pad_buf)
4490 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4491 pad_buf, qc->pad_len);
4492 }
4493
4494 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4495 qc->__sg = NULL;
4496 }
4497
4498 /**
4499 * ata_fill_sg - Fill PCI IDE PRD table
4500 * @qc: Metadata associated with taskfile to be transferred
4501 *
4502 * Fill PCI IDE PRD (scatter-gather) table with segments
4503 * associated with the current disk command.
4504 *
4505 * LOCKING:
4506 * spin_lock_irqsave(host lock)
4507 *
4508 */
4509 static void ata_fill_sg(struct ata_queued_cmd *qc)
4510 {
4511 struct ata_port *ap = qc->ap;
4512 struct scatterlist *sg;
4513 unsigned int idx;
4514
4515 WARN_ON(qc->__sg == NULL);
4516 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4517
4518 idx = 0;
4519 ata_for_each_sg(sg, qc) {
4520 u32 addr, offset;
4521 u32 sg_len, len;
4522
4523 /* determine if physical DMA addr spans 64K boundary.
4524 * Note h/w doesn't support 64-bit, so we unconditionally
4525 * truncate dma_addr_t to u32.
4526 */
4527 addr = (u32) sg_dma_address(sg);
4528 sg_len = sg_dma_len(sg);
4529
4530 while (sg_len) {
4531 offset = addr & 0xffff;
4532 len = sg_len;
4533 if ((offset + sg_len) > 0x10000)
4534 len = 0x10000 - offset;
4535
4536 ap->prd[idx].addr = cpu_to_le32(addr);
4537 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4538 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4539
4540 idx++;
4541 sg_len -= len;
4542 addr += len;
4543 }
4544 }
4545
4546 if (idx)
4547 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4548 }
4549
4550 /**
4551 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4552 * @qc: Metadata associated with taskfile to be transferred
4553 *
4554 * Fill PCI IDE PRD (scatter-gather) table with segments
4555 * associated with the current disk command. Perform the fill
4556 * so that we avoid writing any length 64K records for
4557 * controllers that don't follow the spec.
4558 *
4559 * LOCKING:
4560 * spin_lock_irqsave(host lock)
4561 *
4562 */
4563 static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4564 {
4565 struct ata_port *ap = qc->ap;
4566 struct scatterlist *sg;
4567 unsigned int idx;
4568
4569 WARN_ON(qc->__sg == NULL);
4570 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4571
4572 idx = 0;
4573 ata_for_each_sg(sg, qc) {
4574 u32 addr, offset;
4575 u32 sg_len, len, blen;
4576
4577 /* determine if physical DMA addr spans 64K boundary.
4578 * Note h/w doesn't support 64-bit, so we unconditionally
4579 * truncate dma_addr_t to u32.
4580 */
4581 addr = (u32) sg_dma_address(sg);
4582 sg_len = sg_dma_len(sg);
4583
4584 while (sg_len) {
4585 offset = addr & 0xffff;
4586 len = sg_len;
4587 if ((offset + sg_len) > 0x10000)
4588 len = 0x10000 - offset;
4589
4590 blen = len & 0xffff;
4591 ap->prd[idx].addr = cpu_to_le32(addr);
4592 if (blen == 0) {
4593 /* Some PATA chipsets like the CS5530 can't
4594 cope with 0x0000 meaning 64K as the spec says */
4595 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4596 blen = 0x8000;
4597 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4598 }
4599 ap->prd[idx].flags_len = cpu_to_le32(blen);
4600 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4601
4602 idx++;
4603 sg_len -= len;
4604 addr += len;
4605 }
4606 }
4607
4608 if (idx)
4609 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4610 }
4611
4612 /**
4613 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4614 * @qc: Metadata associated with taskfile to check
4615 *
4616 * Allow low-level driver to filter ATA PACKET commands, returning
4617 * a status indicating whether or not it is OK to use DMA for the
4618 * supplied PACKET command.
4619 *
4620 * LOCKING:
4621 * spin_lock_irqsave(host lock)
4622 *
4623 * RETURNS: 0 when ATAPI DMA can be used
4624 * nonzero otherwise
4625 */
4626 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4627 {
4628 struct ata_port *ap = qc->ap;
4629
4630 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4631 * few ATAPI devices choke on such DMA requests.
4632 */
4633 if (unlikely(qc->nbytes & 15))
4634 return 1;
4635
4636 if (ap->ops->check_atapi_dma)
4637 return ap->ops->check_atapi_dma(qc);
4638
4639 return 0;
4640 }
4641
4642 /**
4643 * atapi_qc_may_overflow - Check whether data transfer may overflow
4644 * @qc: ATA command in question
4645 *
4646 * ATAPI commands which transfer variable length data to host
4647 * might overflow due to application error or hardare bug. This
4648 * function checks whether overflow should be drained and ignored
4649 * for @qc.
4650 *
4651 * LOCKING:
4652 * None.
4653 *
4654 * RETURNS:
4655 * 1 if @qc may overflow; otherwise, 0.
4656 */
4657 static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
4658 {
4659 if (qc->tf.protocol != ATA_PROT_ATAPI &&
4660 qc->tf.protocol != ATA_PROT_ATAPI_DMA)
4661 return 0;
4662
4663 if (qc->tf.flags & ATA_TFLAG_WRITE)
4664 return 0;
4665
4666 switch (qc->cdb[0]) {
4667 case READ_10:
4668 case READ_12:
4669 case WRITE_10:
4670 case WRITE_12:
4671 case GPCMD_READ_CD:
4672 case GPCMD_READ_CD_MSF:
4673 return 0;
4674 }
4675
4676 return 1;
4677 }
4678
4679 /**
4680 * ata_std_qc_defer - Check whether a qc needs to be deferred
4681 * @qc: ATA command in question
4682 *
4683 * Non-NCQ commands cannot run with any other command, NCQ or
4684 * not. As upper layer only knows the queue depth, we are
4685 * responsible for maintaining exclusion. This function checks
4686 * whether a new command @qc can be issued.
4687 *
4688 * LOCKING:
4689 * spin_lock_irqsave(host lock)
4690 *
4691 * RETURNS:
4692 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4693 */
4694 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4695 {
4696 struct ata_link *link = qc->dev->link;
4697
4698 if (qc->tf.protocol == ATA_PROT_NCQ) {
4699 if (!ata_tag_valid(link->active_tag))
4700 return 0;
4701 } else {
4702 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4703 return 0;
4704 }
4705
4706 return ATA_DEFER_LINK;
4707 }
4708
4709 /**
4710 * ata_qc_prep - Prepare taskfile for submission
4711 * @qc: Metadata associated with taskfile to be prepared
4712 *
4713 * Prepare ATA taskfile for submission.
4714 *
4715 * LOCKING:
4716 * spin_lock_irqsave(host lock)
4717 */
4718 void ata_qc_prep(struct ata_queued_cmd *qc)
4719 {
4720 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4721 return;
4722
4723 ata_fill_sg(qc);
4724 }
4725
4726 /**
4727 * ata_dumb_qc_prep - Prepare taskfile for submission
4728 * @qc: Metadata associated with taskfile to be prepared
4729 *
4730 * Prepare ATA taskfile for submission.
4731 *
4732 * LOCKING:
4733 * spin_lock_irqsave(host lock)
4734 */
4735 void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4736 {
4737 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4738 return;
4739
4740 ata_fill_sg_dumb(qc);
4741 }
4742
4743 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4744
4745 /**
4746 * ata_sg_init_one - Associate command with memory buffer
4747 * @qc: Command to be associated
4748 * @buf: Memory buffer
4749 * @buflen: Length of memory buffer, in bytes.
4750 *
4751 * Initialize the data-related elements of queued_cmd @qc
4752 * to point to a single memory buffer, @buf of byte length @buflen.
4753 *
4754 * LOCKING:
4755 * spin_lock_irqsave(host lock)
4756 */
4757
4758 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4759 {
4760 qc->flags |= ATA_QCFLAG_SINGLE;
4761
4762 qc->__sg = &qc->sgent;
4763 qc->n_elem = 1;
4764 qc->orig_n_elem = 1;
4765 qc->buf_virt = buf;
4766 qc->nbytes = buflen;
4767 qc->cursg = qc->__sg;
4768
4769 sg_init_one(&qc->sgent, buf, buflen);
4770 }
4771
4772 /**
4773 * ata_sg_init - Associate command with scatter-gather table.
4774 * @qc: Command to be associated
4775 * @sg: Scatter-gather table.
4776 * @n_elem: Number of elements in s/g table.
4777 *
4778 * Initialize the data-related elements of queued_cmd @qc
4779 * to point to a scatter-gather table @sg, containing @n_elem
4780 * elements.
4781 *
4782 * LOCKING:
4783 * spin_lock_irqsave(host lock)
4784 */
4785
4786 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4787 unsigned int n_elem)
4788 {
4789 qc->flags |= ATA_QCFLAG_SG;
4790 qc->__sg = sg;
4791 qc->n_elem = n_elem;
4792 qc->orig_n_elem = n_elem;
4793 qc->cursg = qc->__sg;
4794 }
4795
4796 /**
4797 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4798 * @qc: Command with memory buffer to be mapped.
4799 *
4800 * DMA-map the memory buffer associated with queued_cmd @qc.
4801 *
4802 * LOCKING:
4803 * spin_lock_irqsave(host lock)
4804 *
4805 * RETURNS:
4806 * Zero on success, negative on error.
4807 */
4808
4809 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4810 {
4811 struct ata_port *ap = qc->ap;
4812 int dir = qc->dma_dir;
4813 struct scatterlist *sg = qc->__sg;
4814 dma_addr_t dma_address;
4815 int trim_sg = 0;
4816
4817 /* we must lengthen transfers to end on a 32-bit boundary */
4818 qc->pad_len = sg->length & 3;
4819 if (qc->pad_len) {
4820 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4821 struct scatterlist *psg = &qc->pad_sgent;
4822
4823 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4824
4825 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4826
4827 if (qc->tf.flags & ATA_TFLAG_WRITE)
4828 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4829 qc->pad_len);
4830
4831 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4832 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4833 /* trim sg */
4834 sg->length -= qc->pad_len;
4835 if (sg->length == 0)
4836 trim_sg = 1;
4837
4838 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4839 sg->length, qc->pad_len);
4840 }
4841
4842 if (trim_sg) {
4843 qc->n_elem--;
4844 goto skip_map;
4845 }
4846
4847 dma_address = dma_map_single(ap->dev, qc->buf_virt,
4848 sg->length, dir);
4849 if (dma_mapping_error(dma_address)) {
4850 /* restore sg */
4851 sg->length += qc->pad_len;
4852 return -1;
4853 }
4854
4855 sg_dma_address(sg) = dma_address;
4856 sg_dma_len(sg) = sg->length;
4857
4858 skip_map:
4859 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4860 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4861
4862 return 0;
4863 }
4864
4865 /**
4866 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4867 * @qc: Command with scatter-gather table to be mapped.
4868 *
4869 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4870 *
4871 * LOCKING:
4872 * spin_lock_irqsave(host lock)
4873 *
4874 * RETURNS:
4875 * Zero on success, negative on error.
4876 *
4877 */
4878
4879 static int ata_sg_setup(struct ata_queued_cmd *qc)
4880 {
4881 struct ata_port *ap = qc->ap;
4882 struct scatterlist *sg = qc->__sg;
4883 struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
4884 int n_elem, pre_n_elem, dir, trim_sg = 0;
4885
4886 VPRINTK("ENTER, ata%u\n", ap->print_id);
4887 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4888
4889 /* we must lengthen transfers to end on a 32-bit boundary */
4890 qc->pad_len = lsg->length & 3;
4891 if (qc->pad_len) {
4892 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4893 struct scatterlist *psg = &qc->pad_sgent;
4894 unsigned int offset;
4895
4896 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4897
4898 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4899
4900 /*
4901 * psg->page/offset are used to copy to-be-written
4902 * data in this function or read data in ata_sg_clean.
4903 */
4904 offset = lsg->offset + lsg->length - qc->pad_len;
4905 sg_init_table(psg, 1);
4906 sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
4907 qc->pad_len, offset_in_page(offset));
4908
4909 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4910 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4911 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4912 kunmap_atomic(addr, KM_IRQ0);
4913 }
4914
4915 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4916 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4917 /* trim last sg */
4918 lsg->length -= qc->pad_len;
4919 if (lsg->length == 0)
4920 trim_sg = 1;
4921
4922 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4923 qc->n_elem - 1, lsg->length, qc->pad_len);
4924 }
4925
4926 pre_n_elem = qc->n_elem;
4927 if (trim_sg && pre_n_elem)
4928 pre_n_elem--;
4929
4930 if (!pre_n_elem) {
4931 n_elem = 0;
4932 goto skip_map;
4933 }
4934
4935 dir = qc->dma_dir;
4936 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4937 if (n_elem < 1) {
4938 /* restore last sg */
4939 lsg->length += qc->pad_len;
4940 return -1;
4941 }
4942
4943 DPRINTK("%d sg elements mapped\n", n_elem);
4944
4945 skip_map:
4946 qc->n_elem = n_elem;
4947
4948 return 0;
4949 }
4950
4951 /**
4952 * swap_buf_le16 - swap halves of 16-bit words in place
4953 * @buf: Buffer to swap
4954 * @buf_words: Number of 16-bit words in buffer.
4955 *
4956 * Swap halves of 16-bit words if needed to convert from
4957 * little-endian byte order to native cpu byte order, or
4958 * vice-versa.
4959 *
4960 * LOCKING:
4961 * Inherited from caller.
4962 */
4963 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4964 {
4965 #ifdef __BIG_ENDIAN
4966 unsigned int i;
4967
4968 for (i = 0; i < buf_words; i++)
4969 buf[i] = le16_to_cpu(buf[i]);
4970 #endif /* __BIG_ENDIAN */
4971 }
4972
4973 /**
4974 * ata_data_xfer - Transfer data by PIO
4975 * @adev: device to target
4976 * @buf: data buffer
4977 * @buflen: buffer length
4978 * @write_data: read/write
4979 *
4980 * Transfer data from/to the device data register by PIO.
4981 *
4982 * LOCKING:
4983 * Inherited from caller.
4984 */
4985 void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4986 unsigned int buflen, int write_data)
4987 {
4988 struct ata_port *ap = adev->link->ap;
4989 unsigned int words = buflen >> 1;
4990
4991 /* Transfer multiple of 2 bytes */
4992 if (write_data)
4993 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4994 else
4995 ioread16_rep(ap->ioaddr.data_addr, buf, words);
4996
4997 /* Transfer trailing 1 byte, if any. */
4998 if (unlikely(buflen & 0x01)) {
4999 u16 align_buf[1] = { 0 };
5000 unsigned char *trailing_buf = buf + buflen - 1;
5001
5002 if (write_data) {
5003 memcpy(align_buf, trailing_buf, 1);
5004 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
5005 } else {
5006 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
5007 memcpy(trailing_buf, align_buf, 1);
5008 }
5009 }
5010 }
5011
5012 /**
5013 * ata_data_xfer_noirq - Transfer data by PIO
5014 * @adev: device to target
5015 * @buf: data buffer
5016 * @buflen: buffer length
5017 * @write_data: read/write
5018 *
5019 * Transfer data from/to the device data register by PIO. Do the
5020 * transfer with interrupts disabled.
5021 *
5022 * LOCKING:
5023 * Inherited from caller.
5024 */
5025 void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
5026 unsigned int buflen, int write_data)
5027 {
5028 unsigned long flags;
5029 local_irq_save(flags);
5030 ata_data_xfer(adev, buf, buflen, write_data);
5031 local_irq_restore(flags);
5032 }
5033
5034
5035 /**
5036 * ata_pio_sector - Transfer a sector of data.
5037 * @qc: Command on going
5038 *
5039 * Transfer qc->sect_size bytes of data from/to the ATA device.
5040 *
5041 * LOCKING:
5042 * Inherited from caller.
5043 */
5044
5045 static void ata_pio_sector(struct ata_queued_cmd *qc)
5046 {
5047 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5048 struct ata_port *ap = qc->ap;
5049 struct page *page;
5050 unsigned int offset;
5051 unsigned char *buf;
5052
5053 if (qc->curbytes == qc->nbytes - qc->sect_size)
5054 ap->hsm_task_state = HSM_ST_LAST;
5055
5056 page = sg_page(qc->cursg);
5057 offset = qc->cursg->offset + qc->cursg_ofs;
5058
5059 /* get the current page and offset */
5060 page = nth_page(page, (offset >> PAGE_SHIFT));
5061 offset %= PAGE_SIZE;
5062
5063 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5064
5065 if (PageHighMem(page)) {
5066 unsigned long flags;
5067
5068 /* FIXME: use a bounce buffer */
5069 local_irq_save(flags);
5070 buf = kmap_atomic(page, KM_IRQ0);
5071
5072 /* do the actual data transfer */
5073 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5074
5075 kunmap_atomic(buf, KM_IRQ0);
5076 local_irq_restore(flags);
5077 } else {
5078 buf = page_address(page);
5079 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5080 }
5081
5082 qc->curbytes += qc->sect_size;
5083 qc->cursg_ofs += qc->sect_size;
5084
5085 if (qc->cursg_ofs == qc->cursg->length) {
5086 qc->cursg = sg_next(qc->cursg);
5087 qc->cursg_ofs = 0;
5088 }
5089 }
5090
5091 /**
5092 * ata_pio_sectors - Transfer one or many sectors.
5093 * @qc: Command on going
5094 *
5095 * Transfer one or many sectors of data from/to the
5096 * ATA device for the DRQ request.
5097 *
5098 * LOCKING:
5099 * Inherited from caller.
5100 */
5101
5102 static void ata_pio_sectors(struct ata_queued_cmd *qc)
5103 {
5104 if (is_multi_taskfile(&qc->tf)) {
5105 /* READ/WRITE MULTIPLE */
5106 unsigned int nsect;
5107
5108 WARN_ON(qc->dev->multi_count == 0);
5109
5110 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
5111 qc->dev->multi_count);
5112 while (nsect--)
5113 ata_pio_sector(qc);
5114 } else
5115 ata_pio_sector(qc);
5116
5117 ata_altstatus(qc->ap); /* flush */
5118 }
5119
5120 /**
5121 * atapi_send_cdb - Write CDB bytes to hardware
5122 * @ap: Port to which ATAPI device is attached.
5123 * @qc: Taskfile currently active
5124 *
5125 * When device has indicated its readiness to accept
5126 * a CDB, this function is called. Send the CDB.
5127 *
5128 * LOCKING:
5129 * caller.
5130 */
5131
5132 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5133 {
5134 /* send SCSI cdb */
5135 DPRINTK("send cdb\n");
5136 WARN_ON(qc->dev->cdb_len < 12);
5137
5138 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
5139 ata_altstatus(ap); /* flush */
5140
5141 switch (qc->tf.protocol) {
5142 case ATA_PROT_ATAPI:
5143 ap->hsm_task_state = HSM_ST;
5144 break;
5145 case ATA_PROT_ATAPI_NODATA:
5146 ap->hsm_task_state = HSM_ST_LAST;
5147 break;
5148 case ATA_PROT_ATAPI_DMA:
5149 ap->hsm_task_state = HSM_ST_LAST;
5150 /* initiate bmdma */
5151 ap->ops->bmdma_start(qc);
5152 break;
5153 }
5154 }
5155
5156 /**
5157 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
5158 * @qc: Command on going
5159 * @bytes: number of bytes
5160 *
5161 * Transfer Transfer data from/to the ATAPI device.
5162 *
5163 * LOCKING:
5164 * Inherited from caller.
5165 *
5166 */
5167 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
5168 {
5169 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5170 struct ata_port *ap = qc->ap;
5171 struct ata_eh_info *ehi = &qc->dev->link->eh_info;
5172 struct scatterlist *sg;
5173 struct page *page;
5174 unsigned char *buf;
5175 unsigned int offset, count;
5176
5177 next_sg:
5178 sg = qc->cursg;
5179 if (unlikely(!sg)) {
5180 /*
5181 * The end of qc->sg is reached and the device expects
5182 * more data to transfer. In order not to overrun qc->sg
5183 * and fulfill length specified in the byte count register,
5184 * - for read case, discard trailing data from the device
5185 * - for write case, padding zero data to the device
5186 */
5187 u16 pad_buf[1] = { 0 };
5188 unsigned int i;
5189
5190 if (bytes > qc->curbytes - qc->nbytes + ATAPI_MAX_DRAIN) {
5191 ata_ehi_push_desc(ehi, "too much trailing data "
5192 "buf=%u cur=%u bytes=%u",
5193 qc->nbytes, qc->curbytes, bytes);
5194 return -1;
5195 }
5196
5197 /* overflow is exptected for misc ATAPI commands */
5198 if (bytes && !atapi_qc_may_overflow(qc))
5199 ata_dev_printk(qc->dev, KERN_WARNING, "ATAPI %u bytes "
5200 "trailing data (cdb=%02x nbytes=%u)\n",
5201 bytes, qc->cdb[0], qc->nbytes);
5202
5203 for (i = 0; i < (bytes + 1) / 2; i++)
5204 ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
5205
5206 qc->curbytes += bytes;
5207
5208 return 0;
5209 }
5210
5211 page = sg_page(sg);
5212 offset = sg->offset + qc->cursg_ofs;
5213
5214 /* get the current page and offset */
5215 page = nth_page(page, (offset >> PAGE_SHIFT));
5216 offset %= PAGE_SIZE;
5217
5218 /* don't overrun current sg */
5219 count = min(sg->length - qc->cursg_ofs, bytes);
5220
5221 /* don't cross page boundaries */
5222 count = min(count, (unsigned int)PAGE_SIZE - offset);
5223
5224 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5225
5226 if (PageHighMem(page)) {
5227 unsigned long flags;
5228
5229 /* FIXME: use bounce buffer */
5230 local_irq_save(flags);
5231 buf = kmap_atomic(page, KM_IRQ0);
5232
5233 /* do the actual data transfer */
5234 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
5235
5236 kunmap_atomic(buf, KM_IRQ0);
5237 local_irq_restore(flags);
5238 } else {
5239 buf = page_address(page);
5240 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
5241 }
5242
5243 bytes -= count;
5244 if ((count & 1) && bytes)
5245 bytes--;
5246 qc->curbytes += count;
5247 qc->cursg_ofs += count;
5248
5249 if (qc->cursg_ofs == sg->length) {
5250 qc->cursg = sg_next(qc->cursg);
5251 qc->cursg_ofs = 0;
5252 }
5253
5254 if (bytes)
5255 goto next_sg;
5256
5257 return 0;
5258 }
5259
5260 /**
5261 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
5262 * @qc: Command on going
5263 *
5264 * Transfer Transfer data from/to the ATAPI device.
5265 *
5266 * LOCKING:
5267 * Inherited from caller.
5268 */
5269
5270 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5271 {
5272 struct ata_port *ap = qc->ap;
5273 struct ata_device *dev = qc->dev;
5274 unsigned int ireason, bc_lo, bc_hi, bytes;
5275 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5276
5277 /* Abuse qc->result_tf for temp storage of intermediate TF
5278 * here to save some kernel stack usage.
5279 * For normal completion, qc->result_tf is not relevant. For
5280 * error, qc->result_tf is later overwritten by ata_qc_complete().
5281 * So, the correctness of qc->result_tf is not affected.
5282 */
5283 ap->ops->tf_read(ap, &qc->result_tf);
5284 ireason = qc->result_tf.nsect;
5285 bc_lo = qc->result_tf.lbam;
5286 bc_hi = qc->result_tf.lbah;
5287 bytes = (bc_hi << 8) | bc_lo;
5288
5289 /* shall be cleared to zero, indicating xfer of data */
5290 if (ireason & (1 << 0))
5291 goto err_out;
5292
5293 /* make sure transfer direction matches expected */
5294 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5295 if (do_write != i_write)
5296 goto err_out;
5297
5298 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
5299
5300 if (__atapi_pio_bytes(qc, bytes))
5301 goto err_out;
5302 ata_altstatus(ap); /* flush */
5303
5304 return;
5305
5306 err_out:
5307 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
5308 qc->err_mask |= AC_ERR_HSM;
5309 ap->hsm_task_state = HSM_ST_ERR;
5310 }
5311
5312 /**
5313 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5314 * @ap: the target ata_port
5315 * @qc: qc on going
5316 *
5317 * RETURNS:
5318 * 1 if ok in workqueue, 0 otherwise.
5319 */
5320
5321 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
5322 {
5323 if (qc->tf.flags & ATA_TFLAG_POLLING)
5324 return 1;
5325
5326 if (ap->hsm_task_state == HSM_ST_FIRST) {
5327 if (qc->tf.protocol == ATA_PROT_PIO &&
5328 (qc->tf.flags & ATA_TFLAG_WRITE))
5329 return 1;
5330
5331 if (ata_is_atapi(qc->tf.protocol) &&
5332 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5333 return 1;
5334 }
5335
5336 return 0;
5337 }
5338
5339 /**
5340 * ata_hsm_qc_complete - finish a qc running on standard HSM
5341 * @qc: Command to complete
5342 * @in_wq: 1 if called from workqueue, 0 otherwise
5343 *
5344 * Finish @qc which is running on standard HSM.
5345 *
5346 * LOCKING:
5347 * If @in_wq is zero, spin_lock_irqsave(host lock).
5348 * Otherwise, none on entry and grabs host lock.
5349 */
5350 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5351 {
5352 struct ata_port *ap = qc->ap;
5353 unsigned long flags;
5354
5355 if (ap->ops->error_handler) {
5356 if (in_wq) {
5357 spin_lock_irqsave(ap->lock, flags);
5358
5359 /* EH might have kicked in while host lock is
5360 * released.
5361 */
5362 qc = ata_qc_from_tag(ap, qc->tag);
5363 if (qc) {
5364 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
5365 ap->ops->irq_on(ap);
5366 ata_qc_complete(qc);
5367 } else
5368 ata_port_freeze(ap);
5369 }
5370
5371 spin_unlock_irqrestore(ap->lock, flags);
5372 } else {
5373 if (likely(!(qc->err_mask & AC_ERR_HSM)))
5374 ata_qc_complete(qc);
5375 else
5376 ata_port_freeze(ap);
5377 }
5378 } else {
5379 if (in_wq) {
5380 spin_lock_irqsave(ap->lock, flags);
5381 ap->ops->irq_on(ap);
5382 ata_qc_complete(qc);
5383 spin_unlock_irqrestore(ap->lock, flags);
5384 } else
5385 ata_qc_complete(qc);
5386 }
5387 }
5388
5389 /**
5390 * ata_hsm_move - move the HSM to the next state.
5391 * @ap: the target ata_port
5392 * @qc: qc on going
5393 * @status: current device status
5394 * @in_wq: 1 if called from workqueue, 0 otherwise
5395 *
5396 * RETURNS:
5397 * 1 when poll next status needed, 0 otherwise.
5398 */
5399 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5400 u8 status, int in_wq)
5401 {
5402 unsigned long flags = 0;
5403 int poll_next;
5404
5405 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5406
5407 /* Make sure ata_qc_issue_prot() does not throw things
5408 * like DMA polling into the workqueue. Notice that
5409 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5410 */
5411 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5412
5413 fsm_start:
5414 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
5415 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5416
5417 switch (ap->hsm_task_state) {
5418 case HSM_ST_FIRST:
5419 /* Send first data block or PACKET CDB */
5420
5421 /* If polling, we will stay in the work queue after
5422 * sending the data. Otherwise, interrupt handler
5423 * takes over after sending the data.
5424 */
5425 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5426
5427 /* check device status */
5428 if (unlikely((status & ATA_DRQ) == 0)) {
5429 /* handle BSY=0, DRQ=0 as error */
5430 if (likely(status & (ATA_ERR | ATA_DF)))
5431 /* device stops HSM for abort/error */
5432 qc->err_mask |= AC_ERR_DEV;
5433 else
5434 /* HSM violation. Let EH handle this */
5435 qc->err_mask |= AC_ERR_HSM;
5436
5437 ap->hsm_task_state = HSM_ST_ERR;
5438 goto fsm_start;
5439 }
5440
5441 /* Device should not ask for data transfer (DRQ=1)
5442 * when it finds something wrong.
5443 * We ignore DRQ here and stop the HSM by
5444 * changing hsm_task_state to HSM_ST_ERR and
5445 * let the EH abort the command or reset the device.
5446 */
5447 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5448 /* Some ATAPI tape drives forget to clear the ERR bit
5449 * when doing the next command (mostly request sense).
5450 * We ignore ERR here to workaround and proceed sending
5451 * the CDB.
5452 */
5453 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
5454 ata_port_printk(ap, KERN_WARNING,
5455 "DRQ=1 with device error, "
5456 "dev_stat 0x%X\n", status);
5457 qc->err_mask |= AC_ERR_HSM;
5458 ap->hsm_task_state = HSM_ST_ERR;
5459 goto fsm_start;
5460 }
5461 }
5462
5463 /* Send the CDB (atapi) or the first data block (ata pio out).
5464 * During the state transition, interrupt handler shouldn't
5465 * be invoked before the data transfer is complete and
5466 * hsm_task_state is changed. Hence, the following locking.
5467 */
5468 if (in_wq)
5469 spin_lock_irqsave(ap->lock, flags);
5470
5471 if (qc->tf.protocol == ATA_PROT_PIO) {
5472 /* PIO data out protocol.
5473 * send first data block.
5474 */
5475
5476 /* ata_pio_sectors() might change the state
5477 * to HSM_ST_LAST. so, the state is changed here
5478 * before ata_pio_sectors().
5479 */
5480 ap->hsm_task_state = HSM_ST;
5481 ata_pio_sectors(qc);
5482 } else
5483 /* send CDB */
5484 atapi_send_cdb(ap, qc);
5485
5486 if (in_wq)
5487 spin_unlock_irqrestore(ap->lock, flags);
5488
5489 /* if polling, ata_pio_task() handles the rest.
5490 * otherwise, interrupt handler takes over from here.
5491 */
5492 break;
5493
5494 case HSM_ST:
5495 /* complete command or read/write the data register */
5496 if (qc->tf.protocol == ATA_PROT_ATAPI) {
5497 /* ATAPI PIO protocol */
5498 if ((status & ATA_DRQ) == 0) {
5499 /* No more data to transfer or device error.
5500 * Device error will be tagged in HSM_ST_LAST.
5501 */
5502 ap->hsm_task_state = HSM_ST_LAST;
5503 goto fsm_start;
5504 }
5505
5506 /* Device should not ask for data transfer (DRQ=1)
5507 * when it finds something wrong.
5508 * We ignore DRQ here and stop the HSM by
5509 * changing hsm_task_state to HSM_ST_ERR and
5510 * let the EH abort the command or reset the device.
5511 */
5512 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5513 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5514 "device error, dev_stat 0x%X\n",
5515 status);
5516 qc->err_mask |= AC_ERR_HSM;
5517 ap->hsm_task_state = HSM_ST_ERR;
5518 goto fsm_start;
5519 }
5520
5521 atapi_pio_bytes(qc);
5522
5523 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5524 /* bad ireason reported by device */
5525 goto fsm_start;
5526
5527 } else {
5528 /* ATA PIO protocol */
5529 if (unlikely((status & ATA_DRQ) == 0)) {
5530 /* handle BSY=0, DRQ=0 as error */
5531 if (likely(status & (ATA_ERR | ATA_DF)))
5532 /* device stops HSM for abort/error */
5533 qc->err_mask |= AC_ERR_DEV;
5534 else
5535 /* HSM violation. Let EH handle this.
5536 * Phantom devices also trigger this
5537 * condition. Mark hint.
5538 */
5539 qc->err_mask |= AC_ERR_HSM |
5540 AC_ERR_NODEV_HINT;
5541
5542 ap->hsm_task_state = HSM_ST_ERR;
5543 goto fsm_start;
5544 }
5545
5546 /* For PIO reads, some devices may ask for
5547 * data transfer (DRQ=1) alone with ERR=1.
5548 * We respect DRQ here and transfer one
5549 * block of junk data before changing the
5550 * hsm_task_state to HSM_ST_ERR.
5551 *
5552 * For PIO writes, ERR=1 DRQ=1 doesn't make
5553 * sense since the data block has been
5554 * transferred to the device.
5555 */
5556 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5557 /* data might be corrputed */
5558 qc->err_mask |= AC_ERR_DEV;
5559
5560 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5561 ata_pio_sectors(qc);
5562 status = ata_wait_idle(ap);
5563 }
5564
5565 if (status & (ATA_BUSY | ATA_DRQ))
5566 qc->err_mask |= AC_ERR_HSM;
5567
5568 /* ata_pio_sectors() might change the
5569 * state to HSM_ST_LAST. so, the state
5570 * is changed after ata_pio_sectors().
5571 */
5572 ap->hsm_task_state = HSM_ST_ERR;
5573 goto fsm_start;
5574 }
5575
5576 ata_pio_sectors(qc);
5577
5578 if (ap->hsm_task_state == HSM_ST_LAST &&
5579 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5580 /* all data read */
5581 status = ata_wait_idle(ap);
5582 goto fsm_start;
5583 }
5584 }
5585
5586 poll_next = 1;
5587 break;
5588
5589 case HSM_ST_LAST:
5590 if (unlikely(!ata_ok(status))) {
5591 qc->err_mask |= __ac_err_mask(status);
5592 ap->hsm_task_state = HSM_ST_ERR;
5593 goto fsm_start;
5594 }
5595
5596 /* no more data to transfer */
5597 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
5598 ap->print_id, qc->dev->devno, status);
5599
5600 WARN_ON(qc->err_mask);
5601
5602 ap->hsm_task_state = HSM_ST_IDLE;
5603
5604 /* complete taskfile transaction */
5605 ata_hsm_qc_complete(qc, in_wq);
5606
5607 poll_next = 0;
5608 break;
5609
5610 case HSM_ST_ERR:
5611 /* make sure qc->err_mask is available to
5612 * know what's wrong and recover
5613 */
5614 WARN_ON(qc->err_mask == 0);
5615
5616 ap->hsm_task_state = HSM_ST_IDLE;
5617
5618 /* complete taskfile transaction */
5619 ata_hsm_qc_complete(qc, in_wq);
5620
5621 poll_next = 0;
5622 break;
5623 default:
5624 poll_next = 0;
5625 BUG();
5626 }
5627
5628 return poll_next;
5629 }
5630
5631 static void ata_pio_task(struct work_struct *work)
5632 {
5633 struct ata_port *ap =
5634 container_of(work, struct ata_port, port_task.work);
5635 struct ata_queued_cmd *qc = ap->port_task_data;
5636 u8 status;
5637 int poll_next;
5638
5639 fsm_start:
5640 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5641
5642 /*
5643 * This is purely heuristic. This is a fast path.
5644 * Sometimes when we enter, BSY will be cleared in
5645 * a chk-status or two. If not, the drive is probably seeking
5646 * or something. Snooze for a couple msecs, then
5647 * chk-status again. If still busy, queue delayed work.
5648 */
5649 status = ata_busy_wait(ap, ATA_BUSY, 5);
5650 if (status & ATA_BUSY) {
5651 msleep(2);
5652 status = ata_busy_wait(ap, ATA_BUSY, 10);
5653 if (status & ATA_BUSY) {
5654 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5655 return;
5656 }
5657 }
5658
5659 /* move the HSM */
5660 poll_next = ata_hsm_move(ap, qc, status, 1);
5661
5662 /* another command or interrupt handler
5663 * may be running at this point.
5664 */
5665 if (poll_next)
5666 goto fsm_start;
5667 }
5668
5669 /**
5670 * ata_qc_new - Request an available ATA command, for queueing
5671 * @ap: Port associated with device @dev
5672 * @dev: Device from whom we request an available command structure
5673 *
5674 * LOCKING:
5675 * None.
5676 */
5677
5678 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5679 {
5680 struct ata_queued_cmd *qc = NULL;
5681 unsigned int i;
5682
5683 /* no command while frozen */
5684 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5685 return NULL;
5686
5687 /* the last tag is reserved for internal command. */
5688 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5689 if (!test_and_set_bit(i, &ap->qc_allocated)) {
5690 qc = __ata_qc_from_tag(ap, i);
5691 break;
5692 }
5693
5694 if (qc)
5695 qc->tag = i;
5696
5697 return qc;
5698 }
5699
5700 /**
5701 * ata_qc_new_init - Request an available ATA command, and initialize it
5702 * @dev: Device from whom we request an available command structure
5703 *
5704 * LOCKING:
5705 * None.
5706 */
5707
5708 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5709 {
5710 struct ata_port *ap = dev->link->ap;
5711 struct ata_queued_cmd *qc;
5712
5713 qc = ata_qc_new(ap);
5714 if (qc) {
5715 qc->scsicmd = NULL;
5716 qc->ap = ap;
5717 qc->dev = dev;
5718
5719 ata_qc_reinit(qc);
5720 }
5721
5722 return qc;
5723 }
5724
5725 /**
5726 * ata_qc_free - free unused ata_queued_cmd
5727 * @qc: Command to complete
5728 *
5729 * Designed to free unused ata_queued_cmd object
5730 * in case something prevents using it.
5731 *
5732 * LOCKING:
5733 * spin_lock_irqsave(host lock)
5734 */
5735 void ata_qc_free(struct ata_queued_cmd *qc)
5736 {
5737 struct ata_port *ap = qc->ap;
5738 unsigned int tag;
5739
5740 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5741
5742 qc->flags = 0;
5743 tag = qc->tag;
5744 if (likely(ata_tag_valid(tag))) {
5745 qc->tag = ATA_TAG_POISON;
5746 clear_bit(tag, &ap->qc_allocated);
5747 }
5748 }
5749
5750 void __ata_qc_complete(struct ata_queued_cmd *qc)
5751 {
5752 struct ata_port *ap = qc->ap;
5753 struct ata_link *link = qc->dev->link;
5754
5755 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5756 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5757
5758 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5759 ata_sg_clean(qc);
5760
5761 /* command should be marked inactive atomically with qc completion */
5762 if (qc->tf.protocol == ATA_PROT_NCQ) {
5763 link->sactive &= ~(1 << qc->tag);
5764 if (!link->sactive)
5765 ap->nr_active_links--;
5766 } else {
5767 link->active_tag = ATA_TAG_POISON;
5768 ap->nr_active_links--;
5769 }
5770
5771 /* clear exclusive status */
5772 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5773 ap->excl_link == link))
5774 ap->excl_link = NULL;
5775
5776 /* atapi: mark qc as inactive to prevent the interrupt handler
5777 * from completing the command twice later, before the error handler
5778 * is called. (when rc != 0 and atapi request sense is needed)
5779 */
5780 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5781 ap->qc_active &= ~(1 << qc->tag);
5782
5783 /* call completion callback */
5784 qc->complete_fn(qc);
5785 }
5786
5787 static void fill_result_tf(struct ata_queued_cmd *qc)
5788 {
5789 struct ata_port *ap = qc->ap;
5790
5791 qc->result_tf.flags = qc->tf.flags;
5792 ap->ops->tf_read(ap, &qc->result_tf);
5793 }
5794
5795 static void ata_verify_xfer(struct ata_queued_cmd *qc)
5796 {
5797 struct ata_device *dev = qc->dev;
5798
5799 if (ata_tag_internal(qc->tag))
5800 return;
5801
5802 if (ata_is_nodata(qc->tf.protocol))
5803 return;
5804
5805 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5806 return;
5807
5808 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5809 }
5810
5811 /**
5812 * ata_qc_complete - Complete an active ATA command
5813 * @qc: Command to complete
5814 * @err_mask: ATA Status register contents
5815 *
5816 * Indicate to the mid and upper layers that an ATA
5817 * command has completed, with either an ok or not-ok status.
5818 *
5819 * LOCKING:
5820 * spin_lock_irqsave(host lock)
5821 */
5822 void ata_qc_complete(struct ata_queued_cmd *qc)
5823 {
5824 struct ata_port *ap = qc->ap;
5825
5826 /* XXX: New EH and old EH use different mechanisms to
5827 * synchronize EH with regular execution path.
5828 *
5829 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5830 * Normal execution path is responsible for not accessing a
5831 * failed qc. libata core enforces the rule by returning NULL
5832 * from ata_qc_from_tag() for failed qcs.
5833 *
5834 * Old EH depends on ata_qc_complete() nullifying completion
5835 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5836 * not synchronize with interrupt handler. Only PIO task is
5837 * taken care of.
5838 */
5839 if (ap->ops->error_handler) {
5840 struct ata_device *dev = qc->dev;
5841 struct ata_eh_info *ehi = &dev->link->eh_info;
5842
5843 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5844
5845 if (unlikely(qc->err_mask))
5846 qc->flags |= ATA_QCFLAG_FAILED;
5847
5848 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5849 if (!ata_tag_internal(qc->tag)) {
5850 /* always fill result TF for failed qc */
5851 fill_result_tf(qc);
5852 ata_qc_schedule_eh(qc);
5853 return;
5854 }
5855 }
5856
5857 /* read result TF if requested */
5858 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5859 fill_result_tf(qc);
5860
5861 /* Some commands need post-processing after successful
5862 * completion.
5863 */
5864 switch (qc->tf.command) {
5865 case ATA_CMD_SET_FEATURES:
5866 if (qc->tf.feature != SETFEATURES_WC_ON &&
5867 qc->tf.feature != SETFEATURES_WC_OFF)
5868 break;
5869 /* fall through */
5870 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5871 case ATA_CMD_SET_MULTI: /* multi_count changed */
5872 /* revalidate device */
5873 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5874 ata_port_schedule_eh(ap);
5875 break;
5876
5877 case ATA_CMD_SLEEP:
5878 dev->flags |= ATA_DFLAG_SLEEPING;
5879 break;
5880 }
5881
5882 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5883 ata_verify_xfer(qc);
5884
5885 __ata_qc_complete(qc);
5886 } else {
5887 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5888 return;
5889
5890 /* read result TF if failed or requested */
5891 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5892 fill_result_tf(qc);
5893
5894 __ata_qc_complete(qc);
5895 }
5896 }
5897
5898 /**
5899 * ata_qc_complete_multiple - Complete multiple qcs successfully
5900 * @ap: port in question
5901 * @qc_active: new qc_active mask
5902 * @finish_qc: LLDD callback invoked before completing a qc
5903 *
5904 * Complete in-flight commands. This functions is meant to be
5905 * called from low-level driver's interrupt routine to complete
5906 * requests normally. ap->qc_active and @qc_active is compared
5907 * and commands are completed accordingly.
5908 *
5909 * LOCKING:
5910 * spin_lock_irqsave(host lock)
5911 *
5912 * RETURNS:
5913 * Number of completed commands on success, -errno otherwise.
5914 */
5915 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5916 void (*finish_qc)(struct ata_queued_cmd *))
5917 {
5918 int nr_done = 0;
5919 u32 done_mask;
5920 int i;
5921
5922 done_mask = ap->qc_active ^ qc_active;
5923
5924 if (unlikely(done_mask & qc_active)) {
5925 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5926 "(%08x->%08x)\n", ap->qc_active, qc_active);
5927 return -EINVAL;
5928 }
5929
5930 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5931 struct ata_queued_cmd *qc;
5932
5933 if (!(done_mask & (1 << i)))
5934 continue;
5935
5936 if ((qc = ata_qc_from_tag(ap, i))) {
5937 if (finish_qc)
5938 finish_qc(qc);
5939 ata_qc_complete(qc);
5940 nr_done++;
5941 }
5942 }
5943
5944 return nr_done;
5945 }
5946
5947 /**
5948 * ata_qc_issue - issue taskfile to device
5949 * @qc: command to issue to device
5950 *
5951 * Prepare an ATA command to submission to device.
5952 * This includes mapping the data into a DMA-able
5953 * area, filling in the S/G table, and finally
5954 * writing the taskfile to hardware, starting the command.
5955 *
5956 * LOCKING:
5957 * spin_lock_irqsave(host lock)
5958 */
5959 void ata_qc_issue(struct ata_queued_cmd *qc)
5960 {
5961 struct ata_port *ap = qc->ap;
5962 struct ata_link *link = qc->dev->link;
5963 u8 prot = qc->tf.protocol;
5964
5965 /* Make sure only one non-NCQ command is outstanding. The
5966 * check is skipped for old EH because it reuses active qc to
5967 * request ATAPI sense.
5968 */
5969 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5970
5971 if (prot == ATA_PROT_NCQ) {
5972 WARN_ON(link->sactive & (1 << qc->tag));
5973
5974 if (!link->sactive)
5975 ap->nr_active_links++;
5976 link->sactive |= 1 << qc->tag;
5977 } else {
5978 WARN_ON(link->sactive);
5979
5980 ap->nr_active_links++;
5981 link->active_tag = qc->tag;
5982 }
5983
5984 qc->flags |= ATA_QCFLAG_ACTIVE;
5985 ap->qc_active |= 1 << qc->tag;
5986
5987 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5988 (ap->flags & ATA_FLAG_PIO_DMA))) {
5989 if (qc->flags & ATA_QCFLAG_SG) {
5990 if (ata_sg_setup(qc))
5991 goto sg_err;
5992 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5993 if (ata_sg_setup_one(qc))
5994 goto sg_err;
5995 }
5996 } else {
5997 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5998 }
5999
6000 /* if device is sleeping, schedule softreset and abort the link */
6001 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
6002 link->eh_info.action |= ATA_EH_SOFTRESET;
6003 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
6004 ata_link_abort(link);
6005 return;
6006 }
6007
6008 ap->ops->qc_prep(qc);
6009
6010 qc->err_mask |= ap->ops->qc_issue(qc);
6011 if (unlikely(qc->err_mask))
6012 goto err;
6013 return;
6014
6015 sg_err:
6016 qc->flags &= ~ATA_QCFLAG_DMAMAP;
6017 qc->err_mask |= AC_ERR_SYSTEM;
6018 err:
6019 ata_qc_complete(qc);
6020 }
6021
6022 /**
6023 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
6024 * @qc: command to issue to device
6025 *
6026 * Using various libata functions and hooks, this function
6027 * starts an ATA command. ATA commands are grouped into
6028 * classes called "protocols", and issuing each type of protocol
6029 * is slightly different.
6030 *
6031 * May be used as the qc_issue() entry in ata_port_operations.
6032 *
6033 * LOCKING:
6034 * spin_lock_irqsave(host lock)
6035 *
6036 * RETURNS:
6037 * Zero on success, AC_ERR_* mask on failure
6038 */
6039
6040 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6041 {
6042 struct ata_port *ap = qc->ap;
6043
6044 /* Use polling pio if the LLD doesn't handle
6045 * interrupt driven pio and atapi CDB interrupt.
6046 */
6047 if (ap->flags & ATA_FLAG_PIO_POLLING) {
6048 switch (qc->tf.protocol) {
6049 case ATA_PROT_PIO:
6050 case ATA_PROT_NODATA:
6051 case ATA_PROT_ATAPI:
6052 case ATA_PROT_ATAPI_NODATA:
6053 qc->tf.flags |= ATA_TFLAG_POLLING;
6054 break;
6055 case ATA_PROT_ATAPI_DMA:
6056 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
6057 /* see ata_dma_blacklisted() */
6058 BUG();
6059 break;
6060 default:
6061 break;
6062 }
6063 }
6064
6065 /* select the device */
6066 ata_dev_select(ap, qc->dev->devno, 1, 0);
6067
6068 /* start the command */
6069 switch (qc->tf.protocol) {
6070 case ATA_PROT_NODATA:
6071 if (qc->tf.flags & ATA_TFLAG_POLLING)
6072 ata_qc_set_polling(qc);
6073
6074 ata_tf_to_host(ap, &qc->tf);
6075 ap->hsm_task_state = HSM_ST_LAST;
6076
6077 if (qc->tf.flags & ATA_TFLAG_POLLING)
6078 ata_port_queue_task(ap, ata_pio_task, qc, 0);
6079
6080 break;
6081
6082 case ATA_PROT_DMA:
6083 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6084
6085 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6086 ap->ops->bmdma_setup(qc); /* set up bmdma */
6087 ap->ops->bmdma_start(qc); /* initiate bmdma */
6088 ap->hsm_task_state = HSM_ST_LAST;
6089 break;
6090
6091 case ATA_PROT_PIO:
6092 if (qc->tf.flags & ATA_TFLAG_POLLING)
6093 ata_qc_set_polling(qc);
6094
6095 ata_tf_to_host(ap, &qc->tf);
6096
6097 if (qc->tf.flags & ATA_TFLAG_WRITE) {
6098 /* PIO data out protocol */
6099 ap->hsm_task_state = HSM_ST_FIRST;
6100 ata_port_queue_task(ap, ata_pio_task, qc, 0);
6101
6102 /* always send first data block using
6103 * the ata_pio_task() codepath.
6104 */
6105 } else {
6106 /* PIO data in protocol */
6107 ap->hsm_task_state = HSM_ST;
6108
6109 if (qc->tf.flags & ATA_TFLAG_POLLING)
6110 ata_port_queue_task(ap, ata_pio_task, qc, 0);
6111
6112 /* if polling, ata_pio_task() handles the rest.
6113 * otherwise, interrupt handler takes over from here.
6114 */
6115 }
6116
6117 break;
6118
6119 case ATA_PROT_ATAPI:
6120 case ATA_PROT_ATAPI_NODATA:
6121 if (qc->tf.flags & ATA_TFLAG_POLLING)
6122 ata_qc_set_polling(qc);
6123
6124 ata_tf_to_host(ap, &qc->tf);
6125
6126 ap->hsm_task_state = HSM_ST_FIRST;
6127
6128 /* send cdb by polling if no cdb interrupt */
6129 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6130 (qc->tf.flags & ATA_TFLAG_POLLING))
6131 ata_port_queue_task(ap, ata_pio_task, qc, 0);
6132 break;
6133
6134 case ATA_PROT_ATAPI_DMA:
6135 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6136
6137 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6138 ap->ops->bmdma_setup(qc); /* set up bmdma */
6139 ap->hsm_task_state = HSM_ST_FIRST;
6140
6141 /* send cdb by polling if no cdb interrupt */
6142 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6143 ata_port_queue_task(ap, ata_pio_task, qc, 0);
6144 break;
6145
6146 default:
6147 WARN_ON(1);
6148 return AC_ERR_SYSTEM;
6149 }
6150
6151 return 0;
6152 }
6153
6154 /**
6155 * ata_host_intr - Handle host interrupt for given (port, task)
6156 * @ap: Port on which interrupt arrived (possibly...)
6157 * @qc: Taskfile currently active in engine
6158 *
6159 * Handle host interrupt for given queued command. Currently,
6160 * only DMA interrupts are handled. All other commands are
6161 * handled via polling with interrupts disabled (nIEN bit).
6162 *
6163 * LOCKING:
6164 * spin_lock_irqsave(host lock)
6165 *
6166 * RETURNS:
6167 * One if interrupt was handled, zero if not (shared irq).
6168 */
6169
6170 inline unsigned int ata_host_intr(struct ata_port *ap,
6171 struct ata_queued_cmd *qc)
6172 {
6173 struct ata_eh_info *ehi = &ap->link.eh_info;
6174 u8 status, host_stat = 0;
6175
6176 VPRINTK("ata%u: protocol %d task_state %d\n",
6177 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
6178
6179 /* Check whether we are expecting interrupt in this state */
6180 switch (ap->hsm_task_state) {
6181 case HSM_ST_FIRST:
6182 /* Some pre-ATAPI-4 devices assert INTRQ
6183 * at this state when ready to receive CDB.
6184 */
6185
6186 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
6187 * The flag was turned on only for atapi devices. No
6188 * need to check ata_is_atapi(qc->tf.protocol) again.
6189 */
6190 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6191 goto idle_irq;
6192 break;
6193 case HSM_ST_LAST:
6194 if (qc->tf.protocol == ATA_PROT_DMA ||
6195 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
6196 /* check status of DMA engine */
6197 host_stat = ap->ops->bmdma_status(ap);
6198 VPRINTK("ata%u: host_stat 0x%X\n",
6199 ap->print_id, host_stat);
6200
6201 /* if it's not our irq... */
6202 if (!(host_stat & ATA_DMA_INTR))
6203 goto idle_irq;
6204
6205 /* before we do anything else, clear DMA-Start bit */
6206 ap->ops->bmdma_stop(qc);
6207
6208 if (unlikely(host_stat & ATA_DMA_ERR)) {
6209 /* error when transfering data to/from memory */
6210 qc->err_mask |= AC_ERR_HOST_BUS;
6211 ap->hsm_task_state = HSM_ST_ERR;
6212 }
6213 }
6214 break;
6215 case HSM_ST:
6216 break;
6217 default:
6218 goto idle_irq;
6219 }
6220
6221 /* check altstatus */
6222 status = ata_altstatus(ap);
6223 if (status & ATA_BUSY)
6224 goto idle_irq;
6225
6226 /* check main status, clearing INTRQ */
6227 status = ata_chk_status(ap);
6228 if (unlikely(status & ATA_BUSY))
6229 goto idle_irq;
6230
6231 /* ack bmdma irq events */
6232 ap->ops->irq_clear(ap);
6233
6234 ata_hsm_move(ap, qc, status, 0);
6235
6236 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
6237 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
6238 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6239
6240 return 1; /* irq handled */
6241
6242 idle_irq:
6243 ap->stats.idle_irq++;
6244
6245 #ifdef ATA_IRQ_TRAP
6246 if ((ap->stats.idle_irq % 1000) == 0) {
6247 ata_chk_status(ap);
6248 ap->ops->irq_clear(ap);
6249 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
6250 return 1;
6251 }
6252 #endif
6253 return 0; /* irq not handled */
6254 }
6255
6256 /**
6257 * ata_interrupt - Default ATA host interrupt handler
6258 * @irq: irq line (unused)
6259 * @dev_instance: pointer to our ata_host information structure
6260 *
6261 * Default interrupt handler for PCI IDE devices. Calls
6262 * ata_host_intr() for each port that is not disabled.
6263 *
6264 * LOCKING:
6265 * Obtains host lock during operation.
6266 *
6267 * RETURNS:
6268 * IRQ_NONE or IRQ_HANDLED.
6269 */
6270
6271 irqreturn_t ata_interrupt(int irq, void *dev_instance)
6272 {
6273 struct ata_host *host = dev_instance;
6274 unsigned int i;
6275 unsigned int handled = 0;
6276 unsigned long flags;
6277
6278 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
6279 spin_lock_irqsave(&host->lock, flags);
6280
6281 for (i = 0; i < host->n_ports; i++) {
6282 struct ata_port *ap;
6283
6284 ap = host->ports[i];
6285 if (ap &&
6286 !(ap->flags & ATA_FLAG_DISABLED)) {
6287 struct ata_queued_cmd *qc;
6288
6289 qc = ata_qc_from_tag(ap, ap->link.active_tag);
6290 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
6291 (qc->flags & ATA_QCFLAG_ACTIVE))
6292 handled |= ata_host_intr(ap, qc);
6293 }
6294 }
6295
6296 spin_unlock_irqrestore(&host->lock, flags);
6297
6298 return IRQ_RETVAL(handled);
6299 }
6300
6301 /**
6302 * sata_scr_valid - test whether SCRs are accessible
6303 * @link: ATA link to test SCR accessibility for
6304 *
6305 * Test whether SCRs are accessible for @link.
6306 *
6307 * LOCKING:
6308 * None.
6309 *
6310 * RETURNS:
6311 * 1 if SCRs are accessible, 0 otherwise.
6312 */
6313 int sata_scr_valid(struct ata_link *link)
6314 {
6315 struct ata_port *ap = link->ap;
6316
6317 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
6318 }
6319
6320 /**
6321 * sata_scr_read - read SCR register of the specified port
6322 * @link: ATA link to read SCR for
6323 * @reg: SCR to read
6324 * @val: Place to store read value
6325 *
6326 * Read SCR register @reg of @link into *@val. This function is
6327 * guaranteed to succeed if @link is ap->link, the cable type of
6328 * the port is SATA and the port implements ->scr_read.
6329 *
6330 * LOCKING:
6331 * None if @link is ap->link. Kernel thread context otherwise.
6332 *
6333 * RETURNS:
6334 * 0 on success, negative errno on failure.
6335 */
6336 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
6337 {
6338 if (ata_is_host_link(link)) {
6339 struct ata_port *ap = link->ap;
6340
6341 if (sata_scr_valid(link))
6342 return ap->ops->scr_read(ap, reg, val);
6343 return -EOPNOTSUPP;
6344 }
6345
6346 return sata_pmp_scr_read(link, reg, val);
6347 }
6348
6349 /**
6350 * sata_scr_write - write SCR register of the specified port
6351 * @link: ATA link to write SCR for
6352 * @reg: SCR to write
6353 * @val: value to write
6354 *
6355 * Write @val to SCR register @reg of @link. This function is
6356 * guaranteed to succeed if @link is ap->link, the cable type of
6357 * the port is SATA and the port implements ->scr_read.
6358 *
6359 * LOCKING:
6360 * None if @link is ap->link. Kernel thread context otherwise.
6361 *
6362 * RETURNS:
6363 * 0 on success, negative errno on failure.
6364 */
6365 int sata_scr_write(struct ata_link *link, int reg, u32 val)
6366 {
6367 if (ata_is_host_link(link)) {
6368 struct ata_port *ap = link->ap;
6369
6370 if (sata_scr_valid(link))
6371 return ap->ops->scr_write(ap, reg, val);
6372 return -EOPNOTSUPP;
6373 }
6374
6375 return sata_pmp_scr_write(link, reg, val);
6376 }
6377
6378 /**
6379 * sata_scr_write_flush - write SCR register of the specified port and flush
6380 * @link: ATA link to write SCR for
6381 * @reg: SCR to write
6382 * @val: value to write
6383 *
6384 * This function is identical to sata_scr_write() except that this
6385 * function performs flush after writing to the register.
6386 *
6387 * LOCKING:
6388 * None if @link is ap->link. Kernel thread context otherwise.
6389 *
6390 * RETURNS:
6391 * 0 on success, negative errno on failure.
6392 */
6393 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
6394 {
6395 if (ata_is_host_link(link)) {
6396 struct ata_port *ap = link->ap;
6397 int rc;
6398
6399 if (sata_scr_valid(link)) {
6400 rc = ap->ops->scr_write(ap, reg, val);
6401 if (rc == 0)
6402 rc = ap->ops->scr_read(ap, reg, &val);
6403 return rc;
6404 }
6405 return -EOPNOTSUPP;
6406 }
6407
6408 return sata_pmp_scr_write(link, reg, val);
6409 }
6410
6411 /**
6412 * ata_link_online - test whether the given link is online
6413 * @link: ATA link to test
6414 *
6415 * Test whether @link is online. Note that this function returns
6416 * 0 if online status of @link cannot be obtained, so
6417 * ata_link_online(link) != !ata_link_offline(link).
6418 *
6419 * LOCKING:
6420 * None.
6421 *
6422 * RETURNS:
6423 * 1 if the port online status is available and online.
6424 */
6425 int ata_link_online(struct ata_link *link)
6426 {
6427 u32 sstatus;
6428
6429 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6430 (sstatus & 0xf) == 0x3)
6431 return 1;
6432 return 0;
6433 }
6434
6435 /**
6436 * ata_link_offline - test whether the given link is offline
6437 * @link: ATA link to test
6438 *
6439 * Test whether @link is offline. Note that this function
6440 * returns 0 if offline status of @link cannot be obtained, so
6441 * ata_link_online(link) != !ata_link_offline(link).
6442 *
6443 * LOCKING:
6444 * None.
6445 *
6446 * RETURNS:
6447 * 1 if the port offline status is available and offline.
6448 */
6449 int ata_link_offline(struct ata_link *link)
6450 {
6451 u32 sstatus;
6452
6453 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6454 (sstatus & 0xf) != 0x3)
6455 return 1;
6456 return 0;
6457 }
6458
6459 int ata_flush_cache(struct ata_device *dev)
6460 {
6461 unsigned int err_mask;
6462 u8 cmd;
6463
6464 if (!ata_try_flush_cache(dev))
6465 return 0;
6466
6467 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6468 cmd = ATA_CMD_FLUSH_EXT;
6469 else
6470 cmd = ATA_CMD_FLUSH;
6471
6472 /* This is wrong. On a failed flush we get back the LBA of the lost
6473 sector and we should (assuming it wasn't aborted as unknown) issue
6474 a further flush command to continue the writeback until it
6475 does not error */
6476 err_mask = ata_do_simple_cmd(dev, cmd);
6477 if (err_mask) {
6478 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6479 return -EIO;
6480 }
6481
6482 return 0;
6483 }
6484
6485 #ifdef CONFIG_PM
6486 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6487 unsigned int action, unsigned int ehi_flags,
6488 int wait)
6489 {
6490 unsigned long flags;
6491 int i, rc;
6492
6493 for (i = 0; i < host->n_ports; i++) {
6494 struct ata_port *ap = host->ports[i];
6495 struct ata_link *link;
6496
6497 /* Previous resume operation might still be in
6498 * progress. Wait for PM_PENDING to clear.
6499 */
6500 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6501 ata_port_wait_eh(ap);
6502 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6503 }
6504
6505 /* request PM ops to EH */
6506 spin_lock_irqsave(ap->lock, flags);
6507
6508 ap->pm_mesg = mesg;
6509 if (wait) {
6510 rc = 0;
6511 ap->pm_result = &rc;
6512 }
6513
6514 ap->pflags |= ATA_PFLAG_PM_PENDING;
6515 __ata_port_for_each_link(link, ap) {
6516 link->eh_info.action |= action;
6517 link->eh_info.flags |= ehi_flags;
6518 }
6519
6520 ata_port_schedule_eh(ap);
6521
6522 spin_unlock_irqrestore(ap->lock, flags);
6523
6524 /* wait and check result */
6525 if (wait) {
6526 ata_port_wait_eh(ap);
6527 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6528 if (rc)
6529 return rc;
6530 }
6531 }
6532
6533 return 0;
6534 }
6535
6536 /**
6537 * ata_host_suspend - suspend host
6538 * @host: host to suspend
6539 * @mesg: PM message
6540 *
6541 * Suspend @host. Actual operation is performed by EH. This
6542 * function requests EH to perform PM operations and waits for EH
6543 * to finish.
6544 *
6545 * LOCKING:
6546 * Kernel thread context (may sleep).
6547 *
6548 * RETURNS:
6549 * 0 on success, -errno on failure.
6550 */
6551 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6552 {
6553 int rc;
6554
6555 /*
6556 * disable link pm on all ports before requesting
6557 * any pm activity
6558 */
6559 ata_lpm_enable(host);
6560
6561 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
6562 if (rc == 0)
6563 host->dev->power.power_state = mesg;
6564 return rc;
6565 }
6566
6567 /**
6568 * ata_host_resume - resume host
6569 * @host: host to resume
6570 *
6571 * Resume @host. Actual operation is performed by EH. This
6572 * function requests EH to perform PM operations and returns.
6573 * Note that all resume operations are performed parallely.
6574 *
6575 * LOCKING:
6576 * Kernel thread context (may sleep).
6577 */
6578 void ata_host_resume(struct ata_host *host)
6579 {
6580 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6581 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6582 host->dev->power.power_state = PMSG_ON;
6583
6584 /* reenable link pm */
6585 ata_lpm_disable(host);
6586 }
6587 #endif
6588
6589 /**
6590 * ata_port_start - Set port up for dma.
6591 * @ap: Port to initialize
6592 *
6593 * Called just after data structures for each port are
6594 * initialized. Allocates space for PRD table.
6595 *
6596 * May be used as the port_start() entry in ata_port_operations.
6597 *
6598 * LOCKING:
6599 * Inherited from caller.
6600 */
6601 int ata_port_start(struct ata_port *ap)
6602 {
6603 struct device *dev = ap->dev;
6604 int rc;
6605
6606 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6607 GFP_KERNEL);
6608 if (!ap->prd)
6609 return -ENOMEM;
6610
6611 rc = ata_pad_alloc(ap, dev);
6612 if (rc)
6613 return rc;
6614
6615 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6616 (unsigned long long)ap->prd_dma);
6617 return 0;
6618 }
6619
6620 /**
6621 * ata_dev_init - Initialize an ata_device structure
6622 * @dev: Device structure to initialize
6623 *
6624 * Initialize @dev in preparation for probing.
6625 *
6626 * LOCKING:
6627 * Inherited from caller.
6628 */
6629 void ata_dev_init(struct ata_device *dev)
6630 {
6631 struct ata_link *link = dev->link;
6632 struct ata_port *ap = link->ap;
6633 unsigned long flags;
6634
6635 /* SATA spd limit is bound to the first device */
6636 link->sata_spd_limit = link->hw_sata_spd_limit;
6637 link->sata_spd = 0;
6638
6639 /* High bits of dev->flags are used to record warm plug
6640 * requests which occur asynchronously. Synchronize using
6641 * host lock.
6642 */
6643 spin_lock_irqsave(ap->lock, flags);
6644 dev->flags &= ~ATA_DFLAG_INIT_MASK;
6645 dev->horkage = 0;
6646 spin_unlock_irqrestore(ap->lock, flags);
6647
6648 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6649 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6650 dev->pio_mask = UINT_MAX;
6651 dev->mwdma_mask = UINT_MAX;
6652 dev->udma_mask = UINT_MAX;
6653 }
6654
6655 /**
6656 * ata_link_init - Initialize an ata_link structure
6657 * @ap: ATA port link is attached to
6658 * @link: Link structure to initialize
6659 * @pmp: Port multiplier port number
6660 *
6661 * Initialize @link.
6662 *
6663 * LOCKING:
6664 * Kernel thread context (may sleep)
6665 */
6666 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
6667 {
6668 int i;
6669
6670 /* clear everything except for devices */
6671 memset(link, 0, offsetof(struct ata_link, device[0]));
6672
6673 link->ap = ap;
6674 link->pmp = pmp;
6675 link->active_tag = ATA_TAG_POISON;
6676 link->hw_sata_spd_limit = UINT_MAX;
6677
6678 /* can't use iterator, ap isn't initialized yet */
6679 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6680 struct ata_device *dev = &link->device[i];
6681
6682 dev->link = link;
6683 dev->devno = dev - link->device;
6684 ata_dev_init(dev);
6685 }
6686 }
6687
6688 /**
6689 * sata_link_init_spd - Initialize link->sata_spd_limit
6690 * @link: Link to configure sata_spd_limit for
6691 *
6692 * Initialize @link->[hw_]sata_spd_limit to the currently
6693 * configured value.
6694 *
6695 * LOCKING:
6696 * Kernel thread context (may sleep).
6697 *
6698 * RETURNS:
6699 * 0 on success, -errno on failure.
6700 */
6701 int sata_link_init_spd(struct ata_link *link)
6702 {
6703 u32 scontrol, spd;
6704 int rc;
6705
6706 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6707 if (rc)
6708 return rc;
6709
6710 spd = (scontrol >> 4) & 0xf;
6711 if (spd)
6712 link->hw_sata_spd_limit &= (1 << spd) - 1;
6713
6714 link->sata_spd_limit = link->hw_sata_spd_limit;
6715
6716 return 0;
6717 }
6718
6719 /**
6720 * ata_port_alloc - allocate and initialize basic ATA port resources
6721 * @host: ATA host this allocated port belongs to
6722 *
6723 * Allocate and initialize basic ATA port resources.
6724 *
6725 * RETURNS:
6726 * Allocate ATA port on success, NULL on failure.
6727 *
6728 * LOCKING:
6729 * Inherited from calling layer (may sleep).
6730 */
6731 struct ata_port *ata_port_alloc(struct ata_host *host)
6732 {
6733 struct ata_port *ap;
6734
6735 DPRINTK("ENTER\n");
6736
6737 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6738 if (!ap)
6739 return NULL;
6740
6741 ap->pflags |= ATA_PFLAG_INITIALIZING;
6742 ap->lock = &host->lock;
6743 ap->flags = ATA_FLAG_DISABLED;
6744 ap->print_id = -1;
6745 ap->ctl = ATA_DEVCTL_OBS;
6746 ap->host = host;
6747 ap->dev = host->dev;
6748 ap->last_ctl = 0xFF;
6749
6750 #if defined(ATA_VERBOSE_DEBUG)
6751 /* turn on all debugging levels */
6752 ap->msg_enable = 0x00FF;
6753 #elif defined(ATA_DEBUG)
6754 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6755 #else
6756 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6757 #endif
6758
6759 INIT_DELAYED_WORK(&ap->port_task, NULL);
6760 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6761 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6762 INIT_LIST_HEAD(&ap->eh_done_q);
6763 init_waitqueue_head(&ap->eh_wait_q);
6764 init_timer_deferrable(&ap->fastdrain_timer);
6765 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6766 ap->fastdrain_timer.data = (unsigned long)ap;
6767
6768 ap->cbl = ATA_CBL_NONE;
6769
6770 ata_link_init(ap, &ap->link, 0);
6771
6772 #ifdef ATA_IRQ_TRAP
6773 ap->stats.unhandled_irq = 1;
6774 ap->stats.idle_irq = 1;
6775 #endif
6776 return ap;
6777 }
6778
6779 static void ata_host_release(struct device *gendev, void *res)
6780 {
6781 struct ata_host *host = dev_get_drvdata(gendev);
6782 int i;
6783
6784 for (i = 0; i < host->n_ports; i++) {
6785 struct ata_port *ap = host->ports[i];
6786
6787 if (!ap)
6788 continue;
6789
6790 if (ap->scsi_host)
6791 scsi_host_put(ap->scsi_host);
6792
6793 kfree(ap->pmp_link);
6794 kfree(ap);
6795 host->ports[i] = NULL;
6796 }
6797
6798 dev_set_drvdata(gendev, NULL);
6799 }
6800
6801 /**
6802 * ata_host_alloc - allocate and init basic ATA host resources
6803 * @dev: generic device this host is associated with
6804 * @max_ports: maximum number of ATA ports associated with this host
6805 *
6806 * Allocate and initialize basic ATA host resources. LLD calls
6807 * this function to allocate a host, initializes it fully and
6808 * attaches it using ata_host_register().
6809 *
6810 * @max_ports ports are allocated and host->n_ports is
6811 * initialized to @max_ports. The caller is allowed to decrease
6812 * host->n_ports before calling ata_host_register(). The unused
6813 * ports will be automatically freed on registration.
6814 *
6815 * RETURNS:
6816 * Allocate ATA host on success, NULL on failure.
6817 *
6818 * LOCKING:
6819 * Inherited from calling layer (may sleep).
6820 */
6821 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6822 {
6823 struct ata_host *host;
6824 size_t sz;
6825 int i;
6826
6827 DPRINTK("ENTER\n");
6828
6829 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6830 return NULL;
6831
6832 /* alloc a container for our list of ATA ports (buses) */
6833 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6834 /* alloc a container for our list of ATA ports (buses) */
6835 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6836 if (!host)
6837 goto err_out;
6838
6839 devres_add(dev, host);
6840 dev_set_drvdata(dev, host);
6841
6842 spin_lock_init(&host->lock);
6843 host->dev = dev;
6844 host->n_ports = max_ports;
6845
6846 /* allocate ports bound to this host */
6847 for (i = 0; i < max_ports; i++) {
6848 struct ata_port *ap;
6849
6850 ap = ata_port_alloc(host);
6851 if (!ap)
6852 goto err_out;
6853
6854 ap->port_no = i;
6855 host->ports[i] = ap;
6856 }
6857
6858 devres_remove_group(dev, NULL);
6859 return host;
6860
6861 err_out:
6862 devres_release_group(dev, NULL);
6863 return NULL;
6864 }
6865
6866 /**
6867 * ata_host_alloc_pinfo - alloc host and init with port_info array
6868 * @dev: generic device this host is associated with
6869 * @ppi: array of ATA port_info to initialize host with
6870 * @n_ports: number of ATA ports attached to this host
6871 *
6872 * Allocate ATA host and initialize with info from @ppi. If NULL
6873 * terminated, @ppi may contain fewer entries than @n_ports. The
6874 * last entry will be used for the remaining ports.
6875 *
6876 * RETURNS:
6877 * Allocate ATA host on success, NULL on failure.
6878 *
6879 * LOCKING:
6880 * Inherited from calling layer (may sleep).
6881 */
6882 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6883 const struct ata_port_info * const * ppi,
6884 int n_ports)
6885 {
6886 const struct ata_port_info *pi;
6887 struct ata_host *host;
6888 int i, j;
6889
6890 host = ata_host_alloc(dev, n_ports);
6891 if (!host)
6892 return NULL;
6893
6894 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6895 struct ata_port *ap = host->ports[i];
6896
6897 if (ppi[j])
6898 pi = ppi[j++];
6899
6900 ap->pio_mask = pi->pio_mask;
6901 ap->mwdma_mask = pi->mwdma_mask;
6902 ap->udma_mask = pi->udma_mask;
6903 ap->flags |= pi->flags;
6904 ap->link.flags |= pi->link_flags;
6905 ap->ops = pi->port_ops;
6906
6907 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6908 host->ops = pi->port_ops;
6909 if (!host->private_data && pi->private_data)
6910 host->private_data = pi->private_data;
6911 }
6912
6913 return host;
6914 }
6915
6916 static void ata_host_stop(struct device *gendev, void *res)
6917 {
6918 struct ata_host *host = dev_get_drvdata(gendev);
6919 int i;
6920
6921 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6922
6923 for (i = 0; i < host->n_ports; i++) {
6924 struct ata_port *ap = host->ports[i];
6925
6926 if (ap->ops->port_stop)
6927 ap->ops->port_stop(ap);
6928 }
6929
6930 if (host->ops->host_stop)
6931 host->ops->host_stop(host);
6932 }
6933
6934 /**
6935 * ata_host_start - start and freeze ports of an ATA host
6936 * @host: ATA host to start ports for
6937 *
6938 * Start and then freeze ports of @host. Started status is
6939 * recorded in host->flags, so this function can be called
6940 * multiple times. Ports are guaranteed to get started only
6941 * once. If host->ops isn't initialized yet, its set to the
6942 * first non-dummy port ops.
6943 *
6944 * LOCKING:
6945 * Inherited from calling layer (may sleep).
6946 *
6947 * RETURNS:
6948 * 0 if all ports are started successfully, -errno otherwise.
6949 */
6950 int ata_host_start(struct ata_host *host)
6951 {
6952 int have_stop = 0;
6953 void *start_dr = NULL;
6954 int i, rc;
6955
6956 if (host->flags & ATA_HOST_STARTED)
6957 return 0;
6958
6959 for (i = 0; i < host->n_ports; i++) {
6960 struct ata_port *ap = host->ports[i];
6961
6962 if (!host->ops && !ata_port_is_dummy(ap))
6963 host->ops = ap->ops;
6964
6965 if (ap->ops->port_stop)
6966 have_stop = 1;
6967 }
6968
6969 if (host->ops->host_stop)
6970 have_stop = 1;
6971
6972 if (have_stop) {
6973 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6974 if (!start_dr)
6975 return -ENOMEM;
6976 }
6977
6978 for (i = 0; i < host->n_ports; i++) {
6979 struct ata_port *ap = host->ports[i];
6980
6981 if (ap->ops->port_start) {
6982 rc = ap->ops->port_start(ap);
6983 if (rc) {
6984 if (rc != -ENODEV)
6985 dev_printk(KERN_ERR, host->dev,
6986 "failed to start port %d "
6987 "(errno=%d)\n", i, rc);
6988 goto err_out;
6989 }
6990 }
6991 ata_eh_freeze_port(ap);
6992 }
6993
6994 if (start_dr)
6995 devres_add(host->dev, start_dr);
6996 host->flags |= ATA_HOST_STARTED;
6997 return 0;
6998
6999 err_out:
7000 while (--i >= 0) {
7001 struct ata_port *ap = host->ports[i];
7002
7003 if (ap->ops->port_stop)
7004 ap->ops->port_stop(ap);
7005 }
7006 devres_free(start_dr);
7007 return rc;
7008 }
7009
7010 /**
7011 * ata_sas_host_init - Initialize a host struct
7012 * @host: host to initialize
7013 * @dev: device host is attached to
7014 * @flags: host flags
7015 * @ops: port_ops
7016 *
7017 * LOCKING:
7018 * PCI/etc. bus probe sem.
7019 *
7020 */
7021 /* KILLME - the only user left is ipr */
7022 void ata_host_init(struct ata_host *host, struct device *dev,
7023 unsigned long flags, const struct ata_port_operations *ops)
7024 {
7025 spin_lock_init(&host->lock);
7026 host->dev = dev;
7027 host->flags = flags;
7028 host->ops = ops;
7029 }
7030
7031 /**
7032 * ata_host_register - register initialized ATA host
7033 * @host: ATA host to register
7034 * @sht: template for SCSI host
7035 *
7036 * Register initialized ATA host. @host is allocated using
7037 * ata_host_alloc() and fully initialized by LLD. This function
7038 * starts ports, registers @host with ATA and SCSI layers and
7039 * probe registered devices.
7040 *
7041 * LOCKING:
7042 * Inherited from calling layer (may sleep).
7043 *
7044 * RETURNS:
7045 * 0 on success, -errno otherwise.
7046 */
7047 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7048 {
7049 int i, rc;
7050
7051 /* host must have been started */
7052 if (!(host->flags & ATA_HOST_STARTED)) {
7053 dev_printk(KERN_ERR, host->dev,
7054 "BUG: trying to register unstarted host\n");
7055 WARN_ON(1);
7056 return -EINVAL;
7057 }
7058
7059 /* Blow away unused ports. This happens when LLD can't
7060 * determine the exact number of ports to allocate at
7061 * allocation time.
7062 */
7063 for (i = host->n_ports; host->ports[i]; i++)
7064 kfree(host->ports[i]);
7065
7066 /* give ports names and add SCSI hosts */
7067 for (i = 0; i < host->n_ports; i++)
7068 host->ports[i]->print_id = ata_print_id++;
7069
7070 rc = ata_scsi_add_hosts(host, sht);
7071 if (rc)
7072 return rc;
7073
7074 /* associate with ACPI nodes */
7075 ata_acpi_associate(host);
7076
7077 /* set cable, sata_spd_limit and report */
7078 for (i = 0; i < host->n_ports; i++) {
7079 struct ata_port *ap = host->ports[i];
7080 unsigned long xfer_mask;
7081
7082 /* set SATA cable type if still unset */
7083 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7084 ap->cbl = ATA_CBL_SATA;
7085
7086 /* init sata_spd_limit to the current value */
7087 sata_link_init_spd(&ap->link);
7088
7089 /* print per-port info to dmesg */
7090 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7091 ap->udma_mask);
7092
7093 if (!ata_port_is_dummy(ap)) {
7094 ata_port_printk(ap, KERN_INFO,
7095 "%cATA max %s %s\n",
7096 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
7097 ata_mode_string(xfer_mask),
7098 ap->link.eh_info.desc);
7099 ata_ehi_clear_desc(&ap->link.eh_info);
7100 } else
7101 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7102 }
7103
7104 /* perform each probe synchronously */
7105 DPRINTK("probe begin\n");
7106 for (i = 0; i < host->n_ports; i++) {
7107 struct ata_port *ap = host->ports[i];
7108 int rc;
7109
7110 /* probe */
7111 if (ap->ops->error_handler) {
7112 struct ata_eh_info *ehi = &ap->link.eh_info;
7113 unsigned long flags;
7114
7115 ata_port_probe(ap);
7116
7117 /* kick EH for boot probing */
7118 spin_lock_irqsave(ap->lock, flags);
7119
7120 ehi->probe_mask =
7121 (1 << ata_link_max_devices(&ap->link)) - 1;
7122 ehi->action |= ATA_EH_SOFTRESET;
7123 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7124
7125 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
7126 ap->pflags |= ATA_PFLAG_LOADING;
7127 ata_port_schedule_eh(ap);
7128
7129 spin_unlock_irqrestore(ap->lock, flags);
7130
7131 /* wait for EH to finish */
7132 ata_port_wait_eh(ap);
7133 } else {
7134 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7135 rc = ata_bus_probe(ap);
7136 DPRINTK("ata%u: bus probe end\n", ap->print_id);
7137
7138 if (rc) {
7139 /* FIXME: do something useful here?
7140 * Current libata behavior will
7141 * tear down everything when
7142 * the module is removed
7143 * or the h/w is unplugged.
7144 */
7145 }
7146 }
7147 }
7148
7149 /* probes are done, now scan each port's disk(s) */
7150 DPRINTK("host probe begin\n");
7151 for (i = 0; i < host->n_ports; i++) {
7152 struct ata_port *ap = host->ports[i];
7153
7154 ata_scsi_scan_host(ap, 1);
7155 ata_lpm_schedule(ap, ap->pm_policy);
7156 }
7157
7158 return 0;
7159 }
7160
7161 /**
7162 * ata_host_activate - start host, request IRQ and register it
7163 * @host: target ATA host
7164 * @irq: IRQ to request
7165 * @irq_handler: irq_handler used when requesting IRQ
7166 * @irq_flags: irq_flags used when requesting IRQ
7167 * @sht: scsi_host_template to use when registering the host
7168 *
7169 * After allocating an ATA host and initializing it, most libata
7170 * LLDs perform three steps to activate the host - start host,
7171 * request IRQ and register it. This helper takes necessasry
7172 * arguments and performs the three steps in one go.
7173 *
7174 * An invalid IRQ skips the IRQ registration and expects the host to
7175 * have set polling mode on the port. In this case, @irq_handler
7176 * should be NULL.
7177 *
7178 * LOCKING:
7179 * Inherited from calling layer (may sleep).
7180 *
7181 * RETURNS:
7182 * 0 on success, -errno otherwise.
7183 */
7184 int ata_host_activate(struct ata_host *host, int irq,
7185 irq_handler_t irq_handler, unsigned long irq_flags,
7186 struct scsi_host_template *sht)
7187 {
7188 int i, rc;
7189
7190 rc = ata_host_start(host);
7191 if (rc)
7192 return rc;
7193
7194 /* Special case for polling mode */
7195 if (!irq) {
7196 WARN_ON(irq_handler);
7197 return ata_host_register(host, sht);
7198 }
7199
7200 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7201 dev_driver_string(host->dev), host);
7202 if (rc)
7203 return rc;
7204
7205 for (i = 0; i < host->n_ports; i++)
7206 ata_port_desc(host->ports[i], "irq %d", irq);
7207
7208 rc = ata_host_register(host, sht);
7209 /* if failed, just free the IRQ and leave ports alone */
7210 if (rc)
7211 devm_free_irq(host->dev, irq, host);
7212
7213 return rc;
7214 }
7215
7216 /**
7217 * ata_port_detach - Detach ATA port in prepration of device removal
7218 * @ap: ATA port to be detached
7219 *
7220 * Detach all ATA devices and the associated SCSI devices of @ap;
7221 * then, remove the associated SCSI host. @ap is guaranteed to
7222 * be quiescent on return from this function.
7223 *
7224 * LOCKING:
7225 * Kernel thread context (may sleep).
7226 */
7227 static void ata_port_detach(struct ata_port *ap)
7228 {
7229 unsigned long flags;
7230 struct ata_link *link;
7231 struct ata_device *dev;
7232
7233 if (!ap->ops->error_handler)
7234 goto skip_eh;
7235
7236 /* tell EH we're leaving & flush EH */
7237 spin_lock_irqsave(ap->lock, flags);
7238 ap->pflags |= ATA_PFLAG_UNLOADING;
7239 spin_unlock_irqrestore(ap->lock, flags);
7240
7241 ata_port_wait_eh(ap);
7242
7243 /* EH is now guaranteed to see UNLOADING - EH context belongs
7244 * to us. Disable all existing devices.
7245 */
7246 ata_port_for_each_link(link, ap) {
7247 ata_link_for_each_dev(dev, link)
7248 ata_dev_disable(dev);
7249 }
7250
7251 /* Final freeze & EH. All in-flight commands are aborted. EH
7252 * will be skipped and retrials will be terminated with bad
7253 * target.
7254 */
7255 spin_lock_irqsave(ap->lock, flags);
7256 ata_port_freeze(ap); /* won't be thawed */
7257 spin_unlock_irqrestore(ap->lock, flags);
7258
7259 ata_port_wait_eh(ap);
7260 cancel_rearming_delayed_work(&ap->hotplug_task);
7261
7262 skip_eh:
7263 /* remove the associated SCSI host */
7264 scsi_remove_host(ap->scsi_host);
7265 }
7266
7267 /**
7268 * ata_host_detach - Detach all ports of an ATA host
7269 * @host: Host to detach
7270 *
7271 * Detach all ports of @host.
7272 *
7273 * LOCKING:
7274 * Kernel thread context (may sleep).
7275 */
7276 void ata_host_detach(struct ata_host *host)
7277 {
7278 int i;
7279
7280 for (i = 0; i < host->n_ports; i++)
7281 ata_port_detach(host->ports[i]);
7282
7283 /* the host is dead now, dissociate ACPI */
7284 ata_acpi_dissociate(host);
7285 }
7286
7287 /**
7288 * ata_std_ports - initialize ioaddr with standard port offsets.
7289 * @ioaddr: IO address structure to be initialized
7290 *
7291 * Utility function which initializes data_addr, error_addr,
7292 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7293 * device_addr, status_addr, and command_addr to standard offsets
7294 * relative to cmd_addr.
7295 *
7296 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
7297 */
7298
7299 void ata_std_ports(struct ata_ioports *ioaddr)
7300 {
7301 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7302 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7303 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7304 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7305 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7306 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7307 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7308 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7309 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7310 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7311 }
7312
7313
7314 #ifdef CONFIG_PCI
7315
7316 /**
7317 * ata_pci_remove_one - PCI layer callback for device removal
7318 * @pdev: PCI device that was removed
7319 *
7320 * PCI layer indicates to libata via this hook that hot-unplug or
7321 * module unload event has occurred. Detach all ports. Resource
7322 * release is handled via devres.
7323 *
7324 * LOCKING:
7325 * Inherited from PCI layer (may sleep).
7326 */
7327 void ata_pci_remove_one(struct pci_dev *pdev)
7328 {
7329 struct device *dev = &pdev->dev;
7330 struct ata_host *host = dev_get_drvdata(dev);
7331
7332 ata_host_detach(host);
7333 }
7334
7335 /* move to PCI subsystem */
7336 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
7337 {
7338 unsigned long tmp = 0;
7339
7340 switch (bits->width) {
7341 case 1: {
7342 u8 tmp8 = 0;
7343 pci_read_config_byte(pdev, bits->reg, &tmp8);
7344 tmp = tmp8;
7345 break;
7346 }
7347 case 2: {
7348 u16 tmp16 = 0;
7349 pci_read_config_word(pdev, bits->reg, &tmp16);
7350 tmp = tmp16;
7351 break;
7352 }
7353 case 4: {
7354 u32 tmp32 = 0;
7355 pci_read_config_dword(pdev, bits->reg, &tmp32);
7356 tmp = tmp32;
7357 break;
7358 }
7359
7360 default:
7361 return -EINVAL;
7362 }
7363
7364 tmp &= bits->mask;
7365
7366 return (tmp == bits->val) ? 1 : 0;
7367 }
7368
7369 #ifdef CONFIG_PM
7370 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
7371 {
7372 pci_save_state(pdev);
7373 pci_disable_device(pdev);
7374
7375 if (mesg.event == PM_EVENT_SUSPEND)
7376 pci_set_power_state(pdev, PCI_D3hot);
7377 }
7378
7379 int ata_pci_device_do_resume(struct pci_dev *pdev)
7380 {
7381 int rc;
7382
7383 pci_set_power_state(pdev, PCI_D0);
7384 pci_restore_state(pdev);
7385
7386 rc = pcim_enable_device(pdev);
7387 if (rc) {
7388 dev_printk(KERN_ERR, &pdev->dev,
7389 "failed to enable device after resume (%d)\n", rc);
7390 return rc;
7391 }
7392
7393 pci_set_master(pdev);
7394 return 0;
7395 }
7396
7397 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
7398 {
7399 struct ata_host *host = dev_get_drvdata(&pdev->dev);
7400 int rc = 0;
7401
7402 rc = ata_host_suspend(host, mesg);
7403 if (rc)
7404 return rc;
7405
7406 ata_pci_device_do_suspend(pdev, mesg);
7407
7408 return 0;
7409 }
7410
7411 int ata_pci_device_resume(struct pci_dev *pdev)
7412 {
7413 struct ata_host *host = dev_get_drvdata(&pdev->dev);
7414 int rc;
7415
7416 rc = ata_pci_device_do_resume(pdev);
7417 if (rc == 0)
7418 ata_host_resume(host);
7419 return rc;
7420 }
7421 #endif /* CONFIG_PM */
7422
7423 #endif /* CONFIG_PCI */
7424
7425
7426 static int __init ata_init(void)
7427 {
7428 ata_probe_timeout *= HZ;
7429 ata_wq = create_workqueue("ata");
7430 if (!ata_wq)
7431 return -ENOMEM;
7432
7433 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7434 if (!ata_aux_wq) {
7435 destroy_workqueue(ata_wq);
7436 return -ENOMEM;
7437 }
7438
7439 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7440 return 0;
7441 }
7442
7443 static void __exit ata_exit(void)
7444 {
7445 destroy_workqueue(ata_wq);
7446 destroy_workqueue(ata_aux_wq);
7447 }
7448
7449 subsys_initcall(ata_init);
7450 module_exit(ata_exit);
7451
7452 static unsigned long ratelimit_time;
7453 static DEFINE_SPINLOCK(ata_ratelimit_lock);
7454
7455 int ata_ratelimit(void)
7456 {
7457 int rc;
7458 unsigned long flags;
7459
7460 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7461
7462 if (time_after(jiffies, ratelimit_time)) {
7463 rc = 1;
7464 ratelimit_time = jiffies + (HZ/5);
7465 } else
7466 rc = 0;
7467
7468 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7469
7470 return rc;
7471 }
7472
7473 /**
7474 * ata_wait_register - wait until register value changes
7475 * @reg: IO-mapped register
7476 * @mask: Mask to apply to read register value
7477 * @val: Wait condition
7478 * @interval_msec: polling interval in milliseconds
7479 * @timeout_msec: timeout in milliseconds
7480 *
7481 * Waiting for some bits of register to change is a common
7482 * operation for ATA controllers. This function reads 32bit LE
7483 * IO-mapped register @reg and tests for the following condition.
7484 *
7485 * (*@reg & mask) != val
7486 *
7487 * If the condition is met, it returns; otherwise, the process is
7488 * repeated after @interval_msec until timeout.
7489 *
7490 * LOCKING:
7491 * Kernel thread context (may sleep)
7492 *
7493 * RETURNS:
7494 * The final register value.
7495 */
7496 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7497 unsigned long interval_msec,
7498 unsigned long timeout_msec)
7499 {
7500 unsigned long timeout;
7501 u32 tmp;
7502
7503 tmp = ioread32(reg);
7504
7505 /* Calculate timeout _after_ the first read to make sure
7506 * preceding writes reach the controller before starting to
7507 * eat away the timeout.
7508 */
7509 timeout = jiffies + (timeout_msec * HZ) / 1000;
7510
7511 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7512 msleep(interval_msec);
7513 tmp = ioread32(reg);
7514 }
7515
7516 return tmp;
7517 }
7518
7519 /*
7520 * Dummy port_ops
7521 */
7522 static void ata_dummy_noret(struct ata_port *ap) { }
7523 static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
7524 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7525
7526 static u8 ata_dummy_check_status(struct ata_port *ap)
7527 {
7528 return ATA_DRDY;
7529 }
7530
7531 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7532 {
7533 return AC_ERR_SYSTEM;
7534 }
7535
7536 const struct ata_port_operations ata_dummy_port_ops = {
7537 .check_status = ata_dummy_check_status,
7538 .check_altstatus = ata_dummy_check_status,
7539 .dev_select = ata_noop_dev_select,
7540 .qc_prep = ata_noop_qc_prep,
7541 .qc_issue = ata_dummy_qc_issue,
7542 .freeze = ata_dummy_noret,
7543 .thaw = ata_dummy_noret,
7544 .error_handler = ata_dummy_noret,
7545 .post_internal_cmd = ata_dummy_qc_noret,
7546 .irq_clear = ata_dummy_noret,
7547 .port_start = ata_dummy_ret0,
7548 .port_stop = ata_dummy_noret,
7549 };
7550
7551 const struct ata_port_info ata_dummy_port_info = {
7552 .port_ops = &ata_dummy_port_ops,
7553 };
7554
7555 /*
7556 * libata is essentially a library of internal helper functions for
7557 * low-level ATA host controller drivers. As such, the API/ABI is
7558 * likely to change as new drivers are added and updated.
7559 * Do not depend on ABI/API stability.
7560 */
7561 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7562 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7563 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7564 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
7565 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7566 EXPORT_SYMBOL_GPL(ata_std_bios_param);
7567 EXPORT_SYMBOL_GPL(ata_std_ports);
7568 EXPORT_SYMBOL_GPL(ata_host_init);
7569 EXPORT_SYMBOL_GPL(ata_host_alloc);
7570 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7571 EXPORT_SYMBOL_GPL(ata_host_start);
7572 EXPORT_SYMBOL_GPL(ata_host_register);
7573 EXPORT_SYMBOL_GPL(ata_host_activate);
7574 EXPORT_SYMBOL_GPL(ata_host_detach);
7575 EXPORT_SYMBOL_GPL(ata_sg_init);
7576 EXPORT_SYMBOL_GPL(ata_sg_init_one);
7577 EXPORT_SYMBOL_GPL(ata_hsm_move);
7578 EXPORT_SYMBOL_GPL(ata_qc_complete);
7579 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7580 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
7581 EXPORT_SYMBOL_GPL(ata_tf_load);
7582 EXPORT_SYMBOL_GPL(ata_tf_read);
7583 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7584 EXPORT_SYMBOL_GPL(ata_std_dev_select);
7585 EXPORT_SYMBOL_GPL(sata_print_link_status);
7586 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7587 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7588 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7589 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7590 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7591 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7592 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7593 EXPORT_SYMBOL_GPL(ata_mode_string);
7594 EXPORT_SYMBOL_GPL(ata_id_xfermask);
7595 EXPORT_SYMBOL_GPL(ata_check_status);
7596 EXPORT_SYMBOL_GPL(ata_altstatus);
7597 EXPORT_SYMBOL_GPL(ata_exec_command);
7598 EXPORT_SYMBOL_GPL(ata_port_start);
7599 EXPORT_SYMBOL_GPL(ata_sff_port_start);
7600 EXPORT_SYMBOL_GPL(ata_interrupt);
7601 EXPORT_SYMBOL_GPL(ata_do_set_mode);
7602 EXPORT_SYMBOL_GPL(ata_data_xfer);
7603 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
7604 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7605 EXPORT_SYMBOL_GPL(ata_qc_prep);
7606 EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
7607 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7608 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7609 EXPORT_SYMBOL_GPL(ata_bmdma_start);
7610 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7611 EXPORT_SYMBOL_GPL(ata_bmdma_status);
7612 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
7613 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7614 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7615 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7616 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7617 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
7618 EXPORT_SYMBOL_GPL(ata_port_probe);
7619 EXPORT_SYMBOL_GPL(ata_dev_disable);
7620 EXPORT_SYMBOL_GPL(sata_set_spd);
7621 EXPORT_SYMBOL_GPL(sata_link_debounce);
7622 EXPORT_SYMBOL_GPL(sata_link_resume);
7623 EXPORT_SYMBOL_GPL(ata_bus_reset);
7624 EXPORT_SYMBOL_GPL(ata_std_prereset);
7625 EXPORT_SYMBOL_GPL(ata_std_softreset);
7626 EXPORT_SYMBOL_GPL(sata_link_hardreset);
7627 EXPORT_SYMBOL_GPL(sata_std_hardreset);
7628 EXPORT_SYMBOL_GPL(ata_std_postreset);
7629 EXPORT_SYMBOL_GPL(ata_dev_classify);
7630 EXPORT_SYMBOL_GPL(ata_dev_pair);
7631 EXPORT_SYMBOL_GPL(ata_port_disable);
7632 EXPORT_SYMBOL_GPL(ata_ratelimit);
7633 EXPORT_SYMBOL_GPL(ata_wait_register);
7634 EXPORT_SYMBOL_GPL(ata_busy_sleep);
7635 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7636 EXPORT_SYMBOL_GPL(ata_wait_ready);
7637 EXPORT_SYMBOL_GPL(ata_port_queue_task);
7638 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7639 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7640 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7641 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7642 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7643 EXPORT_SYMBOL_GPL(ata_host_intr);
7644 EXPORT_SYMBOL_GPL(sata_scr_valid);
7645 EXPORT_SYMBOL_GPL(sata_scr_read);
7646 EXPORT_SYMBOL_GPL(sata_scr_write);
7647 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7648 EXPORT_SYMBOL_GPL(ata_link_online);
7649 EXPORT_SYMBOL_GPL(ata_link_offline);
7650 #ifdef CONFIG_PM
7651 EXPORT_SYMBOL_GPL(ata_host_suspend);
7652 EXPORT_SYMBOL_GPL(ata_host_resume);
7653 #endif /* CONFIG_PM */
7654 EXPORT_SYMBOL_GPL(ata_id_string);
7655 EXPORT_SYMBOL_GPL(ata_id_c_string);
7656 EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
7657 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7658
7659 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7660 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7661 EXPORT_SYMBOL_GPL(ata_timing_compute);
7662 EXPORT_SYMBOL_GPL(ata_timing_merge);
7663
7664 #ifdef CONFIG_PCI
7665 EXPORT_SYMBOL_GPL(pci_test_config_bits);
7666 EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
7667 EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7668 EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
7669 EXPORT_SYMBOL_GPL(ata_pci_init_one);
7670 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7671 #ifdef CONFIG_PM
7672 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7673 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7674 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7675 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7676 #endif /* CONFIG_PM */
7677 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7678 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
7679 #endif /* CONFIG_PCI */
7680
7681 EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
7682 EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
7683 EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
7684 EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
7685 EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
7686
7687 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7688 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7689 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7690 EXPORT_SYMBOL_GPL(ata_port_desc);
7691 #ifdef CONFIG_PCI
7692 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7693 #endif /* CONFIG_PCI */
7694 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7695 EXPORT_SYMBOL_GPL(ata_link_abort);
7696 EXPORT_SYMBOL_GPL(ata_port_abort);
7697 EXPORT_SYMBOL_GPL(ata_port_freeze);
7698 EXPORT_SYMBOL_GPL(sata_async_notification);
7699 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7700 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7701 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7702 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7703 EXPORT_SYMBOL_GPL(ata_do_eh);
7704 EXPORT_SYMBOL_GPL(ata_irq_on);
7705 EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7706
7707 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7708 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7709 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7710 EXPORT_SYMBOL_GPL(ata_cable_sata);
This page took 0.187678 seconds and 6 git commands to generate.