1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
41 */
42
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <scsi/scsi.h>
60 #include <scsi/scsi_cmnd.h>
61 #include <scsi/scsi_host.h>
62 #include <linux/libata.h>
63 #include <asm/byteorder.h>
64 #include <linux/cdrom.h>
65
66 #include "libata.h"
67
68
69 /* debounce timing parameters in msecs { interval, duration, timeout } */
70 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
71 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
72 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
73
74 const struct ata_port_operations ata_base_port_ops = {
75 .prereset = ata_std_prereset,
76 .postreset = ata_std_postreset,
77 .error_handler = ata_std_error_handler,
78 };
79
80 const struct ata_port_operations sata_port_ops = {
81 .inherits = &ata_base_port_ops,
82
83 .qc_defer = ata_std_qc_defer,
84 .hardreset = sata_std_hardreset,
85 };
86
87 static unsigned int ata_dev_init_params(struct ata_device *dev,
88 u16 heads, u16 sectors);
89 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
90 static unsigned int ata_dev_set_feature(struct ata_device *dev,
91 u8 enable, u8 feature);
92 static void ata_dev_xfermask(struct ata_device *dev);
93 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
94
95 unsigned int ata_print_id = 1;
96 static struct workqueue_struct *ata_wq;
97
98 struct workqueue_struct *ata_aux_wq;
99
100 struct ata_force_param {
101 const char *name;
102 unsigned int cbl;
103 int spd_limit;
104 unsigned long xfer_mask;
105 unsigned int horkage_on;
106 unsigned int horkage_off;
107 unsigned int lflags;
108 };
109
110 struct ata_force_ent {
111 int port;
112 int device;
113 struct ata_force_param param;
114 };
115
116 static struct ata_force_ent *ata_force_tbl;
117 static int ata_force_tbl_size;
118
119 static char ata_force_param_buf[PAGE_SIZE] __initdata;
120 /* param_buf is thrown away after initialization, disallow read */
121 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
122 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
123
124 static int atapi_enabled = 1;
125 module_param(atapi_enabled, int, 0444);
126 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
127
128 static int atapi_dmadir = 0;
129 module_param(atapi_dmadir, int, 0444);
130 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
131
132 int atapi_passthru16 = 1;
133 module_param(atapi_passthru16, int, 0444);
134 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
135
136 int libata_fua = 0;
137 module_param_named(fua, libata_fua, int, 0444);
138 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
139
140 static int ata_ignore_hpa;
141 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
142 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
143
144 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
145 module_param_named(dma, libata_dma_mask, int, 0444);
146 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
147
148 static int ata_probe_timeout;
149 module_param(ata_probe_timeout, int, 0444);
150 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
151
152 int libata_noacpi = 0;
153 module_param_named(noacpi, libata_noacpi, int, 0444);
154 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
155
156 int libata_allow_tpm = 0;
157 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
158 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
159
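/*
 * Example (illustrative sketch, based on the parameter descriptions
 * above): loading the module with "modprobe libata dma=0x1 ignore_hpa=1"
 * enables DMA for ATA devices only and unlocks any BIOS-set HPA; the
 * equivalent kernel command line is "libata.dma=0x1 libata.ignore_hpa=1".
 */
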
160 MODULE_AUTHOR("Jeff Garzik");
161 MODULE_DESCRIPTION("Library module for ATA devices");
162 MODULE_LICENSE("GPL");
163 MODULE_VERSION(DRV_VERSION);
164
165
166 /*
167 * Iterator helpers. Don't use directly.
168 *
169 * LOCKING:
170 * Host lock or EH context.
171 */
172 struct ata_link *__ata_port_next_link(struct ata_port *ap,
173 struct ata_link *link, bool dev_only)
174 {
175 /* NULL link indicates start of iteration */
176 if (!link) {
177 if (dev_only && sata_pmp_attached(ap))
178 return ap->pmp_link;
179 return &ap->link;
180 }
181
182 /* we just iterated over the host master link, what's next? */
183 if (link == &ap->link) {
184 if (!sata_pmp_attached(ap)) {
185 if (unlikely(ap->slave_link) && !dev_only)
186 return ap->slave_link;
187 return NULL;
188 }
189 return ap->pmp_link;
190 }
191
192 /* slave_link excludes PMP */
193 if (unlikely(link == ap->slave_link))
194 return NULL;
195
196 /* iterate to the next PMP link */
197 if (++link < ap->pmp_link + ap->nr_pmp_links)
198 return link;
199 return NULL;
200 }
201
202 /**
203 * ata_dev_phys_link - find physical link for a device
204 * @dev: ATA device to look up physical link for
205 *
206 * Look up physical link which @dev is attached to. Note that
207 * this is different from @dev->link only when @dev is on slave
208 * link. For all other cases, it's the same as @dev->link.
209 *
210 * LOCKING:
211 * Don't care.
212 *
213 * RETURNS:
214 * Pointer to the found physical link.
215 */
216 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
217 {
218 struct ata_port *ap = dev->link->ap;
219
220 if (!ap->slave_link)
221 return dev->link;
222 if (!dev->devno)
223 return &ap->link;
224 return ap->slave_link;
225 }
226
227 /**
228 * ata_force_cbl - force cable type according to libata.force
229 * @ap: ATA port of interest
230 *
231 * Force cable type according to libata.force and whine about it.
232 * The last entry which has matching port number is used, so it
233 * can be specified as part of device force parameters. For
234 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
235 * same effect.
236 *
237 * LOCKING:
238 * EH context.
239 */
240 void ata_force_cbl(struct ata_port *ap)
241 {
242 int i;
243
244 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
245 const struct ata_force_ent *fe = &ata_force_tbl[i];
246
247 if (fe->port != -1 && fe->port != ap->print_id)
248 continue;
249
250 if (fe->param.cbl == ATA_CBL_NONE)
251 continue;
252
253 ap->cbl = fe->param.cbl;
254 ata_port_printk(ap, KERN_NOTICE,
255 "FORCE: cable set to %s\n", fe->param.name);
256 return;
257 }
258 }
259
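/*
 * Example (taken from the description above): booting with
 * "libata.force=1.00:40c,udma4" forces a 40-conductor cable type on the
 * port of device 1.00 and limits that device to UDMA/4; per the comment
 * above, "libata.force=a:40c,1.00:udma4" has the same effect.
 */
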
260 /**
261 * ata_force_link_limits - force link limits according to libata.force
262 * @link: ATA link of interest
263 *
264 * Force link flags and SATA spd limit according to libata.force
265 * and whine about it. When only the port part is specified
266 * (e.g. 1:), the limit applies to all links connected to both
267 * the host link and all fan-out ports connected via PMP. If the
268 * device part is specified as 0 (e.g. 1.00:), it specifies the
269 * first fan-out link not the host link. Device number 15 always
270 * points to the host link whether PMP is attached or not. If the
271 * controller has slave link, device number 16 points to it.
272 *
273 * LOCKING:
274 * EH context.
275 */
276 static void ata_force_link_limits(struct ata_link *link)
277 {
278 bool did_spd = false;
279 int linkno = link->pmp;
280 int i;
281
282 if (ata_is_host_link(link))
283 linkno += 15;
284
285 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
286 const struct ata_force_ent *fe = &ata_force_tbl[i];
287
288 if (fe->port != -1 && fe->port != link->ap->print_id)
289 continue;
290
291 if (fe->device != -1 && fe->device != linkno)
292 continue;
293
294 /* only honor the first spd limit */
295 if (!did_spd && fe->param.spd_limit) {
296 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
297 ata_link_printk(link, KERN_NOTICE,
298 "FORCE: PHY spd limit set to %s\n",
299 fe->param.name);
300 did_spd = true;
301 }
302
303 /* let lflags stack */
304 if (fe->param.lflags) {
305 link->flags |= fe->param.lflags;
306 ata_link_printk(link, KERN_NOTICE,
307 "FORCE: link flag 0x%x forced -> 0x%x\n",
308 fe->param.lflags, link->flags);
309 }
310 }
311 }
312
313 /**
314 * ata_force_xfermask - force xfermask according to libata.force
315 * @dev: ATA device of interest
316 *
317 * Force xfer_mask according to libata.force and whine about it.
318 * For consistency with link selection, device number 15 selects
319 * the first device connected to the host link.
320 *
321 * LOCKING:
322 * EH context.
323 */
324 static void ata_force_xfermask(struct ata_device *dev)
325 {
326 int devno = dev->link->pmp + dev->devno;
327 int alt_devno = devno;
328 int i;
329
330 /* allow n.15/16 for devices attached to host port */
331 if (ata_is_host_link(dev->link))
332 alt_devno += 15;
333
334 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
335 const struct ata_force_ent *fe = &ata_force_tbl[i];
336 unsigned long pio_mask, mwdma_mask, udma_mask;
337
338 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
339 continue;
340
341 if (fe->device != -1 && fe->device != devno &&
342 fe->device != alt_devno)
343 continue;
344
345 if (!fe->param.xfer_mask)
346 continue;
347
348 ata_unpack_xfermask(fe->param.xfer_mask,
349 &pio_mask, &mwdma_mask, &udma_mask);
350 if (udma_mask)
351 dev->udma_mask = udma_mask;
352 else if (mwdma_mask) {
353 dev->udma_mask = 0;
354 dev->mwdma_mask = mwdma_mask;
355 } else {
356 dev->udma_mask = 0;
357 dev->mwdma_mask = 0;
358 dev->pio_mask = pio_mask;
359 }
360
361 ata_dev_printk(dev, KERN_NOTICE,
362 "FORCE: xfer_mask set to %s\n", fe->param.name);
363 return;
364 }
365 }
366
367 /**
368 * ata_force_horkage - force horkage according to libata.force
369 * @dev: ATA device of interest
370 *
371 * Force horkage according to libata.force and whine about it.
372 * For consistency with link selection, device number 15 selects
373 * the first device connected to the host link.
374 *
375 * LOCKING:
376 * EH context.
377 */
378 static void ata_force_horkage(struct ata_device *dev)
379 {
380 int devno = dev->link->pmp + dev->devno;
381 int alt_devno = devno;
382 int i;
383
384 /* allow n.15/16 for devices attached to host port */
385 if (ata_is_host_link(dev->link))
386 alt_devno += 15;
387
388 for (i = 0; i < ata_force_tbl_size; i++) {
389 const struct ata_force_ent *fe = &ata_force_tbl[i];
390
391 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
392 continue;
393
394 if (fe->device != -1 && fe->device != devno &&
395 fe->device != alt_devno)
396 continue;
397
398 if (!(~dev->horkage & fe->param.horkage_on) &&
399 !(dev->horkage & fe->param.horkage_off))
400 continue;
401
402 dev->horkage |= fe->param.horkage_on;
403 dev->horkage &= ~fe->param.horkage_off;
404
405 ata_dev_printk(dev, KERN_NOTICE,
406 "FORCE: horkage modified (%s)\n", fe->param.name);
407 }
408 }
409
410 /**
411 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
412 * @opcode: SCSI opcode
413 *
414 * Determine ATAPI command type from @opcode.
415 *
416 * LOCKING:
417 * None.
418 *
419 * RETURNS:
420 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
421 */
422 int atapi_cmd_type(u8 opcode)
423 {
424 switch (opcode) {
425 case GPCMD_READ_10:
426 case GPCMD_READ_12:
427 return ATAPI_READ;
428
429 case GPCMD_WRITE_10:
430 case GPCMD_WRITE_12:
431 case GPCMD_WRITE_AND_VERIFY_10:
432 return ATAPI_WRITE;
433
434 case GPCMD_READ_CD:
435 case GPCMD_READ_CD_MSF:
436 return ATAPI_READ_CD;
437
438 case ATA_16:
439 case ATA_12:
440 if (atapi_passthru16)
441 return ATAPI_PASS_THRU;
442 /* fall thru */
443 default:
444 return ATAPI_MISC;
445 }
446 }
447
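/*
 * Example (sketch): atapi_cmd_type(GPCMD_READ_10) returns ATAPI_READ,
 * while an opcode not listed above, e.g. TEST_UNIT_READY, falls through
 * to ATAPI_MISC.
 */
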
448 /**
449 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
450 * @tf: Taskfile to convert
451 * @pmp: Port multiplier port
452 * @is_cmd: This FIS is for command
453 * @fis: Buffer into which data will be output
454 *
455 * Converts a standard ATA taskfile to a Serial ATA
456 * FIS structure (Register - Host to Device).
457 *
458 * LOCKING:
459 * Inherited from caller.
460 */
461 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
462 {
463 fis[0] = 0x27; /* Register - Host to Device FIS */
464 fis[1] = pmp & 0xf; /* Port multiplier number */
465 if (is_cmd)
466 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
467
468 fis[2] = tf->command;
469 fis[3] = tf->feature;
470
471 fis[4] = tf->lbal;
472 fis[5] = tf->lbam;
473 fis[6] = tf->lbah;
474 fis[7] = tf->device;
475
476 fis[8] = tf->hob_lbal;
477 fis[9] = tf->hob_lbam;
478 fis[10] = tf->hob_lbah;
479 fis[11] = tf->hob_feature;
480
481 fis[12] = tf->nsect;
482 fis[13] = tf->hob_nsect;
483 fis[14] = 0;
484 fis[15] = tf->ctl;
485
486 fis[16] = 0;
487 fis[17] = 0;
488 fis[18] = 0;
489 fis[19] = 0;
490 }
491
492 /**
493 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
494 * @fis: Buffer from which data will be input
495 * @tf: Taskfile to output
496 *
497 * Converts a serial ATA FIS structure to a standard ATA taskfile.
498 *
499 * LOCKING:
500 * Inherited from caller.
501 */
502
503 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
504 {
505 tf->command = fis[2]; /* status */
506 tf->feature = fis[3]; /* error */
507
508 tf->lbal = fis[4];
509 tf->lbam = fis[5];
510 tf->lbah = fis[6];
511 tf->device = fis[7];
512
513 tf->hob_lbal = fis[8];
514 tf->hob_lbam = fis[9];
515 tf->hob_lbah = fis[10];
516
517 tf->nsect = fis[12];
518 tf->hob_nsect = fis[13];
519 }
520
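/*
 * Usage sketch (illustrative, not part of the original file; the
 * example_* helper is hypothetical): build a command FIS for PMP port 0
 * from a prepared taskfile.  The 20-byte buffer matches the bytes
 * written by ata_tf_to_fis() above.
 */
static void __maybe_unused example_build_h2d_fis(const struct ata_taskfile *tf)
{
	u8 fis[20];

	ata_tf_to_fis(tf, 0, 1, fis);	/* pmp 0, is_cmd 1 */
	/* fis[0] == 0x27, fis[1] == 0x80, fis[2] == tf->command here */
}
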
521 static const u8 ata_rw_cmds[] = {
522 /* pio multi */
523 ATA_CMD_READ_MULTI,
524 ATA_CMD_WRITE_MULTI,
525 ATA_CMD_READ_MULTI_EXT,
526 ATA_CMD_WRITE_MULTI_EXT,
527 0,
528 0,
529 0,
530 ATA_CMD_WRITE_MULTI_FUA_EXT,
531 /* pio */
532 ATA_CMD_PIO_READ,
533 ATA_CMD_PIO_WRITE,
534 ATA_CMD_PIO_READ_EXT,
535 ATA_CMD_PIO_WRITE_EXT,
536 0,
537 0,
538 0,
539 0,
540 /* dma */
541 ATA_CMD_READ,
542 ATA_CMD_WRITE,
543 ATA_CMD_READ_EXT,
544 ATA_CMD_WRITE_EXT,
545 0,
546 0,
547 0,
548 ATA_CMD_WRITE_FUA_EXT
549 };
550
551 /**
552 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
553 * @tf: command to examine and configure
554 * @dev: device tf belongs to
555 *
556 * Examine the device configuration and tf->flags to calculate
557 * the proper read/write commands and protocol to use.
558 *
559 * LOCKING:
560 * caller.
561 */
562 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
563 {
564 u8 cmd;
565
566 int index, fua, lba48, write;
567
568 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
569 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
570 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
571
572 if (dev->flags & ATA_DFLAG_PIO) {
573 tf->protocol = ATA_PROT_PIO;
574 index = dev->multi_count ? 0 : 8;
575 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
576 /* Unable to use DMA due to host limitation */
577 tf->protocol = ATA_PROT_PIO;
578 index = dev->multi_count ? 0 : 8;
579 } else {
580 tf->protocol = ATA_PROT_DMA;
581 index = 16;
582 }
583
584 cmd = ata_rw_cmds[index + fua + lba48 + write];
585 if (cmd) {
586 tf->command = cmd;
587 return 0;
588 }
589 return -1;
590 }
591
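/*
 * Worked example (sketch): for an LBA48 FUA DMA write, fua = 4,
 * lba48 = 2, write = 1 and index = 16, so ata_rw_cmds[16 + 4 + 2 + 1]
 * == ata_rw_cmds[23] == ATA_CMD_WRITE_FUA_EXT.
 */
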
592 /**
593 * ata_tf_read_block - Read block address from ATA taskfile
594 * @tf: ATA taskfile of interest
595 * @dev: ATA device @tf belongs to
596 *
597 * LOCKING:
598 * None.
599 *
600 * Read block address from @tf. This function can handle all
601 * three address formats - LBA, LBA48 and CHS. tf->protocol and
602 * flags select the address format to use.
603 *
604 * RETURNS:
605 * Block address read from @tf.
606 */
607 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
608 {
609 u64 block = 0;
610
611 if (tf->flags & ATA_TFLAG_LBA) {
612 if (tf->flags & ATA_TFLAG_LBA48) {
613 block |= (u64)tf->hob_lbah << 40;
614 block |= (u64)tf->hob_lbam << 32;
615 block |= tf->hob_lbal << 24;
616 } else
617 block |= (tf->device & 0xf) << 24;
618
619 block |= tf->lbah << 16;
620 block |= tf->lbam << 8;
621 block |= tf->lbal;
622 } else {
623 u32 cyl, head, sect;
624
625 cyl = tf->lbam | (tf->lbah << 8);
626 head = tf->device & 0xf;
627 sect = tf->lbal;
628
629 block = (cyl * dev->heads + head) * dev->sectors + sect;
630 }
631
632 return block;
633 }
634
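/*
 * Worked CHS example (sketch): with dev->heads == 16 and
 * dev->sectors == 63, a taskfile holding cyl 2, head 3, sect 4
 * decodes to (2 * 16 + 3) * 63 + 4 == block 2209.
 */
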
635 /**
636 * ata_build_rw_tf - Build ATA taskfile for given read/write request
637 * @tf: Target ATA taskfile
638 * @dev: ATA device @tf belongs to
639 * @block: Block address
640 * @n_block: Number of blocks
641 * @tf_flags: RW/FUA etc...
642 * @tag: tag
643 *
644 * LOCKING:
645 * None.
646 *
647 * Build ATA taskfile @tf for read/write request described by
648 * @block, @n_block, @tf_flags and @tag on @dev.
649 *
650 * RETURNS:
651 *
652 * 0 on success, -ERANGE if the request is too large for @dev,
653 * -EINVAL if the request is invalid.
654 */
655 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
656 u64 block, u32 n_block, unsigned int tf_flags,
657 unsigned int tag)
658 {
659 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
660 tf->flags |= tf_flags;
661
662 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
663 /* yay, NCQ */
664 if (!lba_48_ok(block, n_block))
665 return -ERANGE;
666
667 tf->protocol = ATA_PROT_NCQ;
668 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
669
670 if (tf->flags & ATA_TFLAG_WRITE)
671 tf->command = ATA_CMD_FPDMA_WRITE;
672 else
673 tf->command = ATA_CMD_FPDMA_READ;
674
675 tf->nsect = tag << 3;
676 tf->hob_feature = (n_block >> 8) & 0xff;
677 tf->feature = n_block & 0xff;
678
679 tf->hob_lbah = (block >> 40) & 0xff;
680 tf->hob_lbam = (block >> 32) & 0xff;
681 tf->hob_lbal = (block >> 24) & 0xff;
682 tf->lbah = (block >> 16) & 0xff;
683 tf->lbam = (block >> 8) & 0xff;
684 tf->lbal = block & 0xff;
685
686 tf->device = 1 << 6;
687 if (tf->flags & ATA_TFLAG_FUA)
688 tf->device |= 1 << 7;
689 } else if (dev->flags & ATA_DFLAG_LBA) {
690 tf->flags |= ATA_TFLAG_LBA;
691
692 if (lba_28_ok(block, n_block)) {
693 /* use LBA28 */
694 tf->device |= (block >> 24) & 0xf;
695 } else if (lba_48_ok(block, n_block)) {
696 if (!(dev->flags & ATA_DFLAG_LBA48))
697 return -ERANGE;
698
699 /* use LBA48 */
700 tf->flags |= ATA_TFLAG_LBA48;
701
702 tf->hob_nsect = (n_block >> 8) & 0xff;
703
704 tf->hob_lbah = (block >> 40) & 0xff;
705 tf->hob_lbam = (block >> 32) & 0xff;
706 tf->hob_lbal = (block >> 24) & 0xff;
707 } else
708 /* request too large even for LBA48 */
709 return -ERANGE;
710
711 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
712 return -EINVAL;
713
714 tf->nsect = n_block & 0xff;
715
716 tf->lbah = (block >> 16) & 0xff;
717 tf->lbam = (block >> 8) & 0xff;
718 tf->lbal = block & 0xff;
719
720 tf->device |= ATA_LBA;
721 } else {
722 /* CHS */
723 u32 sect, head, cyl, track;
724
725 /* The request -may- be too large for CHS addressing. */
726 if (!lba_28_ok(block, n_block))
727 return -ERANGE;
728
729 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
730 return -EINVAL;
731
732 /* Convert LBA to CHS */
733 track = (u32)block / dev->sectors;
734 cyl = track / dev->heads;
735 head = track % dev->heads;
736 sect = (u32)block % dev->sectors + 1;
737
738 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
739 (u32)block, track, cyl, head, sect);
740
741 /* Check whether the converted CHS can fit.
742 Cylinder: 0-65535
743 Head: 0-15
744 Sector: 1-255 */
745 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
746 return -ERANGE;
747
748 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
749 tf->lbal = sect;
750 tf->lbam = cyl;
751 tf->lbah = cyl >> 8;
752 tf->device |= head;
753 }
754
755 return 0;
756 }
757
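/*
 * Usage sketch (illustrative, not part of the original file; the
 * example_* helper is hypothetical): build a taskfile for an 8-sector
 * read at LBA 4096.  Passing ATA_TAG_INTERNAL deliberately takes the
 * non-NCQ path above.
 */
static int __maybe_unused example_build_read_tf(struct ata_device *dev,
						struct ata_taskfile *tf)
{
	ata_tf_init(dev, tf);
	/* tf_flags 0 => read; returns 0, -ERANGE or -EINVAL as above */
	return ata_build_rw_tf(tf, dev, 4096, 8, 0, ATA_TAG_INTERNAL);
}
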
758 /**
759 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
760 * @pio_mask: pio_mask
761 * @mwdma_mask: mwdma_mask
762 * @udma_mask: udma_mask
763 *
764 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
765 * unsigned int xfer_mask.
766 *
767 * LOCKING:
768 * None.
769 *
770 * RETURNS:
771 * Packed xfer_mask.
772 */
773 unsigned long ata_pack_xfermask(unsigned long pio_mask,
774 unsigned long mwdma_mask,
775 unsigned long udma_mask)
776 {
777 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
778 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
779 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
780 }
781
782 /**
783 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
784 * @xfer_mask: xfer_mask to unpack
785 * @pio_mask: resulting pio_mask
786 * @mwdma_mask: resulting mwdma_mask
787 * @udma_mask: resulting udma_mask
788 *
789 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
790 * Any NULL destination masks will be ignored.
791 */
792 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
793 unsigned long *mwdma_mask, unsigned long *udma_mask)
794 {
795 if (pio_mask)
796 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
797 if (mwdma_mask)
798 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
799 if (udma_mask)
800 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
801 }
802
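/*
 * Usage sketch (illustrative, hypothetical helper): pack PIO0-4,
 * MWDMA0-2 and UDMA0-5 into one xfer_mask and unpack it again; each
 * mask round-trips unchanged.
 */
static void __maybe_unused example_xfermask_roundtrip(void)
{
	unsigned long xfer_mask, pio, mwdma, udma;

	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07, udma == 0x3f again */
}
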
803 static const struct ata_xfer_ent {
804 int shift, bits;
805 u8 base;
806 } ata_xfer_tbl[] = {
807 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
808 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
809 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
810 { -1, },
811 };
812
813 /**
814 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
815 * @xfer_mask: xfer_mask of interest
816 *
817 * Return matching XFER_* value for @xfer_mask. Only the highest
818 * bit of @xfer_mask is considered.
819 *
820 * LOCKING:
821 * None.
822 *
823 * RETURNS:
824 * Matching XFER_* value, 0xff if no match found.
825 */
826 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
827 {
828 int highbit = fls(xfer_mask) - 1;
829 const struct ata_xfer_ent *ent;
830
831 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
832 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
833 return ent->base + highbit - ent->shift;
834 return 0xff;
835 }
836
837 /**
838 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
839 * @xfer_mode: XFER_* of interest
840 *
841 * Return matching xfer_mask for @xfer_mode.
842 *
843 * LOCKING:
844 * None.
845 *
846 * RETURNS:
847 * Matching xfer_mask, 0 if no match found.
848 */
849 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
850 {
851 const struct ata_xfer_ent *ent;
852
853 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
854 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
855 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
856 & ~((1 << ent->shift) - 1);
857 return 0;
858 }
859
860 /**
861 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
862 * @xfer_mode: XFER_* of interest
863 *
864 * Return matching xfer_shift for @xfer_mode.
865 *
866 * LOCKING:
867 * None.
868 *
869 * RETURNS:
870 * Matching xfer_shift, -1 if no match found.
871 */
872 int ata_xfer_mode2shift(unsigned long xfer_mode)
873 {
874 const struct ata_xfer_ent *ent;
875
876 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
877 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
878 return ent->shift;
879 return -1;
880 }
881
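/*
 * Usage sketch (illustrative): the three helpers above round-trip, e.g.
 * ata_xfer_mask2mode(ata_xfer_mode2mask(XFER_UDMA_5)) == XFER_UDMA_5
 * and ata_xfer_mode2shift(XFER_UDMA_5) == ATA_SHIFT_UDMA.
 */
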
882 /**
883 * ata_mode_string - convert xfer_mask to string
884 * @xfer_mask: mask of bits supported; only highest bit counts.
885 *
886 * Determine string which represents the highest speed
887 * (highest bit in @xfer_mask).
888 *
889 * LOCKING:
890 * None.
891 *
892 * RETURNS:
893 * Constant C string representing highest speed listed in
894 * @xfer_mask, or the constant C string "<n/a>".
895 */
896 const char *ata_mode_string(unsigned long xfer_mask)
897 {
898 static const char * const xfer_mode_str[] = {
899 "PIO0",
900 "PIO1",
901 "PIO2",
902 "PIO3",
903 "PIO4",
904 "PIO5",
905 "PIO6",
906 "MWDMA0",
907 "MWDMA1",
908 "MWDMA2",
909 "MWDMA3",
910 "MWDMA4",
911 "UDMA/16",
912 "UDMA/25",
913 "UDMA/33",
914 "UDMA/44",
915 "UDMA/66",
916 "UDMA/100",
917 "UDMA/133",
918 "UDMA7",
919 };
920 int highbit;
921
922 highbit = fls(xfer_mask) - 1;
923 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
924 return xfer_mode_str[highbit];
925 return "<n/a>";
926 }
927
928 static const char *sata_spd_string(unsigned int spd)
929 {
930 static const char * const spd_str[] = {
931 "1.5 Gbps",
932 "3.0 Gbps",
933 };
934
935 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
936 return "<unknown>";
937 return spd_str[spd - 1];
938 }
939
940 void ata_dev_disable(struct ata_device *dev)
941 {
942 if (ata_dev_enabled(dev)) {
943 if (ata_msg_drv(dev->link->ap))
944 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
945 ata_acpi_on_disable(dev);
946 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
947 ATA_DNXFER_QUIET);
948 dev->class++;
949 }
950 }
951
952 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
953 {
954 struct ata_link *link = dev->link;
955 struct ata_port *ap = link->ap;
956 u32 scontrol;
957 unsigned int err_mask;
958 int rc;
959
960 /*
961 * disallow DIPM for drivers which haven't set
962 * ATA_FLAG_IPM. This is because when DIPM is enabled,
963 * phy ready will be set in the interrupt status on
964 * state changes, which will cause some drivers to
965 * think there are errors - additionally drivers will
966 * need to disable hot plug.
967 */
968 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
969 ap->pm_policy = NOT_AVAILABLE;
970 return -EINVAL;
971 }
972
973 /*
974 * For DIPM, we will only enable it for the
975 * min_power setting.
976 *
977 * Why? Because disks are too stupid to know that
978 * if the host rejects a request to go to SLUMBER
979 * they should retry at PARTIAL; instead they
980 * just give up. So, for medium_power to
981 * work at all, we need to only allow HIPM.
982 */
983 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
984 if (rc)
985 return rc;
986
987 switch (policy) {
988 case MIN_POWER:
989 /* no restrictions on IPM transitions */
990 scontrol &= ~(0x3 << 8);
991 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
992 if (rc)
993 return rc;
994
995 /* enable DIPM */
996 if (dev->flags & ATA_DFLAG_DIPM)
997 err_mask = ata_dev_set_feature(dev,
998 SETFEATURES_SATA_ENABLE, SATA_DIPM);
999 break;
1000 case MEDIUM_POWER:
1001 /* allow IPM to PARTIAL */
1002 scontrol &= ~(0x1 << 8);
1003 scontrol |= (0x2 << 8);
1004 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1005 if (rc)
1006 return rc;
1007
1008 /*
1009 * we don't have to disable DIPM since IPM flags
1010 * disallow transitions to SLUMBER, which effectively
1011 * disable DIPM if it does not support PARTIAL
1012 */
1013 break;
1014 case NOT_AVAILABLE:
1015 case MAX_PERFORMANCE:
1016 /* disable all IPM transitions */
1017 scontrol |= (0x3 << 8);
1018 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1019 if (rc)
1020 return rc;
1021
1022 /*
1023 * we don't have to disable DIPM since IPM flags
1024 * disallow all transitions which effectively
1025 * disable DIPM anyway.
1026 */
1027 break;
1028 }
1029
1030 /* FIXME: handle SET FEATURES failure */
1031 (void) err_mask;
1032
1033 return 0;
1034 }
1035
1036 /**
1037 * ata_dev_enable_pm - enable SATA interface power management
1038 * @dev: device to enable power management
1039 * @policy: the link power management policy
1040 *
1041 * Enable SATA Interface power management. This will enable
1042 * Device Interface Power Management (DIPM) for min_power
1043 * policy, and then call driver specific callbacks for
1044 * enabling Host Initiated Power management.
1045 *
1046 * Locking: Caller.
1047 * Returns: Nothing; on failure, @ap->pm_policy falls back to MAX_PERFORMANCE.
1048 */
1049 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
1050 {
1051 int rc = 0;
1052 struct ata_port *ap = dev->link->ap;
1053
1054 /* set HIPM first, then DIPM */
1055 if (ap->ops->enable_pm)
1056 rc = ap->ops->enable_pm(ap, policy);
1057 if (rc)
1058 goto enable_pm_out;
1059 rc = ata_dev_set_dipm(dev, policy);
1060
1061 enable_pm_out:
1062 if (rc)
1063 ap->pm_policy = MAX_PERFORMANCE;
1064 else
1065 ap->pm_policy = policy;
1066 return /* rc */; /* hopefully we can use 'rc' eventually */
1067 }
1068
1069 #ifdef CONFIG_PM
1070 /**
1071 * ata_dev_disable_pm - disable SATA interface power management
1072 * @dev: device to disable power management
1073 *
1074 * Disable SATA Interface power management. This will disable
1075 * Device Interface Power Management (DIPM) without changing
1076 * policy, call driver specific callbacks for disabling Host
1077 * Initiated Power management.
1078 *
1079 * Locking: Caller.
1080 * Returns: void
1081 */
1082 static void ata_dev_disable_pm(struct ata_device *dev)
1083 {
1084 struct ata_port *ap = dev->link->ap;
1085
1086 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1087 if (ap->ops->disable_pm)
1088 ap->ops->disable_pm(ap);
1089 }
1090 #endif /* CONFIG_PM */
1091
1092 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1093 {
1094 ap->pm_policy = policy;
1095 ap->link.eh_info.action |= ATA_EH_LPM;
1096 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1097 ata_port_schedule_eh(ap);
1098 }
1099
1100 #ifdef CONFIG_PM
1101 static void ata_lpm_enable(struct ata_host *host)
1102 {
1103 struct ata_link *link;
1104 struct ata_port *ap;
1105 struct ata_device *dev;
1106 int i;
1107
1108 for (i = 0; i < host->n_ports; i++) {
1109 ap = host->ports[i];
1110 ata_port_for_each_link(link, ap) {
1111 ata_link_for_each_dev(dev, link)
1112 ata_dev_disable_pm(dev);
1113 }
1114 }
1115 }
1116
1117 static void ata_lpm_disable(struct ata_host *host)
1118 {
1119 int i;
1120
1121 for (i = 0; i < host->n_ports; i++) {
1122 struct ata_port *ap = host->ports[i];
1123 ata_lpm_schedule(ap, ap->pm_policy);
1124 }
1125 }
1126 #endif /* CONFIG_PM */
1127
1128 /**
1129 * ata_dev_classify - determine device type based on ATA-spec signature
1130 * @tf: ATA taskfile register set for device to be identified
1131 *
1132 * Determine from taskfile register contents whether a device is
1133 * ATA or ATAPI, as per "Signature and persistence" section
1134 * of ATA/PI spec (volume 1, sect 5.14).
1135 *
1136 * LOCKING:
1137 * None.
1138 *
1139 * RETURNS:
1140 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1141 * %ATA_DEV_UNKNOWN in the event of failure.
1142 */
1143 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1144 {
1145 /* Apple's open source Darwin code hints that some devices only
1146 * put a proper signature into the LBA mid/high registers,
1147 * so we only check those. It's sufficient for uniqueness.
1148 *
1149 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1150 * signatures for ATA and ATAPI devices attached on SerialATA,
1151 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1152 * spec has never mentioned using different signatures
1153 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1154 * Multiplier specification began to use 0x69/0x96 to identify
1155 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1156 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1157 * 0x69/0x96 shortly and described them as reserved for
1158 * SerialATA.
1159 *
1160 * We follow the current spec and consider that 0x69/0x96
1161 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1162 */
1163 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1164 DPRINTK("found ATA device by sig\n");
1165 return ATA_DEV_ATA;
1166 }
1167
1168 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1169 DPRINTK("found ATAPI device by sig\n");
1170 return ATA_DEV_ATAPI;
1171 }
1172
1173 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1174 DPRINTK("found PMP device by sig\n");
1175 return ATA_DEV_PMP;
1176 }
1177
1178 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1179 printk(KERN_INFO "ata: SEMB device ignored\n");
1180 return ATA_DEV_SEMB_UNSUP; /* not yet */
1181 }
1182
1183 DPRINTK("unknown device\n");
1184 return ATA_DEV_UNKNOWN;
1185 }
1186
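/*
 * Usage sketch (illustrative, not part of the original file; the
 * example_* helper is hypothetical): a D2H FIS with lbam/lbah ==
 * 0x14/0xeb carries the ATAPI signature.
 */
static void __maybe_unused example_classify_by_sig(void)
{
	struct ata_taskfile tf = { .lbam = 0x14, .lbah = 0xeb };

	WARN_ON(ata_dev_classify(&tf) != ATA_DEV_ATAPI);
}
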
1187 /**
1188 * ata_id_string - Convert IDENTIFY DEVICE page into string
1189 * @id: IDENTIFY DEVICE results we will examine
1190 * @s: string into which data is output
1191 * @ofs: offset into identify device page
1192 * @len: length of string to return. must be an even number.
1193 *
1194 * The strings in the IDENTIFY DEVICE page are broken up into
1195 * 16-bit chunks. Run through the string, and output each
1196 * 8-bit chunk linearly, regardless of platform.
1197 *
1198 * LOCKING:
1199 * caller.
1200 */
1201
1202 void ata_id_string(const u16 *id, unsigned char *s,
1203 unsigned int ofs, unsigned int len)
1204 {
1205 unsigned int c;
1206
1207 BUG_ON(len & 1);
1208
1209 while (len > 0) {
1210 c = id[ofs] >> 8;
1211 *s = c;
1212 s++;
1213
1214 c = id[ofs] & 0xff;
1215 *s = c;
1216 s++;
1217
1218 ofs++;
1219 len -= 2;
1220 }
1221 }
1222
1223 /**
1224 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1225 * @id: IDENTIFY DEVICE results we will examine
1226 * @s: string into which data is output
1227 * @ofs: offset into identify device page
1228 * @len: length of string to return. must be an odd number.
1229 *
1230 * This function is identical to ata_id_string except that it
1231 * trims trailing spaces and terminates the resulting string with
1232 * null. @len must be actual maximum length (even number) + 1.
1233 *
1234 * LOCKING:
1235 * caller.
1236 */
1237 void ata_id_c_string(const u16 *id, unsigned char *s,
1238 unsigned int ofs, unsigned int len)
1239 {
1240 unsigned char *p;
1241
1242 ata_id_string(id, s, ofs, len - 1);
1243
1244 p = s + strnlen(s, len - 1);
1245 while (p > s && p[-1] == ' ')
1246 p--;
1247 *p = '\0';
1248 }
1249
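/*
 * Usage sketch (illustrative, hypothetical helper): pull the model
 * string out of IDENTIFY data.  ATA_ID_PROD/ATA_ID_PROD_LEN come from
 * <linux/ata.h>; the +1 keeps @len odd, as required above.
 */
static void __maybe_unused example_read_model_string(const u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];

	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
}
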
1250 static u64 ata_id_n_sectors(const u16 *id)
1251 {
1252 if (ata_id_has_lba(id)) {
1253 if (ata_id_has_lba48(id))
1254 return ata_id_u64(id, 100);
1255 else
1256 return ata_id_u32(id, 60);
1257 } else {
1258 if (ata_id_current_chs_valid(id))
1259 return ata_id_u32(id, 57);
1260 else
1261 return id[1] * id[3] * id[6];
1262 }
1263 }
1264
1265 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1266 {
1267 u64 sectors = 0;
1268
1269 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1270 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1271 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1272 sectors |= (tf->lbah & 0xff) << 16;
1273 sectors |= (tf->lbam & 0xff) << 8;
1274 sectors |= (tf->lbal & 0xff);
1275
1276 return sectors;
1277 }
1278
1279 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1280 {
1281 u64 sectors = 0;
1282
1283 sectors |= (tf->device & 0x0f) << 24;
1284 sectors |= (tf->lbah & 0xff) << 16;
1285 sectors |= (tf->lbam & 0xff) << 8;
1286 sectors |= (tf->lbal & 0xff);
1287
1288 return sectors;
1289 }
1290
1291 /**
1292 * ata_read_native_max_address - Read native max address
1293 * @dev: target device
1294 * @max_sectors: out parameter for the result native max address
1295 *
1296 * Perform an LBA48 or LBA28 native size query upon the device in
1297 * question.
1298 *
1299 * RETURNS:
1300 * 0 on success, -EACCES if command is aborted by the drive.
1301 * -EIO on other errors.
1302 */
1303 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1304 {
1305 unsigned int err_mask;
1306 struct ata_taskfile tf;
1307 int lba48 = ata_id_has_lba48(dev->id);
1308
1309 ata_tf_init(dev, &tf);
1310
1311 /* always clear all address registers */
1312 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1313
1314 if (lba48) {
1315 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1316 tf.flags |= ATA_TFLAG_LBA48;
1317 } else
1318 tf.command = ATA_CMD_READ_NATIVE_MAX;
1319
1320 tf.protocol |= ATA_PROT_NODATA;
1321 tf.device |= ATA_LBA;
1322
1323 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1324 if (err_mask) {
1325 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1326 "max address (err_mask=0x%x)\n", err_mask);
1327 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1328 return -EACCES;
1329 return -EIO;
1330 }
1331
1332 if (lba48)
1333 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1334 else
1335 *max_sectors = ata_tf_to_lba(&tf) + 1;
1336 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1337 (*max_sectors)--;
1338 return 0;
1339 }
1340
1341 /**
1342 * ata_set_max_sectors - Set max sectors
1343 * @dev: target device
1344 * @new_sectors: new max sectors value to set for the device
1345 *
1346 * Set max sectors of @dev to @new_sectors.
1347 *
1348 * RETURNS:
1349 * 0 on success, -EACCES if command is aborted or denied (due to
1350 * previous non-volatile SET_MAX) by the drive. -EIO on other
1351 * errors.
1352 */
1353 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1354 {
1355 unsigned int err_mask;
1356 struct ata_taskfile tf;
1357 int lba48 = ata_id_has_lba48(dev->id);
1358
1359 new_sectors--;
1360
1361 ata_tf_init(dev, &tf);
1362
1363 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1364
1365 if (lba48) {
1366 tf.command = ATA_CMD_SET_MAX_EXT;
1367 tf.flags |= ATA_TFLAG_LBA48;
1368
1369 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1370 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1371 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1372 } else {
1373 tf.command = ATA_CMD_SET_MAX;
1374
1375 tf.device |= (new_sectors >> 24) & 0xf;
1376 }
1377
1378 tf.protocol |= ATA_PROT_NODATA;
1379 tf.device |= ATA_LBA;
1380
1381 tf.lbal = (new_sectors >> 0) & 0xff;
1382 tf.lbam = (new_sectors >> 8) & 0xff;
1383 tf.lbah = (new_sectors >> 16) & 0xff;
1384
1385 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1386 if (err_mask) {
1387 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1388 "max address (err_mask=0x%x)\n", err_mask);
1389 if (err_mask == AC_ERR_DEV &&
1390 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1391 return -EACCES;
1392 return -EIO;
1393 }
1394
1395 return 0;
1396 }
1397
1398 /**
1399 * ata_hpa_resize - Resize a device with an HPA set
1400 * @dev: Device to resize
1401 *
1402 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1403 * it if required to the full size of the media. The caller must check
1404 * the drive has the HPA feature set enabled.
1405 *
1406 * RETURNS:
1407 * 0 on success, -errno on failure.
1408 */
1409 static int ata_hpa_resize(struct ata_device *dev)
1410 {
1411 struct ata_eh_context *ehc = &dev->link->eh_context;
1412 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1413 u64 sectors = ata_id_n_sectors(dev->id);
1414 u64 native_sectors;
1415 int rc;
1416
1417 /* do we need to do it? */
1418 if (dev->class != ATA_DEV_ATA ||
1419 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1420 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1421 return 0;
1422
1423 /* read native max address */
1424 rc = ata_read_native_max_address(dev, &native_sectors);
1425 if (rc) {
1426 /* If device aborted the command or HPA isn't going to
1427 * be unlocked, skip HPA resizing.
1428 */
1429 if (rc == -EACCES || !ata_ignore_hpa) {
1430 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1431 "broken, skipping HPA handling\n");
1432 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1433
1434 /* we can continue if device aborted the command */
1435 if (rc == -EACCES)
1436 rc = 0;
1437 }
1438
1439 return rc;
1440 }
1441
1442 /* nothing to do? */
1443 if (native_sectors <= sectors || !ata_ignore_hpa) {
1444 if (!print_info || native_sectors == sectors)
1445 return 0;
1446
1447 if (native_sectors > sectors)
1448 ata_dev_printk(dev, KERN_INFO,
1449 "HPA detected: current %llu, native %llu\n",
1450 (unsigned long long)sectors,
1451 (unsigned long long)native_sectors);
1452 else if (native_sectors < sectors)
1453 ata_dev_printk(dev, KERN_WARNING,
1454 "native sectors (%llu) is smaller than "
1455 "sectors (%llu)\n",
1456 (unsigned long long)native_sectors,
1457 (unsigned long long)sectors);
1458 return 0;
1459 }
1460
1461 /* let's unlock HPA */
1462 rc = ata_set_max_sectors(dev, native_sectors);
1463 if (rc == -EACCES) {
1464 /* if device aborted the command, skip HPA resizing */
1465 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1466 "(%llu -> %llu), skipping HPA handling\n",
1467 (unsigned long long)sectors,
1468 (unsigned long long)native_sectors);
1469 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1470 return 0;
1471 } else if (rc)
1472 return rc;
1473
1474 /* re-read IDENTIFY data */
1475 rc = ata_dev_reread_id(dev, 0);
1476 if (rc) {
1477 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1478 "data after HPA resizing\n");
1479 return rc;
1480 }
1481
1482 if (print_info) {
1483 u64 new_sectors = ata_id_n_sectors(dev->id);
1484 ata_dev_printk(dev, KERN_INFO,
1485 "HPA unlocked: %llu -> %llu, native %llu\n",
1486 (unsigned long long)sectors,
1487 (unsigned long long)new_sectors,
1488 (unsigned long long)native_sectors);
1489 }
1490
1491 return 0;
1492 }
1493
1494 /**
1495 * ata_dump_id - IDENTIFY DEVICE info debugging output
1496 * @id: IDENTIFY DEVICE page to dump
1497 *
1498 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1499 * page.
1500 *
1501 * LOCKING:
1502 * caller.
1503 */
1504
1505 static inline void ata_dump_id(const u16 *id)
1506 {
1507 DPRINTK("49==0x%04x "
1508 "53==0x%04x "
1509 "63==0x%04x "
1510 "64==0x%04x "
1511 "75==0x%04x \n",
1512 id[49],
1513 id[53],
1514 id[63],
1515 id[64],
1516 id[75]);
1517 DPRINTK("80==0x%04x "
1518 "81==0x%04x "
1519 "82==0x%04x "
1520 "83==0x%04x "
1521 "84==0x%04x \n",
1522 id[80],
1523 id[81],
1524 id[82],
1525 id[83],
1526 id[84]);
1527 DPRINTK("88==0x%04x "
1528 "93==0x%04x\n",
1529 id[88],
1530 id[93]);
1531 }
1532
1533 /**
1534 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1535 * @id: IDENTIFY data to compute xfer mask from
1536 *
1537 * Compute the xfermask for this device. This is not as trivial
1538 * as it seems if we must consider early devices correctly.
1539 *
1540 * FIXME: pre IDE drive timing (do we care ?).
1541 *
1542 * LOCKING:
1543 * None.
1544 *
1545 * RETURNS:
1546 * Computed xfermask
1547 */
1548 unsigned long ata_id_xfermask(const u16 *id)
1549 {
1550 unsigned long pio_mask, mwdma_mask, udma_mask;
1551
1552 /* Usual case. Word 53 indicates word 64 is valid */
1553 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1554 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1555 pio_mask <<= 3;
1556 pio_mask |= 0x7;
1557 } else {
1558 /* If word 64 isn't valid then Word 51 high byte holds
1559 * the PIO timing number for the maximum. Turn it into
1560 * a mask.
1561 */
1562 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1563 if (mode < 5) /* Valid PIO range */
1564 pio_mask = (2 << mode) - 1;
1565 else
1566 pio_mask = 1;
1567
1568 /* But wait.. there's more. Design your standards by
1569 * committee and you too can get a free iordy field to
1570 * process. However it's the speeds, not the modes, that
1571 * are supported... Note drivers using the timing API
1572 * will get this right anyway
1573 */
1574 }
1575
1576 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1577
1578 if (ata_id_is_cfa(id)) {
1579 /*
1580 * Process compact flash extended modes
1581 */
1582 int pio = id[163] & 0x7;
1583 int dma = (id[163] >> 3) & 7;
1584
1585 if (pio)
1586 pio_mask |= (1 << 5);
1587 if (pio > 1)
1588 pio_mask |= (1 << 6);
1589 if (dma)
1590 mwdma_mask |= (1 << 3);
1591 if (dma > 1)
1592 mwdma_mask |= (1 << 4);
1593 }
1594
1595 udma_mask = 0;
1596 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1597 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1598
1599 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1600 }
1601
1602 /**
1603 * ata_pio_queue_task - Queue port_task
1604 * @ap: The ata_port to queue port_task for
1605 * @data: data for the port task function to use
1606 * @delay: delay time in msecs for workqueue function
1607 *
1608 * Schedule the port's port_task to run with @data after @delay
1609 * msecs. There is one port_task per port and it's the low level
1610 * driver's responsibility to make sure that only
1611 * one task is active at any given time.
1612 *
1613 * libata core layer takes care of synchronization between
1614 * port_task and EH. ata_pio_queue_task() may be ignored for EH
1615 * synchronization.
1616 *
1617 * LOCKING:
1618 * Inherited from caller.
1619 */
1620 void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
1621 {
1622 ap->port_task_data = data;
1623
1624 /* may fail if ata_port_flush_task() in progress */
1625 queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
1626 }
1627
1628 /**
1629 * ata_port_flush_task - Flush port_task
1630 * @ap: The ata_port to flush port_task for
1631 *
1632 * After this function completes, port_task is guaranteed not to
1633 * be running or scheduled.
1634 *
1635 * LOCKING:
1636 * Kernel thread context (may sleep)
1637 */
1638 void ata_port_flush_task(struct ata_port *ap)
1639 {
1640 DPRINTK("ENTER\n");
1641
1642 cancel_rearming_delayed_work(&ap->port_task);
1643
1644 if (ata_msg_ctl(ap))
1645 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1646 }
1647
1648 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1649 {
1650 struct completion *waiting = qc->private_data;
1651
1652 complete(waiting);
1653 }
1654
1655 /**
1656 * ata_exec_internal_sg - execute libata internal command
1657 * @dev: Device to which the command is sent
1658 * @tf: Taskfile registers for the command and the result
1659 * @cdb: CDB for packet command
1660 * @dma_dir: Data transfer direction of the command
1661 * @sgl: sg list for the data buffer of the command
1662 * @n_elem: Number of sg entries
1663 * @timeout: Timeout in msecs (0 for default)
1664 *
1665 * Executes libata internal command with timeout. @tf contains
1666 * command on entry and result on return. Timeout and error
1667 * conditions are reported via return value. No recovery action
1668 * is taken after a command times out. It's the caller's duty to
1669 * clean up after timeout.
1670 *
1671 * LOCKING:
1672 * None. Should be called with kernel context, might sleep.
1673 *
1674 * RETURNS:
1675 * Zero on success, AC_ERR_* mask on failure
1676 */
1677 unsigned ata_exec_internal_sg(struct ata_device *dev,
1678 struct ata_taskfile *tf, const u8 *cdb,
1679 int dma_dir, struct scatterlist *sgl,
1680 unsigned int n_elem, unsigned long timeout)
1681 {
1682 struct ata_link *link = dev->link;
1683 struct ata_port *ap = link->ap;
1684 u8 command = tf->command;
1685 int auto_timeout = 0;
1686 struct ata_queued_cmd *qc;
1687 unsigned int tag, preempted_tag;
1688 u32 preempted_sactive, preempted_qc_active;
1689 int preempted_nr_active_links;
1690 DECLARE_COMPLETION_ONSTACK(wait);
1691 unsigned long flags;
1692 unsigned int err_mask;
1693 int rc;
1694
1695 spin_lock_irqsave(ap->lock, flags);
1696
1697 /* no internal command while frozen */
1698 if (ap->pflags & ATA_PFLAG_FROZEN) {
1699 spin_unlock_irqrestore(ap->lock, flags);
1700 return AC_ERR_SYSTEM;
1701 }
1702
1703 /* initialize internal qc */
1704
1705 /* XXX: Tag 0 is used for drivers with legacy EH as some
1706 * drivers choke if any other tag is given. This breaks
1707 * ata_tag_internal() test for those drivers. Don't use new
1708 * EH stuff without converting to it.
1709 */
1710 if (ap->ops->error_handler)
1711 tag = ATA_TAG_INTERNAL;
1712 else
1713 tag = 0;
1714
1715 qc = __ata_qc_from_tag(ap, tag);
1716
1717 qc->tag = tag;
1718 qc->scsicmd = NULL;
1719 qc->ap = ap;
1720 qc->dev = dev;
1721 ata_qc_reinit(qc);
1722
1723 preempted_tag = link->active_tag;
1724 preempted_sactive = link->sactive;
1725 preempted_qc_active = ap->qc_active;
1726 preempted_nr_active_links = ap->nr_active_links;
1727 link->active_tag = ATA_TAG_POISON;
1728 link->sactive = 0;
1729 ap->qc_active = 0;
1730 ap->nr_active_links = 0;
1731
1732 /* prepare & issue qc */
1733 qc->tf = *tf;
1734 if (cdb)
1735 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1736 qc->flags |= ATA_QCFLAG_RESULT_TF;
1737 qc->dma_dir = dma_dir;
1738 if (dma_dir != DMA_NONE) {
1739 unsigned int i, buflen = 0;
1740 struct scatterlist *sg;
1741
1742 for_each_sg(sgl, sg, n_elem, i)
1743 buflen += sg->length;
1744
1745 ata_sg_init(qc, sgl, n_elem);
1746 qc->nbytes = buflen;
1747 }
1748
1749 qc->private_data = &wait;
1750 qc->complete_fn = ata_qc_complete_internal;
1751
1752 ata_qc_issue(qc);
1753
1754 spin_unlock_irqrestore(ap->lock, flags);
1755
1756 if (!timeout) {
1757 if (ata_probe_timeout)
1758 timeout = ata_probe_timeout * 1000;
1759 else {
1760 timeout = ata_internal_cmd_timeout(dev, command);
1761 auto_timeout = 1;
1762 }
1763 }
1764
1765 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1766
1767 ata_port_flush_task(ap);
1768
1769 if (!rc) {
1770 spin_lock_irqsave(ap->lock, flags);
1771
1772 /* We're racing with irq here. If we lose, the
1773 * following test prevents us from completing the qc
1774 * twice. If we win, the port is frozen and will be
1775 * cleaned up by ->post_internal_cmd().
1776 */
1777 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1778 qc->err_mask |= AC_ERR_TIMEOUT;
1779
1780 if (ap->ops->error_handler)
1781 ata_port_freeze(ap);
1782 else
1783 ata_qc_complete(qc);
1784
1785 if (ata_msg_warn(ap))
1786 ata_dev_printk(dev, KERN_WARNING,
1787 "qc timeout (cmd 0x%x)\n", command);
1788 }
1789
1790 spin_unlock_irqrestore(ap->lock, flags);
1791 }
1792
1793 /* do post_internal_cmd */
1794 if (ap->ops->post_internal_cmd)
1795 ap->ops->post_internal_cmd(qc);
1796
1797 /* perform minimal error analysis */
1798 if (qc->flags & ATA_QCFLAG_FAILED) {
1799 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1800 qc->err_mask |= AC_ERR_DEV;
1801
1802 if (!qc->err_mask)
1803 qc->err_mask |= AC_ERR_OTHER;
1804
1805 if (qc->err_mask & ~AC_ERR_OTHER)
1806 qc->err_mask &= ~AC_ERR_OTHER;
1807 }
1808
1809 /* finish up */
1810 spin_lock_irqsave(ap->lock, flags);
1811
1812 *tf = qc->result_tf;
1813 err_mask = qc->err_mask;
1814
1815 ata_qc_free(qc);
1816 link->active_tag = preempted_tag;
1817 link->sactive = preempted_sactive;
1818 ap->qc_active = preempted_qc_active;
1819 ap->nr_active_links = preempted_nr_active_links;
1820
1821 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1822 * Until those drivers are fixed, we detect the condition
1823 * here, fail the command with AC_ERR_SYSTEM and reenable the
1824 * port.
1825 *
1826 * Note that this doesn't change any behavior as internal
1827 * command failure results in disabling the device in the
1828 * higher layer for LLDDs without new reset/EH callbacks.
1829 *
1830 * Kill the following code as soon as those drivers are fixed.
1831 */
1832 if (ap->flags & ATA_FLAG_DISABLED) {
1833 err_mask |= AC_ERR_SYSTEM;
1834 ata_port_probe(ap);
1835 }
1836
1837 spin_unlock_irqrestore(ap->lock, flags);
1838
1839 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1840 ata_internal_cmd_timed_out(dev, command);
1841
1842 return err_mask;
1843 }
1844
1845 /**
1846 * ata_exec_internal - execute libata internal command
1847 * @dev: Device to which the command is sent
1848 * @tf: Taskfile registers for the command and the result
1849 * @cdb: CDB for packet command
1850 * @dma_dir: Data transfer direction of the command
1851 * @buf: Data buffer of the command
1852 * @buflen: Length of data buffer
1853 * @timeout: Timeout in msecs (0 for default)
1854 *
1855 * Wrapper around ata_exec_internal_sg() which takes simple
1856 * buffer instead of sg list.
1857 *
1858 * LOCKING:
1859 * None. Should be called with kernel context, might sleep.
1860 *
1861 * RETURNS:
1862 * Zero on success, AC_ERR_* mask on failure
1863 */
1864 unsigned ata_exec_internal(struct ata_device *dev,
1865 struct ata_taskfile *tf, const u8 *cdb,
1866 int dma_dir, void *buf, unsigned int buflen,
1867 unsigned long timeout)
1868 {
1869 struct scatterlist *psg = NULL, sg;
1870 unsigned int n_elem = 0;
1871
1872 if (dma_dir != DMA_NONE) {
1873 WARN_ON(!buf);
1874 sg_init_one(&sg, buf, buflen);
1875 psg = &sg;
1876 n_elem++;
1877 }
1878
1879 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1880 timeout);
1881 }
1882
1883 /**
1884 * ata_do_simple_cmd - execute simple internal command
1885 * @dev: Device to which the command is sent
1886 * @cmd: Opcode to execute
1887 *
1888 * Execute a 'simple' command, that only consists of the opcode
1889 * 'cmd' itself, without filling any other registers
1890 *
1891 * LOCKING:
1892 * Kernel thread context (may sleep).
1893 *
1894 * RETURNS:
1895 * Zero on success, AC_ERR_* mask on failure
1896 */
1897 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1898 {
1899 struct ata_taskfile tf;
1900
1901 ata_tf_init(dev, &tf);
1902
1903 tf.command = cmd;
1904 tf.flags |= ATA_TFLAG_DEVICE;
1905 tf.protocol = ATA_PROT_NODATA;
1906
1907 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1908 }
1909
1910 /**
1911 * ata_pio_need_iordy - check if iordy needed
1912 * @adev: ATA device
1913 *
1914 * Check if the current speed of the device requires IORDY. Used
1915 * by various controllers for chip configuration.
1916 */
1917
1918 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1919 {
1920 /* Controller doesn't support IORDY. Probably a pointless check
1921 as the caller should know this */
1922 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1923 return 0;
1924 /* For PIO3 and higher, IORDY is mandatory */
1925 if (adev->pio_mode > XFER_PIO_2)
1926 return 1;
1927 /* We turn it on when possible */
1928 if (ata_id_has_iordy(adev->id))
1929 return 1;
1930 return 0;
1931 }
1932
1933 /**
1934 * ata_pio_mask_no_iordy - Return the non IORDY mask
1935 * @adev: ATA device
1936 *
1937 * Compute the highest mode possible if we are not using iordy. Return
1938 * -1 if no iordy mode is available.
1939 */
1940
1941 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1942 {
1943 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1944 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1945 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1946 /* Is the speed faster than the drive allows non IORDY ? */
1947 if (pio) {
1948 /* This is cycle times not frequency - watch the logic! */
1949 if (pio > 240) /* PIO2 is 240nS per cycle */
1950 return 3 << ATA_SHIFT_PIO;
1951 return 7 << ATA_SHIFT_PIO;
1952 }
1953 }
1954 return 3 << ATA_SHIFT_PIO;
1955 }
1956
1957 /**
1958 * ata_do_dev_read_id - default ID read method
1959 * @dev: device
1960 * @tf: proposed taskfile
1961 * @id: data buffer
1962 *
1963 * Issue the identify taskfile and hand back the buffer containing
1964 * identify data. For some RAID controllers and for pre ATA devices
1965 * this function is wrapped or replaced by the driver
1966 */
1967 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1968 struct ata_taskfile *tf, u16 *id)
1969 {
1970 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1971 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1972 }
1973
1974 /**
1975 * ata_dev_read_id - Read ID data from the specified device
1976 * @dev: target device
1977 * @p_class: pointer to class of the target device (may be changed)
1978 * @flags: ATA_READID_* flags
1979 * @id: buffer to read IDENTIFY data into
1980 *
1981 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1982 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1983 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1984 * for pre-ATA4 drives.
1985 *
1986 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1987 * now we abort if we hit that case.
1988 *
1989 * LOCKING:
1990 * Kernel thread context (may sleep)
1991 *
1992 * RETURNS:
1993 * 0 on success, -errno otherwise.
1994 */
1995 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1996 unsigned int flags, u16 *id)
1997 {
1998 struct ata_port *ap = dev->link->ap;
1999 unsigned int class = *p_class;
2000 struct ata_taskfile tf;
2001 unsigned int err_mask = 0;
2002 const char *reason;
2003 int may_fallback = 1, tried_spinup = 0;
2004 int rc;
2005
2006 if (ata_msg_ctl(ap))
2007 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2008
2009 retry:
2010 ata_tf_init(dev, &tf);
2011
2012 switch (class) {
2013 case ATA_DEV_ATA:
2014 tf.command = ATA_CMD_ID_ATA;
2015 break;
2016 case ATA_DEV_ATAPI:
2017 tf.command = ATA_CMD_ID_ATAPI;
2018 break;
2019 default:
2020 rc = -ENODEV;
2021 reason = "unsupported class";
2022 goto err_out;
2023 }
2024
2025 tf.protocol = ATA_PROT_PIO;
2026
2027 /* Some devices choke if TF registers contain garbage. Make
2028 * sure those are properly initialized.
2029 */
2030 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2031
2032 /* Device presence detection is unreliable on some
2033 * controllers. Always poll IDENTIFY if available.
2034 */
2035 tf.flags |= ATA_TFLAG_POLLING;
2036
2037 if (ap->ops->read_id)
2038 err_mask = ap->ops->read_id(dev, &tf, id);
2039 else
2040 err_mask = ata_do_dev_read_id(dev, &tf, id);
2041
2042 if (err_mask) {
2043 if (err_mask & AC_ERR_NODEV_HINT) {
2044 ata_dev_printk(dev, KERN_DEBUG,
2045 "NODEV after polling detection\n");
2046 return -ENOENT;
2047 }
2048
2049 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2050 /* Device or controller might have reported
2051 * the wrong device class. Give a shot at the
2052 * other IDENTIFY if the current one is
2053 * aborted by the device.
2054 */
2055 if (may_fallback) {
2056 may_fallback = 0;
2057
2058 if (class == ATA_DEV_ATA)
2059 class = ATA_DEV_ATAPI;
2060 else
2061 class = ATA_DEV_ATA;
2062 goto retry;
2063 }
2064
2065 /* Control reaches here iff the device aborted
2066 * both flavors of IDENTIFYs which happens
2067 * sometimes with phantom devices.
2068 */
2069 ata_dev_printk(dev, KERN_DEBUG,
2070 "both IDENTIFYs aborted, assuming NODEV\n");
2071 return -ENOENT;
2072 }
2073
2074 rc = -EIO;
2075 reason = "I/O error";
2076 goto err_out;
2077 }
2078
2079 /* Falling back doesn't make sense if ID data was read
2080 * successfully at least once.
2081 */
2082 may_fallback = 0;
2083
2084 swap_buf_le16(id, ATA_ID_WORDS);
2085
2086 /* sanity check */
2087 rc = -EINVAL;
2088 reason = "device reports invalid type";
2089
2090 if (class == ATA_DEV_ATA) {
2091 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2092 goto err_out;
2093 } else {
2094 if (ata_id_is_ata(id))
2095 goto err_out;
2096 }
2097
2098 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2099 tried_spinup = 1;
2100 /*
2101 * Drive powered-up in standby mode, and requires a specific
2102 * SET_FEATURES spin-up subcommand before it will accept
2103 * anything other than the original IDENTIFY command.
2104 */
2105 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2106 if (err_mask && id[2] != 0x738c) {
2107 rc = -EIO;
2108 reason = "SPINUP failed";
2109 goto err_out;
2110 }
2111 /*
2112 * If the drive initially returned incomplete IDENTIFY info,
2113 * we now must reissue the IDENTIFY command.
2114 */
2115 if (id[2] == 0x37c8)
2116 goto retry;
2117 }
2118
2119 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2120 /*
2121 * The exact sequence expected by certain pre-ATA4 drives is:
2122 * SRST RESET
2123 * IDENTIFY (optional in early ATA)
2124 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2125 * anything else..
2126 * Some drives were very specific about that exact sequence.
2127 *
2128 * Note that ATA4 says lba is mandatory so the second check
2129 * should never trigger.
2130 */
2131 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2132 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2133 if (err_mask) {
2134 rc = -EIO;
2135 reason = "INIT_DEV_PARAMS failed";
2136 goto err_out;
2137 }
2138
2139 /* current CHS translation info (id[53-58]) might be
2140 * changed. reread the identify device info.
2141 */
2142 flags &= ~ATA_READID_POSTRESET;
2143 goto retry;
2144 }
2145 }
2146
2147 *p_class = class;
2148
2149 return 0;
2150
2151 err_out:
2152 if (ata_msg_warn(ap))
2153 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2154 "(%s, err_mask=0x%x)\n", reason, err_mask);
2155 return rc;
2156 }
2157
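/* Heuristic: the port reports a SATA cable but the device does not
 * claim SATA in its IDENTIFY data, so it is most likely a PATA drive
 * behind a SATA bridge and should receive the conservative bridge
 * limits, unless the blacklist explicitly marks the bridge as OK.
 */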
2158 static inline u8 ata_dev_knobble(struct ata_device *dev)
2159 {
2160 struct ata_port *ap = dev->link->ap;
2161
2162 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2163 return 0;
2164
2165 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2166 }
2167
2168 static void ata_dev_config_ncq(struct ata_device *dev,
2169 char *desc, size_t desc_sz)
2170 {
2171 struct ata_port *ap = dev->link->ap;
2172 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2173
2174 if (!ata_id_has_ncq(dev->id)) {
2175 desc[0] = '\0';
2176 return;
2177 }
2178 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2179 snprintf(desc, desc_sz, "NCQ (not used)");
2180 return;
2181 }
2182 if (ap->flags & ATA_FLAG_NCQ) {
2183 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2184 dev->flags |= ATA_DFLAG_NCQ;
2185 }
2186
2187 if (hdepth >= ddepth)
2188 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2189 else
2190 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2191 }
2192
2193 /**
2194 * ata_dev_configure - Configure the specified ATA/ATAPI device
2195 * @dev: Target device to configure
2196 *
2197 * Configure @dev according to @dev->id. Generic and low-level
2198 * driver specific fixups are also applied.
2199 *
2200 * LOCKING:
2201 * Kernel thread context (may sleep)
2202 *
2203 * RETURNS:
2204 * 0 on success, -errno otherwise
2205 */
2206 int ata_dev_configure(struct ata_device *dev)
2207 {
2208 struct ata_port *ap = dev->link->ap;
2209 struct ata_eh_context *ehc = &dev->link->eh_context;
2210 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2211 const u16 *id = dev->id;
2212 unsigned long xfer_mask;
2213 char revbuf[7]; /* XYZ-99\0 */
2214 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2215 char modelbuf[ATA_ID_PROD_LEN+1];
2216 int rc;
2217
2218 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2219 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2220 __func__);
2221 return 0;
2222 }
2223
2224 if (ata_msg_probe(ap))
2225 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2226
2227 /* set horkage */
2228 dev->horkage |= ata_dev_blacklisted(dev);
2229 ata_force_horkage(dev);
2230
2231 if (dev->horkage & ATA_HORKAGE_DISABLE) {
2232 ata_dev_printk(dev, KERN_INFO,
2233 "unsupported device, disabling\n");
2234 ata_dev_disable(dev);
2235 return 0;
2236 }
2237
2238 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2239 dev->class == ATA_DEV_ATAPI) {
2240 ata_dev_printk(dev, KERN_WARNING,
2241 "WARNING: ATAPI is %s, device ignored.\n",
2242 atapi_enabled ? "not supported with this driver"
2243 : "disabled");
2244 ata_dev_disable(dev);
2245 return 0;
2246 }
2247
2248 /* let ACPI work its magic */
2249 rc = ata_acpi_on_devcfg(dev);
2250 if (rc)
2251 return rc;
2252
2253 /* massage HPA, do it early as it might change IDENTIFY data */
2254 rc = ata_hpa_resize(dev);
2255 if (rc)
2256 return rc;
2257
2258 /* print device capabilities */
2259 if (ata_msg_probe(ap))
2260 ata_dev_printk(dev, KERN_DEBUG,
2261 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2262 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2263 __func__,
2264 id[49], id[82], id[83], id[84],
2265 id[85], id[86], id[87], id[88]);
2266
2267 /* initialize to-be-configured parameters */
2268 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2269 dev->max_sectors = 0;
2270 dev->cdb_len = 0;
2271 dev->n_sectors = 0;
2272 dev->cylinders = 0;
2273 dev->heads = 0;
2274 dev->sectors = 0;
2275
2276 /*
2277 * common ATA, ATAPI feature tests
2278 */
2279
2280 /* find max transfer mode; for printk only */
2281 xfer_mask = ata_id_xfermask(id);
2282
2283 if (ata_msg_probe(ap))
2284 ata_dump_id(id);
2285
2286 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2287 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2288 sizeof(fwrevbuf));
2289
2290 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2291 sizeof(modelbuf));
2292
2293 /* ATA-specific feature tests */
2294 if (dev->class == ATA_DEV_ATA) {
2295 if (ata_id_is_cfa(id)) {
2296 if (id[162] & 1) /* CPRM may make this media unusable */
2297 ata_dev_printk(dev, KERN_WARNING,
2298 "supports DRM functions and may "
2299 "not be fully accessable.\n");
2300 snprintf(revbuf, 7, "CFA");
2301 } else {
2302 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2303 /* Warn the user if the device has TPM extensions */
2304 if (ata_id_has_tpm(id))
2305 ata_dev_printk(dev, KERN_WARNING,
2306 "supports DRM functions and may "
2307 "not be fully accessable.\n");
2308 }
2309
2310 dev->n_sectors = ata_id_n_sectors(id);
2311
2312 if (dev->id[59] & 0x100)
2313 dev->multi_count = dev->id[59] & 0xff;
2314
2315 if (ata_id_has_lba(id)) {
2316 const char *lba_desc;
2317 char ncq_desc[20];
2318
2319 lba_desc = "LBA";
2320 dev->flags |= ATA_DFLAG_LBA;
2321 if (ata_id_has_lba48(id)) {
2322 dev->flags |= ATA_DFLAG_LBA48;
2323 lba_desc = "LBA48";
2324
2325 if (dev->n_sectors >= (1UL << 28) &&
2326 ata_id_has_flush_ext(id))
2327 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2328 }
2329
2330 /* config NCQ */
2331 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2332
2333 /* print device info to dmesg */
2334 if (ata_msg_drv(ap) && print_info) {
2335 ata_dev_printk(dev, KERN_INFO,
2336 "%s: %s, %s, max %s\n",
2337 revbuf, modelbuf, fwrevbuf,
2338 ata_mode_string(xfer_mask));
2339 ata_dev_printk(dev, KERN_INFO,
2340 "%Lu sectors, multi %u: %s %s\n",
2341 (unsigned long long)dev->n_sectors,
2342 dev->multi_count, lba_desc, ncq_desc);
2343 }
2344 } else {
2345 /* CHS */
2346
2347 /* Default translation */
2348 dev->cylinders = id[1];
2349 dev->heads = id[3];
2350 dev->sectors = id[6];
2351
2352 if (ata_id_current_chs_valid(id)) {
2353 /* Current CHS translation is valid. */
2354 dev->cylinders = id[54];
2355 dev->heads = id[55];
2356 dev->sectors = id[56];
2357 }
2358
2359 /* print device info to dmesg */
2360 if (ata_msg_drv(ap) && print_info) {
2361 ata_dev_printk(dev, KERN_INFO,
2362 "%s: %s, %s, max %s\n",
2363 revbuf, modelbuf, fwrevbuf,
2364 ata_mode_string(xfer_mask));
2365 ata_dev_printk(dev, KERN_INFO,
2366 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2367 (unsigned long long)dev->n_sectors,
2368 dev->multi_count, dev->cylinders,
2369 dev->heads, dev->sectors);
2370 }
2371 }
2372
2373 dev->cdb_len = 16;
2374 }
2375
2376 /* ATAPI-specific feature tests */
2377 else if (dev->class == ATA_DEV_ATAPI) {
2378 const char *cdb_intr_string = "";
2379 const char *atapi_an_string = "";
2380 const char *dma_dir_string = "";
2381 u32 sntf;
2382
2383 rc = atapi_cdb_len(id);
2384 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2385 if (ata_msg_warn(ap))
2386 ata_dev_printk(dev, KERN_WARNING,
2387 "unsupported CDB len\n");
2388 rc = -EINVAL;
2389 goto err_out_nosup;
2390 }
2391 dev->cdb_len = (unsigned int) rc;
2392
2393 /* Enable ATAPI AN if both the host and device have
2394 * the support. If PMP is attached, SNTF is required
2395 * to enable ATAPI AN to discern between PHY status
2396 * changed notifications and ATAPI ANs.
2397 */
2398 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2399 (!sata_pmp_attached(ap) ||
2400 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2401 unsigned int err_mask;
2402
2403 /* issue SET feature command to turn this on */
2404 err_mask = ata_dev_set_feature(dev,
2405 SETFEATURES_SATA_ENABLE, SATA_AN);
2406 if (err_mask)
2407 ata_dev_printk(dev, KERN_ERR,
2408 "failed to enable ATAPI AN "
2409 "(err_mask=0x%x)\n", err_mask);
2410 else {
2411 dev->flags |= ATA_DFLAG_AN;
2412 atapi_an_string = ", ATAPI AN";
2413 }
2414 }
2415
2416 if (ata_id_cdb_intr(dev->id)) {
2417 dev->flags |= ATA_DFLAG_CDB_INTR;
2418 cdb_intr_string = ", CDB intr";
2419 }
2420
2421 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2422 dev->flags |= ATA_DFLAG_DMADIR;
2423 dma_dir_string = ", DMADIR";
2424 }
2425
2426 /* print device info to dmesg */
2427 if (ata_msg_drv(ap) && print_info)
2428 ata_dev_printk(dev, KERN_INFO,
2429 "ATAPI: %s, %s, max %s%s%s%s\n",
2430 modelbuf, fwrevbuf,
2431 ata_mode_string(xfer_mask),
2432 cdb_intr_string, atapi_an_string,
2433 dma_dir_string);
2434 }
2435
2436 /* determine max_sectors */
2437 dev->max_sectors = ATA_MAX_SECTORS;
2438 if (dev->flags & ATA_DFLAG_LBA48)
2439 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2440
2441 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2442 if (ata_id_has_hipm(dev->id))
2443 dev->flags |= ATA_DFLAG_HIPM;
2444 if (ata_id_has_dipm(dev->id))
2445 dev->flags |= ATA_DFLAG_DIPM;
2446 }
2447
2448 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2449 200 sectors */
2450 if (ata_dev_knobble(dev)) {
2451 if (ata_msg_drv(ap) && print_info)
2452 ata_dev_printk(dev, KERN_INFO,
2453 "applying bridge limits\n");
2454 dev->udma_mask &= ATA_UDMA5;
2455 dev->max_sectors = ATA_MAX_SECTORS;
2456 }
2457
2458 if ((dev->class == ATA_DEV_ATAPI) &&
2459 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2460 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2461 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2462 }
2463
2464 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2465 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2466 dev->max_sectors);
2467
2468 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2469 dev->horkage |= ATA_HORKAGE_IPM;
2470
2471 /* reset link pm_policy for this port to no pm */
2472 ap->pm_policy = MAX_PERFORMANCE;
2473 }
2474
2475 if (ap->ops->dev_config)
2476 ap->ops->dev_config(dev);
2477
2478 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2479 /* Let the user know. We don't want to disallow opens for
2480 rescue purposes, or in case the vendor is just a blithering
2481 idiot. Do this after the dev_config call as some controllers
2482 with buggy firmware may want to avoid reporting false device
2483 bugs */
2484
2485 if (print_info) {
2486 ata_dev_printk(dev, KERN_WARNING,
2487 "Drive reports diagnostics failure. This may indicate a drive\n");
2488 ata_dev_printk(dev, KERN_WARNING,
2489 "fault or invalid emulation. Contact drive vendor for information.\n");
2490 }
2491 }
2492
2493 return 0;
2494
2495 err_out_nosup:
2496 if (ata_msg_probe(ap))
2497 ata_dev_printk(dev, KERN_DEBUG,
2498 "%s: EXIT, err\n", __func__);
2499 return rc;
2500 }
2501
2502 /**
2503 * ata_cable_40wire - return 40 wire cable type
2504 * @ap: port
2505 *
2506 * Helper method for drivers which want to hardwire 40 wire cable
2507 * detection.
2508 */
2509
2510 int ata_cable_40wire(struct ata_port *ap)
2511 {
2512 return ATA_CBL_PATA40;
2513 }
2514
2515 /**
2516 * ata_cable_80wire - return 80 wire cable type
2517 * @ap: port
2518 *
2519 * Helper method for drivers which want to hardwire 80 wire cable
2520 * detection.
2521 */
2522
2523 int ata_cable_80wire(struct ata_port *ap)
2524 {
2525 return ATA_CBL_PATA80;
2526 }
2527
2528 /**
2529 * ata_cable_unknown - return unknown PATA cable.
2530 * @ap: port
2531 *
2532 * Helper method for drivers which have no PATA cable detection.
2533 */
2534
2535 int ata_cable_unknown(struct ata_port *ap)
2536 {
2537 return ATA_CBL_PATA_UNK;
2538 }
2539
2540 /**
2541 * ata_cable_ignore - return ignored PATA cable.
2542 * @ap: port
2543 *
2544 * Helper method for drivers which don't use cable type to limit
2545 * transfer mode.
2546 */
2547 int ata_cable_ignore(struct ata_port *ap)
2548 {
2549 return ATA_CBL_PATA_IGN;
2550 }
2551
2552 /**
2553 * ata_cable_sata - return SATA cable type
2554 * @ap: port
2555 *
2556 * Helper method for drivers which have SATA cables
2557 */
2558
2559 int ata_cable_sata(struct ata_port *ap)
2560 {
2561 return ATA_CBL_SATA;
2562 }
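/*
 * Illustrative sketch of how an LLD consumes the cable helpers above
 * (the ops structure name and set_piomode hook are made up): when the
 * cable type is fixed by the board design, the driver simply points
 * .cable_detect at the matching helper.
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *		.set_piomode	= foo_set_piomode,
 *	};
 */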
2563
2564 /**
2565 * ata_bus_probe - Reset and probe ATA bus
2566 * @ap: Bus to probe
2567 *
2568 * Master ATA bus probing function. Initiates a hardware-dependent
2569 * bus reset, then attempts to identify any devices found on
2570 * the bus.
2571 *
2572 * LOCKING:
2573 * PCI/etc. bus probe sem.
2574 *
2575 * RETURNS:
2576 * Zero on success, negative errno otherwise.
2577 */
2578
2579 int ata_bus_probe(struct ata_port *ap)
2580 {
2581 unsigned int classes[ATA_MAX_DEVICES];
2582 int tries[ATA_MAX_DEVICES];
2583 int rc;
2584 struct ata_device *dev;
2585
2586 ata_port_probe(ap);
2587
2588 ata_link_for_each_dev(dev, &ap->link)
2589 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2590
2591 retry:
2592 ata_link_for_each_dev(dev, &ap->link) {
2593 /* If we issue an SRST then an ATA drive (not ATAPI)
2594 * may change configuration and be in PIO0 timing. If
2595 * we do a hard reset (or are coming from power on)
2596 * this is true for ATA or ATAPI. Until we've set a
2597 * suitable controller mode we should not touch the
2598 * bus as we may be talking too fast.
2599 */
2600 dev->pio_mode = XFER_PIO_0;
2601
2602 /* If the controller has a pio mode setup function
2603 * then use it to set the chipset to rights. Don't
2604 * touch the DMA setup as that will be dealt with when
2605 * configuring devices.
2606 */
2607 if (ap->ops->set_piomode)
2608 ap->ops->set_piomode(ap, dev);
2609 }
2610
2611 /* reset and determine device classes */
2612 ap->ops->phy_reset(ap);
2613
2614 ata_link_for_each_dev(dev, &ap->link) {
2615 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2616 dev->class != ATA_DEV_UNKNOWN)
2617 classes[dev->devno] = dev->class;
2618 else
2619 classes[dev->devno] = ATA_DEV_NONE;
2620
2621 dev->class = ATA_DEV_UNKNOWN;
2622 }
2623
2624 ata_port_probe(ap);
2625
2626 /* read IDENTIFY page and configure devices. We have to do the identify
2627 specific sequence bass-ackwards so that PDIAG- is released by
2628 the slave device */
2629
2630 ata_link_for_each_dev_reverse(dev, &ap->link) {
2631 if (tries[dev->devno])
2632 dev->class = classes[dev->devno];
2633
2634 if (!ata_dev_enabled(dev))
2635 continue;
2636
2637 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2638 dev->id);
2639 if (rc)
2640 goto fail;
2641 }
2642
2643 /* Now ask for the cable type as PDIAG- should have been released */
2644 if (ap->ops->cable_detect)
2645 ap->cbl = ap->ops->cable_detect(ap);
2646
2647 /* We may have SATA bridge glue hiding here irrespective of the
2648 reported cable types and sensed types */
2649 ata_link_for_each_dev(dev, &ap->link) {
2650 if (!ata_dev_enabled(dev))
2651 continue;
2652 /* SATA drives indicate we have a bridge. We don't know which
2653 end of the link the bridge is on, which is a problem */
2654 if (ata_id_is_sata(dev->id))
2655 ap->cbl = ATA_CBL_SATA;
2656 }
2657
2658 /* After the identify sequence we can now set up the devices. We do
2659 this in the normal order so that the user doesn't get confused */
2660
2661 ata_link_for_each_dev(dev, &ap->link) {
2662 if (!ata_dev_enabled(dev))
2663 continue;
2664
2665 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2666 rc = ata_dev_configure(dev);
2667 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2668 if (rc)
2669 goto fail;
2670 }
2671
2672 /* configure transfer mode */
2673 rc = ata_set_mode(&ap->link, &dev);
2674 if (rc)
2675 goto fail;
2676
2677 ata_link_for_each_dev(dev, &ap->link)
2678 if (ata_dev_enabled(dev))
2679 return 0;
2680
2681 /* no device present, disable port */
2682 ata_port_disable(ap);
2683 return -ENODEV;
2684
2685 fail:
2686 tries[dev->devno]--;
2687
2688 switch (rc) {
2689 case -EINVAL:
2690 /* eeek, something went very wrong, give up */
2691 tries[dev->devno] = 0;
2692 break;
2693
2694 case -ENODEV:
2695 /* give it just one more chance */
2696 tries[dev->devno] = min(tries[dev->devno], 1);
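/* fall through */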
2697 case -EIO:
2698 if (tries[dev->devno] == 1) {
2699 /* This is the last chance, better to slow
2700 * down than lose it.
2701 */
2702 sata_down_spd_limit(&ap->link);
2703 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2704 }
2705 }
2706
2707 if (!tries[dev->devno])
2708 ata_dev_disable(dev);
2709
2710 goto retry;
2711 }
2712
2713 /**
2714 * ata_port_probe - Mark port as enabled
2715 * @ap: Port for which we indicate enablement
2716 *
2717 * Modify @ap data structure such that the system
2718 * thinks that the entire port is enabled.
2719 *
2720 * LOCKING: host lock, or some other form of
2721 * serialization.
2722 */
2723
2724 void ata_port_probe(struct ata_port *ap)
2725 {
2726 ap->flags &= ~ATA_FLAG_DISABLED;
2727 }
2728
2729 /**
2730 * sata_print_link_status - Print SATA link status
2731 * @link: SATA link to printk link status about
2732 *
2733 * This function prints link speed and status of a SATA link.
2734 *
2735 * LOCKING:
2736 * None.
2737 */
2738 static void sata_print_link_status(struct ata_link *link)
2739 {
2740 u32 sstatus, scontrol, tmp;
2741
2742 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2743 return;
2744 sata_scr_read(link, SCR_CONTROL, &scontrol);
2745
2746 if (ata_phys_link_online(link)) {
2747 tmp = (sstatus >> 4) & 0xf;
2748 ata_link_printk(link, KERN_INFO,
2749 "SATA link up %s (SStatus %X SControl %X)\n",
2750 sata_spd_string(tmp), sstatus, scontrol);
2751 } else {
2752 ata_link_printk(link, KERN_INFO,
2753 "SATA link down (SStatus %X SControl %X)\n",
2754 sstatus, scontrol);
2755 }
2756 }
2757
2758 /**
2759 * ata_dev_pair - return other device on cable
2760 * @adev: device
2761 *
2762 * Obtain the other device on the same cable, or NULL if
2763 * none is present.
2764 */
2765
2766 struct ata_device *ata_dev_pair(struct ata_device *adev)
2767 {
2768 struct ata_link *link = adev->link;
2769 struct ata_device *pair = &link->device[1 - adev->devno];
2770 if (!ata_dev_enabled(pair))
2771 return NULL;
2772 return pair;
2773 }
2774
2775 /**
2776 * ata_port_disable - Disable port.
2777 * @ap: Port to be disabled.
2778 *
2779 * Modify @ap data structure such that the system
2780 * thinks that the entire port is disabled, and should
2781 * never attempt to probe or communicate with devices
2782 * on this port.
2783 *
2784 * LOCKING: host lock, or some other form of
2785 * serialization.
2786 */
2787
2788 void ata_port_disable(struct ata_port *ap)
2789 {
2790 ap->link.device[0].class = ATA_DEV_NONE;
2791 ap->link.device[1].class = ATA_DEV_NONE;
2792 ap->flags |= ATA_FLAG_DISABLED;
2793 }
2794
2795 /**
2796 * sata_down_spd_limit - adjust SATA spd limit downward
2797 * @link: Link to adjust SATA spd limit for
2798 *
2799 * Adjust SATA spd limit of @link downward. Note that this
2800 * function only adjusts the limit. The change must be applied
2801 * using sata_set_spd().
2802 *
2803 * LOCKING:
2804 * Inherited from caller.
2805 *
2806 * RETURNS:
2807 * 0 on success, negative errno on failure
2808 */
2809 int sata_down_spd_limit(struct ata_link *link)
2810 {
2811 u32 sstatus, spd, mask;
2812 int rc, highbit;
2813
2814 if (!sata_scr_valid(link))
2815 return -EOPNOTSUPP;
2816
2817 /* If SCR can be read, use it to determine the current SPD.
2818 * If not, use cached value in link->sata_spd.
2819 */
2820 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2821 if (rc == 0)
2822 spd = (sstatus >> 4) & 0xf;
2823 else
2824 spd = link->sata_spd;
2825
2826 mask = link->sata_spd_limit;
2827 if (mask <= 1)
2828 return -EINVAL;
2829
2830 /* unconditionally mask off the highest bit */
2831 highbit = fls(mask) - 1;
2832 mask &= ~(1 << highbit);
2833
2834 /* Mask off all speeds higher than or equal to the current
2835 * one. Force 1.5Gbps if current SPD is not available.
2836 */
2837 if (spd > 1)
2838 mask &= (1 << (spd - 1)) - 1;
2839 else
2840 mask &= 1;
2841
2842 /* were we already at the bottom? */
2843 if (!mask)
2844 return -EINVAL;
2845
2846 link->sata_spd_limit = mask;
2847
2848 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2849 sata_spd_string(fls(mask)));
2850
2851 return 0;
2852 }
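/*
 * Worked example of the mask arithmetic above: with sata_spd_limit =
 * 0x7 and the link currently at spd 2 (3.0 Gbps), the top bit is
 * dropped first (mask = 0x3), then speeds at or above the current one
 * are masked off (mask &= (1 << 1) - 1 = 0x1), so the next hardreset
 * brings the link up at 1.5 Gbps.
 */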
2853
2854 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2855 {
2856 struct ata_link *host_link = &link->ap->link;
2857 u32 limit, target, spd;
2858
2859 limit = link->sata_spd_limit;
2860
2861 /* Don't configure downstream link faster than upstream link.
2862 * It doesn't speed up anything and some PMPs choke on such
2863 * configuration.
2864 */
2865 if (!ata_is_host_link(link) && host_link->sata_spd)
2866 limit &= (1 << host_link->sata_spd) - 1;
2867
2868 if (limit == UINT_MAX)
2869 target = 0;
2870 else
2871 target = fls(limit);
2872
2873 spd = (*scontrol >> 4) & 0xf;
2874 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2875
2876 return spd != target;
2877 }
2878
2879 /**
2880 * sata_set_spd_needed - is SATA spd configuration needed
2881 * @link: Link in question
2882 *
2883 * Test whether the spd limit in SControl matches
2884 * @link->sata_spd_limit. This function is used to determine
2885 * whether hardreset is necessary to apply SATA spd
2886 * configuration.
2887 *
2888 * LOCKING:
2889 * Inherited from caller.
2890 *
2891 * RETURNS:
2892 * 1 if SATA spd configuration is needed, 0 otherwise.
2893 */
2894 static int sata_set_spd_needed(struct ata_link *link)
2895 {
2896 u32 scontrol;
2897
2898 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2899 return 1;
2900
2901 return __sata_set_spd_needed(link, &scontrol);
2902 }
2903
2904 /**
2905 * sata_set_spd - set SATA spd according to spd limit
2906 * @link: Link to set SATA spd for
2907 *
2908 * Set SATA spd of @link according to sata_spd_limit.
2909 *
2910 * LOCKING:
2911 * Inherited from caller.
2912 *
2913 * RETURNS:
2914 * 0 if spd doesn't need to be changed, 1 if spd has been
2915 * changed. Negative errno if SCR registers are inaccessible.
2916 */
2917 int sata_set_spd(struct ata_link *link)
2918 {
2919 u32 scontrol;
2920 int rc;
2921
2922 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2923 return rc;
2924
2925 if (!__sata_set_spd_needed(link, &scontrol))
2926 return 0;
2927
2928 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2929 return rc;
2930
2931 return 1;
2932 }
2933
2934 /*
2935 * This mode timing computation functionality is ported over from
2936 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2937 */
2938 /*
2939 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2940 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2941 * for UDMA6, which is currently supported only by Maxtor drives.
2942 *
2943 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2944 */
2945
2946 static const struct ata_timing ata_timing[] = {
2947 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2948 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2949 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2950 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2951 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2952 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2953 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2954 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2955
2956 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2957 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2958 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2959
2960 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2961 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2962 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2963 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2964 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2965
2966 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2967 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2968 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2969 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2970 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2971 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2972 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2973 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2974
2975 { 0xFF }
2976 };
2977
2978 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2979 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
2980
2981 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2982 {
2983 q->setup = EZ(t->setup * 1000, T);
2984 q->act8b = EZ(t->act8b * 1000, T);
2985 q->rec8b = EZ(t->rec8b * 1000, T);
2986 q->cyc8b = EZ(t->cyc8b * 1000, T);
2987 q->active = EZ(t->active * 1000, T);
2988 q->recover = EZ(t->recover * 1000, T);
2989 q->cycle = EZ(t->cycle * 1000, T);
2990 q->udma = EZ(t->udma * 1000, UT);
2991 }
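/*
 * Worked example for the quantisation above, assuming T is the bus
 * clock period in picoseconds (the nanosecond table entries are
 * multiplied by 1000 first): a 240 ns PIO2 cycle on a 33 MHz clock
 * (T = 30303) gives ENOUGH(240000, 30303) = (240000 - 1)/30303 + 1 =
 * 8 clocks; the division always rounds up, so the programmed timing
 * is never faster than the spec value.
 */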
2992
2993 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2994 struct ata_timing *m, unsigned int what)
2995 {
2996 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2997 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2998 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2999 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
3000 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
3001 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3002 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
3003 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
3004 }
3005
3006 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3007 {
3008 const struct ata_timing *t = ata_timing;
3009
3010 while (xfer_mode > t->mode)
3011 t++;
3012
3013 if (xfer_mode == t->mode)
3014 return t;
3015 return NULL;
3016 }
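/*
 * Note: the linear scan above relies on ata_timing[] being sorted by
 * ascending xfer_mode value and on the 0xFF sentinel entry terminating
 * the walk for modes larger than any table entry.
 */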
3017
3018 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3019 struct ata_timing *t, int T, int UT)
3020 {
3021 const struct ata_timing *s;
3022 struct ata_timing p;
3023
3024 /*
3025 * Find the mode.
3026 */
3027
3028 if (!(s = ata_timing_find_mode(speed)))
3029 return -EINVAL;
3030
3031 memcpy(t, s, sizeof(*s));
3032
3033 /*
3034 * If the drive is an EIDE drive, it can tell us it needs extended
3035 * PIO/MW_DMA cycle timing.
3036 */
3037
3038 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
3039 memset(&p, 0, sizeof(p));
3040 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
3041 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
3042 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
3043 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
3044 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
3045 }
3046 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3047 }
3048
3049 /*
3050 * Convert the timing to bus clock counts.
3051 */
3052
3053 ata_timing_quantize(t, t, T, UT);
3054
3055 /*
3056 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3057 * S.M.A.R.T and some other commands. We have to ensure that the
3058 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3059 */
3060
3061 if (speed > XFER_PIO_6) {
3062 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3063 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3064 }
3065
3066 /*
3067 * Lengthen active & recovery time so that cycle time is correct.
3068 */
3069
3070 if (t->act8b + t->rec8b < t->cyc8b) {
3071 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3072 t->rec8b = t->cyc8b - t->act8b;
3073 }
3074
3075 if (t->active + t->recover < t->cycle) {
3076 t->active += (t->cycle - (t->active + t->recover)) / 2;
3077 t->recover = t->cycle - t->active;
3078 }
3079
3080 /* In a few cases quantisation may produce enough errors to
3081 leave t->cycle too low for the sum of active and recovery;
3082 if so, we must correct this */
3083 if (t->active + t->recover > t->cycle)
3084 t->cycle = t->active + t->recover;
3085
3086 return 0;
3087 }
3088
3089 /**
3090 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3091 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3092 * @cycle: cycle duration in ns
3093 *
3094 * Return matching xfer mode for @cycle. The returned mode is of
3095 * the transfer type specified by @xfer_shift. If @cycle is too
3096 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3097 * than the fastest known mode, the fastest mode is returned.
3098 *
3099 * LOCKING:
3100 * None.
3101 *
3102 * RETURNS:
3103 * Matching xfer_mode, 0xff if no match found.
3104 */
3105 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3106 {
3107 u8 base_mode = 0xff, last_mode = 0xff;
3108 const struct ata_xfer_ent *ent;
3109 const struct ata_timing *t;
3110
3111 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3112 if (ent->shift == xfer_shift)
3113 base_mode = ent->base;
3114
3115 for (t = ata_timing_find_mode(base_mode);
3116 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3117 unsigned short this_cycle;
3118
3119 switch (xfer_shift) {
3120 case ATA_SHIFT_PIO:
3121 case ATA_SHIFT_MWDMA:
3122 this_cycle = t->cycle;
3123 break;
3124 case ATA_SHIFT_UDMA:
3125 this_cycle = t->udma;
3126 break;
3127 default:
3128 return 0xff;
3129 }
3130
3131 if (cycle > this_cycle)
3132 break;
3133
3134 last_mode = t->mode;
3135 }
3136
3137 return last_mode;
3138 }
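/*
 * Illustrative call (a sketch): mapping a measured 80 ns strobe back
 * to a UDMA mode,
 *
 *	u8 mode = ata_timing_cycle2mode(ATA_SHIFT_UDMA, 80);
 *
 * returns XFER_UDMA_1, the fastest UDMA mode whose required cycle
 * (80 ns) is still no shorter than the requested one.
 */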
3139
3140 /**
3141 * ata_down_xfermask_limit - adjust dev xfer masks downward
3142 * @dev: Device to adjust xfer masks
3143 * @sel: ATA_DNXFER_* selector
3144 *
3145 * Adjust xfer masks of @dev downward. Note that this function
3146 * does not apply the change. Invoking ata_set_mode() afterwards
3147 * will apply the limit.
3148 *
3149 * LOCKING:
3150 * Inherited from caller.
3151 *
3152 * RETURNS:
3153 * 0 on success, negative errno on failure
3154 */
3155 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3156 {
3157 char buf[32];
3158 unsigned long orig_mask, xfer_mask;
3159 unsigned long pio_mask, mwdma_mask, udma_mask;
3160 int quiet, highbit;
3161
3162 quiet = !!(sel & ATA_DNXFER_QUIET);
3163 sel &= ~ATA_DNXFER_QUIET;
3164
3165 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3166 dev->mwdma_mask,
3167 dev->udma_mask);
3168 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3169
3170 switch (sel) {
3171 case ATA_DNXFER_PIO:
3172 highbit = fls(pio_mask) - 1;
3173 pio_mask &= ~(1 << highbit);
3174 break;
3175
3176 case ATA_DNXFER_DMA:
3177 if (udma_mask) {
3178 highbit = fls(udma_mask) - 1;
3179 udma_mask &= ~(1 << highbit);
3180 if (!udma_mask)
3181 return -ENOENT;
3182 } else if (mwdma_mask) {
3183 highbit = fls(mwdma_mask) - 1;
3184 mwdma_mask &= ~(1 << highbit);
3185 if (!mwdma_mask)
3186 return -ENOENT;
3187 }
3188 break;
3189
3190 case ATA_DNXFER_40C:
3191 udma_mask &= ATA_UDMA_MASK_40C;
3192 break;
3193
3194 case ATA_DNXFER_FORCE_PIO0:
3195 pio_mask &= 1;
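/* fall through */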
3196 case ATA_DNXFER_FORCE_PIO:
3197 mwdma_mask = 0;
3198 udma_mask = 0;
3199 break;
3200
3201 default:
3202 BUG();
3203 }
3204
3205 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3206
3207 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3208 return -ENOENT;
3209
3210 if (!quiet) {
3211 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3212 snprintf(buf, sizeof(buf), "%s:%s",
3213 ata_mode_string(xfer_mask),
3214 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3215 else
3216 snprintf(buf, sizeof(buf), "%s",
3217 ata_mode_string(xfer_mask));
3218
3219 ata_dev_printk(dev, KERN_WARNING,
3220 "limiting speed to %s\n", buf);
3221 }
3222
3223 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3224 &dev->udma_mask);
3225
3226 return 0;
3227 }
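/*
 * Worked example: ATA_DNXFER_40C on a device advertising UDMA0-6
 * (udma_mask 0x7f) clips the mask to ATA_UDMA_MASK_40C, i.e. UDMA0-2,
 * the fastest modes a 40-wire cable is trusted to carry.
 */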
3228
3229 static int ata_dev_set_mode(struct ata_device *dev)
3230 {
3231 struct ata_eh_context *ehc = &dev->link->eh_context;
3232 const char *dev_err_whine = "";
3233 int ign_dev_err = 0;
3234 unsigned int err_mask;
3235 int rc;
3236
3237 dev->flags &= ~ATA_DFLAG_PIO;
3238 if (dev->xfer_shift == ATA_SHIFT_PIO)
3239 dev->flags |= ATA_DFLAG_PIO;
3240
3241 err_mask = ata_dev_set_xfermode(dev);
3242
3243 if (err_mask & ~AC_ERR_DEV)
3244 goto fail;
3245
3246 /* revalidate */
3247 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3248 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3249 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3250 if (rc)
3251 return rc;
3252
3253 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3254 /* Old CFA may refuse this command, which is just fine */
3255 if (ata_id_is_cfa(dev->id))
3256 ign_dev_err = 1;
3257 /* Catch several broken garbage emulations plus some pre
3258 ATA devices */
3259 if (ata_id_major_version(dev->id) == 0 &&
3260 dev->pio_mode <= XFER_PIO_2)
3261 ign_dev_err = 1;
3262 /* Some very old devices and some bad newer ones fail
3263 any kind of SET_XFERMODE request but support PIO0-2
3264 timings and no IORDY */
3265 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3266 ign_dev_err = 1;
3267 }
3268 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3269 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3270 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3271 dev->dma_mode == XFER_MW_DMA_0 &&
3272 (dev->id[63] >> 8) & 1)
3273 ign_dev_err = 1;
3274
3275 /* if the device is actually configured correctly, ignore dev err */
3276 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3277 ign_dev_err = 1;
3278
3279 if (err_mask & AC_ERR_DEV) {
3280 if (!ign_dev_err)
3281 goto fail;
3282 else
3283 dev_err_whine = " (device error ignored)";
3284 }
3285
3286 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3287 dev->xfer_shift, (int)dev->xfer_mode);
3288
3289 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3290 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3291 dev_err_whine);
3292
3293 return 0;
3294
3295 fail:
3296 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3297 "(err_mask=0x%x)\n", err_mask);
3298 return -EIO;
3299 }
3300
3301 /**
3302 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3303 * @link: link on which timings will be programmed
3304 * @r_failed_dev: out parameter for failed device
3305 *
3306 * Standard implementation of the function used to tune and set
3307 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3308 * ata_dev_set_mode() fails, pointer to the failing device is
3309 * returned in @r_failed_dev.
3310 *
3311 * LOCKING:
3312 * PCI/etc. bus probe sem.
3313 *
3314 * RETURNS:
3315 * 0 on success, negative errno otherwise
3316 */
3317
3318 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3319 {
3320 struct ata_port *ap = link->ap;
3321 struct ata_device *dev;
3322 int rc = 0, used_dma = 0, found = 0;
3323
3324 /* step 1: calculate xfer_mask */
3325 ata_link_for_each_dev(dev, link) {
3326 unsigned long pio_mask, dma_mask;
3327 unsigned int mode_mask;
3328
3329 if (!ata_dev_enabled(dev))
3330 continue;
3331
3332 mode_mask = ATA_DMA_MASK_ATA;
3333 if (dev->class == ATA_DEV_ATAPI)
3334 mode_mask = ATA_DMA_MASK_ATAPI;
3335 else if (ata_id_is_cfa(dev->id))
3336 mode_mask = ATA_DMA_MASK_CFA;
3337
3338 ata_dev_xfermask(dev);
3339 ata_force_xfermask(dev);
3340
3341 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3343
3344 if (libata_dma_mask & mode_mask)
3345 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3346 else
3347 dma_mask = 0;
3348
3349 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3350 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3351
3352 found = 1;
3353 if (ata_dma_enabled(dev))
3354 used_dma = 1;
3355 }
3356 if (!found)
3357 goto out;
3358
3359 /* step 2: always set host PIO timings */
3360 ata_link_for_each_dev(dev, link) {
3361 if (!ata_dev_enabled(dev))
3362 continue;
3363
3364 if (dev->pio_mode == 0xff) {
3365 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3366 rc = -EINVAL;
3367 goto out;
3368 }
3369
3370 dev->xfer_mode = dev->pio_mode;
3371 dev->xfer_shift = ATA_SHIFT_PIO;
3372 if (ap->ops->set_piomode)
3373 ap->ops->set_piomode(ap, dev);
3374 }
3375
3376 /* step 3: set host DMA timings */
3377 ata_link_for_each_dev(dev, link) {
3378 if (!ata_dev_enabled(dev) || !ata_dma_enabled(dev))
3379 continue;
3380
3381 dev->xfer_mode = dev->dma_mode;
3382 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3383 if (ap->ops->set_dmamode)
3384 ap->ops->set_dmamode(ap, dev);
3385 }
3386
3387 /* step 4: update devices' xfer mode */
3388 ata_link_for_each_dev(dev, link) {
3389 /* don't update suspended devices' xfer mode */
3390 if (!ata_dev_enabled(dev))
3391 continue;
3392
3393 rc = ata_dev_set_mode(dev);
3394 if (rc)
3395 goto out;
3396 }
3397
3398 /* Record simplex status. If we selected DMA then the other
3399 * host channels are not permitted to do so.
3400 */
3401 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3402 ap->host->simplex_claimed = ap;
3403
3404 out:
3405 if (rc)
3406 *r_failed_dev = dev;
3407 return rc;
3408 }
3409
3410 /**
3411 * ata_wait_ready - wait for link to become ready
3412 * @link: link to be waited on
3413 * @deadline: deadline jiffies for the operation
3414 * @check_ready: callback to check link readiness
3415 *
3416 * Wait for @link to become ready. @check_ready should return
3417 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3418 * link doesn't seem to be occupied, other errno for other error
3419 * conditions.
3420 *
3421 * Transient -ENODEV conditions are allowed for
3422 * ATA_TMOUT_FF_WAIT.
3423 *
3424 * LOCKING:
3425 * EH context.
3426 *
3427 * RETURNS:
3428 * 0 if @link is ready before @deadline; otherwise, -errno.
3429 */
3430 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3431 int (*check_ready)(struct ata_link *link))
3432 {
3433 unsigned long start = jiffies;
3434 unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3435 int warned = 0;
3436
3437 /* Slave readiness can't be tested separately from master. On
3438 * M/S emulation configuration, this function should be called
3439 * only on the master and it will handle both master and slave.
3440 */
3441 WARN_ON(link == link->ap->slave_link);
3442
3443 if (time_after(nodev_deadline, deadline))
3444 nodev_deadline = deadline;
3445
3446 while (1) {
3447 unsigned long now = jiffies;
3448 int ready, tmp;
3449
3450 ready = tmp = check_ready(link);
3451 if (ready > 0)
3452 return 0;
3453
3454 /* -ENODEV could be transient. Ignore -ENODEV if link
3455 * is online. Also, some SATA devices take a long
3456 * time to clear 0xff after reset. For example,
3457 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
3458 * GoVault needs even more than that. Wait for
3459 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3460 *
3461 * Note that some PATA controllers (pata_ali) explode
3462 * if status register is read more than once when
3463 * there's no device attached.
3464 */
3465 if (ready == -ENODEV) {
3466 if (ata_link_online(link))
3467 ready = 0;
3468 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3469 !ata_link_offline(link) &&
3470 time_before(now, nodev_deadline))
3471 ready = 0;
3472 }
3473
3474 if (ready)
3475 return ready;
3476 if (time_after(now, deadline))
3477 return -EBUSY;
3478
3479 if (!warned && time_after(now, start + 5 * HZ) &&
3480 (deadline - now > 3 * HZ)) {
3481 ata_link_printk(link, KERN_WARNING,
3482 "link is slow to respond, please be patient "
3483 "(ready=%d)\n", tmp);
3484 warned = 1;
3485 }
3486
3487 msleep(50);
3488 }
3489 }
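/*
 * Sketch of a @check_ready callback (illustrative only; foo_read_status
 * is a hypothetical stand-in for a chip-specific status read).  It
 * reports -ENODEV while the status register floats at 0xff and
 * readiness once BSY clears:
 *
 *	static int foo_check_ready(struct ata_link *link)
 *	{
 *		u8 status = foo_read_status(link->ap);
 *
 *		if (status == 0xff)
 *			return -ENODEV;
 *		return !(status & ATA_BUSY);
 *	}
 */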
3490
3491 /**
3492 * ata_wait_after_reset - wait for link to become ready after reset
3493 * @link: link to be waited on
3494 * @deadline: deadline jiffies for the operation
3495 * @check_ready: callback to check link readiness
3496 *
3497 * Wait for @link to become ready after reset.
3498 *
3499 * LOCKING:
3500 * EH context.
3501 *
3502 * RETURNS:
3503 * 0 if @link is ready before @deadline; otherwise, -errno.
3504 */
3505 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3506 int (*check_ready)(struct ata_link *link))
3507 {
3508 msleep(ATA_WAIT_AFTER_RESET);
3509
3510 return ata_wait_ready(link, deadline, check_ready);
3511 }
3512
3513 /**
3514 * sata_link_debounce - debounce SATA phy status
3515 * @link: ATA link to debounce SATA phy status for
3516 * @params: timing parameters { interval, duration, timeout } in msec
3517 * @deadline: deadline jiffies for the operation
3518 *
3519 * Make sure SStatus of @link reaches stable state, determined by
3520 * holding the same value where DET is not 1 for @duration polled
3521 * every @interval, before @timeout. @timeout constrains the
3522 * beginning of the stable state. Because DET gets stuck at 1 on
3523 * some controllers after hot unplugging, this function waits
3524 * until timeout, then returns 0 if DET is stable at 1.
3525 *
3526 * @timeout is further limited by @deadline. The sooner of the
3527 * two is used.
3528 *
3529 * LOCKING:
3530 * Kernel thread context (may sleep)
3531 *
3532 * RETURNS:
3533 * 0 on success, -errno on failure.
3534 */
3535 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3536 unsigned long deadline)
3537 {
3538 unsigned long interval = params[0];
3539 unsigned long duration = params[1];
3540 unsigned long last_jiffies, t;
3541 u32 last, cur;
3542 int rc;
3543
3544 t = ata_deadline(jiffies, params[2]);
3545 if (time_before(t, deadline))
3546 deadline = t;
3547
3548 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3549 return rc;
3550 cur &= 0xf;
3551
3552 last = cur;
3553 last_jiffies = jiffies;
3554
3555 while (1) {
3556 msleep(interval);
3557 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3558 return rc;
3559 cur &= 0xf;
3560
3561 /* DET stable? */
3562 if (cur == last) {
3563 if (cur == 1 && time_before(jiffies, deadline))
3564 continue;
3565 if (time_after(jiffies,
3566 ata_deadline(last_jiffies, duration)))
3567 return 0;
3568 continue;
3569 }
3570
3571 /* unstable, start over */
3572 last = cur;
3573 last_jiffies = jiffies;
3574
3575 /* Check deadline. If debouncing failed, return
3576 * -EPIPE to tell upper layer to lower link speed.
3577 */
3578 if (time_after(jiffies, deadline))
3579 return -EPIPE;
3580 }
3581 }
3582
3583 /**
3584 * sata_link_resume - resume SATA link
3585 * @link: ATA link to resume SATA
3586 * @params: timing parameters { interval, duration, timeout } in msec
3587 * @deadline: deadline jiffies for the operation
3588 *
3589 * Resume SATA phy @link and debounce it.
3590 *
3591 * LOCKING:
3592 * Kernel thread context (may sleep)
3593 *
3594 * RETURNS:
3595 * 0 on success, -errno on failure.
3596 */
3597 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3598 unsigned long deadline)
3599 {
3600 u32 scontrol, serror;
3601 int rc;
3602
3603 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3604 return rc;
3605
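/* Keep the SPD limit field (bits 7:4), set DET (bits 3:0) to 0 for
 * "no action" and IPM (bits 11:8) to 3 to disallow transitions to
 * the partial and slumber power-management states.
 */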
3606 scontrol = (scontrol & 0x0f0) | 0x300;
3607
3608 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3609 return rc;
3610
3611 /* Some PHYs react badly if SStatus is pounded immediately
3612 * after resuming. Delay 200ms before debouncing.
3613 */
3614 msleep(200);
3615
3616 if ((rc = sata_link_debounce(link, params, deadline)))
3617 return rc;
3618
3619 /* clear SError, some PHYs require this even for SRST to work */
3620 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3621 rc = sata_scr_write(link, SCR_ERROR, serror);
3622
3623 return rc != -EINVAL ? rc : 0;
3624 }
3625
3626 /**
3627 * ata_std_prereset - prepare for reset
3628 * @link: ATA link to be reset
3629 * @deadline: deadline jiffies for the operation
3630 *
3631 * @link is about to be reset. Initialize it. Failure from
3632 * prereset makes libata abort the whole reset sequence and give up
3633 * that port, so prereset should be best-effort. It does its
3634 * best to prepare for reset sequence but if things go wrong, it
3635 * should just whine, not fail.
3636 *
3637 * LOCKING:
3638 * Kernel thread context (may sleep)
3639 *
3640 * RETURNS:
3641 * 0 on success, -errno otherwise.
3642 */
3643 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3644 {
3645 struct ata_port *ap = link->ap;
3646 struct ata_eh_context *ehc = &link->eh_context;
3647 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3648 int rc;
3649
3650 /* if we're about to do hardreset, nothing more to do */
3651 if (ehc->i.action & ATA_EH_HARDRESET)
3652 return 0;
3653
3654 /* if SATA, resume link */
3655 if (ap->flags & ATA_FLAG_SATA) {
3656 rc = sata_link_resume(link, timing, deadline);
3657 /* whine about phy resume failure but proceed */
3658 if (rc && rc != -EOPNOTSUPP)
3659 ata_link_printk(link, KERN_WARNING, "failed to resume "
3660 "link for reset (errno=%d)\n", rc);
3661 }
3662
3663 /* no point in trying softreset on offline link */
3664 if (ata_phys_link_offline(link))
3665 ehc->i.action &= ~ATA_EH_SOFTRESET;
3666
3667 return 0;
3668 }
3669
3670 /**
3671 * sata_link_hardreset - reset link via SATA phy reset
3672 * @link: link to reset
3673 * @timing: timing parameters { interval, duration, timeout } in msec
3674 * @deadline: deadline jiffies for the operation
3675 * @online: optional out parameter indicating link onlineness
3676 * @check_ready: optional callback to check link readiness
3677 *
3678 * SATA phy-reset @link using DET bits of SControl register.
3679 * After hardreset, link readiness is waited upon using
3680 * ata_wait_ready() if @check_ready is specified. LLDs are
3681 * allowed to not specify @check_ready and wait themselves after this
3682 * function returns. Device classification is LLD's
3683 * responsibility.
3684 *
3685 * *@online is set to one iff reset succeeded and @link is online
3686 * after reset.
3687 *
3688 * LOCKING:
3689 * Kernel thread context (may sleep)
3690 *
3691 * RETURNS:
3692 * 0 on success, -errno otherwise.
3693 */
3694 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3695 unsigned long deadline,
3696 bool *online, int (*check_ready)(struct ata_link *))
3697 {
3698 u32 scontrol;
3699 int rc;
3700
3701 DPRINTK("ENTER\n");
3702
3703 if (online)
3704 *online = false;
3705
3706 if (sata_set_spd_needed(link)) {
3707 /* SATA spec says nothing about how to reconfigure
3708 * spd. To be on the safe side, turn off phy during
3709 * reconfiguration. This works for at least ICH7 AHCI
3710 * and Sil3124.
3711 */
3712 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3713 goto out;
3714
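/* DET = 4: disable the SATA interface and take the phy offline
 * while the new speed limit is being written.
 */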
3715 scontrol = (scontrol & 0x0f0) | 0x304;
3716
3717 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3718 goto out;
3719
3720 sata_set_spd(link);
3721 }
3722
3723 /* issue phy wake/reset */
3724 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3725 goto out;
3726
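/* DET = 1: perform the interface communication initialization
 * sequence, i.e. drive COMRESET on the wire.
 */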
3727 scontrol = (scontrol & 0x0f0) | 0x301;
3728
3729 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3730 goto out;
3731
3732 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3733 * 10.4.2 says at least 1 ms.
3734 */
3735 msleep(1);
3736
3737 /* bring link back */
3738 rc = sata_link_resume(link, timing, deadline);
3739 if (rc)
3740 goto out;
3741 /* if link is offline nothing more to do */
3742 if (ata_phys_link_offline(link))
3743 goto out;
3744
3745 /* Link is online. From this point, -ENODEV too is an error. */
3746 if (online)
3747 *online = true;
3748
3749 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3750 /* If PMP is supported, we have to do follow-up SRST.
3751 * Some PMPs don't send D2H Reg FIS after hardreset if
3752 * the first port is empty. Wait only for
3753 * ATA_TMOUT_PMP_SRST_WAIT.
3754 */
3755 if (check_ready) {
3756 unsigned long pmp_deadline;
3757
3758 pmp_deadline = ata_deadline(jiffies,
3759 ATA_TMOUT_PMP_SRST_WAIT);
3760 if (time_after(pmp_deadline, deadline))
3761 pmp_deadline = deadline;
3762 ata_wait_ready(link, pmp_deadline, check_ready);
3763 }
3764 rc = -EAGAIN;
3765 goto out;
3766 }
3767
3768 rc = 0;
3769 if (check_ready)
3770 rc = ata_wait_ready(link, deadline, check_ready);
3771 out:
3772 if (rc && rc != -EAGAIN) {
3773 /* online is set iff link is online && reset succeeded */
3774 if (online)
3775 *online = false;
3776 ata_link_printk(link, KERN_ERR,
3777 "COMRESET failed (errno=%d)\n", rc);
3778 }
3779 DPRINTK("EXIT, rc=%d\n", rc);
3780 return rc;
3781 }
3782
3783 /**
3784 * sata_std_hardreset - COMRESET w/o waiting or classification
3785 * @link: link to reset
3786 * @class: resulting class of attached device
3787 * @deadline: deadline jiffies for the operation
3788 *
3789 * Standard SATA COMRESET w/o waiting or classification.
3790 *
3791 * LOCKING:
3792 * Kernel thread context (may sleep)
3793 *
3794 * RETURNS:
3795 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3796 */
3797 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3798 unsigned long deadline)
3799 {
3800 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3801 bool online;
3802 int rc;
3803
3804 /* do hardreset */
3805 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3806 return online ? -EAGAIN : rc;
3807 }
3808
3809 /**
3810 * ata_std_postreset - standard postreset callback
3811 * @link: the target ata_link
3812 * @classes: classes of attached devices
3813 *
3814 * This function is invoked after a successful reset. Note that
3815 * the device might have been reset more than once using
3816 * different reset methods before postreset is invoked.
3817 *
3818 * LOCKING:
3819 * Kernel thread context (may sleep)
3820 */
3821 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3822 {
3823 u32 serror;
3824
3825 DPRINTK("ENTER\n");
3826
3827 /* reset complete, clear SError */
3828 if (!sata_scr_read(link, SCR_ERROR, &serror))
3829 sata_scr_write(link, SCR_ERROR, serror);
3830
3831 /* print link status */
3832 sata_print_link_status(link);
3833
3834 DPRINTK("EXIT\n");
3835 }
3836
3837 /**
3838 * ata_dev_same_device - Determine whether new ID matches configured device
3839 * @dev: device to compare against
3840 * @new_class: class of the new device
3841 * @new_id: IDENTIFY page of the new device
3842 *
3843 * Compare @new_class and @new_id against @dev and determine
3844 * whether @dev is the device indicated by @new_class and
3845 * @new_id.
3846 *
3847 * LOCKING:
3848 * None.
3849 *
3850 * RETURNS:
3851 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3852 */
3853 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3854 const u16 *new_id)
3855 {
3856 const u16 *old_id = dev->id;
3857 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3858 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3859
3860 if (dev->class != new_class) {
3861 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3862 dev->class, new_class);
3863 return 0;
3864 }
3865
3866 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3867 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3868 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3869 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3870
3871 if (strcmp(model[0], model[1])) {
3872 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3873 "'%s' != '%s'\n", model[0], model[1]);
3874 return 0;
3875 }
3876
3877 if (strcmp(serial[0], serial[1])) {
3878 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3879 "'%s' != '%s'\n", serial[0], serial[1]);
3880 return 0;
3881 }
3882
3883 return 1;
3884 }
3885
3886 /**
3887 * ata_dev_reread_id - Re-read IDENTIFY data
3888 * @dev: target ATA device
3889 * @readid_flags: read ID flags
3890 *
3891 * Re-read IDENTIFY page and make sure @dev is still attached to
3892 * the port.
3893 *
3894 * LOCKING:
3895 * Kernel thread context (may sleep)
3896 *
3897 * RETURNS:
3898 * 0 on success, negative errno otherwise
3899 */
3900 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3901 {
3902 unsigned int class = dev->class;
3903 u16 *id = (void *)dev->link->ap->sector_buf;
3904 int rc;
3905
3906 /* read ID data */
3907 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3908 if (rc)
3909 return rc;
3910
3911 /* is the device still there? */
3912 if (!ata_dev_same_device(dev, class, id))
3913 return -ENODEV;
3914
3915 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3916 return 0;
3917 }
3918
3919 /**
3920 * ata_dev_revalidate - Revalidate ATA device
3921 * @dev: device to revalidate
3922 * @new_class: new class code
3923 * @readid_flags: read ID flags
3924 *
3925 * Re-read IDENTIFY page, make sure @dev is still attached to the
3926 * port and reconfigure it according to the new IDENTIFY page.
3927 *
3928 * LOCKING:
3929 * Kernel thread context (may sleep)
3930 *
3931 * RETURNS:
3932 * 0 on success, negative errno otherwise
3933 */
3934 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3935 unsigned int readid_flags)
3936 {
3937 u64 n_sectors = dev->n_sectors;
3938 int rc;
3939
3940 if (!ata_dev_enabled(dev))
3941 return -ENODEV;
3942
3943 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3944 if (ata_class_enabled(new_class) &&
3945 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3946 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3947 dev->class, new_class);
3948 rc = -ENODEV;
3949 goto fail;
3950 }
3951
3952 /* re-read ID */
3953 rc = ata_dev_reread_id(dev, readid_flags);
3954 if (rc)
3955 goto fail;
3956
3957 /* configure device according to the new ID */
3958 rc = ata_dev_configure(dev);
3959 if (rc)
3960 goto fail;
3961
3962 /* verify n_sectors hasn't changed */
3963 if (dev->class == ATA_DEV_ATA && n_sectors &&
3964 dev->n_sectors != n_sectors) {
3965 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3966 "%llu != %llu\n",
3967 (unsigned long long)n_sectors,
3968 (unsigned long long)dev->n_sectors);
3969
3970 /* restore original n_sectors */
3971 dev->n_sectors = n_sectors;
3972
3973 rc = -ENODEV;
3974 goto fail;
3975 }
3976
3977 return 0;
3978
3979 fail:
3980 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3981 return rc;
3982 }
3983
3984 struct ata_blacklist_entry {
3985 const char *model_num;
3986 const char *model_rev;
3987 unsigned long horkage;
3988 };
3989
3990 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3991 /* Devices with DMA related problems under Linux */
3992 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3993 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3994 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3995 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3996 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3997 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3998 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3999 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4000 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4001 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
4002 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
4003 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4004 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4005 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4006 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4007 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4008 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
4009 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
4010 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4011 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4012 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4013 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4014 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4015 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4016 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4017 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4018 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4019 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4020 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4021 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4022 /* Odd clown on sil3726/4726 PMPs */
4023 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4024
4025 /* Weird ATAPI devices */
4026 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4027 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4028
4029 /* Devices we expect to fail diagnostics */
4030
4031 /* Devices where NCQ should be avoided */
4032 /* NCQ is slow */
4033 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4034 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4035 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4036 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4037 /* NCQ is broken */
4038 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4039 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4040 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4041 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4042
4043 /* Blacklist entries taken from Silicon Image 3124/3132
4044 Windows driver .inf file - also several Linux problem reports */
4045 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4046 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4047 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4048
4049 /* devices which puke on READ_NATIVE_MAX */
4050 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4051 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4052 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4053 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4054
4055 /* Devices which report 1 sector over size HPA */
4056 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4057 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4058 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
4059
4060 /* Devices which get the IVB wrong */
4061 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4062 /* Maybe we should just blacklist TSSTcorp... */
4063 { "TSSTcorp CDDVDW SH-S202H", "SB00", ATA_HORKAGE_IVB, },
4064 { "TSSTcorp CDDVDW SH-S202H", "SB01", ATA_HORKAGE_IVB, },
4065 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
4066 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
4067 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
4068 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
4069
4070 /* Devices that do not need bridging limits applied */
4071 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4072
4073 /* End Marker */
4074 { }
4075 };
4076
4077 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4078 {
4079 const char *p;
4080 int len;
4081
4082 /*
4083 * check for trailing wildcard: *\0
4084 */
4085 p = strchr(patt, wildchar);
4086 if (p && ((*(p + 1)) == 0))
4087 len = p - patt;
4088 else {
4089 len = strlen(name);
4090 if (!len) {
4091 if (!*patt)
4092 return 0;
4093 return -1;
4094 }
4095 }
4096
4097 return strncmp(patt, name, len);
4098 }
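
/*
 * Illustrative sketch (kept out of the build): how the trailing
 * wildcard handling above behaves against blacklist entries. Only a
 * '*' in the final position acts as a wildcard, limiting the compare
 * to the characters before it; otherwise strncmp() runs over
 * strlen(name) characters.
 */
#if 0
	int matched;

	/* "Maxtor *" matches any model number starting with "Maxtor " */
	matched = !strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*'); /* 1 */
	/* no trailing '*': compare runs over strlen(name) characters */
	matched = !strn_pattern_cmp("ST380817AS", "ST3160023AS", '*');  /* 0 */
#endif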
4099
4100 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4101 {
4102 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4103 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4104 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4105
4106 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4107 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4108
4109 while (ad->model_num) {
4110 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
4111 if (ad->model_rev == NULL)
4112 return ad->horkage;
4113 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4114 return ad->horkage;
4115 }
4116 ad++;
4117 }
4118 return 0;
4119 }
4120
4121 static int ata_dma_blacklisted(const struct ata_device *dev)
4122 {
4123 /* We don't support polling DMA.
4124 * Blacklist DMA for those ATAPI devices with CDB-intr (and use PIO)
4125 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4126 */
4127 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4128 (dev->flags & ATA_DFLAG_CDB_INTR))
4129 return 1;
4130 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4131 }
4132
4133 /**
4134 * ata_is_40wire - check drive side detection
4135 * @dev: device
4136 *
4137 * Perform drive side detection decoding, allowing for device vendors
4138 * who can't follow the documentation.
4139 */
4140
4141 static int ata_is_40wire(struct ata_device *dev)
4142 {
4143 if (dev->horkage & ATA_HORKAGE_IVB)
4144 return ata_drive_40wire_relaxed(dev->id);
4145 return ata_drive_40wire(dev->id);
4146 }
4147
4148 /**
4149 * cable_is_40wire - 40/80/SATA decider
4150 * @ap: port to consider
4151 *
4152 * This function encapsulates the policy for speed management
4153 * in one place. At the moment we don't cache the result but
4154 * there is a good case for setting ap->cbl to the result when
4155 * we are called with unknown cables (and figuring out if it
4156 * impacts hotplug at all).
4157 *
4158 * Return 1 if the cable appears to be 40 wire.
4159 */
4160
4161 static int cable_is_40wire(struct ata_port *ap)
4162 {
4163 struct ata_link *link;
4164 struct ata_device *dev;
4165
4166 /* If the controller thinks we are 40 wire, we are. */
4167 if (ap->cbl == ATA_CBL_PATA40)
4168 return 1;
4169
4170 /* If the controller thinks we are 80 wire, we are. */
4171 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4172 return 0;
4173
4174 /* If the system is known to be 40 wire short cable (e.g. a
4175 * laptop), then we allow 80 wire modes even if the drive
4176 * isn't sure.
4177 */
4178 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4179 return 0;
4180
4181 /* If the controller doesn't know, we scan.
4182 *
4183 * Note: We look for all 40 wire detects at this point. Any
4184 * 80 wire detect is taken to be 80 wire cable because
4185 * - in many setups only the one drive (slave if present) will
4186 * give a valid detect
4187 * - if you have a non-detect-capable drive you don't want it
4188 * to colour the choice
4189 */
4190 ata_port_for_each_link(link, ap) {
4191 ata_link_for_each_dev(dev, link) {
4192 if (ata_dev_enabled(dev) && !ata_is_40wire(dev))
4193 return 0;
4194 }
4195 }
4196 return 1;
4197 }
4198
4199 /**
4200 * ata_dev_xfermask - Compute supported xfermask of the given device
4201 * @dev: Device to compute xfermask for
4202 *
4203 * Compute supported xfermask of @dev and store it in
4204 * dev->*_mask. This function is responsible for applying all
4205 * known limits including host controller limits, device
4206 * blacklist, etc...
4207 *
4208 * LOCKING:
4209 * None.
4210 */
4211 static void ata_dev_xfermask(struct ata_device *dev)
4212 {
4213 struct ata_link *link = dev->link;
4214 struct ata_port *ap = link->ap;
4215 struct ata_host *host = ap->host;
4216 unsigned long xfer_mask;
4217
4218 /* controller modes available */
4219 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4220 ap->mwdma_mask, ap->udma_mask);
4221
4222 /* drive modes available */
4223 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4224 dev->mwdma_mask, dev->udma_mask);
4225 xfer_mask &= ata_id_xfermask(dev->id);
4226
4227 /*
4228 * CFA Advanced TrueIDE timings are not allowed on a shared
4229 * cable
4230 */
4231 if (ata_dev_pair(dev)) {
4232 /* No PIO5 or PIO6 */
4233 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4234 /* No MWDMA3 or MWDMA4 */
4235 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4236 }
4237
4238 if (ata_dma_blacklisted(dev)) {
4239 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4240 ata_dev_printk(dev, KERN_WARNING,
4241 "device is on DMA blacklist, disabling DMA\n");
4242 }
4243
4244 if ((host->flags & ATA_HOST_SIMPLEX) &&
4245 host->simplex_claimed && host->simplex_claimed != ap) {
4246 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4247 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4248 "other device, disabling DMA\n");
4249 }
4250
4251 if (ap->flags & ATA_FLAG_NO_IORDY)
4252 xfer_mask &= ata_pio_mask_no_iordy(dev);
4253
4254 if (ap->ops->mode_filter)
4255 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4256
4257 /* Apply cable rule here. Don't apply it early because when
4258 * we handle hot plug the cable type can itself change.
4259 * Check this last so that we know if the transfer rate was
4260 * solely limited by the cable.
4261 * Unknown or 80 wire cables reported host side are checked
4262 * drive side as well. Cases where we know a 40-wire cable
4263 * is used safely for 80 are not checked here.
4264 */
4265 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4266 /* UDMA/44 or higher would be available */
4267 if (cable_is_40wire(ap)) {
4268 ata_dev_printk(dev, KERN_WARNING,
4269 "limited to UDMA/33 due to 40-wire cable\n");
4270 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4271 }
4272
4273 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4274 &dev->mwdma_mask, &dev->udma_mask);
4275 }
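
/*
 * A minimal worked example of the mask arithmetic used above. In the
 * packed xfermask each UDMA mode occupies one bit starting at
 * ATA_SHIFT_UDMA: bit 0 is UDMA0, bit 1 UDMA1, ... bit 7 UDMA7.
 * 0xF8 (11111000b) therefore selects UDMA3-UDMA7, i.e. UDMA/44 and
 * faster - exactly the modes that are unsafe on a 40-wire cable.
 */
#if 0
	unsigned long xfer_mask = ata_id_xfermask(dev->id);
	unsigned long udma_over_33 = 0xF8 << ATA_SHIFT_UDMA;

	if (xfer_mask & udma_over_33)		/* anything above UDMA/33? */
		xfer_mask &= ~udma_over_33;	/* cap the device at UDMA2 */
#endif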
4276
4277 /**
4278 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4279 * @dev: Device to which command will be sent
4280 *
4281 * Issue SET FEATURES - XFER MODE command to device @dev.
4283 *
4284 * LOCKING:
4285 * PCI/etc. bus probe sem.
4286 *
4287 * RETURNS:
4288 * 0 on success, AC_ERR_* mask otherwise.
4289 */
4290
4291 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4292 {
4293 struct ata_taskfile tf;
4294 unsigned int err_mask;
4295
4296 /* set up set-features taskfile */
4297 DPRINTK("set features - xfer mode\n");
4298
4299 /* Some controllers and ATAPI devices show flaky interrupt
4300 * behavior after setting xfer mode. Use polling instead.
4301 */
4302 ata_tf_init(dev, &tf);
4303 tf.command = ATA_CMD_SET_FEATURES;
4304 tf.feature = SETFEATURES_XFER;
4305 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4306 tf.protocol = ATA_PROT_NODATA;
4307 /* If we are using IORDY we must send the mode setting command */
4308 if (ata_pio_need_iordy(dev))
4309 tf.nsect = dev->xfer_mode;
4310 /* If the device has IORDY and the controller does not - turn it off */
4311 else if (ata_id_has_iordy(dev->id))
4312 tf.nsect = 0x01;
4313 else /* In the ancient relic department - skip all of this */
4314 return 0;
4315
4316 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4317
4318 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4319 return err_mask;
4320 }
4321 /**
4322 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4323 * @dev: Device to which command will be sent
4324 * @enable: Whether to enable or disable the feature
4325 * @feature: The feature to set, passed in the sector count field
4326 *
4327 * Issue SET FEATURES - SATA FEATURES command to device @dev
4328 * with @feature in the sector count field.
4329 *
4330 * LOCKING:
4331 * PCI/etc. bus probe sem.
4332 *
4333 * RETURNS:
4334 * 0 on success, AC_ERR_* mask otherwise.
4335 */
4336 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4337 u8 feature)
4338 {
4339 struct ata_taskfile tf;
4340 unsigned int err_mask;
4341
4342 /* set up set-features taskfile */
4343 DPRINTK("set features - SATA features\n");
4344
4345 ata_tf_init(dev, &tf);
4346 tf.command = ATA_CMD_SET_FEATURES;
4347 tf.feature = enable;
4348 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4349 tf.protocol = ATA_PROT_NODATA;
4350 tf.nsect = feature;
4351
4352 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4353
4354 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4355 return err_mask;
4356 }
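
/*
 * Usage sketch, modelled loosely on how libata enables SATA
 * Asynchronous Notification during device configuration; see
 * ata_dev_configure() for the real call site and error handling.
 */
#if 0
	unsigned int err_mask;

	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
				       SATA_AN);
	if (err_mask)
		ata_dev_printk(dev, KERN_ERR, "failed to enable AN "
			       "(err_mask=0x%x)\n", err_mask);
#endif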
4357
4358 /**
4359 * ata_dev_init_params - Issue INIT DEV PARAMS command
4360 * @dev: Device to which command will be sent
4361 * @heads: Number of heads (taskfile parameter)
4362 * @sectors: Number of sectors (taskfile parameter)
4363 *
4364 * LOCKING:
4365 * Kernel thread context (may sleep)
4366 *
4367 * RETURNS:
4368 * 0 on success, AC_ERR_* mask otherwise.
4369 */
4370 static unsigned int ata_dev_init_params(struct ata_device *dev,
4371 u16 heads, u16 sectors)
4372 {
4373 struct ata_taskfile tf;
4374 unsigned int err_mask;
4375
4376 /* Number of sectors per track 1-255. Number of heads 1-16 */
4377 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4378 return AC_ERR_INVALID;
4379
4380 /* set up init dev params taskfile */
4381 DPRINTK("init dev params\n");
4382
4383 ata_tf_init(dev, &tf);
4384 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4385 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4386 tf.protocol = ATA_PROT_NODATA;
4387 tf.nsect = sectors;
4388 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4389
4390 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4391 /* A clean abort indicates an original or just-out-of-spec drive
4392 and we should continue, as we issue the setup based on the
4393 drive's reported working geometry */
4394 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4395 err_mask = 0;
4396
4397 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4398 return err_mask;
4399 }
4400
4401 /**
4402 * ata_sg_clean - Unmap DMA memory associated with command
4403 * @qc: Command containing DMA memory to be released
4404 *
4405 * Unmap all mapped DMA memory associated with this command.
4406 *
4407 * LOCKING:
4408 * spin_lock_irqsave(host lock)
4409 */
4410 void ata_sg_clean(struct ata_queued_cmd *qc)
4411 {
4412 struct ata_port *ap = qc->ap;
4413 struct scatterlist *sg = qc->sg;
4414 int dir = qc->dma_dir;
4415
4416 WARN_ON(sg == NULL);
4417
4418 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4419
4420 if (qc->n_elem)
4421 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4422
4423 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4424 qc->sg = NULL;
4425 }
4426
4427 /**
4428 * atapi_check_dma - Check whether ATAPI DMA can be supported
4429 * @qc: Metadata associated with taskfile to check
4430 *
4431 * Allow low-level driver to filter ATA PACKET commands, returning
4432 * a status indicating whether or not it is OK to use DMA for the
4433 * supplied PACKET command.
4434 *
4435 * LOCKING:
4436 * spin_lock_irqsave(host lock)
4437 *
4438 * RETURNS: 0 when ATAPI DMA can be used
4439 * nonzero otherwise
4440 */
4441 int atapi_check_dma(struct ata_queued_cmd *qc)
4442 {
4443 struct ata_port *ap = qc->ap;
4444
4445 /* Don't allow DMA if the transfer isn't a multiple of 16 bytes.
4446 * Quite a few ATAPI devices choke on such DMA requests.
4447 */
4448 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4449 unlikely(qc->nbytes & 15))
4450 return 1;
4451
4452 if (ap->ops->check_atapi_dma)
4453 return ap->ops->check_atapi_dma(qc);
4454
4455 return 0;
4456 }
4457
4458 /**
4459 * ata_std_qc_defer - Check whether a qc needs to be deferred
4460 * @qc: ATA command in question
4461 *
4462 * Non-NCQ commands cannot run with any other command, NCQ or
4463 * not. As the upper layer only knows the queue depth, we are
4464 * responsible for maintaining exclusion. This function checks
4465 * whether a new command @qc can be issued.
4466 *
4467 * LOCKING:
4468 * spin_lock_irqsave(host lock)
4469 *
4470 * RETURNS:
4471 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4472 */
4473 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4474 {
4475 struct ata_link *link = qc->dev->link;
4476
4477 if (qc->tf.protocol == ATA_PROT_NCQ) {
4478 if (!ata_tag_valid(link->active_tag))
4479 return 0;
4480 } else {
4481 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4482 return 0;
4483 }
4484
4485 return ATA_DEFER_LINK;
4486 }
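
/*
 * A worked illustration of the exclusion rule implemented above,
 * assuming a single link:
 *
 *	in flight              new qc       verdict
 *	--------------------   ----------   ------------------------
 *	nothing                NCQ or not   issue (0)
 *	NCQ cmds (sactive)     NCQ          issue (0) - may queue
 *	NCQ cmds (sactive)     non-NCQ      defer (ATA_DEFER_LINK)
 *	non-NCQ (active_tag)   anything     defer (ATA_DEFER_LINK)
 */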
4487
4488 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4489
4490 /**
4491 * ata_sg_init - Associate command with scatter-gather table.
4492 * @qc: Command to be associated
4493 * @sg: Scatter-gather table.
4494 * @n_elem: Number of elements in s/g table.
4495 *
4496 * Initialize the data-related elements of queued_cmd @qc
4497 * to point to a scatter-gather table @sg, containing @n_elem
4498 * elements.
4499 *
4500 * LOCKING:
4501 * spin_lock_irqsave(host lock)
4502 */
4503 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4504 unsigned int n_elem)
4505 {
4506 qc->sg = sg;
4507 qc->n_elem = n_elem;
4508 qc->cursg = qc->sg;
4509 }
4510
4511 /**
4512 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4513 * @qc: Command with scatter-gather table to be mapped.
4514 *
4515 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4516 *
4517 * LOCKING:
4518 * spin_lock_irqsave(host lock)
4519 *
4520 * RETURNS:
4521 * Zero on success, negative on error.
4522 *
4523 */
4524 static int ata_sg_setup(struct ata_queued_cmd *qc)
4525 {
4526 struct ata_port *ap = qc->ap;
4527 unsigned int n_elem;
4528
4529 VPRINTK("ENTER, ata%u\n", ap->print_id);
4530
4531 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4532 if (n_elem < 1)
4533 return -1;
4534
4535 DPRINTK("%d sg elements mapped\n", n_elem);
4536
4537 qc->n_elem = n_elem;
4538 qc->flags |= ATA_QCFLAG_DMAMAP;
4539
4540 return 0;
4541 }
4542
4543 /**
4544 * swap_buf_le16 - swap halves of 16-bit words in place
4545 * @buf: Buffer to swap
4546 * @buf_words: Number of 16-bit words in buffer.
4547 *
4548 * Swap halves of 16-bit words if needed to convert from
4549 * little-endian byte order to native cpu byte order, or
4550 * vice-versa.
4551 *
4552 * LOCKING:
4553 * Inherited from caller.
4554 */
4555 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4556 {
4557 #ifdef __BIG_ENDIAN
4558 unsigned int i;
4559
4560 for (i = 0; i < buf_words; i++)
4561 buf[i] = le16_to_cpu(buf[i]);
4562 #endif /* __BIG_ENDIAN */
4563 }
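
/*
 * Worked example: IDENTIFY data is defined as little-endian 16-bit
 * words, so the word value 0x3039 arrives as the bytes 0x39 0x30.
 * Read as a native u16 on a big-endian CPU that is 0x3930 and must
 * be swapped; on little-endian CPUs the loop above compiles away.
 * A hypothetical caller would do:
 */
#if 0
	u16 id[ATA_ID_WORDS];

	/* ... transfer raw IDENTIFY data into id[] ... */
	swap_buf_le16(id, ATA_ID_WORDS);	/* no-op on little-endian */
#endif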
4564
4565 /**
4566 * ata_qc_new_init - Request an available ATA command, and initialize it
4567 * @dev: Device from whom we request an available command structure
4568 * @tag: command tag
4569 *
4570 * LOCKING:
4571 * None.
4572 */
4573
4574 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
4575 {
4576 struct ata_port *ap = dev->link->ap;
4577 struct ata_queued_cmd *qc;
4578
4579 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4580 return NULL;
4581
4582 qc = __ata_qc_from_tag(ap, tag);
4583 if (qc) {
4584 qc->scsicmd = NULL;
4585 qc->ap = ap;
4586 qc->dev = dev;
4587 qc->tag = tag;
4588
4589 ata_qc_reinit(qc);
4590 }
4591
4592 return qc;
4593 }
4594
4595 void __ata_qc_complete(struct ata_queued_cmd *qc)
4596 {
4597 struct ata_port *ap = qc->ap;
4598 struct ata_link *link = qc->dev->link;
4599
4600 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4601 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4602
4603 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4604 ata_sg_clean(qc);
4605
4606 /* command should be marked inactive atomically with qc completion */
4607 if (qc->tf.protocol == ATA_PROT_NCQ) {
4608 link->sactive &= ~(1 << qc->tag);
4609 if (!link->sactive)
4610 ap->nr_active_links--;
4611 } else {
4612 link->active_tag = ATA_TAG_POISON;
4613 ap->nr_active_links--;
4614 }
4615
4616 /* clear exclusive status */
4617 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4618 ap->excl_link == link))
4619 ap->excl_link = NULL;
4620
4621 /* atapi: mark qc as inactive to prevent the interrupt handler
4622 * from completing the command twice later, before the error handler
4623 * is called. (when rc != 0 and atapi request sense is needed)
4624 */
4625 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4626 ap->qc_active &= ~(1 << qc->tag);
4627
4628 /* call completion callback */
4629 qc->complete_fn(qc);
4630 }
4631
4632 static void fill_result_tf(struct ata_queued_cmd *qc)
4633 {
4634 struct ata_port *ap = qc->ap;
4635
4636 qc->result_tf.flags = qc->tf.flags;
4637 ap->ops->qc_fill_rtf(qc);
4638 }
4639
4640 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4641 {
4642 struct ata_device *dev = qc->dev;
4643
4644 if (ata_tag_internal(qc->tag))
4645 return;
4646
4647 if (ata_is_nodata(qc->tf.protocol))
4648 return;
4649
4650 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4651 return;
4652
4653 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4654 }
4655
4656 /**
4657 * ata_qc_complete - Complete an active ATA command
4658 * @qc: Command to complete
4659 *
4660 * Indicate to the mid and upper layers that an ATA
4661 * command has completed, with either an ok or not-ok status.
4662 *
4663 * LOCKING:
4664 * spin_lock_irqsave(host lock)
4665 */
4666 void ata_qc_complete(struct ata_queued_cmd *qc)
4667 {
4668 struct ata_port *ap = qc->ap;
4669
4670 /* XXX: New EH and old EH use different mechanisms to
4671 * synchronize EH with regular execution path.
4672 *
4673 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4674 * Normal execution path is responsible for not accessing a
4675 * failed qc. libata core enforces the rule by returning NULL
4676 * from ata_qc_from_tag() for failed qcs.
4677 *
4678 * Old EH depends on ata_qc_complete() nullifying completion
4679 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4680 * not synchronize with interrupt handler. Only PIO task is
4681 * taken care of.
4682 */
4683 if (ap->ops->error_handler) {
4684 struct ata_device *dev = qc->dev;
4685 struct ata_eh_info *ehi = &dev->link->eh_info;
4686
4687 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4688
4689 if (unlikely(qc->err_mask))
4690 qc->flags |= ATA_QCFLAG_FAILED;
4691
4692 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4693 if (!ata_tag_internal(qc->tag)) {
4694 /* always fill result TF for failed qc */
4695 fill_result_tf(qc);
4696 ata_qc_schedule_eh(qc);
4697 return;
4698 }
4699 }
4700
4701 /* read result TF if requested */
4702 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4703 fill_result_tf(qc);
4704
4705 /* Some commands need post-processing after successful
4706 * completion.
4707 */
4708 switch (qc->tf.command) {
4709 case ATA_CMD_SET_FEATURES:
4710 if (qc->tf.feature != SETFEATURES_WC_ON &&
4711 qc->tf.feature != SETFEATURES_WC_OFF)
4712 break;
4713 /* fall through */
4714 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4715 case ATA_CMD_SET_MULTI: /* multi_count changed */
4716 /* revalidate device */
4717 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4718 ata_port_schedule_eh(ap);
4719 break;
4720
4721 case ATA_CMD_SLEEP:
4722 dev->flags |= ATA_DFLAG_SLEEPING;
4723 break;
4724 }
4725
4726 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4727 ata_verify_xfer(qc);
4728
4729 __ata_qc_complete(qc);
4730 } else {
4731 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4732 return;
4733
4734 /* read result TF if failed or requested */
4735 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4736 fill_result_tf(qc);
4737
4738 __ata_qc_complete(qc);
4739 }
4740 }
4741
4742 /**
4743 * ata_qc_complete_multiple - Complete multiple qcs successfully
4744 * @ap: port in question
4745 * @qc_active: new qc_active mask
4746 *
4747 * Complete in-flight commands. This function is meant to be
4748 * called from the low-level driver's interrupt routine to complete
4749 * requests normally. ap->qc_active and @qc_active are compared
4750 * and commands are completed accordingly.
4751 *
4752 * LOCKING:
4753 * spin_lock_irqsave(host lock)
4754 *
4755 * RETURNS:
4756 * Number of completed commands on success, -errno otherwise.
4757 */
4758 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4759 {
4760 int nr_done = 0;
4761 u32 done_mask;
4762 int i;
4763
4764 done_mask = ap->qc_active ^ qc_active;
4765
4766 if (unlikely(done_mask & qc_active)) {
4767 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4768 "(%08x->%08x)\n", ap->qc_active, qc_active);
4769 return -EINVAL;
4770 }
4771
4772 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4773 struct ata_queued_cmd *qc;
4774
4775 if (!(done_mask & (1 << i)))
4776 continue;
4777
4778 if ((qc = ata_qc_from_tag(ap, i))) {
4779 ata_qc_complete(qc);
4780 nr_done++;
4781 }
4782 }
4783
4784 return nr_done;
4785 }
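
/*
 * Usage sketch for an NCQ-capable LLD's interrupt path (hedged;
 * MY_PORT_ACTIVE_TAGS is a hypothetical register, and ap/port_mmio
 * come from the LLD's interrupt context). The controller reports
 * which tags are still busy and this helper completes every qc whose
 * tag has dropped out of that mask.
 */
#if 0
	u32 qc_active;
	int nr_done;

	qc_active = readl(port_mmio + MY_PORT_ACTIVE_TAGS);

	nr_done = ata_qc_complete_multiple(ap, qc_active);
	if (nr_done < 0)
		/* illegal transition - freeze and let EH recover */
		ata_port_freeze(ap);
#endif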
4786
4787 /**
4788 * ata_qc_issue - issue taskfile to device
4789 * @qc: command to issue to device
4790 *
4791 * Prepare an ATA command for submission to the device.
4792 * This includes mapping the data into a DMA-able
4793 * area, filling in the S/G table, and finally
4794 * writing the taskfile to hardware, starting the command.
4795 *
4796 * LOCKING:
4797 * spin_lock_irqsave(host lock)
4798 */
4799 void ata_qc_issue(struct ata_queued_cmd *qc)
4800 {
4801 struct ata_port *ap = qc->ap;
4802 struct ata_link *link = qc->dev->link;
4803 u8 prot = qc->tf.protocol;
4804
4805 /* Make sure only one non-NCQ command is outstanding. The
4806 * check is skipped for old EH because it reuses active qc to
4807 * request ATAPI sense.
4808 */
4809 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4810
4811 if (ata_is_ncq(prot)) {
4812 WARN_ON(link->sactive & (1 << qc->tag));
4813
4814 if (!link->sactive)
4815 ap->nr_active_links++;
4816 link->sactive |= 1 << qc->tag;
4817 } else {
4818 WARN_ON(link->sactive);
4819
4820 ap->nr_active_links++;
4821 link->active_tag = qc->tag;
4822 }
4823
4824 qc->flags |= ATA_QCFLAG_ACTIVE;
4825 ap->qc_active |= 1 << qc->tag;
4826
4827 /* We guarantee to LLDs that they will have at least one
4828 * non-zero sg if the command is a data command.
4829 */
4830 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
4831
4832 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
4833 (ap->flags & ATA_FLAG_PIO_DMA)))
4834 if (ata_sg_setup(qc))
4835 goto sg_err;
4836
4837 /* if device is sleeping, schedule reset and abort the link */
4838 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
4839 link->eh_info.action |= ATA_EH_RESET;
4840 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
4841 ata_link_abort(link);
4842 return;
4843 }
4844
4845 ap->ops->qc_prep(qc);
4846
4847 qc->err_mask |= ap->ops->qc_issue(qc);
4848 if (unlikely(qc->err_mask))
4849 goto err;
4850 return;
4851
4852 sg_err:
4853 qc->err_mask |= AC_ERR_SYSTEM;
4854 err:
4855 ata_qc_complete(qc);
4856 }
4857
4858 /**
4859 * sata_scr_valid - test whether SCRs are accessible
4860 * @link: ATA link to test SCR accessibility for
4861 *
4862 * Test whether SCRs are accessible for @link.
4863 *
4864 * LOCKING:
4865 * None.
4866 *
4867 * RETURNS:
4868 * 1 if SCRs are accessible, 0 otherwise.
4869 */
4870 int sata_scr_valid(struct ata_link *link)
4871 {
4872 struct ata_port *ap = link->ap;
4873
4874 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
4875 }
4876
4877 /**
4878 * sata_scr_read - read SCR register of the specified port
4879 * @link: ATA link to read SCR for
4880 * @reg: SCR to read
4881 * @val: Place to store read value
4882 *
4883 * Read SCR register @reg of @link into *@val. This function is
4884 * guaranteed to succeed if @link is ap->link, the cable type of
4885 * the port is SATA and the port implements ->scr_read.
4886 *
4887 * LOCKING:
4888 * None if @link is ap->link. Kernel thread context otherwise.
4889 *
4890 * RETURNS:
4891 * 0 on success, negative errno on failure.
4892 */
4893 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
4894 {
4895 if (ata_is_host_link(link)) {
4896 if (sata_scr_valid(link))
4897 return link->ap->ops->scr_read(link, reg, val);
4898 return -EOPNOTSUPP;
4899 }
4900
4901 return sata_pmp_scr_read(link, reg, val);
4902 }
4903
4904 /**
4905 * sata_scr_write - write SCR register of the specified port
4906 * @link: ATA link to write SCR for
4907 * @reg: SCR to write
4908 * @val: value to write
4909 *
4910 * Write @val to SCR register @reg of @link. This function is
4911 * guaranteed to succeed if @link is ap->link, the cable type of
4912 * the port is SATA and the port implements ->scr_write.
4913 *
4914 * LOCKING:
4915 * None if @link is ap->link. Kernel thread context otherwise.
4916 *
4917 * RETURNS:
4918 * 0 on success, negative errno on failure.
4919 */
4920 int sata_scr_write(struct ata_link *link, int reg, u32 val)
4921 {
4922 if (ata_is_host_link(link)) {
4923 if (sata_scr_valid(link))
4924 return link->ap->ops->scr_write(link, reg, val);
4925 return -EOPNOTSUPP;
4926 }
4927
4928 return sata_pmp_scr_write(link, reg, val);
4929 }
4930
4931 /**
4932 * sata_scr_write_flush - write SCR register of the specified port and flush
4933 * @link: ATA link to write SCR for
4934 * @reg: SCR to write
4935 * @val: value to write
4936 *
4937 * This function is identical to sata_scr_write() except that this
4938 * function performs a flush after writing to the register.
4939 *
4940 * LOCKING:
4941 * None if @link is ap->link. Kernel thread context otherwise.
4942 *
4943 * RETURNS:
4944 * 0 on success, negative errno on failure.
4945 */
4946 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
4947 {
4948 if (ata_is_host_link(link)) {
4949 int rc;
4950
4951 if (sata_scr_valid(link)) {
4952 rc = link->ap->ops->scr_write(link, reg, val);
4953 if (rc == 0)
4954 rc = link->ap->ops->scr_read(link, reg, &val);
4955 return rc;
4956 }
4957 return -EOPNOTSUPP;
4958 }
4959
4960 return sata_pmp_scr_write(link, reg, val);
4961 }
4962
4963 /**
4964 * ata_phys_link_online - test whether the given link is online
4965 * @link: ATA link to test
4966 *
4967 * Test whether @link is online. Note that this function returns
4968 * 0 if online status of @link cannot be obtained, so
4969 * ata_link_online(link) != !ata_link_offline(link).
4970 *
4971 * LOCKING:
4972 * None.
4973 *
4974 * RETURNS:
4975 * True if the port online status is available and online.
4976 */
4977 bool ata_phys_link_online(struct ata_link *link)
4978 {
4979 u32 sstatus;
4980
4981 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4982 (sstatus & 0xf) == 0x3)
4983 return true;
4984 return false;
4985 }
4986
4987 /**
4988 * ata_phys_link_offline - test whether the given link is offline
4989 * @link: ATA link to test
4990 *
4991 * Test whether @link is offline. Note that this function
4992 * returns 0 if offline status of @link cannot be obtained, so
4993 * ata_link_online(link) != !ata_link_offline(link).
4994 *
4995 * LOCKING:
4996 * None.
4997 *
4998 * RETURNS:
4999 * True if the port offline status is available and offline.
5000 */
5001 bool ata_phys_link_offline(struct ata_link *link)
5002 {
5003 u32 sstatus;
5004
5005 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5006 (sstatus & 0xf) != 0x3)
5007 return true;
5008 return false;
5009 }
5010
5011 /**
5012 * ata_link_online - test whether the given link is online
5013 * @link: ATA link to test
5014 *
5015 * Test whether @link is online. This is identical to
5016 * ata_phys_link_online() when there's no slave link. When
5017 * there's a slave link, this function should only be called on
5018 * the master link and will return true if any of M/S links is
5019 * online.
5020 *
5021 * LOCKING:
5022 * None.
5023 *
5024 * RETURNS:
5025 * True if the port online status is available and online.
5026 */
5027 bool ata_link_online(struct ata_link *link)
5028 {
5029 struct ata_link *slave = link->ap->slave_link;
5030
5031 WARN_ON(link == slave); /* shouldn't be called on slave link */
5032
5033 return ata_phys_link_online(link) ||
5034 (slave && ata_phys_link_online(slave));
5035 }
5036
5037 /**
5038 * ata_link_offline - test whether the given link is offline
5039 * @link: ATA link to test
5040 *
5041 * Test whether @link is offline. This is identical to
5042 * ata_phys_link_offline() when there's no slave link. When
5043 * there's a slave link, this function should only be called on
5044 * the master link and will return true if both M/S links are
5045 * offline.
5046 *
5047 * LOCKING:
5048 * None.
5049 *
5050 * RETURNS:
5051 * True if the port offline status is available and offline.
5052 */
5053 bool ata_link_offline(struct ata_link *link)
5054 {
5055 struct ata_link *slave = link->ap->slave_link;
5056
5057 WARN_ON(link == slave); /* shouldn't be called on slave link */
5058
5059 return ata_phys_link_offline(link) &&
5060 (!slave || ata_phys_link_offline(slave));
5061 }
5062
5063 #ifdef CONFIG_PM
5064 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5065 unsigned int action, unsigned int ehi_flags,
5066 int wait)
5067 {
5068 unsigned long flags;
5069 int i, rc;
5070
5071 for (i = 0; i < host->n_ports; i++) {
5072 struct ata_port *ap = host->ports[i];
5073 struct ata_link *link;
5074
5075 /* Previous resume operation might still be in
5076 * progress. Wait for PM_PENDING to clear.
5077 */
5078 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5079 ata_port_wait_eh(ap);
5080 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5081 }
5082
5083 /* request PM ops to EH */
5084 spin_lock_irqsave(ap->lock, flags);
5085
5086 ap->pm_mesg = mesg;
5087 if (wait) {
5088 rc = 0;
5089 ap->pm_result = &rc;
5090 }
5091
5092 ap->pflags |= ATA_PFLAG_PM_PENDING;
5093 __ata_port_for_each_link(link, ap) {
5094 link->eh_info.action |= action;
5095 link->eh_info.flags |= ehi_flags;
5096 }
5097
5098 ata_port_schedule_eh(ap);
5099
5100 spin_unlock_irqrestore(ap->lock, flags);
5101
5102 /* wait and check result */
5103 if (wait) {
5104 ata_port_wait_eh(ap);
5105 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5106 if (rc)
5107 return rc;
5108 }
5109 }
5110
5111 return 0;
5112 }
5113
5114 /**
5115 * ata_host_suspend - suspend host
5116 * @host: host to suspend
5117 * @mesg: PM message
5118 *
5119 * Suspend @host. Actual operation is performed by EH. This
5120 * function requests EH to perform PM operations and waits for EH
5121 * to finish.
5122 *
5123 * LOCKING:
5124 * Kernel thread context (may sleep).
5125 *
5126 * RETURNS:
5127 * 0 on success, -errno on failure.
5128 */
5129 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5130 {
5131 int rc;
5132
5133 /*
5134 * disable link pm on all ports before requesting
5135 * any pm activity
5136 */
5137 ata_lpm_enable(host);
5138
5139 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5140 if (rc == 0)
5141 host->dev->power.power_state = mesg;
5142 return rc;
5143 }
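
/*
 * Usage sketch: a PCI LLD's suspend hook suspends the ATA host first
 * and only then powers down the PCI device. This mirrors the shape
 * of ata_pci_device_suspend(); treat it as an outline, not the exact
 * code.
 */
#if 0
static int my_suspend_sketch(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_host_suspend(host, mesg);	/* EH quiesces the ports */
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);	/* save state, set power */
	return 0;
}
#endif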
5144
5145 /**
5146 * ata_host_resume - resume host
5147 * @host: host to resume
5148 *
5149 * Resume @host. Actual operation is performed by EH. This
5150 * function requests EH to perform PM operations and returns.
5151 * Note that all resume operations are performed in parallel.
5152 *
5153 * LOCKING:
5154 * Kernel thread context (may sleep).
5155 */
5156 void ata_host_resume(struct ata_host *host)
5157 {
5158 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5159 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5160 host->dev->power.power_state = PMSG_ON;
5161
5162 /* reenable link pm */
5163 ata_lpm_disable(host);
5164 }
5165 #endif
5166
5167 /**
5168 * ata_port_start - Set port up for dma.
5169 * @ap: Port to initialize
5170 *
5171 * Called just after data structures for each port are
5172 * initialized. Allocates space for PRD table.
5173 *
5174 * May be used as the port_start() entry in ata_port_operations.
5175 *
5176 * LOCKING:
5177 * Inherited from caller.
5178 */
5179 int ata_port_start(struct ata_port *ap)
5180 {
5181 struct device *dev = ap->dev;
5182
5183 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5184 GFP_KERNEL);
5185 if (!ap->prd)
5186 return -ENOMEM;
5187
5188 return 0;
5189 }
5190
5191 /**
5192 * ata_dev_init - Initialize an ata_device structure
5193 * @dev: Device structure to initialize
5194 *
5195 * Initialize @dev in preparation for probing.
5196 *
5197 * LOCKING:
5198 * Inherited from caller.
5199 */
5200 void ata_dev_init(struct ata_device *dev)
5201 {
5202 struct ata_link *link = ata_dev_phys_link(dev);
5203 struct ata_port *ap = link->ap;
5204 unsigned long flags;
5205
5206 /* SATA spd limit is bound to the attached device, reset together */
5207 link->sata_spd_limit = link->hw_sata_spd_limit;
5208 link->sata_spd = 0;
5209
5210 /* High bits of dev->flags are used to record warm plug
5211 * requests which occur asynchronously. Synchronize using
5212 * host lock.
5213 */
5214 spin_lock_irqsave(ap->lock, flags);
5215 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5216 dev->horkage = 0;
5217 spin_unlock_irqrestore(ap->lock, flags);
5218
5219 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5220 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5221 dev->pio_mask = UINT_MAX;
5222 dev->mwdma_mask = UINT_MAX;
5223 dev->udma_mask = UINT_MAX;
5224 }
5225
5226 /**
5227 * ata_link_init - Initialize an ata_link structure
5228 * @ap: ATA port link is attached to
5229 * @link: Link structure to initialize
5230 * @pmp: Port multiplier port number
5231 *
5232 * Initialize @link.
5233 *
5234 * LOCKING:
5235 * Kernel thread context (may sleep)
5236 */
5237 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5238 {
5239 int i;
5240
5241 /* clear everything except for devices */
5242 memset(link, 0, offsetof(struct ata_link, device[0]));
5243
5244 link->ap = ap;
5245 link->pmp = pmp;
5246 link->active_tag = ATA_TAG_POISON;
5247 link->hw_sata_spd_limit = UINT_MAX;
5248
5249 /* can't use iterator, ap isn't initialized yet */
5250 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5251 struct ata_device *dev = &link->device[i];
5252
5253 dev->link = link;
5254 dev->devno = dev - link->device;
5255 ata_dev_init(dev);
5256 }
5257 }
5258
5259 /**
5260 * sata_link_init_spd - Initialize link->sata_spd_limit
5261 * @link: Link to configure sata_spd_limit for
5262 *
5263 * Initialize @link->[hw_]sata_spd_limit to the currently
5264 * configured value.
5265 *
5266 * LOCKING:
5267 * Kernel thread context (may sleep).
5268 *
5269 * RETURNS:
5270 * 0 on success, -errno on failure.
5271 */
5272 int sata_link_init_spd(struct ata_link *link)
5273 {
5274 u8 spd;
5275 int rc;
5276
5277 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5278 if (rc)
5279 return rc;
5280
5281 spd = (link->saved_scontrol >> 4) & 0xf;
5282 if (spd)
5283 link->hw_sata_spd_limit &= (1 << spd) - 1;
5284
5285 ata_force_link_limits(link);
5286
5287 link->sata_spd_limit = link->hw_sata_spd_limit;
5288
5289 return 0;
5290 }
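
/*
 * Worked example: if the SPD field of SControl (bits 7:4) reads 2,
 * the link is limited to 3.0 Gbps, and (1 << 2) - 1 == 0x3 keeps
 * only the 1.5 and 3.0 Gbps bits in hw_sata_spd_limit. A field of 0
 * means "no limit" and leaves the mask untouched.
 */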
5291
5292 /**
5293 * ata_port_alloc - allocate and initialize basic ATA port resources
5294 * @host: ATA host this allocated port belongs to
5295 *
5296 * Allocate and initialize basic ATA port resources.
5297 *
5298 * RETURNS:
5299 * Allocated ATA port on success, NULL on failure.
5300 *
5301 * LOCKING:
5302 * Inherited from calling layer (may sleep).
5303 */
5304 struct ata_port *ata_port_alloc(struct ata_host *host)
5305 {
5306 struct ata_port *ap;
5307
5308 DPRINTK("ENTER\n");
5309
5310 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5311 if (!ap)
5312 return NULL;
5313
5314 ap->pflags |= ATA_PFLAG_INITIALIZING;
5315 ap->lock = &host->lock;
5316 ap->flags = ATA_FLAG_DISABLED;
5317 ap->print_id = -1;
5318 ap->ctl = ATA_DEVCTL_OBS;
5319 ap->host = host;
5320 ap->dev = host->dev;
5321 ap->last_ctl = 0xFF;
5322
5323 #if defined(ATA_VERBOSE_DEBUG)
5324 /* turn on all debugging levels */
5325 ap->msg_enable = 0x00FF;
5326 #elif defined(ATA_DEBUG)
5327 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5328 #else
5329 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5330 #endif
5331
5332 #ifdef CONFIG_ATA_SFF
5333 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5334 #else
5335 INIT_DELAYED_WORK(&ap->port_task, NULL);
5336 #endif
5337 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5338 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5339 INIT_LIST_HEAD(&ap->eh_done_q);
5340 init_waitqueue_head(&ap->eh_wait_q);
5341 init_completion(&ap->park_req_pending);
5342 init_timer_deferrable(&ap->fastdrain_timer);
5343 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5344 ap->fastdrain_timer.data = (unsigned long)ap;
5345
5346 ap->cbl = ATA_CBL_NONE;
5347
5348 ata_link_init(ap, &ap->link, 0);
5349
5350 #ifdef ATA_IRQ_TRAP
5351 ap->stats.unhandled_irq = 1;
5352 ap->stats.idle_irq = 1;
5353 #endif
5354 return ap;
5355 }
5356
5357 static void ata_host_release(struct device *gendev, void *res)
5358 {
5359 struct ata_host *host = dev_get_drvdata(gendev);
5360 int i;
5361
5362 for (i = 0; i < host->n_ports; i++) {
5363 struct ata_port *ap = host->ports[i];
5364
5365 if (!ap)
5366 continue;
5367
5368 if (ap->scsi_host)
5369 scsi_host_put(ap->scsi_host);
5370
5371 kfree(ap->pmp_link);
5372 kfree(ap->slave_link);
5373 kfree(ap);
5374 host->ports[i] = NULL;
5375 }
5376
5377 dev_set_drvdata(gendev, NULL);
5378 }
5379
5380 /**
5381 * ata_host_alloc - allocate and init basic ATA host resources
5382 * @dev: generic device this host is associated with
5383 * @max_ports: maximum number of ATA ports associated with this host
5384 *
5385 * Allocate and initialize basic ATA host resources. The LLD
5386 * calls this function to allocate a host, initializes it fully
5387 * and then attaches it using ata_host_register().
5388 *
5389 * @max_ports ports are allocated and host->n_ports is
5390 * initialized to @max_ports. The caller is allowed to decrease
5391 * host->n_ports before calling ata_host_register(). The unused
5392 * ports will be automatically freed on registration.
5393 *
5394 * RETURNS:
5395 * Allocated ATA host on success, NULL on failure.
5396 *
5397 * LOCKING:
5398 * Inherited from calling layer (may sleep).
5399 */
5400 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5401 {
5402 struct ata_host *host;
5403 size_t sz;
5404 int i;
5405
5406 DPRINTK("ENTER\n");
5407
5408 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5409 return NULL;
5410
5411 /* alloc a container for our list of ATA ports (buses) */
5412 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5414 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5415 if (!host)
5416 goto err_out;
5417
5418 devres_add(dev, host);
5419 dev_set_drvdata(dev, host);
5420
5421 spin_lock_init(&host->lock);
5422 host->dev = dev;
5423 host->n_ports = max_ports;
5424
5425 /* allocate ports bound to this host */
5426 for (i = 0; i < max_ports; i++) {
5427 struct ata_port *ap;
5428
5429 ap = ata_port_alloc(host);
5430 if (!ap)
5431 goto err_out;
5432
5433 ap->port_no = i;
5434 host->ports[i] = ap;
5435 }
5436
5437 devres_remove_group(dev, NULL);
5438 return host;
5439
5440 err_out:
5441 devres_release_group(dev, NULL);
5442 return NULL;
5443 }
5444
5445 /**
5446 * ata_host_alloc_pinfo - alloc host and init with port_info array
5447 * @dev: generic device this host is associated with
5448 * @ppi: array of ATA port_info to initialize host with
5449 * @n_ports: number of ATA ports attached to this host
5450 *
5451 * Allocate ATA host and initialize with info from @ppi. If NULL
5452 * terminated, @ppi may contain fewer entries than @n_ports. The
5453 * last entry will be used for the remaining ports.
5454 *
5455 * RETURNS:
5456 * Allocated ATA host on success, NULL on failure.
5457 *
5458 * LOCKING:
5459 * Inherited from calling layer (may sleep).
5460 */
5461 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5462 const struct ata_port_info * const * ppi,
5463 int n_ports)
5464 {
5465 const struct ata_port_info *pi;
5466 struct ata_host *host;
5467 int i, j;
5468
5469 host = ata_host_alloc(dev, n_ports);
5470 if (!host)
5471 return NULL;
5472
5473 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5474 struct ata_port *ap = host->ports[i];
5475
5476 if (ppi[j])
5477 pi = ppi[j++];
5478
5479 ap->pio_mask = pi->pio_mask;
5480 ap->mwdma_mask = pi->mwdma_mask;
5481 ap->udma_mask = pi->udma_mask;
5482 ap->flags |= pi->flags;
5483 ap->link.flags |= pi->link_flags;
5484 ap->ops = pi->port_ops;
5485
5486 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5487 host->ops = pi->port_ops;
5488 }
5489
5490 return host;
5491 }
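
/*
 * Usage sketch: a two-port controller whose ports share a single
 * ata_port_info. Because @ppi is NULL terminated, the one entry is
 * reused for the second port (my_port_ops is hypothetical and pdev
 * comes from the probe context).
 */
#if 0
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= 0x1f,	/* PIO0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &my_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	struct ata_host *host;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;
#endif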
5492
5493 /**
5494 * ata_slave_link_init - initialize slave link
5495 * @ap: port to initialize slave link for
5496 *
5497 * Create and initialize slave link for @ap. This enables slave
5498 * link handling on the port.
5499 *
5500 * In libata, a port contains links and a link contains devices.
5501 * There is a single host link, but if a PMP is attached to it,
5502 * there can be multiple fan-out links. On SATA, there's usually
5503 * a single device connected to a link but PATA and SATA
5504 * controllers emulating TF based interface can have two - master
5505 * and slave.
5506 *
5507 * However, there are a few controllers which don't fit into this
5508 * abstraction too well - SATA controllers which emulate TF
5509 * interface with both master and slave devices but also have
5510 * separate SCR register sets for each device. These controllers
5511 * need separate links for physical link handling
5512 * (e.g. onlineness, link speed) but should be treated like a
5513 * traditional M/S controller for everything else (e.g. command
5514 * issue, softreset).
5515 *
5516 * slave_link is libata's way of handling this class of
5517 * controllers without impacting core layer too much. For
5518 * anything other than physical link handling, the default host
5519 * link is used for both master and slave. For physical link
5520 * handling, separate @ap->slave_link is used. All dirty details
5521 * are implemented inside libata core layer. From LLD's POV, the
5522 * only difference is that prereset, hardreset and postreset are
5523 * called once more for the slave link, so the reset sequence
5524 * looks like the following.
5525 *
5526 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5527 * softreset(M) -> postreset(M) -> postreset(S)
5528 *
5529 * Note that softreset is called only for the master. Softreset
5530 * resets both M/S by definition, so SRST on master should handle
5531 * both (the standard method will work just fine).
5532 *
5533 * LOCKING:
5534 * Should be called before host is registered.
5535 *
5536 * RETURNS:
5537 * 0 on success, -errno on failure.
5538 */
5539 int ata_slave_link_init(struct ata_port *ap)
5540 {
5541 struct ata_link *link;
5542
5543 WARN_ON(ap->slave_link);
5544 WARN_ON(ap->flags & ATA_FLAG_PMP);
5545
5546 link = kzalloc(sizeof(*link), GFP_KERNEL);
5547 if (!link)
5548 return -ENOMEM;
5549
5550 ata_link_init(ap, link, 1);
5551 ap->slave_link = link;
5552 return 0;
5553 }
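
/*
 * Usage sketch: an LLD for the class of controllers described above
 * would create the slave link right after allocating the host and
 * before registering it (error unwinding trimmed; host comes from
 * the probe context).
 */
#if 0
	struct ata_port *ap = host->ports[0];
	int rc;

	rc = ata_slave_link_init(ap);	/* sets up ap->slave_link */
	if (rc)
		return rc;
	/* prereset/hardreset/postreset now run once more for the slave */
#endif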
5554
5555 static void ata_host_stop(struct device *gendev, void *res)
5556 {
5557 struct ata_host *host = dev_get_drvdata(gendev);
5558 int i;
5559
5560 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5561
5562 for (i = 0; i < host->n_ports; i++) {
5563 struct ata_port *ap = host->ports[i];
5564
5565 if (ap->ops->port_stop)
5566 ap->ops->port_stop(ap);
5567 }
5568
5569 if (host->ops->host_stop)
5570 host->ops->host_stop(host);
5571 }
5572
5573 /**
5574 * ata_finalize_port_ops - finalize ata_port_operations
5575 * @ops: ata_port_operations to finalize
5576 *
5577 * An ata_port_operations can inherit from another ops and that
5578 * ops can again inherit from another. This can go on as many
5579 * times as necessary as long as there is no loop in the
5580 * inheritance chain.
5581 *
5582 * Ops tables are finalized when the host is started. NULL or
5583 * unspecified entries are inherited from the closest ancestor
5584 * which has the method, and the entry is populated with it.
5585 * After finalization, the ops table directly points to all the
5586 * methods and ->inherits is no longer necessary and cleared.
5587 *
5588 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5589 *
5590 * LOCKING:
5591 * None.
5592 */
5593 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5594 {
5595 static DEFINE_SPINLOCK(lock);
5596 const struct ata_port_operations *cur;
5597 void **begin = (void **)ops;
5598 void **end = (void **)&ops->inherits;
5599 void **pp;
5600
5601 if (!ops || !ops->inherits)
5602 return;
5603
5604 spin_lock(&lock);
5605
5606 for (cur = ops->inherits; cur; cur = cur->inherits) {
5607 void **inherit = (void **)cur;
5608
5609 for (pp = begin; pp < end; pp++, inherit++)
5610 if (!*pp)
5611 *pp = *inherit;
5612 }
5613
5614 for (pp = begin; pp < end; pp++)
5615 if (IS_ERR(*pp))
5616 *pp = NULL;
5617
5618 ops->inherits = NULL;
5619
5620 spin_unlock(&lock);
5621 }
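
/*
 * A minimal sketch of the inheritance being resolved (hypothetical
 * driver ops). Unset slots fall back along the .inherits chain and
 * ATA_OP_NULL forces a slot to NULL despite any inherited value.
 */
#if 0
static struct ata_port_operations my_ops_sketch = {
	.inherits	= &sata_port_ops,
	.hardreset	= my_hardreset,		/* override one method */
	.postreset	= ATA_OP_NULL,		/* force-clear another */
};
#endif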
5622
5623 /**
5624 * ata_host_start - start and freeze ports of an ATA host
5625 * @host: ATA host to start ports for
5626 *
5627 * Start and then freeze ports of @host. Started status is
5628 * recorded in host->flags, so this function can be called
5629 * multiple times. Ports are guaranteed to get started only
5630 * once. If host->ops isn't initialized yet, it's set to the
5631 * first non-dummy port ops.
5632 *
5633 * LOCKING:
5634 * Inherited from calling layer (may sleep).
5635 *
5636 * RETURNS:
5637 * 0 if all ports are started successfully, -errno otherwise.
5638 */
5639 int ata_host_start(struct ata_host *host)
5640 {
5641 int have_stop = 0;
5642 void *start_dr = NULL;
5643 int i, rc;
5644
5645 if (host->flags & ATA_HOST_STARTED)
5646 return 0;
5647
5648 ata_finalize_port_ops(host->ops);
5649
5650 for (i = 0; i < host->n_ports; i++) {
5651 struct ata_port *ap = host->ports[i];
5652
5653 ata_finalize_port_ops(ap->ops);
5654
5655 if (!host->ops && !ata_port_is_dummy(ap))
5656 host->ops = ap->ops;
5657
5658 if (ap->ops->port_stop)
5659 have_stop = 1;
5660 }
5661
5662 if (host->ops->host_stop)
5663 have_stop = 1;
5664
5665 if (have_stop) {
5666 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5667 if (!start_dr)
5668 return -ENOMEM;
5669 }
5670
5671 for (i = 0; i < host->n_ports; i++) {
5672 struct ata_port *ap = host->ports[i];
5673
5674 if (ap->ops->port_start) {
5675 rc = ap->ops->port_start(ap);
5676 if (rc) {
5677 if (rc != -ENODEV)
5678 dev_printk(KERN_ERR, host->dev,
5679 "failed to start port %d "
5680 "(errno=%d)\n", i, rc);
5681 goto err_out;
5682 }
5683 }
5684 ata_eh_freeze_port(ap);
5685 }
5686
5687 if (start_dr)
5688 devres_add(host->dev, start_dr);
5689 host->flags |= ATA_HOST_STARTED;
5690 return 0;
5691
5692 err_out:
5693 while (--i >= 0) {
5694 struct ata_port *ap = host->ports[i];
5695
5696 if (ap->ops->port_stop)
5697 ap->ops->port_stop(ap);
5698 }
5699 devres_free(start_dr);
5700 return rc;
5701 }
5702
5703 /**
5704 * ata_host_init - Initialize a host struct
5705 * @host: host to initialize
5706 * @dev: device host is attached to
5707 * @flags: host flags
5708 * @ops: port_ops
5709 *
5710 * LOCKING:
5711 * PCI/etc. bus probe sem.
5712 *
5713 */
5714 /* KILLME - the only user left is ipr */
5715 void ata_host_init(struct ata_host *host, struct device *dev,
5716 unsigned long flags, struct ata_port_operations *ops)
5717 {
5718 spin_lock_init(&host->lock);
5719 host->dev = dev;
5720 host->flags = flags;
5721 host->ops = ops;
5722 }
5723
5724 /**
5725 * ata_host_register - register initialized ATA host
5726 * @host: ATA host to register
5727 * @sht: template for SCSI host
5728 *
5729 * Register initialized ATA host. @host is allocated using
5730 * ata_host_alloc() and fully initialized by LLD. This function
5731 * starts ports, registers @host with ATA and SCSI layers and
5732 * probes the attached devices.
5733 *
5734 * LOCKING:
5735 * Inherited from calling layer (may sleep).
5736 *
5737 * RETURNS:
5738 * 0 on success, -errno otherwise.
5739 */
5740 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5741 {
5742 int i, rc;
5743
5744 /* host must have been started */
5745 if (!(host->flags & ATA_HOST_STARTED)) {
5746 dev_printk(KERN_ERR, host->dev,
5747 "BUG: trying to register unstarted host\n");
5748 WARN_ON(1);
5749 return -EINVAL;
5750 }
5751
5752 /* Blow away unused ports. This happens when LLD can't
5753 * determine the exact number of ports to allocate at
5754 * allocation time.
5755 */
5756 for (i = host->n_ports; host->ports[i]; i++)
5757 kfree(host->ports[i]);
5758
5759 /* give ports names and add SCSI hosts */
5760 for (i = 0; i < host->n_ports; i++)
5761 host->ports[i]->print_id = ata_print_id++;
5762
5763 rc = ata_scsi_add_hosts(host, sht);
5764 if (rc)
5765 return rc;
5766
5767 /* associate with ACPI nodes */
5768 ata_acpi_associate(host);
5769
5770 /* set cable, sata_spd_limit and report */
5771 for (i = 0; i < host->n_ports; i++) {
5772 struct ata_port *ap = host->ports[i];
5773 unsigned long xfer_mask;
5774
5775 /* set SATA cable type if still unset */
5776 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5777 ap->cbl = ATA_CBL_SATA;
5778
5779 /* init sata_spd_limit to the current value */
5780 sata_link_init_spd(&ap->link);
5781 if (ap->slave_link)
5782 sata_link_init_spd(ap->slave_link);
5783
5784 /* print per-port info to dmesg */
5785 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5786 ap->udma_mask);
5787
5788 if (!ata_port_is_dummy(ap)) {
5789 ata_port_printk(ap, KERN_INFO,
5790 "%cATA max %s %s\n",
5791 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5792 ata_mode_string(xfer_mask),
5793 ap->link.eh_info.desc);
5794 ata_ehi_clear_desc(&ap->link.eh_info);
5795 } else
5796 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5797 }
5798
5799 /* perform each probe synchronously */
5800 DPRINTK("probe begin\n");
5801 for (i = 0; i < host->n_ports; i++) {
5802 struct ata_port *ap = host->ports[i];
5803
5804 /* probe */
5805 if (ap->ops->error_handler) {
5806 struct ata_eh_info *ehi = &ap->link.eh_info;
5807 unsigned long flags;
5808
5809 ata_port_probe(ap);
5810
5811 /* kick EH for boot probing */
5812 spin_lock_irqsave(ap->lock, flags);
5813
5814 ehi->probe_mask |= ATA_ALL_DEVICES;
5815 ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
5816 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5817
5818 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5819 ap->pflags |= ATA_PFLAG_LOADING;
5820 ata_port_schedule_eh(ap);
5821
5822 spin_unlock_irqrestore(ap->lock, flags);
5823
5824 /* wait for EH to finish */
5825 ata_port_wait_eh(ap);
5826 } else {
5827 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5828 rc = ata_bus_probe(ap);
5829 DPRINTK("ata%u: bus probe end\n", ap->print_id);
5830
5831 if (rc) {
5832 /* FIXME: do something useful here?
5833 * Current libata behavior will
5834 * tear down everything when
5835 * the module is removed
5836 * or the h/w is unplugged.
5837 */
5838 }
5839 }
5840 }
5841
5842 /* probes are done, now scan each port's disk(s) */
5843 DPRINTK("host probe begin\n");
5844 for (i = 0; i < host->n_ports; i++) {
5845 struct ata_port *ap = host->ports[i];
5846
5847 ata_scsi_scan_host(ap, 1);
5848 }
5849
5850 return 0;
5851 }
5852
5853 /**
5854 * ata_host_activate - start host, request IRQ and register it
5855 * @host: target ATA host
5856 * @irq: IRQ to request
5857 * @irq_handler: irq_handler used when requesting IRQ
5858 * @irq_flags: irq_flags used when requesting IRQ
5859 * @sht: scsi_host_template to use when registering the host
5860 *
5861 * After allocating an ATA host and initializing it, most libata
5862 * LLDs perform three steps to activate the host - start host,
5863 * request IRQ and register it. This helper takes the necessary
5864 * arguments and performs the three steps in one go.
5865 *
5866 * An invalid IRQ skips the IRQ registration and expects the host to
5867 * have set polling mode on the port. In this case, @irq_handler
5868 * should be NULL.
5869 *
5870 * LOCKING:
5871 * Inherited from calling layer (may sleep).
5872 *
5873 * RETURNS:
5874 * 0 on success, -errno otherwise.
5875 */
5876 int ata_host_activate(struct ata_host *host, int irq,
5877 irq_handler_t irq_handler, unsigned long irq_flags,
5878 struct scsi_host_template *sht)
5879 {
5880 int i, rc;
5881
5882 rc = ata_host_start(host);
5883 if (rc)
5884 return rc;
5885
5886 /* Special case for polling mode */
5887 if (!irq) {
5888 WARN_ON(irq_handler);
5889 return ata_host_register(host, sht);
5890 }
5891
5892 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5893 dev_driver_string(host->dev), host);
5894 if (rc)
5895 return rc;
5896
5897 for (i = 0; i < host->n_ports; i++)
5898 ata_port_desc(host->ports[i], "irq %d", irq);
5899
5900 rc = ata_host_register(host, sht);
5901 /* if failed, just free the IRQ and leave ports alone */
5902 if (rc)
5903 devm_free_irq(host->dev, irq, host);
5904
5905 return rc;
5906 }
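/*
 * A minimal usage sketch (not from this file): a typical LLD's probe
 * routine, after allocating and initializing the host, hands the rest
 * of bring-up to ata_host_activate().  Names such as my_init_one,
 * my_interrupt, my_driver_sht and ppi are hypothetical.
 *
 *	static int my_init_one(struct pci_dev *pdev)
 *	{
 *		struct ata_host *host;
 *		...
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *		if (!host)
 *			return -ENOMEM;
 *		...
 *		return ata_host_activate(host, pdev->irq, my_interrupt,
 *					 IRQF_SHARED, &my_driver_sht);
 *	}
 */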
5907
5908 /**
5909 * ata_port_detach - Detach ATA port in preparation for device removal
5910 * @ap: ATA port to be detached
5911 *
5912 * Detach all ATA devices and the associated SCSI devices of @ap;
5913 * then, remove the associated SCSI host. @ap is guaranteed to
5914 * be quiescent on return from this function.
5915 *
5916 * LOCKING:
5917 * Kernel thread context (may sleep).
5918 */
5919 static void ata_port_detach(struct ata_port *ap)
5920 {
5921 unsigned long flags;
5922 struct ata_link *link;
5923 struct ata_device *dev;
5924
5925 if (!ap->ops->error_handler)
5926 goto skip_eh;
5927
5928 /* tell EH we're leaving & flush EH */
5929 spin_lock_irqsave(ap->lock, flags);
5930 ap->pflags |= ATA_PFLAG_UNLOADING;
5931 spin_unlock_irqrestore(ap->lock, flags);
5932
5933 ata_port_wait_eh(ap);
5934
5935 /* EH is now guaranteed to see UNLOADING - EH context belongs
5936 * to us. Restore SControl and disable all existing devices.
5937 */
5938 __ata_port_for_each_link(link, ap) {
5939 sata_scr_write(link, SCR_CONTROL, link->saved_scontrol);
5940 ata_link_for_each_dev(dev, link)
5941 ata_dev_disable(dev);
5942 }
5943
5944 /* Final freeze & EH. All in-flight commands are aborted. EH
5945 * will be skipped and retries will be terminated with a bad
5946 * target error.
5947 */
5948 spin_lock_irqsave(ap->lock, flags);
5949 ata_port_freeze(ap); /* won't be thawed */
5950 spin_unlock_irqrestore(ap->lock, flags);
5951
5952 ata_port_wait_eh(ap);
5953 cancel_rearming_delayed_work(&ap->hotplug_task);
5954
5955 skip_eh:
5956 /* remove the associated SCSI host */
5957 scsi_remove_host(ap->scsi_host);
5958 }
5959
5960 /**
5961 * ata_host_detach - Detach all ports of an ATA host
5962 * @host: Host to detach
5963 *
5964 * Detach all ports of @host.
5965 *
5966 * LOCKING:
5967 * Kernel thread context (may sleep).
5968 */
5969 void ata_host_detach(struct ata_host *host)
5970 {
5971 int i;
5972
5973 for (i = 0; i < host->n_ports; i++)
5974 ata_port_detach(host->ports[i]);
5975
5976 /* the host is dead now, dissociate ACPI */
5977 ata_acpi_dissociate(host);
5978 }
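/*
 * Usage sketch (hypothetical, not from this file): a non-PCI LLD's
 * remove callback typically just fetches the host from drvdata and
 * calls ata_host_detach(); resource release is then handled by devres.
 *
 *	static int my_platform_remove(struct platform_device *pdev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		ata_host_detach(host);
 *		return 0;
 *	}
 */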
5979
5980 #ifdef CONFIG_PCI
5981
5982 /**
5983 * ata_pci_remove_one - PCI layer callback for device removal
5984 * @pdev: PCI device that was removed
5985 *
5986 * PCI layer indicates to libata via this hook that hot-unplug or
5987 * module unload event has occurred. Detach all ports. Resource
5988 * release is handled via devres.
5989 *
5990 * LOCKING:
5991 * Inherited from PCI layer (may sleep).
5992 */
5993 void ata_pci_remove_one(struct pci_dev *pdev)
5994 {
5995 struct device *dev = &pdev->dev;
5996 struct ata_host *host = dev_get_drvdata(dev);
5997
5998 ata_host_detach(host);
5999 }
6000
6001 /* move to PCI subsystem */
6002 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6003 {
6004 unsigned long tmp = 0;
6005
6006 switch (bits->width) {
6007 case 1: {
6008 u8 tmp8 = 0;
6009 pci_read_config_byte(pdev, bits->reg, &tmp8);
6010 tmp = tmp8;
6011 break;
6012 }
6013 case 2: {
6014 u16 tmp16 = 0;
6015 pci_read_config_word(pdev, bits->reg, &tmp16);
6016 tmp = tmp16;
6017 break;
6018 }
6019 case 4: {
6020 u32 tmp32 = 0;
6021 pci_read_config_dword(pdev, bits->reg, &tmp32);
6022 tmp = tmp32;
6023 break;
6024 }
6025
6026 default:
6027 return -EINVAL;
6028 }
6029
6030 tmp &= bits->mask;
6031
6032 return (tmp == bits->val) ? 1 : 0;
6033 }
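/*
 * Usage sketch: LLDs use this to test "port enabled" bits in PCI
 * config space, typically from a prereset hook.  The register offset
 * and bit values below are illustrative, not from a real controller.
 *
 *	static const struct pci_bits my_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	// port 0 enable
 *		{ 0x43, 1, 0x80, 0x80 },	// port 1 enable
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits[ap->port_no]))
 *		return -ENOENT;		// port disabled, skip probing
 */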
6034
6035 #ifdef CONFIG_PM
6036 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6037 {
6038 pci_save_state(pdev);
6039 pci_disable_device(pdev);
6040
6041 if (mesg.event & PM_EVENT_SLEEP)
6042 pci_set_power_state(pdev, PCI_D3hot);
6043 }
6044
6045 int ata_pci_device_do_resume(struct pci_dev *pdev)
6046 {
6047 int rc;
6048
6049 pci_set_power_state(pdev, PCI_D0);
6050 pci_restore_state(pdev);
6051
6052 rc = pcim_enable_device(pdev);
6053 if (rc) {
6054 dev_printk(KERN_ERR, &pdev->dev,
6055 "failed to enable device after resume (%d)\n", rc);
6056 return rc;
6057 }
6058
6059 pci_set_master(pdev);
6060 return 0;
6061 }
6062
6063 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6064 {
6065 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6066 int rc = 0;
6067
6068 rc = ata_host_suspend(host, mesg);
6069 if (rc)
6070 return rc;
6071
6072 ata_pci_device_do_suspend(pdev, mesg);
6073
6074 return 0;
6075 }
6076
6077 int ata_pci_device_resume(struct pci_dev *pdev)
6078 {
6079 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6080 int rc;
6081
6082 rc = ata_pci_device_do_resume(pdev);
6083 if (rc == 0)
6084 ata_host_resume(host);
6085 return rc;
6086 }
6087 #endif /* CONFIG_PM */
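/*
 * Usage sketch (hypothetical driver): LLDs that need no extra
 * suspend/resume work can point their struct pci_driver hooks
 * straight at the helpers above.  my_pci_driver, my_pci_ids and
 * my_init_one are assumed names.
 *
 *	static struct pci_driver my_pci_driver = {
 *		.name		= "my_ata",
 *		.id_table	= my_pci_ids,
 *		.probe		= my_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */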
6088
6089 #endif /* CONFIG_PCI */
6090
6091 static int __init ata_parse_force_one(char **cur,
6092 struct ata_force_ent *force_ent,
6093 const char **reason)
6094 {
6095 /* FIXME: Currently, there's no way to tag init const data and
6096 * using __initdata causes build failure on some versions of
6097 * gcc. Once __initdataconst is implemented, add const to the
6098 * following structure.
6099 */
6100 static struct ata_force_param force_tbl[] __initdata = {
6101 { "40c", .cbl = ATA_CBL_PATA40 },
6102 { "80c", .cbl = ATA_CBL_PATA80 },
6103 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6104 { "unk", .cbl = ATA_CBL_PATA_UNK },
6105 { "ign", .cbl = ATA_CBL_PATA_IGN },
6106 { "sata", .cbl = ATA_CBL_SATA },
6107 { "1.5Gbps", .spd_limit = 1 },
6108 { "3.0Gbps", .spd_limit = 2 },
6109 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6110 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6111 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6112 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6113 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6114 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6115 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6116 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6117 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6118 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6119 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6120 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6121 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6122 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6123 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6124 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6125 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6126 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6127 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6128 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6129 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6130 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6131 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6132 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6133 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6134 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6135 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6136 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6137 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6138 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6139 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6140 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6141 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6142 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6143 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6144 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6145 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6146 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6147 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6148 };
6149 char *start = *cur, *p = *cur;
6150 char *id, *val, *endp;
6151 const struct ata_force_param *match_fp = NULL;
6152 int nr_matches = 0, i;
6153
6154 /* find where this param ends and update *cur */
6155 while (*p != '\0' && *p != ',')
6156 p++;
6157
6158 if (*p == '\0')
6159 *cur = p;
6160 else
6161 *cur = p + 1;
6162
6163 *p = '\0';
6164
6165 /* parse */
6166 p = strchr(start, ':');
6167 if (!p) {
6168 val = strstrip(start);
6169 goto parse_val;
6170 }
6171 *p = '\0';
6172
6173 id = strstrip(start);
6174 val = strstrip(p + 1);
6175
6176 /* parse id */
6177 p = strchr(id, '.');
6178 if (p) {
6179 *p++ = '\0';
6180 force_ent->device = simple_strtoul(p, &endp, 10);
6181 if (p == endp || *endp != '\0') {
6182 *reason = "invalid device";
6183 return -EINVAL;
6184 }
6185 }
6186
6187 force_ent->port = simple_strtoul(id, &endp, 10);
6188 if (id == endp || *endp != '\0') {
6189 *reason = "invalid port/link";
6190 return -EINVAL;
6191 }
6192
6193 parse_val:
6194 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6195 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6196 const struct ata_force_param *fp = &force_tbl[i];
6197
6198 if (strncasecmp(val, fp->name, strlen(val)))
6199 continue;
6200
6201 nr_matches++;
6202 match_fp = fp;
6203
6204 if (strcasecmp(val, fp->name) == 0) {
6205 nr_matches = 1;
6206 break;
6207 }
6208 }
6209
6210 if (!nr_matches) {
6211 *reason = "unknown value";
6212 return -EINVAL;
6213 }
6214 if (nr_matches > 1) {
6215 *reason = "ambiguous value";
6216 return -EINVAL;
6217 }
6218
6219 force_ent->param = *match_fp;
6220
6221 return 0;
6222 }
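/*
 * For illustration, parameters accepted by the parser above take the
 * form [port[.device]:]value, comma-separated; unambiguous prefixes
 * of table entries are accepted, so "1.5" matches "1.5Gbps", e.g.:
 *
 *	libata.force=1.5Gbps		limit all ports to 1.5Gbps
 *	libata.force=2:noncq		turn off NCQ on port 2
 *	libata.force=1.0:udma4,40c	force UDMA4 on port 1 device 0,
 *					40-wire cable everywhere
 */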
6223
6224 static void __init ata_parse_force_param(void)
6225 {
6226 int idx = 0, size = 1;
6227 int last_port = -1, last_device = -1;
6228 char *p, *cur, *next;
6229
6230 /* calculate maximum number of params and allocate force_tbl */
6231 for (p = ata_force_param_buf; *p; p++)
6232 if (*p == ',')
6233 size++;
6234
6235 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6236 if (!ata_force_tbl) {
6237 printk(KERN_WARNING "ata: failed to allocate force table, "
6238 "libata.force ignored\n");
6239 return;
6240 }
6241
6242 /* parse and populate the table */
6243 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6244 const char *reason = "";
6245 struct ata_force_ent te = { .port = -1, .device = -1 };
6246
6247 next = cur;
6248 if (ata_parse_force_one(&next, &te, &reason)) {
6249 printk(KERN_WARNING "ata: failed to parse force "
6250 "parameter \"%s\" (%s)\n",
6251 cur, reason);
6252 continue;
6253 }
6254
6255 if (te.port == -1) {
6256 te.port = last_port;
6257 te.device = last_device;
6258 }
6259
6260 ata_force_tbl[idx++] = te;
6261
6262 last_port = te.port;
6263 last_device = te.device;
6264 }
6265
6266 ata_force_tbl_size = idx;
6267 }
6268
6269 static int __init ata_init(void)
6270 {
6271 ata_parse_force_param();
6272
6273 ata_wq = create_workqueue("ata");
6274 if (!ata_wq)
6275 goto free_force_tbl;
6276
6277 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6278 if (!ata_aux_wq)
6279 goto free_wq;
6280
6281 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6282 return 0;
6283
6284 free_wq:
6285 destroy_workqueue(ata_wq);
6286 free_force_tbl:
6287 kfree(ata_force_tbl);
6288 return -ENOMEM;
6289 }
6290
6291 static void __exit ata_exit(void)
6292 {
6293 kfree(ata_force_tbl);
6294 destroy_workqueue(ata_wq);
6295 destroy_workqueue(ata_aux_wq);
6296 }
6297
6298 subsys_initcall(ata_init);
6299 module_exit(ata_exit);
6300
6301 static unsigned long ratelimit_time;
6302 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6303
6304 int ata_ratelimit(void)
6305 {
6306 int rc;
6307 unsigned long flags;
6308
6309 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6310
6311 if (time_after(jiffies, ratelimit_time)) {
6312 rc = 1;
6313 ratelimit_time = jiffies + (HZ/5);
6314 } else
6315 rc = 0;
6316
6317 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6318
6319 return rc;
6320 }
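/*
 * Usage sketch: callers (mostly interrupt handlers) gate noisy
 * messages on ata_ratelimit(), which allows at most one pass per
 * HZ/5 jiffies, e.g.:
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt\n");
 */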
6321
6322 /**
6323 * ata_wait_register - wait until register value changes
6324 * @reg: IO-mapped register
6325 * @mask: Mask to apply to read register value
6326 * @val: Wait condition
6327 * @interval: polling interval in milliseconds
6328 * @timeout: timeout in milliseconds
6329 *
6330 * Waiting for some bits of a register to change is a common
6331 * operation for ATA controllers. This function reads the 32-bit
6332 * LE IO-mapped register @reg and tests for the following condition.
6333 *
6334 * (*@reg & mask) != val
6335 *
6336 * If the condition is met, it returns; otherwise, the process is
6337 * repeated after @interval milliseconds until @timeout expires.
6338 *
6339 * LOCKING:
6340 * Kernel thread context (may sleep)
6341 *
6342 * RETURNS:
6343 * The final register value.
6344 */
6345 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6346 unsigned long interval, unsigned long timeout)
6347 {
6348 unsigned long deadline;
6349 u32 tmp;
6350
6351 tmp = ioread32(reg);
6352
6353 /* Calculate timeout _after_ the first read to make sure
6354 * preceding writes reach the controller before starting to
6355 * eat away the timeout.
6356 */
6357 deadline = ata_deadline(jiffies, timeout);
6358
6359 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6360 msleep(interval);
6361 tmp = ioread32(reg);
6362 }
6363
6364 return tmp;
6365 }
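/*
 * Usage sketch: to wait up to a second for a hypothetical BUSY bit
 * to clear, pass the bit as both @mask and @val; the loop exits as
 * soon as (reg & mask) != val.  MY_STATUS_REG and MY_BUSY are
 * assumed names.
 *
 *	status = ata_wait_register(mmio + MY_STATUS_REG, MY_BUSY,
 *				   MY_BUSY, 10, 1000);
 *	if (status & MY_BUSY)
 *		return -EBUSY;		// timed out
 */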
6366
6367 /*
6368 * Dummy port_ops
6369 */
6370 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6371 {
6372 return AC_ERR_SYSTEM;
6373 }
6374
6375 static void ata_dummy_error_handler(struct ata_port *ap)
6376 {
6377 /* truly dummy */
6378 }
6379
6380 struct ata_port_operations ata_dummy_port_ops = {
6381 .qc_prep = ata_noop_qc_prep,
6382 .qc_issue = ata_dummy_qc_issue,
6383 .error_handler = ata_dummy_error_handler,
6384 };
6385
6386 const struct ata_port_info ata_dummy_port_info = {
6387 .port_ops = &ata_dummy_port_ops,
6388 };
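/*
 * Usage sketch: a controller with an unusable port can point the
 * corresponding entry of its port_info array at ata_dummy_port_info;
 * the port then registers as a dummy and every issued command fails
 * with AC_ERR_SYSTEM.  my_port_info is hypothetical.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &my_port_info, &ata_dummy_port_info };
 */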
6389
6390 /*
6391 * libata is essentially a library of internal helper functions for
6392 * low-level ATA host controller drivers. As such, the API/ABI is
6393 * likely to change as new drivers are added and updated.
6394 * Do not depend on ABI/API stability.
6395 */
6396 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6397 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6398 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6399 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6400 EXPORT_SYMBOL_GPL(sata_port_ops);
6401 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6402 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6403 EXPORT_SYMBOL_GPL(__ata_port_next_link);
6404 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6405 EXPORT_SYMBOL_GPL(ata_host_init);
6406 EXPORT_SYMBOL_GPL(ata_host_alloc);
6407 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6408 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6409 EXPORT_SYMBOL_GPL(ata_host_start);
6410 EXPORT_SYMBOL_GPL(ata_host_register);
6411 EXPORT_SYMBOL_GPL(ata_host_activate);
6412 EXPORT_SYMBOL_GPL(ata_host_detach);
6413 EXPORT_SYMBOL_GPL(ata_sg_init);
6414 EXPORT_SYMBOL_GPL(ata_qc_complete);
6415 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6416 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6417 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6418 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6419 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6420 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6421 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6422 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6423 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6424 EXPORT_SYMBOL_GPL(ata_mode_string);
6425 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6426 EXPORT_SYMBOL_GPL(ata_port_start);
6427 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6428 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6429 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6430 EXPORT_SYMBOL_GPL(ata_port_probe);
6431 EXPORT_SYMBOL_GPL(ata_dev_disable);
6432 EXPORT_SYMBOL_GPL(sata_set_spd);
6433 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6434 EXPORT_SYMBOL_GPL(sata_link_debounce);
6435 EXPORT_SYMBOL_GPL(sata_link_resume);
6436 EXPORT_SYMBOL_GPL(ata_std_prereset);
6437 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6438 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6439 EXPORT_SYMBOL_GPL(ata_std_postreset);
6440 EXPORT_SYMBOL_GPL(ata_dev_classify);
6441 EXPORT_SYMBOL_GPL(ata_dev_pair);
6442 EXPORT_SYMBOL_GPL(ata_port_disable);
6443 EXPORT_SYMBOL_GPL(ata_ratelimit);
6444 EXPORT_SYMBOL_GPL(ata_wait_register);
6445 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6446 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6447 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6448 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6449 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6450 EXPORT_SYMBOL_GPL(sata_scr_valid);
6451 EXPORT_SYMBOL_GPL(sata_scr_read);
6452 EXPORT_SYMBOL_GPL(sata_scr_write);
6453 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6454 EXPORT_SYMBOL_GPL(ata_link_online);
6455 EXPORT_SYMBOL_GPL(ata_link_offline);
6456 #ifdef CONFIG_PM
6457 EXPORT_SYMBOL_GPL(ata_host_suspend);
6458 EXPORT_SYMBOL_GPL(ata_host_resume);
6459 #endif /* CONFIG_PM */
6460 EXPORT_SYMBOL_GPL(ata_id_string);
6461 EXPORT_SYMBOL_GPL(ata_id_c_string);
6462 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6463 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6464
6465 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6466 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6467 EXPORT_SYMBOL_GPL(ata_timing_compute);
6468 EXPORT_SYMBOL_GPL(ata_timing_merge);
6469 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6470
6471 #ifdef CONFIG_PCI
6472 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6473 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6474 #ifdef CONFIG_PM
6475 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6476 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6477 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6478 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6479 #endif /* CONFIG_PM */
6480 #endif /* CONFIG_PCI */
6481
6482 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6483 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6484 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6485 EXPORT_SYMBOL_GPL(ata_port_desc);
6486 #ifdef CONFIG_PCI
6487 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6488 #endif /* CONFIG_PCI */
6489 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6490 EXPORT_SYMBOL_GPL(ata_link_abort);
6491 EXPORT_SYMBOL_GPL(ata_port_abort);
6492 EXPORT_SYMBOL_GPL(ata_port_freeze);
6493 EXPORT_SYMBOL_GPL(sata_async_notification);
6494 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6495 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6496 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6497 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6498 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6499 EXPORT_SYMBOL_GPL(ata_do_eh);
6500 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6501
6502 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6503 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6504 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6505 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6506 EXPORT_SYMBOL_GPL(ata_cable_sata);