1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
41 */
42
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <linux/async.h>
60 #include <linux/log2.h>
61 #include <linux/slab.h>
62 #include <scsi/scsi.h>
63 #include <scsi/scsi_cmnd.h>
64 #include <scsi/scsi_host.h>
65 #include <linux/libata.h>
66 #include <asm/byteorder.h>
67 #include <linux/cdrom.h>
68 #include <linux/ratelimit.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/platform_device.h>
71
72 #include "libata.h"
73 #include "libata-transport.h"
74
75 /* debounce timing parameters in msecs { interval, duration, timeout } */
76 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
77 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
78 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
79
80 const struct ata_port_operations ata_base_port_ops = {
81 .prereset = ata_std_prereset,
82 .postreset = ata_std_postreset,
83 .error_handler = ata_std_error_handler,
84 .sched_eh = ata_std_sched_eh,
85 .end_eh = ata_std_end_eh,
86 };
87
88 const struct ata_port_operations sata_port_ops = {
89 .inherits = &ata_base_port_ops,
90
91 .qc_defer = ata_std_qc_defer,
92 .hardreset = sata_std_hardreset,
93 };
94
95 static unsigned int ata_dev_init_params(struct ata_device *dev,
96 u16 heads, u16 sectors);
97 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
98 static void ata_dev_xfermask(struct ata_device *dev);
99 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
100
101 atomic_t ata_print_id = ATOMIC_INIT(0);
102
103 struct ata_force_param {
104 const char *name;
105 unsigned int cbl;
106 int spd_limit;
107 unsigned long xfer_mask;
108 unsigned int horkage_on;
109 unsigned int horkage_off;
110 unsigned int lflags;
111 };
112
113 struct ata_force_ent {
114 int port;
115 int device;
116 struct ata_force_param param;
117 };
118
119 static struct ata_force_ent *ata_force_tbl;
120 static int ata_force_tbl_size;
121
122 static char ata_force_param_buf[PAGE_SIZE] __initdata;
123 /* param_buf is thrown away after initialization, disallow read */
124 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
125 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
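/*
 * Example (syntax per Documentation/kernel-parameters.txt; the port and
 * device numbers here are made up for illustration): force a 40-wire
 * cable on port 1 and limit the first device on port 3 to UDMA/66:
 *
 *	libata.force=1:40c,3.00:udma4
 */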
126
127 static int atapi_enabled = 1;
128 module_param(atapi_enabled, int, 0444);
129 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
130
131 static int atapi_dmadir = 0;
132 module_param(atapi_dmadir, int, 0444);
133 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
134
135 int atapi_passthru16 = 1;
136 module_param(atapi_passthru16, int, 0444);
137 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
138
139 int libata_fua = 0;
140 module_param_named(fua, libata_fua, int, 0444);
141 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
142
143 static int ata_ignore_hpa;
144 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
145 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
146
147 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
148 module_param_named(dma, libata_dma_mask, int, 0444);
149 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
150
151 static int ata_probe_timeout;
152 module_param(ata_probe_timeout, int, 0444);
153 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
154
155 int libata_noacpi = 0;
156 module_param_named(noacpi, libata_noacpi, int, 0444);
157 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
158
159 int libata_allow_tpm = 0;
160 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
161 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
162
163 static int atapi_an;
164 module_param(atapi_an, int, 0444);
165 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
166
167 MODULE_AUTHOR("Jeff Garzik");
168 MODULE_DESCRIPTION("Library module for ATA devices");
169 MODULE_LICENSE("GPL");
170 MODULE_VERSION(DRV_VERSION);
171
172
173 static bool ata_sstatus_online(u32 sstatus)
174 {
175 return (sstatus & 0xf) == 0x3;
176 }
177
178 /**
179 * ata_link_next - link iteration helper
180 * @link: the previous link, NULL to start
181 * @ap: ATA port containing links to iterate
182 * @mode: iteration mode, one of ATA_LITER_*
183 *
184 * LOCKING:
185 * Host lock or EH context.
186 *
187 * RETURNS:
188 * Pointer to the next link.
189 */
190 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
191 enum ata_link_iter_mode mode)
192 {
193 BUG_ON(mode != ATA_LITER_EDGE &&
194 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
195
196 /* NULL link indicates start of iteration */
197 if (!link)
198 switch (mode) {
199 case ATA_LITER_EDGE:
200 case ATA_LITER_PMP_FIRST:
201 if (sata_pmp_attached(ap))
202 return ap->pmp_link;
203 /* fall through */
204 case ATA_LITER_HOST_FIRST:
205 return &ap->link;
206 }
207
208 /* we just iterated over the host link, what's next? */
209 if (link == &ap->link)
210 switch (mode) {
211 case ATA_LITER_HOST_FIRST:
212 if (sata_pmp_attached(ap))
213 return ap->pmp_link;
214 /* fall through */
215 case ATA_LITER_PMP_FIRST:
216 if (unlikely(ap->slave_link))
217 return ap->slave_link;
218 /* fall through */
219 case ATA_LITER_EDGE:
220 return NULL;
221 }
222
223 /* slave_link excludes PMP */
224 if (unlikely(link == ap->slave_link))
225 return NULL;
226
227 /* we were over a PMP link */
228 if (++link < ap->pmp_link + ap->nr_pmp_links)
229 return link;
230
231 if (mode == ATA_LITER_PMP_FIRST)
232 return &ap->link;
233
234 return NULL;
235 }
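/*
 * Usage sketch (not part of the original file): walk every link on a
 * port, host link first.  Drivers normally use the ata_for_each_link()
 * wrapper from <linux/libata.h>, which expands to this same loop.
 */
static void __maybe_unused ata_link_iter_example(struct ata_port *ap)
{
	struct ata_link *link = NULL;

	while ((link = ata_link_next(link, ap, ATA_LITER_HOST_FIRST)))
		ata_link_info(link, "visiting link, pmp %d\n", link->pmp);
}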
236
237 /**
238 * ata_dev_next - device iteration helper
239 * @dev: the previous device, NULL to start
240 * @link: ATA link containing devices to iterate
241 * @mode: iteration mode, one of ATA_DITER_*
242 *
243 * LOCKING:
244 * Host lock or EH context.
245 *
246 * RETURNS:
247 * Pointer to the next device.
248 */
249 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
250 enum ata_dev_iter_mode mode)
251 {
252 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
253 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
254
255 /* NULL dev indicates start of iteration */
256 if (!dev)
257 switch (mode) {
258 case ATA_DITER_ENABLED:
259 case ATA_DITER_ALL:
260 dev = link->device;
261 goto check;
262 case ATA_DITER_ENABLED_REVERSE:
263 case ATA_DITER_ALL_REVERSE:
264 dev = link->device + ata_link_max_devices(link) - 1;
265 goto check;
266 }
267
268 next:
269 /* move to the next one */
270 switch (mode) {
271 case ATA_DITER_ENABLED:
272 case ATA_DITER_ALL:
273 if (++dev < link->device + ata_link_max_devices(link))
274 goto check;
275 return NULL;
276 case ATA_DITER_ENABLED_REVERSE:
277 case ATA_DITER_ALL_REVERSE:
278 if (--dev >= link->device)
279 goto check;
280 return NULL;
281 }
282
283 check:
284 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
285 !ata_dev_enabled(dev))
286 goto next;
287 return dev;
288 }
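/*
 * Usage sketch (not part of the original file): visit only enabled
 * devices on a link.  The ata_for_each_dev() macro in <linux/libata.h>
 * wraps this pattern.
 */
static void __maybe_unused ata_dev_iter_example(struct ata_link *link)
{
	struct ata_device *dev = NULL;

	while ((dev = ata_dev_next(dev, link, ATA_DITER_ENABLED)))
		ata_dev_info(dev, "enabled device %u\n", dev->devno);
}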
289
290 /**
291 * ata_dev_phys_link - find physical link for a device
292 * @dev: ATA device to look up physical link for
293 *
294 * Look up physical link which @dev is attached to. Note that
295 * this is different from @dev->link only when @dev is on slave
296 * link. For all other cases, it's the same as @dev->link.
297 *
298 * LOCKING:
299 * Don't care.
300 *
301 * RETURNS:
302 * Pointer to the found physical link.
303 */
304 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
305 {
306 struct ata_port *ap = dev->link->ap;
307
308 if (!ap->slave_link)
309 return dev->link;
310 if (!dev->devno)
311 return &ap->link;
312 return ap->slave_link;
313 }
314
315 /**
316 * ata_force_cbl - force cable type according to libata.force
317 * @ap: ATA port of interest
318 *
319 * Force cable type according to libata.force and whine about it.
320 * The last entry which has matching port number is used, so it
321 * can be specified as part of device force parameters. For
322 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
323 * same effect.
324 *
325 * LOCKING:
326 * EH context.
327 */
328 void ata_force_cbl(struct ata_port *ap)
329 {
330 int i;
331
332 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
333 const struct ata_force_ent *fe = &ata_force_tbl[i];
334
335 if (fe->port != -1 && fe->port != ap->print_id)
336 continue;
337
338 if (fe->param.cbl == ATA_CBL_NONE)
339 continue;
340
341 ap->cbl = fe->param.cbl;
342 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
343 return;
344 }
345 }
346
347 /**
348 * ata_force_link_limits - force link limits according to libata.force
349 * @link: ATA link of interest
350 *
351 * Force link flags and SATA spd limit according to libata.force
352 * and whine about it. When only the port part is specified
353 * (e.g. 1:), the limit applies to all links connected to both
354 * the host link and all fan-out ports connected via PMP. If the
355 * device part is specified as 0 (e.g. 1.00:), it specifies the
356 * first fan-out link not the host link. Device number 15 always
357 * points to the host link whether PMP is attached or not. If the
358 * controller has slave link, device number 16 points to it.
359 *
360 * LOCKING:
361 * EH context.
362 */
363 static void ata_force_link_limits(struct ata_link *link)
364 {
365 bool did_spd = false;
366 int linkno = link->pmp;
367 int i;
368
369 if (ata_is_host_link(link))
370 linkno += 15;
371
372 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
373 const struct ata_force_ent *fe = &ata_force_tbl[i];
374
375 if (fe->port != -1 && fe->port != link->ap->print_id)
376 continue;
377
378 if (fe->device != -1 && fe->device != linkno)
379 continue;
380
381 /* only honor the first spd limit */
382 if (!did_spd && fe->param.spd_limit) {
383 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
384 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
385 fe->param.name);
386 did_spd = true;
387 }
388
389 /* let lflags stack */
390 if (fe->param.lflags) {
391 link->flags |= fe->param.lflags;
392 ata_link_notice(link,
393 "FORCE: link flag 0x%x forced -> 0x%x\n",
394 fe->param.lflags, link->flags);
395 }
396 }
397 }
398
399 /**
400 * ata_force_xfermask - force xfermask according to libata.force
401 * @dev: ATA device of interest
402 *
403 * Force xfer_mask according to libata.force and whine about it.
404 * For consistency with link selection, device number 15 selects
405 * the first device connected to the host link.
406 *
407 * LOCKING:
408 * EH context.
409 */
410 static void ata_force_xfermask(struct ata_device *dev)
411 {
412 int devno = dev->link->pmp + dev->devno;
413 int alt_devno = devno;
414 int i;
415
416 /* allow n.15/16 for devices attached to host port */
417 if (ata_is_host_link(dev->link))
418 alt_devno += 15;
419
420 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
421 const struct ata_force_ent *fe = &ata_force_tbl[i];
422 unsigned long pio_mask, mwdma_mask, udma_mask;
423
424 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
425 continue;
426
427 if (fe->device != -1 && fe->device != devno &&
428 fe->device != alt_devno)
429 continue;
430
431 if (!fe->param.xfer_mask)
432 continue;
433
434 ata_unpack_xfermask(fe->param.xfer_mask,
435 &pio_mask, &mwdma_mask, &udma_mask);
436 if (udma_mask)
437 dev->udma_mask = udma_mask;
438 else if (mwdma_mask) {
439 dev->udma_mask = 0;
440 dev->mwdma_mask = mwdma_mask;
441 } else {
442 dev->udma_mask = 0;
443 dev->mwdma_mask = 0;
444 dev->pio_mask = pio_mask;
445 }
446
447 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
448 fe->param.name);
449 return;
450 }
451 }
452
453 /**
454 * ata_force_horkage - force horkage according to libata.force
455 * @dev: ATA device of interest
456 *
457 * Force horkage according to libata.force and whine about it.
458 * For consistency with link selection, device number 15 selects
459 * the first device connected to the host link.
460 *
461 * LOCKING:
462 * EH context.
463 */
464 static void ata_force_horkage(struct ata_device *dev)
465 {
466 int devno = dev->link->pmp + dev->devno;
467 int alt_devno = devno;
468 int i;
469
470 /* allow n.15/16 for devices attached to host port */
471 if (ata_is_host_link(dev->link))
472 alt_devno += 15;
473
474 for (i = 0; i < ata_force_tbl_size; i++) {
475 const struct ata_force_ent *fe = &ata_force_tbl[i];
476
477 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
478 continue;
479
480 if (fe->device != -1 && fe->device != devno &&
481 fe->device != alt_devno)
482 continue;
483
484 if (!(~dev->horkage & fe->param.horkage_on) &&
485 !(dev->horkage & fe->param.horkage_off))
486 continue;
487
488 dev->horkage |= fe->param.horkage_on;
489 dev->horkage &= ~fe->param.horkage_off;
490
491 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
492 fe->param.name);
493 }
494 }
495
496 /**
497 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
498 * @opcode: SCSI opcode
499 *
500 * Determine ATAPI command type from @opcode.
501 *
502 * LOCKING:
503 * None.
504 *
505 * RETURNS:
506 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
507 */
508 int atapi_cmd_type(u8 opcode)
509 {
510 switch (opcode) {
511 case GPCMD_READ_10:
512 case GPCMD_READ_12:
513 return ATAPI_READ;
514
515 case GPCMD_WRITE_10:
516 case GPCMD_WRITE_12:
517 case GPCMD_WRITE_AND_VERIFY_10:
518 return ATAPI_WRITE;
519
520 case GPCMD_READ_CD:
521 case GPCMD_READ_CD_MSF:
522 return ATAPI_READ_CD;
523
524 case ATA_16:
525 case ATA_12:
526 if (atapi_passthru16)
527 return ATAPI_PASS_THRU;
528 /* fall through */
529 default:
530 return ATAPI_MISC;
531 }
532 }
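/*
 * Sketch (hypothetical helper, not part of the original file): how a
 * caller might branch on the classification, e.g. to treat all media
 * read/write opcodes alike.
 */
static bool __maybe_unused atapi_is_media_rw(u8 opcode)
{
	switch (atapi_cmd_type(opcode)) {
	case ATAPI_READ:
	case ATAPI_WRITE:
	case ATAPI_READ_CD:
		return true;
	default:
		return false;
	}
}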
533
534 /**
535 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
536 * @tf: Taskfile to convert
537 * @pmp: Port multiplier port
538 * @is_cmd: This FIS is for command
539 * @fis: Buffer into which data will be output
540 *
541 * Converts a standard ATA taskfile to a Serial ATA
542 * FIS structure (Register - Host to Device).
543 *
544 * LOCKING:
545 * Inherited from caller.
546 */
547 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
548 {
549 fis[0] = 0x27; /* Register - Host to Device FIS */
550 fis[1] = pmp & 0xf; /* Port multiplier number */
551 if (is_cmd)
552 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
553
554 fis[2] = tf->command;
555 fis[3] = tf->feature;
556
557 fis[4] = tf->lbal;
558 fis[5] = tf->lbam;
559 fis[6] = tf->lbah;
560 fis[7] = tf->device;
561
562 fis[8] = tf->hob_lbal;
563 fis[9] = tf->hob_lbam;
564 fis[10] = tf->hob_lbah;
565 fis[11] = tf->hob_feature;
566
567 fis[12] = tf->nsect;
568 fis[13] = tf->hob_nsect;
569 fis[14] = 0;
570 fis[15] = tf->ctl;
571
572 fis[16] = 0;
573 fis[17] = 0;
574 fis[18] = 0;
575 fis[19] = 0;
576 }
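/*
 * Sketch (not part of the original file): producing the 20-byte
 * Register - Host to Device FIS for a filled-in taskfile.  With PM port
 * 0 and is_cmd set, fis[0] ends up 0x27, fis[1] 0x80 (C bit set) and
 * fis[2] the ATA command.
 */
static void __maybe_unused ata_fis_example(const struct ata_taskfile *tf)
{
	u8 fis[20];

	ata_tf_to_fis(tf, 0 /* PM port */, 1 /* command FIS */, fis);
}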
577
578 /**
579 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
580 * @fis: Buffer from which data will be input
581 * @tf: Taskfile to output
582 *
583 * Converts a serial ATA FIS structure to a standard ATA taskfile.
584 *
585 * LOCKING:
586 * Inherited from caller.
587 */
588
589 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
590 {
591 tf->command = fis[2]; /* status */
592 tf->feature = fis[3]; /* error */
593
594 tf->lbal = fis[4];
595 tf->lbam = fis[5];
596 tf->lbah = fis[6];
597 tf->device = fis[7];
598
599 tf->hob_lbal = fis[8];
600 tf->hob_lbam = fis[9];
601 tf->hob_lbah = fis[10];
602
603 tf->nsect = fis[12];
604 tf->hob_nsect = fis[13];
605 }
606
607 static const u8 ata_rw_cmds[] = {
608 /* pio multi */
609 ATA_CMD_READ_MULTI,
610 ATA_CMD_WRITE_MULTI,
611 ATA_CMD_READ_MULTI_EXT,
612 ATA_CMD_WRITE_MULTI_EXT,
613 0,
614 0,
615 0,
616 ATA_CMD_WRITE_MULTI_FUA_EXT,
617 /* pio */
618 ATA_CMD_PIO_READ,
619 ATA_CMD_PIO_WRITE,
620 ATA_CMD_PIO_READ_EXT,
621 ATA_CMD_PIO_WRITE_EXT,
622 0,
623 0,
624 0,
625 0,
626 /* dma */
627 ATA_CMD_READ,
628 ATA_CMD_WRITE,
629 ATA_CMD_READ_EXT,
630 ATA_CMD_WRITE_EXT,
631 0,
632 0,
633 0,
634 ATA_CMD_WRITE_FUA_EXT
635 };
636
637 /**
638 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
639 * @tf: command to examine and configure
640 * @dev: device tf belongs to
641 *
642 * Examine the device configuration and tf->flags to calculate
643 * the proper read/write commands and protocol to use.
644 *
645 * LOCKING:
646 * caller.
647 */
648 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
649 {
650 u8 cmd;
651
652 int index, fua, lba48, write;
653
654 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
655 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
656 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
657
658 if (dev->flags & ATA_DFLAG_PIO) {
659 tf->protocol = ATA_PROT_PIO;
660 index = dev->multi_count ? 0 : 8;
661 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
662 /* Unable to use DMA due to host limitation */
663 tf->protocol = ATA_PROT_PIO;
664 index = dev->multi_count ? 0 : 8;
665 } else {
666 tf->protocol = ATA_PROT_DMA;
667 index = 16;
668 }
669
670 cmd = ata_rw_cmds[index + fua + lba48 + write];
671 if (cmd) {
672 tf->command = cmd;
673 return 0;
674 }
675 return -1;
676 }
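/*
 * Worked example of the table lookup above: an LBA48 FUA DMA write
 * yields index 16 (dma) + 4 (fua) + 2 (lba48) + 1 (write) = 23, and
 * ata_rw_cmds[23] == ATA_CMD_WRITE_FUA_EXT.  Zero entries (e.g. FUA
 * without LBA48) make ata_rwcmd_protocol() return -1.
 */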
677
678 /**
679 * ata_tf_read_block - Read block address from ATA taskfile
680 * @tf: ATA taskfile of interest
681 * @dev: ATA device @tf belongs to
682 *
683 * LOCKING:
684 * None.
685 *
686 * Read block address from @tf. This function can handle all
687 * three address formats - LBA, LBA48 and CHS. tf->protocol and
688 * flags select the address format to use.
689 *
690 * RETURNS:
691 * Block address read from @tf.
692 */
693 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
694 {
695 u64 block = 0;
696
697 if (tf->flags & ATA_TFLAG_LBA) {
698 if (tf->flags & ATA_TFLAG_LBA48) {
699 block |= (u64)tf->hob_lbah << 40;
700 block |= (u64)tf->hob_lbam << 32;
701 block |= (u64)tf->hob_lbal << 24;
702 } else
703 block |= (tf->device & 0xf) << 24;
704
705 block |= tf->lbah << 16;
706 block |= tf->lbam << 8;
707 block |= tf->lbal;
708 } else {
709 u32 cyl, head, sect;
710
711 cyl = tf->lbam | (tf->lbah << 8);
712 head = tf->device & 0xf;
713 sect = tf->lbal;
714
715 if (!sect) {
716 ata_dev_warn(dev,
717 "device reported invalid CHS sector 0\n");
718 sect = 1; /* oh well */
719 }
720
721 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
722 }
723
724 return block;
725 }
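/*
 * Worked CHS example for the formula above: with dev->heads == 16 and
 * dev->sectors == 63, cyl 2 / head 3 / sect 4 decodes to
 * (2 * 16 + 3) * 63 + 4 - 1 == 2208.
 */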
726
727 /**
728 * ata_build_rw_tf - Build ATA taskfile for given read/write request
729 * @tf: Target ATA taskfile
730 * @dev: ATA device @tf belongs to
731 * @block: Block address
732 * @n_block: Number of blocks
733 * @tf_flags: RW/FUA etc...
734 * @tag: tag
735 *
736 * LOCKING:
737 * None.
738 *
739 * Build ATA taskfile @tf for read/write request described by
740 * @block, @n_block, @tf_flags and @tag on @dev.
741 *
742 * RETURNS:
743 *
744 * 0 on success, -ERANGE if the request is too large for @dev,
745 * -EINVAL if the request is invalid.
746 */
747 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
748 u64 block, u32 n_block, unsigned int tf_flags,
749 unsigned int tag)
750 {
751 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
752 tf->flags |= tf_flags;
753
754 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
755 /* yay, NCQ */
756 if (!lba_48_ok(block, n_block))
757 return -ERANGE;
758
759 tf->protocol = ATA_PROT_NCQ;
760 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
761
762 if (tf->flags & ATA_TFLAG_WRITE)
763 tf->command = ATA_CMD_FPDMA_WRITE;
764 else
765 tf->command = ATA_CMD_FPDMA_READ;
766
767 tf->nsect = tag << 3;
768 tf->hob_feature = (n_block >> 8) & 0xff;
769 tf->feature = n_block & 0xff;
770
771 tf->hob_lbah = (block >> 40) & 0xff;
772 tf->hob_lbam = (block >> 32) & 0xff;
773 tf->hob_lbal = (block >> 24) & 0xff;
774 tf->lbah = (block >> 16) & 0xff;
775 tf->lbam = (block >> 8) & 0xff;
776 tf->lbal = block & 0xff;
777
778 tf->device = ATA_LBA;
779 if (tf->flags & ATA_TFLAG_FUA)
780 tf->device |= 1 << 7;
781 } else if (dev->flags & ATA_DFLAG_LBA) {
782 tf->flags |= ATA_TFLAG_LBA;
783
784 if (lba_28_ok(block, n_block)) {
785 /* use LBA28 */
786 tf->device |= (block >> 24) & 0xf;
787 } else if (lba_48_ok(block, n_block)) {
788 if (!(dev->flags & ATA_DFLAG_LBA48))
789 return -ERANGE;
790
791 /* use LBA48 */
792 tf->flags |= ATA_TFLAG_LBA48;
793
794 tf->hob_nsect = (n_block >> 8) & 0xff;
795
796 tf->hob_lbah = (block >> 40) & 0xff;
797 tf->hob_lbam = (block >> 32) & 0xff;
798 tf->hob_lbal = (block >> 24) & 0xff;
799 } else
800 /* request too large even for LBA48 */
801 return -ERANGE;
802
803 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
804 return -EINVAL;
805
806 tf->nsect = n_block & 0xff;
807
808 tf->lbah = (block >> 16) & 0xff;
809 tf->lbam = (block >> 8) & 0xff;
810 tf->lbal = block & 0xff;
811
812 tf->device |= ATA_LBA;
813 } else {
814 /* CHS */
815 u32 sect, head, cyl, track;
816
817 /* The request -may- be too large for CHS addressing. */
818 if (!lba_28_ok(block, n_block))
819 return -ERANGE;
820
821 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
822 return -EINVAL;
823
824 /* Convert LBA to CHS */
825 track = (u32)block / dev->sectors;
826 cyl = track / dev->heads;
827 head = track % dev->heads;
828 sect = (u32)block % dev->sectors + 1;
829
830 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
831 (u32)block, track, cyl, head, sect);
832
833 /* Check whether the converted CHS can fit.
834 Cylinder: 0-65535
835 Head: 0-15
836 Sector: 1-255 */
837 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
838 return -ERANGE;
839
840 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
841 tf->lbal = sect;
842 tf->lbam = cyl;
843 tf->lbah = cyl >> 8;
844 tf->device |= head;
845 }
846
847 return 0;
848 }
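/*
 * Usage sketch (block/count values invented for illustration): build a
 * taskfile for a 16-sector read at LBA 1024.  On an NCQ-capable device
 * with a normal tag this selects ATA_CMD_FPDMA_READ; otherwise
 * ata_rwcmd_protocol() picks the PIO/DMA opcode.
 */
static int __maybe_unused ata_rw_tf_example(struct ata_device *dev,
					    unsigned int tag)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	return ata_build_rw_tf(&tf, dev, 1024, 16, 0 /* read, no FUA */, tag);
}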
849
850 /**
851 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
852 * @pio_mask: pio_mask
853 * @mwdma_mask: mwdma_mask
854 * @udma_mask: udma_mask
855 *
856 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
857 * unsigned int xfer_mask.
858 *
859 * LOCKING:
860 * None.
861 *
862 * RETURNS:
863 * Packed xfer_mask.
864 */
865 unsigned long ata_pack_xfermask(unsigned long pio_mask,
866 unsigned long mwdma_mask,
867 unsigned long udma_mask)
868 {
869 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
870 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
871 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
872 }
873
874 /**
875 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
876 * @xfer_mask: xfer_mask to unpack
877 * @pio_mask: resulting pio_mask
878 * @mwdma_mask: resulting mwdma_mask
879 * @udma_mask: resulting udma_mask
880 *
881 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
882 * Any NULL destination masks will be ignored.
883 */
884 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
885 unsigned long *mwdma_mask, unsigned long *udma_mask)
886 {
887 if (pio_mask)
888 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
889 if (mwdma_mask)
890 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
891 if (udma_mask)
892 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
893 }
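/*
 * Round-trip sketch (mask values assumed): PIO0-4, MWDMA0-2 and
 * UDMA0-5 packed into one xfer_mask and unpacked again.  Each per-type
 * mask is a plain bitfield with bit N meaning mode N.
 */
static void __maybe_unused ata_xfermask_example(void)
{
	unsigned long xfer_mask, pio, mwdma, udma;

	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07 and udma == 0x3f again */
}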
894
895 static const struct ata_xfer_ent {
896 int shift, bits;
897 u8 base;
898 } ata_xfer_tbl[] = {
899 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
900 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
901 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
902 { -1, },
903 };
904
905 /**
906 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
907 * @xfer_mask: xfer_mask of interest
908 *
909 * Return matching XFER_* value for @xfer_mask. Only the highest
910 * bit of @xfer_mask is considered.
911 *
912 * LOCKING:
913 * None.
914 *
915 * RETURNS:
916 * Matching XFER_* value, 0xff if no match found.
917 */
918 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
919 {
920 int highbit = fls(xfer_mask) - 1;
921 const struct ata_xfer_ent *ent;
922
923 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
924 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
925 return ent->base + highbit - ent->shift;
926 return 0xff;
927 }
928
929 /**
930 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
931 * @xfer_mode: XFER_* of interest
932 *
933 * Return matching xfer_mask for @xfer_mode.
934 *
935 * LOCKING:
936 * None.
937 *
938 * RETURNS:
939 * Matching xfer_mask, 0 if no match found.
940 */
941 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
942 {
943 const struct ata_xfer_ent *ent;
944
945 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
946 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
947 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
948 & ~((1 << ent->shift) - 1);
949 return 0;
950 }
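/*
 * Example of the two directions: ata_xfer_mask2mode() maps a mask whose
 * highest set bit is UDMA5 to XFER_UDMA_5, while
 * ata_xfer_mode2mask(XFER_UDMA_5) returns every UDMA bit up to and
 * including mode 5, i.e. 0x3f << ATA_SHIFT_UDMA.
 */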
951
952 /**
953 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
954 * @xfer_mode: XFER_* of interest
955 *
956 * Return matching xfer_shift for @xfer_mode.
957 *
958 * LOCKING:
959 * None.
960 *
961 * RETURNS:
962 * Matching xfer_shift, -1 if no match found.
963 */
964 int ata_xfer_mode2shift(unsigned long xfer_mode)
965 {
966 const struct ata_xfer_ent *ent;
967
968 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
969 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
970 return ent->shift;
971 return -1;
972 }
973
974 /**
975 * ata_mode_string - convert xfer_mask to string
976 * @xfer_mask: mask of bits supported; only highest bit counts.
977 *
978 * Determine string which represents the highest speed
979 * (highest bit in @xfer_mask).
980 *
981 * LOCKING:
982 * None.
983 *
984 * RETURNS:
985 * Constant C string representing highest speed listed in
986 * @xfer_mask, or the constant C string "<n/a>".
987 */
988 const char *ata_mode_string(unsigned long xfer_mask)
989 {
990 static const char * const xfer_mode_str[] = {
991 "PIO0",
992 "PIO1",
993 "PIO2",
994 "PIO3",
995 "PIO4",
996 "PIO5",
997 "PIO6",
998 "MWDMA0",
999 "MWDMA1",
1000 "MWDMA2",
1001 "MWDMA3",
1002 "MWDMA4",
1003 "UDMA/16",
1004 "UDMA/25",
1005 "UDMA/33",
1006 "UDMA/44",
1007 "UDMA/66",
1008 "UDMA/100",
1009 "UDMA/133",
1010 "UDMA7",
1011 };
1012 int highbit;
1013
1014 highbit = fls(xfer_mask) - 1;
1015 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1016 return xfer_mode_str[highbit];
1017 return "<n/a>";
1018 }
1019
1020 const char *sata_spd_string(unsigned int spd)
1021 {
1022 static const char * const spd_str[] = {
1023 "1.5 Gbps",
1024 "3.0 Gbps",
1025 "6.0 Gbps",
1026 };
1027
1028 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1029 return "<unknown>";
1030 return spd_str[spd - 1];
1031 }
1032
1033 /**
1034 * ata_dev_classify - determine device type based on ATA-spec signature
1035 * @tf: ATA taskfile register set for device to be identified
1036 *
1037 * Determine from taskfile register contents whether a device is
1038 * ATA or ATAPI, as per "Signature and persistence" section
1039 * of ATA/PI spec (volume 1, sect 5.14).
1040 *
1041 * LOCKING:
1042 * None.
1043 *
1044 * RETURNS:
1045 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1046 * %ATA_DEV_SEMB or %ATA_DEV_UNKNOWN in the event of failure.
1047 */
1048 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1049 {
1050 /* Apple's open source Darwin code hints that some devices only
1051 * put a proper signature into the LBA mid/high registers,
1052 * So, we only check those. It's sufficient for uniqueness.
1053 *
1054 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1055 * signatures for ATA and ATAPI devices attached on SerialATA,
1056 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1057 * spec never mentioned using different signatures
1058 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1059 * Multiplier specification began to use 0x69/0x96 to identify
1060 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1061 * ATA/ATAPI-7 soon dropped its descriptions of 0x3c/0xc3 and
1062 * 0x69/0x96 and described them as reserved for
1063 * SerialATA.
1064 *
1065 * We follow the current spec and consider that 0x69/0x96
1066 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1067 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1068 * SEMB signature. This is worked around in
1069 * ata_dev_read_id().
1070 */
1071 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1072 DPRINTK("found ATA device by sig\n");
1073 return ATA_DEV_ATA;
1074 }
1075
1076 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1077 DPRINTK("found ATAPI device by sig\n");
1078 return ATA_DEV_ATAPI;
1079 }
1080
1081 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1082 DPRINTK("found PMP device by sig\n");
1083 return ATA_DEV_PMP;
1084 }
1085
1086 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1087 DPRINTK("found SEMB device by sig (could be ATA device)\n");
1088 return ATA_DEV_SEMB;
1089 }
1090
1091 DPRINTK("unknown device\n");
1092 return ATA_DEV_UNKNOWN;
1093 }
1094
1095 /**
1096 * ata_id_string - Convert IDENTIFY DEVICE page into string
1097 * @id: IDENTIFY DEVICE results we will examine
1098 * @s: string into which data is output
1099 * @ofs: offset into identify device page
1100 * @len: length of string to return. must be an even number.
1101 *
1102 * The strings in the IDENTIFY DEVICE page are broken up into
1103 * 16-bit chunks. Run through the string, and output each
1104 * 8-bit chunk linearly, regardless of platform.
1105 *
1106 * LOCKING:
1107 * caller.
1108 */
1109
1110 void ata_id_string(const u16 *id, unsigned char *s,
1111 unsigned int ofs, unsigned int len)
1112 {
1113 unsigned int c;
1114
1115 BUG_ON(len & 1);
1116
1117 while (len > 0) {
1118 c = id[ofs] >> 8;
1119 *s = c;
1120 s++;
1121
1122 c = id[ofs] & 0xff;
1123 *s = c;
1124 s++;
1125
1126 ofs++;
1127 len -= 2;
1128 }
1129 }
1130
1131 /**
1132 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1133 * @id: IDENTIFY DEVICE results we will examine
1134 * @s: string into which data is output
1135 * @ofs: offset into identify device page
1136 * @len: length of string to return. must be an odd number.
1137 *
1138 * This function is identical to ata_id_string except that it
1139 * trims trailing spaces and terminates the resulting string with
1140 * null. @len must be actual maximum length (even number) + 1.
1141 *
1142 * LOCKING:
1143 * caller.
1144 */
1145 void ata_id_c_string(const u16 *id, unsigned char *s,
1146 unsigned int ofs, unsigned int len)
1147 {
1148 unsigned char *p;
1149
1150 ata_id_string(id, s, ofs, len - 1);
1151
1152 p = s + strnlen(s, len - 1);
1153 while (p > s && p[-1] == ' ')
1154 p--;
1155 *p = '\0';
1156 }
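/*
 * Sketch (not part of the original file): pulling the model string out
 * of IDENTIFY data.  ATA_ID_PROD and ATA_ID_PROD_LEN come from
 * <linux/ata.h>; the +1 leaves room for the NUL that ata_id_c_string()
 * appends.
 */
static void __maybe_unused ata_id_model_example(const u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];

	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
}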
1157
1158 static u64 ata_id_n_sectors(const u16 *id)
1159 {
1160 if (ata_id_has_lba(id)) {
1161 if (ata_id_has_lba48(id))
1162 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1163 else
1164 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1165 } else {
1166 if (ata_id_current_chs_valid(id))
1167 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1168 id[ATA_ID_CUR_SECTORS];
1169 else
1170 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1171 id[ATA_ID_SECTORS];
1172 }
1173 }
1174
1175 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1176 {
1177 u64 sectors = 0;
1178
1179 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1180 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1181 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1182 sectors |= (tf->lbah & 0xff) << 16;
1183 sectors |= (tf->lbam & 0xff) << 8;
1184 sectors |= (tf->lbal & 0xff);
1185
1186 return sectors;
1187 }
1188
1189 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1190 {
1191 u64 sectors = 0;
1192
1193 sectors |= (tf->device & 0x0f) << 24;
1194 sectors |= (tf->lbah & 0xff) << 16;
1195 sectors |= (tf->lbam & 0xff) << 8;
1196 sectors |= (tf->lbal & 0xff);
1197
1198 return sectors;
1199 }
1200
1201 /**
1202 * ata_read_native_max_address - Read native max address
1203 * @dev: target device
1204 * @max_sectors: out parameter for the result native max address
1205 *
1206 * Perform an LBA48 or LBA28 native size query upon the device in
1207 * question.
1208 *
1209 * RETURNS:
1210 * 0 on success, -EACCES if command is aborted by the drive.
1211 * -EIO on other errors.
1212 */
1213 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1214 {
1215 unsigned int err_mask;
1216 struct ata_taskfile tf;
1217 int lba48 = ata_id_has_lba48(dev->id);
1218
1219 ata_tf_init(dev, &tf);
1220
1221 /* always clear all address registers */
1222 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1223
1224 if (lba48) {
1225 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1226 tf.flags |= ATA_TFLAG_LBA48;
1227 } else
1228 tf.command = ATA_CMD_READ_NATIVE_MAX;
1229
1230 tf.protocol = ATA_PROT_NODATA;
1231 tf.device |= ATA_LBA;
1232
1233 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1234 if (err_mask) {
1235 ata_dev_warn(dev,
1236 "failed to read native max address (err_mask=0x%x)\n",
1237 err_mask);
1238 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1239 return -EACCES;
1240 return -EIO;
1241 }
1242
1243 if (lba48)
1244 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1245 else
1246 *max_sectors = ata_tf_to_lba(&tf) + 1;
1247 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1248 (*max_sectors)--;
1249 return 0;
1250 }
1251
1252 /**
1253 * ata_set_max_sectors - Set max sectors
1254 * @dev: target device
1255 * @new_sectors: new max sectors value to set for the device
1256 *
1257 * Set max sectors of @dev to @new_sectors.
1258 *
1259 * RETURNS:
1260 * 0 on success, -EACCES if command is aborted or denied (due to
1261 * previous non-volatile SET_MAX) by the drive. -EIO on other
1262 * errors.
1263 */
1264 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1265 {
1266 unsigned int err_mask;
1267 struct ata_taskfile tf;
1268 int lba48 = ata_id_has_lba48(dev->id);
1269
1270 new_sectors--;
1271
1272 ata_tf_init(dev, &tf);
1273
1274 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1275
1276 if (lba48) {
1277 tf.command = ATA_CMD_SET_MAX_EXT;
1278 tf.flags |= ATA_TFLAG_LBA48;
1279
1280 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1281 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1282 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1283 } else {
1284 tf.command = ATA_CMD_SET_MAX;
1285
1286 tf.device |= (new_sectors >> 24) & 0xf;
1287 }
1288
1289 tf.protocol = ATA_PROT_NODATA;
1290 tf.device |= ATA_LBA;
1291
1292 tf.lbal = (new_sectors >> 0) & 0xff;
1293 tf.lbam = (new_sectors >> 8) & 0xff;
1294 tf.lbah = (new_sectors >> 16) & 0xff;
1295
1296 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1297 if (err_mask) {
1298 ata_dev_warn(dev,
1299 "failed to set max address (err_mask=0x%x)\n",
1300 err_mask);
1301 if (err_mask == AC_ERR_DEV &&
1302 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1303 return -EACCES;
1304 return -EIO;
1305 }
1306
1307 return 0;
1308 }
1309
1310 /**
1311 * ata_hpa_resize - Resize a device with an HPA set
1312 * @dev: Device to resize
1313 *
1314 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1315 * it if required to the full size of the media. The caller must check
1316 * the drive has the HPA feature set enabled.
1317 *
1318 * RETURNS:
1319 * 0 on success, -errno on failure.
1320 */
1321 static int ata_hpa_resize(struct ata_device *dev)
1322 {
1323 struct ata_eh_context *ehc = &dev->link->eh_context;
1324 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1325 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1326 u64 sectors = ata_id_n_sectors(dev->id);
1327 u64 native_sectors;
1328 int rc;
1329
1330 /* do we need to do it? */
1331 if (dev->class != ATA_DEV_ATA ||
1332 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1333 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1334 return 0;
1335
1336 /* read native max address */
1337 rc = ata_read_native_max_address(dev, &native_sectors);
1338 if (rc) {
1339 /* If device aborted the command or HPA isn't going to
1340 * be unlocked, skip HPA resizing.
1341 */
1342 if (rc == -EACCES || !unlock_hpa) {
1343 ata_dev_warn(dev,
1344 "HPA support seems broken, skipping HPA handling\n");
1345 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1346
1347 /* we can continue if device aborted the command */
1348 if (rc == -EACCES)
1349 rc = 0;
1350 }
1351
1352 return rc;
1353 }
1354 dev->n_native_sectors = native_sectors;
1355
1356 /* nothing to do? */
1357 if (native_sectors <= sectors || !unlock_hpa) {
1358 if (!print_info || native_sectors == sectors)
1359 return 0;
1360
1361 if (native_sectors > sectors)
1362 ata_dev_info(dev,
1363 "HPA detected: current %llu, native %llu\n",
1364 (unsigned long long)sectors,
1365 (unsigned long long)native_sectors);
1366 else if (native_sectors < sectors)
1367 ata_dev_warn(dev,
1368 "native sectors (%llu) is smaller than sectors (%llu)\n",
1369 (unsigned long long)native_sectors,
1370 (unsigned long long)sectors);
1371 return 0;
1372 }
1373
1374 /* let's unlock HPA */
1375 rc = ata_set_max_sectors(dev, native_sectors);
1376 if (rc == -EACCES) {
1377 /* if device aborted the command, skip HPA resizing */
1378 ata_dev_warn(dev,
1379 "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1380 (unsigned long long)sectors,
1381 (unsigned long long)native_sectors);
1382 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1383 return 0;
1384 } else if (rc)
1385 return rc;
1386
1387 /* re-read IDENTIFY data */
1388 rc = ata_dev_reread_id(dev, 0);
1389 if (rc) {
1390 ata_dev_err(dev,
1391 "failed to re-read IDENTIFY data after HPA resizing\n");
1392 return rc;
1393 }
1394
1395 if (print_info) {
1396 u64 new_sectors = ata_id_n_sectors(dev->id);
1397 ata_dev_info(dev,
1398 "HPA unlocked: %llu -> %llu, native %llu\n",
1399 (unsigned long long)sectors,
1400 (unsigned long long)new_sectors,
1401 (unsigned long long)native_sectors);
1402 }
1403
1404 return 0;
1405 }
1406
1407 /**
1408 * ata_dump_id - IDENTIFY DEVICE info debugging output
1409 * @id: IDENTIFY DEVICE page to dump
1410 *
1411 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1412 * page.
1413 *
1414 * LOCKING:
1415 * caller.
1416 */
1417
1418 static inline void ata_dump_id(const u16 *id)
1419 {
1420 DPRINTK("49==0x%04x "
1421 "53==0x%04x "
1422 "63==0x%04x "
1423 "64==0x%04x "
1424 "75==0x%04x \n",
1425 id[49],
1426 id[53],
1427 id[63],
1428 id[64],
1429 id[75]);
1430 DPRINTK("80==0x%04x "
1431 "81==0x%04x "
1432 "82==0x%04x "
1433 "83==0x%04x "
1434 "84==0x%04x \n",
1435 id[80],
1436 id[81],
1437 id[82],
1438 id[83],
1439 id[84]);
1440 DPRINTK("88==0x%04x "
1441 "93==0x%04x\n",
1442 id[88],
1443 id[93]);
1444 }
1445
1446 /**
1447 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1448 * @id: IDENTIFY data to compute xfer mask from
1449 *
1450 * Compute the xfermask for this device. This is not as trivial
1451 * as it seems if we must consider early devices correctly.
1452 *
1453 * FIXME: pre-IDE drive timing (do we care?).
1454 *
1455 * LOCKING:
1456 * None.
1457 *
1458 * RETURNS:
1459 * Computed xfermask
1460 */
1461 unsigned long ata_id_xfermask(const u16 *id)
1462 {
1463 unsigned long pio_mask, mwdma_mask, udma_mask;
1464
1465 /* Usual case. Word 53 indicates word 64 is valid */
1466 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1467 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1468 pio_mask <<= 3;
1469 pio_mask |= 0x7;
1470 } else {
1471 /* If word 64 isn't valid then Word 51 high byte holds
1472 * the PIO timing number for the maximum. Turn it into
1473 * a mask.
1474 */
1475 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1476 if (mode < 5) /* Valid PIO range */
1477 pio_mask = (2 << mode) - 1;
1478 else
1479 pio_mask = 1;
1480
1481 /* But wait.. there's more. Design your standards by
1482 * committee and you too can get a free iordy field to
1483 * process. However it's the speeds, not the modes, that
1484 * are supported... Note drivers using the timing API
1485 * will get this right anyway.
1486 */
1487 }
1488
1489 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1490
1491 if (ata_id_is_cfa(id)) {
1492 /*
1493 * Process compact flash extended modes
1494 */
1495 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1496 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1497
1498 if (pio)
1499 pio_mask |= (1 << 5);
1500 if (pio > 1)
1501 pio_mask |= (1 << 6);
1502 if (dma)
1503 mwdma_mask |= (1 << 3);
1504 if (dma > 1)
1505 mwdma_mask |= (1 << 4);
1506 }
1507
1508 udma_mask = 0;
1509 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1510 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1511
1512 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1513 }
1514
1515 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1516 {
1517 struct completion *waiting = qc->private_data;
1518
1519 complete(waiting);
1520 }
1521
1522 /**
1523 * ata_exec_internal_sg - execute libata internal command
1524 * @dev: Device to which the command is sent
1525 * @tf: Taskfile registers for the command and the result
1526 * @cdb: CDB for packet command
1527 * @dma_dir: Data transfer direction of the command
1528 * @sgl: sg list for the data buffer of the command
1529 * @n_elem: Number of sg entries
1530 * @timeout: Timeout in msecs (0 for default)
1531 *
1532 * Executes libata internal command with timeout. @tf contains
1533 * command on entry and result on return. Timeout and error
1534 * conditions are reported via return value. No recovery action
1535 * is taken after a command times out. It's the caller's duty to
1536 * clean up after timeout.
1537 *
1538 * LOCKING:
1539 * None. Should be called with kernel context, might sleep.
1540 *
1541 * RETURNS:
1542 * Zero on success, AC_ERR_* mask on failure
1543 */
1544 unsigned ata_exec_internal_sg(struct ata_device *dev,
1545 struct ata_taskfile *tf, const u8 *cdb,
1546 int dma_dir, struct scatterlist *sgl,
1547 unsigned int n_elem, unsigned long timeout)
1548 {
1549 struct ata_link *link = dev->link;
1550 struct ata_port *ap = link->ap;
1551 u8 command = tf->command;
1552 int auto_timeout = 0;
1553 struct ata_queued_cmd *qc;
1554 unsigned int tag, preempted_tag;
1555 u32 preempted_sactive, preempted_qc_active;
1556 int preempted_nr_active_links;
1557 DECLARE_COMPLETION_ONSTACK(wait);
1558 unsigned long flags;
1559 unsigned int err_mask;
1560 int rc;
1561
1562 spin_lock_irqsave(ap->lock, flags);
1563
1564 /* no internal command while frozen */
1565 if (ap->pflags & ATA_PFLAG_FROZEN) {
1566 spin_unlock_irqrestore(ap->lock, flags);
1567 return AC_ERR_SYSTEM;
1568 }
1569
1570 /* initialize internal qc */
1571
1572 /* XXX: Tag 0 is used for drivers with legacy EH as some
1573 * drivers choke if any other tag is given. This breaks
1574 * ata_tag_internal() test for those drivers. Don't use new
1575 * EH stuff without converting to it.
1576 */
1577 if (ap->ops->error_handler)
1578 tag = ATA_TAG_INTERNAL;
1579 else
1580 tag = 0;
1581
1582 if (test_and_set_bit(tag, &ap->qc_allocated))
1583 BUG();
1584 qc = __ata_qc_from_tag(ap, tag);
1585
1586 qc->tag = tag;
1587 qc->scsicmd = NULL;
1588 qc->ap = ap;
1589 qc->dev = dev;
1590 ata_qc_reinit(qc);
1591
1592 preempted_tag = link->active_tag;
1593 preempted_sactive = link->sactive;
1594 preempted_qc_active = ap->qc_active;
1595 preempted_nr_active_links = ap->nr_active_links;
1596 link->active_tag = ATA_TAG_POISON;
1597 link->sactive = 0;
1598 ap->qc_active = 0;
1599 ap->nr_active_links = 0;
1600
1601 /* prepare & issue qc */
1602 qc->tf = *tf;
1603 if (cdb)
1604 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1605 qc->flags |= ATA_QCFLAG_RESULT_TF;
1606 qc->dma_dir = dma_dir;
1607 if (dma_dir != DMA_NONE) {
1608 unsigned int i, buflen = 0;
1609 struct scatterlist *sg;
1610
1611 for_each_sg(sgl, sg, n_elem, i)
1612 buflen += sg->length;
1613
1614 ata_sg_init(qc, sgl, n_elem);
1615 qc->nbytes = buflen;
1616 }
1617
1618 qc->private_data = &wait;
1619 qc->complete_fn = ata_qc_complete_internal;
1620
1621 ata_qc_issue(qc);
1622
1623 spin_unlock_irqrestore(ap->lock, flags);
1624
1625 if (!timeout) {
1626 if (ata_probe_timeout)
1627 timeout = ata_probe_timeout * 1000;
1628 else {
1629 timeout = ata_internal_cmd_timeout(dev, command);
1630 auto_timeout = 1;
1631 }
1632 }
1633
1634 if (ap->ops->error_handler)
1635 ata_eh_release(ap);
1636
1637 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1638
1639 if (ap->ops->error_handler)
1640 ata_eh_acquire(ap);
1641
1642 ata_sff_flush_pio_task(ap);
1643
1644 if (!rc) {
1645 spin_lock_irqsave(ap->lock, flags);
1646
1647 /* We're racing with irq here. If we lose, the
1648 * following test prevents us from completing the qc
1649 * twice. If we win, the port is frozen and will be
1650 * cleaned up by ->post_internal_cmd().
1651 */
1652 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1653 qc->err_mask |= AC_ERR_TIMEOUT;
1654
1655 if (ap->ops->error_handler)
1656 ata_port_freeze(ap);
1657 else
1658 ata_qc_complete(qc);
1659
1660 if (ata_msg_warn(ap))
1661 ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1662 command);
1663 }
1664
1665 spin_unlock_irqrestore(ap->lock, flags);
1666 }
1667
1668 /* do post_internal_cmd */
1669 if (ap->ops->post_internal_cmd)
1670 ap->ops->post_internal_cmd(qc);
1671
1672 /* perform minimal error analysis */
1673 if (qc->flags & ATA_QCFLAG_FAILED) {
1674 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1675 qc->err_mask |= AC_ERR_DEV;
1676
1677 if (!qc->err_mask)
1678 qc->err_mask |= AC_ERR_OTHER;
1679
1680 if (qc->err_mask & ~AC_ERR_OTHER)
1681 qc->err_mask &= ~AC_ERR_OTHER;
1682 }
1683
1684 /* finish up */
1685 spin_lock_irqsave(ap->lock, flags);
1686
1687 *tf = qc->result_tf;
1688 err_mask = qc->err_mask;
1689
1690 ata_qc_free(qc);
1691 link->active_tag = preempted_tag;
1692 link->sactive = preempted_sactive;
1693 ap->qc_active = preempted_qc_active;
1694 ap->nr_active_links = preempted_nr_active_links;
1695
1696 spin_unlock_irqrestore(ap->lock, flags);
1697
1698 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1699 ata_internal_cmd_timed_out(dev, command);
1700
1701 return err_mask;
1702 }
1703
1704 /**
1705 * ata_exec_internal - execute libata internal command
1706 * @dev: Device to which the command is sent
1707 * @tf: Taskfile registers for the command and the result
1708 * @cdb: CDB for packet command
1709 * @dma_dir: Data transfer direction of the command
1710 * @buf: Data buffer of the command
1711 * @buflen: Length of data buffer
1712 * @timeout: Timeout in msecs (0 for default)
1713 *
1714 * Wrapper around ata_exec_internal_sg() which takes simple
1715 * buffer instead of sg list.
1716 *
1717 * LOCKING:
1718 * None. Should be called with kernel context, might sleep.
1719 *
1720 * RETURNS:
1721 * Zero on success, AC_ERR_* mask on failure
1722 */
1723 unsigned ata_exec_internal(struct ata_device *dev,
1724 struct ata_taskfile *tf, const u8 *cdb,
1725 int dma_dir, void *buf, unsigned int buflen,
1726 unsigned long timeout)
1727 {
1728 struct scatterlist *psg = NULL, sg;
1729 unsigned int n_elem = 0;
1730
1731 if (dma_dir != DMA_NONE) {
1732 WARN_ON(!buf);
1733 sg_init_one(&sg, buf, buflen);
1734 psg = &sg;
1735 n_elem++;
1736 }
1737
1738 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1739 timeout);
1740 }
1741
1742 /**
1743 * ata_do_simple_cmd - execute simple internal command
1744 * @dev: Device to which the command is sent
1745 * @cmd: Opcode to execute
1746 *
1747 * Execute a 'simple' command, that only consists of the opcode
1748 * 'cmd' itself, without filling any other registers
1749 *
1750 * LOCKING:
1751 * Kernel thread context (may sleep).
1752 *
1753 * RETURNS:
1754 * Zero on success, AC_ERR_* mask on failure
1755 */
1756 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1757 {
1758 struct ata_taskfile tf;
1759
1760 ata_tf_init(dev, &tf);
1761
1762 tf.command = cmd;
1763 tf.flags |= ATA_TFLAG_DEVICE;
1764 tf.protocol = ATA_PROT_NODATA;
1765
1766 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1767 }
1768
1769 /**
1770 * ata_pio_need_iordy - check if iordy needed
1771 * @adev: ATA device
1772 *
1773 * Check if the current speed of the device requires IORDY. Used
1774 * by various controllers for chip configuration.
1775 */
1776 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1777 {
1778 /* Don't set IORDY if we're preparing for reset. IORDY may
1779 * lead to controller lock up on certain controllers if the
1780 * port is not occupied. See bko#11703 for details.
1781 */
1782 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1783 return 0;
1784 /* Controller doesn't support IORDY. Probably a pointless
1785 * check as the caller should know this.
1786 */
1787 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1788 return 0;
1789 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1790 if (ata_id_is_cfa(adev->id)
1791 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1792 return 0;
1793 /* PIO3 and higher it is mandatory */
1794 if (adev->pio_mode > XFER_PIO_2)
1795 return 1;
1796 /* We turn it on when possible */
1797 if (ata_id_has_iordy(adev->id))
1798 return 1;
1799 return 0;
1800 }
1801
1802 /**
1803 * ata_pio_mask_no_iordy - Return the non IORDY mask
1804 * @adev: ATA device
1805 *
1806 * Compute the mask of PIO modes usable when IORDY is not in use,
1807 * based on the drive's advertised non-IORDY timing.
1808 */
1809 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1810 {
1811 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1812 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1813 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1814 /* Is the speed faster than the drive allows non-IORDY? */
1815 if (pio) {
1816 /* This is cycle times not frequency - watch the logic! */
1817 if (pio > 240) /* PIO2 is 240ns per cycle */
1818 return 3 << ATA_SHIFT_PIO;
1819 return 7 << ATA_SHIFT_PIO;
1820 }
1821 }
1822 return 3 << ATA_SHIFT_PIO;
1823 }
1824
1825 /**
1826 * ata_do_dev_read_id - default ID read method
1827 * @dev: device
1828 * @tf: proposed taskfile
1829 * @id: data buffer
1830 *
1831 * Issue the identify taskfile and hand back the buffer containing
1832 * identify data. For some RAID controllers and for pre-ATA devices
1833 * this function is wrapped or replaced by the driver
1834 */
1835 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1836 struct ata_taskfile *tf, u16 *id)
1837 {
1838 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1839 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1840 }
1841
1842 /**
1843 * ata_dev_read_id - Read ID data from the specified device
1844 * @dev: target device
1845 * @p_class: pointer to class of the target device (may be changed)
1846 * @flags: ATA_READID_* flags
1847 * @id: buffer to read IDENTIFY data into
1848 *
1849 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1850 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1851 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1852 * for pre-ATA4 drives.
1853 *
1854 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1855 * now we abort if we hit that case.
1856 *
1857 * LOCKING:
1858 * Kernel thread context (may sleep)
1859 *
1860 * RETURNS:
1861 * 0 on success, -errno otherwise.
1862 */
1863 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1864 unsigned int flags, u16 *id)
1865 {
1866 struct ata_port *ap = dev->link->ap;
1867 unsigned int class = *p_class;
1868 struct ata_taskfile tf;
1869 unsigned int err_mask = 0;
1870 const char *reason;
1871 bool is_semb = class == ATA_DEV_SEMB;
1872 int may_fallback = 1, tried_spinup = 0;
1873 int rc;
1874
1875 if (ata_msg_ctl(ap))
1876 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1877
1878 retry:
1879 ata_tf_init(dev, &tf);
1880
1881 switch (class) {
1882 case ATA_DEV_SEMB:
1883 class = ATA_DEV_ATA; /* some hard drives report SEMB sig; fall through */
1884 case ATA_DEV_ATA:
1885 tf.command = ATA_CMD_ID_ATA;
1886 break;
1887 case ATA_DEV_ATAPI:
1888 tf.command = ATA_CMD_ID_ATAPI;
1889 break;
1890 default:
1891 rc = -ENODEV;
1892 reason = "unsupported class";
1893 goto err_out;
1894 }
1895
1896 tf.protocol = ATA_PROT_PIO;
1897
1898 /* Some devices choke if TF registers contain garbage. Make
1899 * sure those are properly initialized.
1900 */
1901 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1902
1903 /* Device presence detection is unreliable on some
1904 * controllers. Always poll IDENTIFY if available.
1905 */
1906 tf.flags |= ATA_TFLAG_POLLING;
1907
1908 if (ap->ops->read_id)
1909 err_mask = ap->ops->read_id(dev, &tf, id);
1910 else
1911 err_mask = ata_do_dev_read_id(dev, &tf, id);
1912
1913 if (err_mask) {
1914 if (err_mask & AC_ERR_NODEV_HINT) {
1915 ata_dev_dbg(dev, "NODEV after polling detection\n");
1916 return -ENOENT;
1917 }
1918
1919 if (is_semb) {
1920 ata_dev_info(dev,
1921 "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1922 /* SEMB is not supported yet */
1923 *p_class = ATA_DEV_SEMB_UNSUP;
1924 return 0;
1925 }
1926
1927 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1928 /* Device or controller might have reported
1929 * the wrong device class. Give a shot at the
1930 * other IDENTIFY if the current one is
1931 * aborted by the device.
1932 */
1933 if (may_fallback) {
1934 may_fallback = 0;
1935
1936 if (class == ATA_DEV_ATA)
1937 class = ATA_DEV_ATAPI;
1938 else
1939 class = ATA_DEV_ATA;
1940 goto retry;
1941 }
1942
1943 /* Control reaches here iff the device aborted
1944 * both flavors of IDENTIFYs which happens
1945 * sometimes with phantom devices.
1946 */
1947 ata_dev_dbg(dev,
1948 "both IDENTIFYs aborted, assuming NODEV\n");
1949 return -ENOENT;
1950 }
1951
1952 rc = -EIO;
1953 reason = "I/O error";
1954 goto err_out;
1955 }
1956
1957 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1958 ata_dev_dbg(dev, "dumping IDENTIFY data, "
1959 "class=%d may_fallback=%d tried_spinup=%d\n",
1960 class, may_fallback, tried_spinup);
1961 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1962 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1963 }
1964
1965 /* Falling back doesn't make sense if ID data was read
1966 * successfully at least once.
1967 */
1968 may_fallback = 0;
1969
1970 swap_buf_le16(id, ATA_ID_WORDS);
1971
1972 /* sanity check */
1973 rc = -EINVAL;
1974 reason = "device reports invalid type";
1975
1976 if (class == ATA_DEV_ATA) {
1977 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1978 goto err_out;
1979 if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1980 ata_id_is_ata(id)) {
1981 ata_dev_dbg(dev,
1982 "host indicates ignore ATA devices, ignored\n");
1983 return -ENOENT;
1984 }
1985 } else {
1986 if (ata_id_is_ata(id))
1987 goto err_out;
1988 }
1989
1990 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1991 tried_spinup = 1;
1992 /*
1993 * Drive powered-up in standby mode, and requires a specific
1994 * SET_FEATURES spin-up subcommand before it will accept
1995 * anything other than the original IDENTIFY command.
1996 */
1997 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1998 if (err_mask && id[2] != 0x738c) {
1999 rc = -EIO;
2000 reason = "SPINUP failed";
2001 goto err_out;
2002 }
2003 /*
2004 * If the drive initially returned incomplete IDENTIFY info,
2005 * we now must reissue the IDENTIFY command.
2006 */
2007 if (id[2] == 0x37c8)
2008 goto retry;
2009 }
2010
2011 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2012 /*
2013 * The exact sequence expected by certain pre-ATA4 drives is:
2014 * SRST RESET
2015 * IDENTIFY (optional in early ATA)
2016 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2017 * anything else..
2018 * Some drives were very specific about that exact sequence.
2019 *
2020 * Note that ATA4 says lba is mandatory so the second check
2021 * should never trigger.
2022 */
2023 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2024 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2025 if (err_mask) {
2026 rc = -EIO;
2027 reason = "INIT_DEV_PARAMS failed";
2028 goto err_out;
2029 }
2030
2031 /* current CHS translation info (id[53-58]) might have
2032 * changed. Reread the identify device info.
2033 */
2034 flags &= ~ATA_READID_POSTRESET;
2035 goto retry;
2036 }
2037 }
2038
2039 *p_class = class;
2040
2041 return 0;
2042
2043 err_out:
2044 if (ata_msg_warn(ap))
2045 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2046 reason, err_mask);
2047 return rc;
2048 }
2049
2050 static int ata_do_link_spd_horkage(struct ata_device *dev)
2051 {
2052 struct ata_link *plink = ata_dev_phys_link(dev);
2053 u32 target, target_limit;
2054
2055 if (!sata_scr_valid(plink))
2056 return 0;
2057
2058 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2059 target = 1;
2060 else
2061 return 0;
2062
2063 target_limit = (1 << target) - 1;
2064
2065 /* if already on stricter limit, no need to push further */
2066 if (plink->sata_spd_limit <= target_limit)
2067 return 0;
2068
2069 plink->sata_spd_limit = target_limit;
2070
2071 /* Request another EH round by returning -EAGAIN if link is
2072 * going faster than the target speed. Forward progress is
2073 * guaranteed by setting sata_spd_limit to target_limit above.
2074 */
2075 if (plink->sata_spd > target) {
2076 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2077 sata_spd_string(target));
2078 return -EAGAIN;
2079 }
2080 return 0;
2081 }
2082
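/*
 * Editorial note: "knobble" is true when the port reports a SATA cable
 * but the device's IDENTIFY data says it is not a SATA device, i.e. a
 * PATA drive sitting behind a SATA-PATA bridge. Such devices get the
 * conservative bridge limits applied in ata_dev_configure().
 */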
2083 static inline u8 ata_dev_knobble(struct ata_device *dev)
2084 {
2085 struct ata_port *ap = dev->link->ap;
2086
2087 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2088 return 0;
2089
2090 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2091 }
2092
2093 static int ata_dev_config_ncq(struct ata_device *dev,
2094 char *desc, size_t desc_sz)
2095 {
2096 struct ata_port *ap = dev->link->ap;
2097 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2098 unsigned int err_mask;
2099 char *aa_desc = "";
2100
2101 if (!ata_id_has_ncq(dev->id)) {
2102 desc[0] = '\0';
2103 return 0;
2104 }
2105 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2106 snprintf(desc, desc_sz, "NCQ (not used)");
2107 return 0;
2108 }
2109 if (ap->flags & ATA_FLAG_NCQ) {
2110 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2111 dev->flags |= ATA_DFLAG_NCQ;
2112 }
2113
2114 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2115 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2116 ata_id_has_fpdma_aa(dev->id)) {
2117 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2118 SATA_FPDMA_AA);
2119 if (err_mask) {
2120 ata_dev_err(dev,
2121 "failed to enable AA (error_mask=0x%x)\n",
2122 err_mask);
2123 if (err_mask != AC_ERR_DEV) {
2124 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2125 return -EIO;
2126 }
2127 } else
2128 aa_desc = ", AA";
2129 }
2130
2131 if (hdepth >= ddepth)
2132 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2133 else
2134 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2135 ddepth, aa_desc);
2136 return 0;
2137 }
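
/*
 * Worked example (editorial): a disk advertising NCQ with
 * ata_id_queue_depth() == 32 on a host whose queue is capped at
 * ATA_MAX_QUEUE - 1 == 31 tags gives hdepth = 31 < ddepth = 32, so the
 * code above formats the familiar dmesg string "NCQ (depth 31/32)".
 */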
2138
2139 /**
2140 * ata_dev_configure - Configure the specified ATA/ATAPI device
2141 * @dev: Target device to configure
2142 *
2143 * Configure @dev according to @dev->id. Generic and low-level
2144 * driver specific fixups are also applied.
2145 *
2146 * LOCKING:
2147 * Kernel thread context (may sleep)
2148 *
2149 * RETURNS:
2150 * 0 on success, -errno otherwise
2151 */
2152 int ata_dev_configure(struct ata_device *dev)
2153 {
2154 struct ata_port *ap = dev->link->ap;
2155 struct ata_eh_context *ehc = &dev->link->eh_context;
2156 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2157 const u16 *id = dev->id;
2158 unsigned long xfer_mask;
2159 unsigned int err_mask;
2160 char revbuf[7]; /* XYZ-99\0 */
2161 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2162 char modelbuf[ATA_ID_PROD_LEN+1];
2163 int rc;
2164
2165 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2166 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2167 return 0;
2168 }
2169
2170 if (ata_msg_probe(ap))
2171 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2172
2173 /* set horkage */
2174 dev->horkage |= ata_dev_blacklisted(dev);
2175 ata_force_horkage(dev);
2176
2177 if (dev->horkage & ATA_HORKAGE_DISABLE) {
2178 ata_dev_info(dev, "unsupported device, disabling\n");
2179 ata_dev_disable(dev);
2180 return 0;
2181 }
2182
2183 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2184 dev->class == ATA_DEV_ATAPI) {
2185 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2186 atapi_enabled ? "not supported with this driver"
2187 : "disabled");
2188 ata_dev_disable(dev);
2189 return 0;
2190 }
2191
2192 rc = ata_do_link_spd_horkage(dev);
2193 if (rc)
2194 return rc;
2195
2196 /* let ACPI work its magic */
2197 rc = ata_acpi_on_devcfg(dev);
2198 if (rc)
2199 return rc;
2200
2201 /* massage HPA, do it early as it might change IDENTIFY data */
2202 rc = ata_hpa_resize(dev);
2203 if (rc)
2204 return rc;
2205
2206 /* print device capabilities */
2207 if (ata_msg_probe(ap))
2208 ata_dev_dbg(dev,
2209 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2210 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2211 __func__,
2212 id[49], id[82], id[83], id[84],
2213 id[85], id[86], id[87], id[88]);
2214
2215 /* initialize to-be-configured parameters */
2216 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2217 dev->max_sectors = 0;
2218 dev->cdb_len = 0;
2219 dev->n_sectors = 0;
2220 dev->cylinders = 0;
2221 dev->heads = 0;
2222 dev->sectors = 0;
2223 dev->multi_count = 0;
2224
2225 /*
2226 * common ATA, ATAPI feature tests
2227 */
2228
2229 /* find max transfer mode; for printk only */
2230 xfer_mask = ata_id_xfermask(id);
2231
2232 if (ata_msg_probe(ap))
2233 ata_dump_id(id);
2234
2235 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2236 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2237 sizeof(fwrevbuf));
2238
2239 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2240 sizeof(modelbuf));
2241
2242 /* ATA-specific feature tests */
2243 if (dev->class == ATA_DEV_ATA) {
2244 if (ata_id_is_cfa(id)) {
2245 /* CPRM may make this media unusable */
2246 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2247 ata_dev_warn(dev,
2248 "supports DRM functions and may not be fully accessible\n");
2249 snprintf(revbuf, 7, "CFA");
2250 } else {
2251 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2252 /* Warn the user if the device has TPM extensions */
2253 if (ata_id_has_tpm(id))
2254 ata_dev_warn(dev,
2255 "supports DRM functions and may not be fully accessible\n");
2256 }
2257
2258 dev->n_sectors = ata_id_n_sectors(id);
2259
2260 /* get current R/W Multiple count setting */
2261 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2262 unsigned int max = dev->id[47] & 0xff;
2263 unsigned int cnt = dev->id[59] & 0xff;
2264 /* only recognize/allow powers of two here */
2265 if (is_power_of_2(max) && is_power_of_2(cnt))
2266 if (cnt <= max)
2267 dev->multi_count = cnt;
2268 }
2269
2270 if (ata_id_has_lba(id)) {
2271 const char *lba_desc;
2272 char ncq_desc[24];
2273
2274 lba_desc = "LBA";
2275 dev->flags |= ATA_DFLAG_LBA;
2276 if (ata_id_has_lba48(id)) {
2277 dev->flags |= ATA_DFLAG_LBA48;
2278 lba_desc = "LBA48";
2279
2280 if (dev->n_sectors >= (1UL << 28) &&
2281 ata_id_has_flush_ext(id))
2282 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2283 }
2284
2285 /* config NCQ */
2286 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2287 if (rc)
2288 return rc;
2289
2290 /* print device info to dmesg */
2291 if (ata_msg_drv(ap) && print_info) {
2292 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2293 revbuf, modelbuf, fwrevbuf,
2294 ata_mode_string(xfer_mask));
2295 ata_dev_info(dev,
2296 "%llu sectors, multi %u: %s %s\n",
2297 (unsigned long long)dev->n_sectors,
2298 dev->multi_count, lba_desc, ncq_desc);
2299 }
2300 } else {
2301 /* CHS */
2302
2303 /* Default translation */
2304 dev->cylinders = id[1];
2305 dev->heads = id[3];
2306 dev->sectors = id[6];
2307
2308 if (ata_id_current_chs_valid(id)) {
2309 /* Current CHS translation is valid. */
2310 dev->cylinders = id[54];
2311 dev->heads = id[55];
2312 dev->sectors = id[56];
2313 }
2314
2315 /* print device info to dmesg */
2316 if (ata_msg_drv(ap) && print_info) {
2317 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2318 revbuf, modelbuf, fwrevbuf,
2319 ata_mode_string(xfer_mask));
2320 ata_dev_info(dev,
2321 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2322 (unsigned long long)dev->n_sectors,
2323 dev->multi_count, dev->cylinders,
2324 dev->heads, dev->sectors);
2325 }
2326 }
2327
2328 /* Check and mark DevSlp capability. Get DevSlp timing variables
2329 * from SATA Settings page of Identify Device Data Log.
2330 */
2331 if (ata_id_has_devslp(dev->id)) {
2332 u8 sata_setting[ATA_SECT_SIZE];
2333 int i, j;
2334
2335 dev->flags |= ATA_DFLAG_DEVSLP;
2336 err_mask = ata_read_log_page(dev,
2337 ATA_LOG_SATA_ID_DEV_DATA,
2338 ATA_LOG_SATA_SETTINGS,
2339 sata_setting,
2340 1);
2341 if (err_mask)
2342 ata_dev_dbg(dev,
2343 "failed to get Identify Device Data, Emask 0x%x\n",
2344 err_mask);
2345 else
2346 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2347 j = ATA_LOG_DEVSLP_OFFSET + i;
2348 dev->devslp_timing[i] = sata_setting[j];
2349 }
2350 }
2351
2352 dev->cdb_len = 16;
2353 }
2354
2355 /* ATAPI-specific feature tests */
2356 else if (dev->class == ATA_DEV_ATAPI) {
2357 const char *cdb_intr_string = "";
2358 const char *atapi_an_string = "";
2359 const char *dma_dir_string = "";
2360 u32 sntf;
2361
2362 rc = atapi_cdb_len(id);
2363 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2364 if (ata_msg_warn(ap))
2365 ata_dev_warn(dev, "unsupported CDB len\n");
2366 rc = -EINVAL;
2367 goto err_out_nosup;
2368 }
2369 dev->cdb_len = (unsigned int) rc;
2370
2371 /* Enable ATAPI AN if both the host and the device
2372 * support it. If a PMP is attached, SNTF is required
2373 * to enable ATAPI AN to discern between PHY status
2374 * changed notifications and ATAPI ANs.
2375 */
2376 if (atapi_an &&
2377 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2378 (!sata_pmp_attached(ap) ||
2379 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2380 /* issue SET feature command to turn this on */
2381 err_mask = ata_dev_set_feature(dev,
2382 SETFEATURES_SATA_ENABLE, SATA_AN);
2383 if (err_mask)
2384 ata_dev_err(dev,
2385 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2386 err_mask);
2387 else {
2388 dev->flags |= ATA_DFLAG_AN;
2389 atapi_an_string = ", ATAPI AN";
2390 }
2391 }
2392
2393 if (ata_id_cdb_intr(dev->id)) {
2394 dev->flags |= ATA_DFLAG_CDB_INTR;
2395 cdb_intr_string = ", CDB intr";
2396 }
2397
2398 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2399 dev->flags |= ATA_DFLAG_DMADIR;
2400 dma_dir_string = ", DMADIR";
2401 }
2402
2403 if (ata_id_has_da(dev->id)) {
2404 dev->flags |= ATA_DFLAG_DA;
2405 zpodd_init(dev);
2406 }
2407
2408 /* print device info to dmesg */
2409 if (ata_msg_drv(ap) && print_info)
2410 ata_dev_info(dev,
2411 "ATAPI: %s, %s, max %s%s%s%s\n",
2412 modelbuf, fwrevbuf,
2413 ata_mode_string(xfer_mask),
2414 cdb_intr_string, atapi_an_string,
2415 dma_dir_string);
2416 }
2417
2418 /* determine max_sectors */
2419 dev->max_sectors = ATA_MAX_SECTORS;
2420 if (dev->flags & ATA_DFLAG_LBA48)
2421 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2422
2423 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2424 200 sectors */
2425 if (ata_dev_knobble(dev)) {
2426 if (ata_msg_drv(ap) && print_info)
2427 ata_dev_info(dev, "applying bridge limits\n");
2428 dev->udma_mask &= ATA_UDMA5;
2429 dev->max_sectors = ATA_MAX_SECTORS;
2430 }
2431
2432 if ((dev->class == ATA_DEV_ATAPI) &&
2433 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2434 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2435 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2436 }
2437
2438 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2439 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2440 dev->max_sectors);
2441
2442 if (ap->ops->dev_config)
2443 ap->ops->dev_config(dev);
2444
2445 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2446 /* Let the user know. We don't want to disallow opens for
2447 rescue purposes, or in case the vendor is just a blithering
2448 idiot. Do this after the dev_config call as some controllers
2449 with buggy firmware may want to avoid reporting false device
2450 bugs */
2451
2452 if (print_info) {
2453 ata_dev_warn(dev,
2454 "Drive reports diagnostics failure. This may indicate a drive\n");
2455 ata_dev_warn(dev,
2456 "fault or invalid emulation. Contact drive vendor for information.\n");
2457 }
2458 }
2459
2460 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2461 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2462 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
2463 }
2464
2465 return 0;
2466
2467 err_out_nosup:
2468 if (ata_msg_probe(ap))
2469 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2470 return rc;
2471 }
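
/*
 * Illustrative output (editorial; values are device specific): with
 * ATA_EHI_PRINTINFO set, the messages above typically land in dmesg as
 *
 *	ata1.00: ATA-8: ST31000528AS, CC38, max UDMA/133
 *	ata1.00: 1953525168 sectors, multi 16: LBA48 NCQ (depth 31/32)
 */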
2472
2473 /**
2474 * ata_cable_40wire - return 40 wire cable type
2475 * @ap: port
2476 *
2477 * Helper method for drivers which want to hardwire 40 wire cable
2478 * detection.
2479 */
2480
2481 int ata_cable_40wire(struct ata_port *ap)
2482 {
2483 return ATA_CBL_PATA40;
2484 }
2485
2486 /**
2487 * ata_cable_80wire - return 80 wire cable type
2488 * @ap: port
2489 *
2490 * Helper method for drivers which want to hardwire 80 wire cable
2491 * detection.
2492 */
2493
2494 int ata_cable_80wire(struct ata_port *ap)
2495 {
2496 return ATA_CBL_PATA80;
2497 }
2498
2499 /**
2500 * ata_cable_unknown - return unknown PATA cable.
2501 * @ap: port
2502 *
2503 * Helper method for drivers which have no PATA cable detection.
2504 */
2505
2506 int ata_cable_unknown(struct ata_port *ap)
2507 {
2508 return ATA_CBL_PATA_UNK;
2509 }
2510
2511 /**
2512 * ata_cable_ignore - return ignored PATA cable.
2513 * @ap: port
2514 *
2515 * Helper method for drivers which don't use cable type to limit
2516 * transfer mode.
2517 */
2518 int ata_cable_ignore(struct ata_port *ap)
2519 {
2520 return ATA_CBL_PATA_IGN;
2521 }
2522
2523 /**
2524 * ata_cable_sata - return SATA cable type
2525 * @ap: port
2526 *
2527 * Helper method for drivers which have SATA cables
2528 */
2529
2530 int ata_cable_sata(struct ata_port *ap)
2531 {
2532 return ATA_CBL_SATA;
2533 }
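
/*
 * Usage sketch (editorial; "pata_foo" is a hypothetical driver): the
 * cable helpers above are meant to be plugged directly into an LLD's
 * port operations when the cable type is known up front, e.g.:
 *
 *	static struct ata_port_operations pata_foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};
 */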
2534
2535 /**
2536 * ata_bus_probe - Reset and probe ATA bus
2537 * @ap: Bus to probe
2538 *
2539 * Master ATA bus probing function. Initiates a hardware-dependent
2540 * bus reset, then attempts to identify any devices found on
2541 * the bus.
2542 *
2543 * LOCKING:
2544 * PCI/etc. bus probe sem.
2545 *
2546 * RETURNS:
2547 * Zero on success, negative errno otherwise.
2548 */
2549
2550 int ata_bus_probe(struct ata_port *ap)
2551 {
2552 unsigned int classes[ATA_MAX_DEVICES];
2553 int tries[ATA_MAX_DEVICES];
2554 int rc;
2555 struct ata_device *dev;
2556
2557 ata_for_each_dev(dev, &ap->link, ALL)
2558 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2559
2560 retry:
2561 ata_for_each_dev(dev, &ap->link, ALL) {
2562 /* If we issue an SRST then an ATA drive (not ATAPI)
2563 * may change configuration and be in PIO0 timing. If
2564 * we do a hard reset (or are coming from power on)
2565 * this is true for ATA or ATAPI. Until we've set a
2566 * suitable controller mode we should not touch the
2567 * bus as we may be talking too fast.
2568 */
2569 dev->pio_mode = XFER_PIO_0;
2570 dev->dma_mode = 0xff;
2571
2572 /* If the controller has a pio mode setup function
2573 * then use it to set the chipset to rights. Don't
2574 * touch the DMA setup as that will be dealt with when
2575 * configuring devices.
2576 */
2577 if (ap->ops->set_piomode)
2578 ap->ops->set_piomode(ap, dev);
2579 }
2580
2581 /* reset and determine device classes */
2582 ap->ops->phy_reset(ap);
2583
2584 ata_for_each_dev(dev, &ap->link, ALL) {
2585 if (dev->class != ATA_DEV_UNKNOWN)
2586 classes[dev->devno] = dev->class;
2587 else
2588 classes[dev->devno] = ATA_DEV_NONE;
2589
2590 dev->class = ATA_DEV_UNKNOWN;
2591 }
2592
2593 /* read IDENTIFY page and configure devices. We have to do the identify
2594 specific sequence bass-ackwards so that PDIAG- is released by
2595 the slave device */
2596
2597 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2598 if (tries[dev->devno])
2599 dev->class = classes[dev->devno];
2600
2601 if (!ata_dev_enabled(dev))
2602 continue;
2603
2604 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2605 dev->id);
2606 if (rc)
2607 goto fail;
2608 }
2609
2610 /* Now ask for the cable type as PDIAG- should have been released */
2611 if (ap->ops->cable_detect)
2612 ap->cbl = ap->ops->cable_detect(ap);
2613
2614 /* We may have SATA bridge glue hiding here irrespective of
2615 * the reported cable types and sensed types. When SATA
2616 * drives indicate we have a bridge, we don't know which end
2617 * of the link the bridge is which is a problem.
2618 */
2619 ata_for_each_dev(dev, &ap->link, ENABLED)
2620 if (ata_id_is_sata(dev->id))
2621 ap->cbl = ATA_CBL_SATA;
2622
2623 /* After the identify sequence we can now set up the devices. We do
2624 this in the normal order so that the user doesn't get confused */
2625
2626 ata_for_each_dev(dev, &ap->link, ENABLED) {
2627 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2628 rc = ata_dev_configure(dev);
2629 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2630 if (rc)
2631 goto fail;
2632 }
2633
2634 /* configure transfer mode */
2635 rc = ata_set_mode(&ap->link, &dev);
2636 if (rc)
2637 goto fail;
2638
2639 ata_for_each_dev(dev, &ap->link, ENABLED)
2640 return 0;
2641
2642 return -ENODEV;
2643
2644 fail:
2645 tries[dev->devno]--;
2646
2647 switch (rc) {
2648 case -EINVAL:
2649 /* eeek, something went very wrong, give up */
2650 tries[dev->devno] = 0;
2651 break;
2652
2653 case -ENODEV:
2654 /* give it just one more chance */
2655 tries[dev->devno] = min(tries[dev->devno], 1); /* fall through */
2656 case -EIO:
2657 if (tries[dev->devno] == 1) {
2658 /* This is the last chance, better to slow
2659 * down than lose it.
2660 */
2661 sata_down_spd_limit(&ap->link, 0);
2662 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2663 }
2664 }
2665
2666 if (!tries[dev->devno])
2667 ata_dev_disable(dev);
2668
2669 goto retry;
2670 }
2671
2672 /**
2673 * sata_print_link_status - Print SATA link status
2674 * @link: SATA link to printk link status about
2675 *
2676 * This function prints link speed and status of a SATA link.
2677 *
2678 * LOCKING:
2679 * None.
2680 */
2681 static void sata_print_link_status(struct ata_link *link)
2682 {
2683 u32 sstatus, scontrol, tmp;
2684
2685 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2686 return;
2687 sata_scr_read(link, SCR_CONTROL, &scontrol);
2688
2689 if (ata_phys_link_online(link)) {
2690 tmp = (sstatus >> 4) & 0xf;
2691 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2692 sata_spd_string(tmp), sstatus, scontrol);
2693 } else {
2694 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2695 sstatus, scontrol);
2696 }
2697 }
2698
2699 /**
2700 * ata_dev_pair - return other device on cable
2701 * @adev: device
2702 *
2703 * Obtain the other device on the same cable, or NULL if none
2704 * is present.
2705 */
2706
2707 struct ata_device *ata_dev_pair(struct ata_device *adev)
2708 {
2709 struct ata_link *link = adev->link;
2710 struct ata_device *pair = &link->device[1 - adev->devno];
2711 if (!ata_dev_enabled(pair))
2712 return NULL;
2713 return pair;
2714 }
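
/*
 * Usage sketch (editorial): PATA LLDs whose two devices share timing
 * registers commonly consult the companion device in their
 * set_piomode/set_dmamode methods:
 *
 *	struct ata_device *pair = ata_dev_pair(adev);
 *	if (pair)
 *		... derate the shared timings to the slower device ...
 */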
2715
2716 /**
2717 * sata_down_spd_limit - adjust SATA spd limit downward
2718 * @link: Link to adjust SATA spd limit for
2719 * @spd_limit: Additional limit
2720 *
2721 * Adjust SATA spd limit of @link downward. Note that this
2722 * function only adjusts the limit. The change must be applied
2723 * using sata_set_spd().
2724 *
2725 * If @spd_limit is non-zero, the speed is limited to a speed
2726 * equal to or lower than @spd_limit, if such a speed is supported. If
2727 * @spd_limit is slower than any supported speed, only the lowest
2728 * supported speed is allowed.
2729 *
2730 * LOCKING:
2731 * Inherited from caller.
2732 *
2733 * RETURNS:
2734 * 0 on success, negative errno on failure
2735 */
2736 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2737 {
2738 u32 sstatus, spd, mask;
2739 int rc, bit;
2740
2741 if (!sata_scr_valid(link))
2742 return -EOPNOTSUPP;
2743
2744 /* If SCR can be read, use it to determine the current SPD.
2745 * If not, use cached value in link->sata_spd.
2746 */
2747 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2748 if (rc == 0 && ata_sstatus_online(sstatus))
2749 spd = (sstatus >> 4) & 0xf;
2750 else
2751 spd = link->sata_spd;
2752
2753 mask = link->sata_spd_limit;
2754 if (mask <= 1)
2755 return -EINVAL;
2756
2757 /* unconditionally mask off the highest bit */
2758 bit = fls(mask) - 1;
2759 mask &= ~(1 << bit);
2760
2761 /* Mask off all speeds higher than or equal to the current
2762 * one. Force 1.5Gbps if current SPD is not available.
2763 */
2764 if (spd > 1)
2765 mask &= (1 << (spd - 1)) - 1;
2766 else
2767 mask &= 1;
2768
2769 /* were we already at the bottom? */
2770 if (!mask)
2771 return -EINVAL;
2772
2773 if (spd_limit) {
2774 if (mask & ((1 << spd_limit) - 1))
2775 mask &= (1 << spd_limit) - 1;
2776 else {
2777 bit = ffs(mask) - 1;
2778 mask = 1 << bit;
2779 }
2780 }
2781
2782 link->sata_spd_limit = mask;
2783
2784 ata_link_warn(link, "limiting SATA link speed to %s\n",
2785 sata_spd_string(fls(mask)));
2786
2787 return 0;
2788 }
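
/*
 * Worked example (editorial): with link->sata_spd_limit == 0x7 (1.5,
 * 3.0 and 6.0 Gbps all allowed) and the link running at spd == 3
 * (6.0 Gbps), dropping the highest bit leaves mask == 0x3, and masking
 * speeds >= the current one keeps mask == 0x3, so the new limit becomes
 * 3.0 Gbps (fls(0x3) == 2).
 */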
2789
2790 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2791 {
2792 struct ata_link *host_link = &link->ap->link;
2793 u32 limit, target, spd;
2794
2795 limit = link->sata_spd_limit;
2796
2797 /* Don't configure downstream link faster than upstream link.
2798 * It doesn't speed up anything and some PMPs choke on such
2799 * configuration.
2800 */
2801 if (!ata_is_host_link(link) && host_link->sata_spd)
2802 limit &= (1 << host_link->sata_spd) - 1;
2803
2804 if (limit == UINT_MAX)
2805 target = 0;
2806 else
2807 target = fls(limit);
2808
2809 spd = (*scontrol >> 4) & 0xf;
2810 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2811
2812 return spd != target;
2813 }
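
/*
 * Worked example (editorial): the SPD field occupies SControl bits 7:4.
 * With link->sata_spd_limit == 0x3 (up to 3.0 Gbps), target == fls(0x3)
 * == 2, so the SPD field is rewritten to 2 and the return value says
 * whether it actually changed.
 */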
2814
2815 /**
2816 * sata_set_spd_needed - is SATA spd configuration needed
2817 * @link: Link in question
2818 *
2819 * Test whether the spd limit in SControl matches
2820 * @link->sata_spd_limit. This function is used to determine
2821 * whether hardreset is necessary to apply SATA spd
2822 * configuration.
2823 *
2824 * LOCKING:
2825 * Inherited from caller.
2826 *
2827 * RETURNS:
2828 * 1 if SATA spd configuration is needed, 0 otherwise.
2829 */
2830 static int sata_set_spd_needed(struct ata_link *link)
2831 {
2832 u32 scontrol;
2833
2834 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2835 return 1;
2836
2837 return __sata_set_spd_needed(link, &scontrol);
2838 }
2839
2840 /**
2841 * sata_set_spd - set SATA spd according to spd limit
2842 * @link: Link to set SATA spd for
2843 *
2844 * Set SATA spd of @link according to sata_spd_limit.
2845 *
2846 * LOCKING:
2847 * Inherited from caller.
2848 *
2849 * RETURNS:
2850 * 0 if spd doesn't need to be changed, 1 if spd has been
2851 * changed. Negative errno if SCR registers are inaccessible.
2852 */
2853 int sata_set_spd(struct ata_link *link)
2854 {
2855 u32 scontrol;
2856 int rc;
2857
2858 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2859 return rc;
2860
2861 if (!__sata_set_spd_needed(link, &scontrol))
2862 return 0;
2863
2864 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2865 return rc;
2866
2867 return 1;
2868 }
2869
2870 /*
2871 * This mode timing computation functionality is ported over from
2872 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2873 */
2874 /*
2875 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2876 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2877 * for UDMA6, which is currently supported only by Maxtor drives.
2878 *
2879 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2880 */
2881
2882 static const struct ata_timing ata_timing[] = {
2883 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
2884 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
2885 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
2886 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
2887 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
2888 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
2889 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
2890 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
2891
2892 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
2893 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
2894 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
2895
2896 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
2897 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
2898 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
2899 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
2900 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
2901
2902 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2903 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
2904 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
2905 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
2906 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
2907 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
2908 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
2909 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
2910
2911 { 0xFF }
2912 };
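
/*
 * Reading the table (editorial): the XFER_PIO_4 row, for instance,
 * specifies a 120 ns total cycle; at one 16-bit word per cycle that is
 * the familiar 16.7 MB/s of PIO mode 4. UDMA rows carry only the final
 * "udma" column, e.g. 15 ns for XFER_UDMA_6 (UDMA/133).
 */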
2913
2914 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2915 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
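
/*
 * Editorial note: table entries are in nanoseconds while T/UT are bus
 * clock periods in picoseconds, hence the "* 1000" below. ENOUGH()
 * rounds up to whole clocks and EZ() additionally preserves 0 (field
 * unused); e.g. a 70 ns active time on a 33 MHz clock (T = 30000 ps)
 * quantizes to ENOUGH(70000, 30000) == 3 clocks.
 */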
2916
2917 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2918 {
2919 q->setup = EZ(t->setup * 1000, T);
2920 q->act8b = EZ(t->act8b * 1000, T);
2921 q->rec8b = EZ(t->rec8b * 1000, T);
2922 q->cyc8b = EZ(t->cyc8b * 1000, T);
2923 q->active = EZ(t->active * 1000, T);
2924 q->recover = EZ(t->recover * 1000, T);
2925 q->dmack_hold = EZ(t->dmack_hold * 1000, T);
2926 q->cycle = EZ(t->cycle * 1000, T);
2927 q->udma = EZ(t->udma * 1000, UT);
2928 }
2929
2930 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2931 struct ata_timing *m, unsigned int what)
2932 {
2933 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2934 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2935 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2936 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2937 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2938 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2939 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
2940 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2941 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2942 }
2943
2944 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2945 {
2946 const struct ata_timing *t = ata_timing;
2947
2948 while (xfer_mode > t->mode)
2949 t++;
2950
2951 if (xfer_mode == t->mode)
2952 return t;
2953
2954 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
2955 __func__, xfer_mode);
2956
2957 return NULL;
2958 }
2959
2960 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2961 struct ata_timing *t, int T, int UT)
2962 {
2963 const u16 *id = adev->id;
2964 const struct ata_timing *s;
2965 struct ata_timing p;
2966
2967 /*
2968 * Find the mode.
2969 */
2970
2971 if (!(s = ata_timing_find_mode(speed)))
2972 return -EINVAL;
2973
2974 memcpy(t, s, sizeof(*s));
2975
2976 /*
2977 * If the drive is an EIDE drive, it can tell us it needs extended
2978 * PIO/MW_DMA cycle timing.
2979 */
2980
2981 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2982 memset(&p, 0, sizeof(p));
2983
2984 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
2985 if (speed <= XFER_PIO_2)
2986 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
2987 else if ((speed <= XFER_PIO_4) ||
2988 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
2989 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
2990 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
2991 p.cycle = id[ATA_ID_EIDE_DMA_MIN];
2992
2993 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2994 }
2995
2996 /*
2997 * Convert the timing to bus clock counts.
2998 */
2999
3000 ata_timing_quantize(t, t, T, UT);
3001
3002 /*
3003 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3004 * S.M.A.R.T. and some other commands. We have to ensure that the
3005 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3006 */
3007
3008 if (speed > XFER_PIO_6) {
3009 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3010 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3011 }
3012
3013 /*
3014 * Lengthen active & recovery time so that cycle time is correct.
3015 */
3016
3017 if (t->act8b + t->rec8b < t->cyc8b) {
3018 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3019 t->rec8b = t->cyc8b - t->act8b;
3020 }
3021
3022 if (t->active + t->recover < t->cycle) {
3023 t->active += (t->cycle - (t->active + t->recover)) / 2;
3024 t->recover = t->cycle - t->active;
3025 }
3026
3027 /* In a few cases quantisation may produce enough error to
3028 leave t->cycle too low for the sum of active and recovery
3029 time; if so, we must correct this */
3030 if (t->active + t->recover > t->cycle)
3031 t->cycle = t->active + t->recover;
3032
3033 return 0;
3034 }
3035
3036 /**
3037 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3038 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3039 * @cycle: cycle duration in ns
3040 *
3041 * Return matching xfer mode for @cycle. The returned mode is of
3042 * the transfer type specified by @xfer_shift. If @cycle is too
3043 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3044 * than the fastest known mode, the fasted mode is returned.
3045 *
3046 * LOCKING:
3047 * None.
3048 *
3049 * RETURNS:
3050 * Matching xfer_mode, 0xff if no match found.
3051 */
3052 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3053 {
3054 u8 base_mode = 0xff, last_mode = 0xff;
3055 const struct ata_xfer_ent *ent;
3056 const struct ata_timing *t;
3057
3058 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3059 if (ent->shift == xfer_shift)
3060 base_mode = ent->base;
3061
3062 for (t = ata_timing_find_mode(base_mode);
3063 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3064 unsigned short this_cycle;
3065
3066 switch (xfer_shift) {
3067 case ATA_SHIFT_PIO:
3068 case ATA_SHIFT_MWDMA:
3069 this_cycle = t->cycle;
3070 break;
3071 case ATA_SHIFT_UDMA:
3072 this_cycle = t->udma;
3073 break;
3074 default:
3075 return 0xff;
3076 }
3077
3078 if (cycle > this_cycle)
3079 break;
3080
3081 last_mode = t->mode;
3082 }
3083
3084 return last_mode;
3085 }
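
/*
 * Worked example (editorial): ata_timing_cycle2mode(ATA_SHIFT_UDMA, 25)
 * walks the UDMA rows until a mode's cycle drops below 25 ns; UDMA_4
 * (30 ns) still qualifies but UDMA_5 (20 ns) does not, so XFER_UDMA_4
 * is returned.
 */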
3086
3087 /**
3088 * ata_down_xfermask_limit - adjust dev xfer masks downward
3089 * @dev: Device to adjust xfer masks
3090 * @sel: ATA_DNXFER_* selector
3091 *
3092 * Adjust xfer masks of @dev downward. Note that this function
3093 * does not apply the change. Invoking ata_set_mode() afterwards
3094 * will apply the limit.
3095 *
3096 * LOCKING:
3097 * Inherited from caller.
3098 *
3099 * RETURNS:
3100 * 0 on success, negative errno on failure
3101 */
3102 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3103 {
3104 char buf[32];
3105 unsigned long orig_mask, xfer_mask;
3106 unsigned long pio_mask, mwdma_mask, udma_mask;
3107 int quiet, highbit;
3108
3109 quiet = !!(sel & ATA_DNXFER_QUIET);
3110 sel &= ~ATA_DNXFER_QUIET;
3111
3112 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3113 dev->mwdma_mask,
3114 dev->udma_mask);
3115 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3116
3117 switch (sel) {
3118 case ATA_DNXFER_PIO:
3119 highbit = fls(pio_mask) - 1;
3120 pio_mask &= ~(1 << highbit);
3121 break;
3122
3123 case ATA_DNXFER_DMA:
3124 if (udma_mask) {
3125 highbit = fls(udma_mask) - 1;
3126 udma_mask &= ~(1 << highbit);
3127 if (!udma_mask)
3128 return -ENOENT;
3129 } else if (mwdma_mask) {
3130 highbit = fls(mwdma_mask) - 1;
3131 mwdma_mask &= ~(1 << highbit);
3132 if (!mwdma_mask)
3133 return -ENOENT;
3134 }
3135 break;
3136
3137 case ATA_DNXFER_40C:
3138 udma_mask &= ATA_UDMA_MASK_40C;
3139 break;
3140
3141 case ATA_DNXFER_FORCE_PIO0:
3142 pio_mask &= 1; /* fall through */
3143 case ATA_DNXFER_FORCE_PIO:
3144 mwdma_mask = 0;
3145 udma_mask = 0;
3146 break;
3147
3148 default:
3149 BUG();
3150 }
3151
3152 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3153
3154 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3155 return -ENOENT;
3156
3157 if (!quiet) {
3158 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3159 snprintf(buf, sizeof(buf), "%s:%s",
3160 ata_mode_string(xfer_mask),
3161 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3162 else
3163 snprintf(buf, sizeof(buf), "%s",
3164 ata_mode_string(xfer_mask));
3165
3166 ata_dev_warn(dev, "limiting speed to %s\n", buf);
3167 }
3168
3169 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3170 &dev->udma_mask);
3171
3172 return 0;
3173 }
3174
3175 static int ata_dev_set_mode(struct ata_device *dev)
3176 {
3177 struct ata_port *ap = dev->link->ap;
3178 struct ata_eh_context *ehc = &dev->link->eh_context;
3179 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3180 const char *dev_err_whine = "";
3181 int ign_dev_err = 0;
3182 unsigned int err_mask = 0;
3183 int rc;
3184
3185 dev->flags &= ~ATA_DFLAG_PIO;
3186 if (dev->xfer_shift == ATA_SHIFT_PIO)
3187 dev->flags |= ATA_DFLAG_PIO;
3188
3189 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3190 dev_err_whine = " (SET_XFERMODE skipped)";
3191 else {
3192 if (nosetxfer)
3193 ata_dev_warn(dev,
3194 "NOSETXFER but PATA detected - can't "
3195 "skip SETXFER, might malfunction\n");
3196 err_mask = ata_dev_set_xfermode(dev);
3197 }
3198
3199 if (err_mask & ~AC_ERR_DEV)
3200 goto fail;
3201
3202 /* revalidate */
3203 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3204 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3205 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3206 if (rc)
3207 return rc;
3208
3209 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3210 /* Old CFA may refuse this command, which is just fine */
3211 if (ata_id_is_cfa(dev->id))
3212 ign_dev_err = 1;
3213 /* Catch several broken garbage emulations plus some pre
3214 ATA devices */
3215 if (ata_id_major_version(dev->id) == 0 &&
3216 dev->pio_mode <= XFER_PIO_2)
3217 ign_dev_err = 1;
3218 /* Some very old devices and some bad newer ones fail
3219 any kind of SET_XFERMODE request but support PIO0-2
3220 timings and no IORDY */
3221 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3222 ign_dev_err = 1;
3223 }
3224 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3225 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3226 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3227 dev->dma_mode == XFER_MW_DMA_0 &&
3228 (dev->id[63] >> 8) & 1)
3229 ign_dev_err = 1;
3230
3231 /* if the device is actually configured correctly, ignore dev err */
3232 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3233 ign_dev_err = 1;
3234
3235 if (err_mask & AC_ERR_DEV) {
3236 if (!ign_dev_err)
3237 goto fail;
3238 else
3239 dev_err_whine = " (device error ignored)";
3240 }
3241
3242 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3243 dev->xfer_shift, (int)dev->xfer_mode);
3244
3245 ata_dev_info(dev, "configured for %s%s\n",
3246 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3247 dev_err_whine);
3248
3249 return 0;
3250
3251 fail:
3252 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3253 return -EIO;
3254 }
3255
3256 /**
3257 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3258 * @link: link on which timings will be programmed
3259 * @r_failed_dev: out parameter for failed device
3260 *
3261 * Standard implementation of the function used to tune and set
3262 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3263 * ata_dev_set_mode() fails, pointer to the failing device is
3264 * returned in @r_failed_dev.
3265 *
3266 * LOCKING:
3267 * PCI/etc. bus probe sem.
3268 *
3269 * RETURNS:
3270 * 0 on success, negative errno otherwise
3271 */
3272
3273 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3274 {
3275 struct ata_port *ap = link->ap;
3276 struct ata_device *dev;
3277 int rc = 0, used_dma = 0, found = 0;
3278
3279 /* step 1: calculate xfer_mask */
3280 ata_for_each_dev(dev, link, ENABLED) {
3281 unsigned long pio_mask, dma_mask;
3282 unsigned int mode_mask;
3283
3284 mode_mask = ATA_DMA_MASK_ATA;
3285 if (dev->class == ATA_DEV_ATAPI)
3286 mode_mask = ATA_DMA_MASK_ATAPI;
3287 else if (ata_id_is_cfa(dev->id))
3288 mode_mask = ATA_DMA_MASK_CFA;
3289
3290 ata_dev_xfermask(dev);
3291 ata_force_xfermask(dev);
3292
3293 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3294
3295 if (libata_dma_mask & mode_mask)
3296 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3297 dev->udma_mask);
3298 else
3299 dma_mask = 0;
3300
3301 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3302 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3303
3304 found = 1;
3305 if (ata_dma_enabled(dev))
3306 used_dma = 1;
3307 }
3308 if (!found)
3309 goto out;
3310
3311 /* step 2: always set host PIO timings */
3312 ata_for_each_dev(dev, link, ENABLED) {
3313 if (dev->pio_mode == 0xff) {
3314 ata_dev_warn(dev, "no PIO support\n");
3315 rc = -EINVAL;
3316 goto out;
3317 }
3318
3319 dev->xfer_mode = dev->pio_mode;
3320 dev->xfer_shift = ATA_SHIFT_PIO;
3321 if (ap->ops->set_piomode)
3322 ap->ops->set_piomode(ap, dev);
3323 }
3324
3325 /* step 3: set host DMA timings */
3326 ata_for_each_dev(dev, link, ENABLED) {
3327 if (!ata_dma_enabled(dev))
3328 continue;
3329
3330 dev->xfer_mode = dev->dma_mode;
3331 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3332 if (ap->ops->set_dmamode)
3333 ap->ops->set_dmamode(ap, dev);
3334 }
3335
3336 /* step 4: update devices' xfer mode */
3337 ata_for_each_dev(dev, link, ENABLED) {
3338 rc = ata_dev_set_mode(dev);
3339 if (rc)
3340 goto out;
3341 }
3342
3343 /* Record simplex status. If we selected DMA then the other
3344 * host channels are not permitted to do so.
3345 */
3346 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3347 ap->host->simplex_claimed = ap;
3348
3349 out:
3350 if (rc)
3351 *r_failed_dev = dev;
3352 return rc;
3353 }
3354
3355 /**
3356 * ata_wait_ready - wait for link to become ready
3357 * @link: link to be waited on
3358 * @deadline: deadline jiffies for the operation
3359 * @check_ready: callback to check link readiness
3360 *
3361 * Wait for @link to become ready. @check_ready should return
3362 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3363 * link doesn't seem to be occupied, other errno for other error
3364 * conditions.
3365 *
3366 * Transient -ENODEV conditions are allowed for
3367 * ATA_TMOUT_FF_WAIT.
3368 *
3369 * LOCKING:
3370 * EH context.
3371 *
3372 * RETURNS:
3373 * 0 if @link is ready before @deadline; otherwise, -errno.
3374 */
3375 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3376 int (*check_ready)(struct ata_link *link))
3377 {
3378 unsigned long start = jiffies;
3379 unsigned long nodev_deadline;
3380 int warned = 0;
3381
3382 /* choose which 0xff timeout to use, read comment in libata.h */
3383 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3384 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3385 else
3386 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3387
3388 /* Slave readiness can't be tested separately from master. On
3389 * M/S emulation configuration, this function should be called
3390 * only on the master and it will handle both master and slave.
3391 */
3392 WARN_ON(link == link->ap->slave_link);
3393
3394 if (time_after(nodev_deadline, deadline))
3395 nodev_deadline = deadline;
3396
3397 while (1) {
3398 unsigned long now = jiffies;
3399 int ready, tmp;
3400
3401 ready = tmp = check_ready(link);
3402 if (ready > 0)
3403 return 0;
3404
3405 /*
3406 * -ENODEV could be transient. Ignore -ENODEV if link
3407 * is online. Also, some SATA devices take a long
3408 * time to clear 0xff after reset. Wait for
3409 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3410 * offline.
3411 *
3412 * Note that some PATA controllers (pata_ali) explode
3413 * if status register is read more than once when
3414 * there's no device attached.
3415 */
3416 if (ready == -ENODEV) {
3417 if (ata_link_online(link))
3418 ready = 0;
3419 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3420 !ata_link_offline(link) &&
3421 time_before(now, nodev_deadline))
3422 ready = 0;
3423 }
3424
3425 if (ready)
3426 return ready;
3427 if (time_after(now, deadline))
3428 return -EBUSY;
3429
3430 if (!warned && time_after(now, start + 5 * HZ) &&
3431 (deadline - now > 3 * HZ)) {
3432 ata_link_warn(link,
3433 "link is slow to respond, please be patient "
3434 "(ready=%d)\n", tmp);
3435 warned = 1;
3436 }
3437
3438 ata_msleep(link->ap, 50);
3439 }
3440 }
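
/*
 * Callback sketch (editorial; foo_check_ready is hypothetical): for a
 * classic SFF port a suitable @check_ready can simply translate the
 * Status register through ata_check_ready(), which returns positive
 * when !BSY, 0 when busy and -ENODEV for an apparently empty (0xff)
 * port:
 *
 *	static int foo_check_ready(struct ata_link *link)
 *	{
 *		u8 status = ioread8(link->ap->ioaddr.status_addr);
 *
 *		return ata_check_ready(status);
 *	}
 */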
3441
3442 /**
3443 * ata_wait_after_reset - wait for link to become ready after reset
3444 * @link: link to be waited on
3445 * @deadline: deadline jiffies for the operation
3446 * @check_ready: callback to check link readiness
3447 *
3448 * Wait for @link to become ready after reset.
3449 *
3450 * LOCKING:
3451 * EH context.
3452 *
3453 * RETURNS:
3454 * 0 if @link is ready before @deadline; otherwise, -errno.
3455 */
3456 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3457 int (*check_ready)(struct ata_link *link))
3458 {
3459 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3460
3461 return ata_wait_ready(link, deadline, check_ready);
3462 }
3463
3464 /**
3465 * sata_link_debounce - debounce SATA phy status
3466 * @link: ATA link to debounce SATA phy status for
3467 * @params: timing parameters { interval, duration, timeout } in msec
3468 * @deadline: deadline jiffies for the operation
3469 *
3470 * Make sure SStatus of @link reaches stable state, determined by
3471 * holding the same value where DET is not 1 for @duration polled
3472 * every @interval, before @timeout. Timeout constraints the
3473 * beginning of the stable state. Because DET gets stuck at 1 on
3474 * some controllers after hot unplugging, this functions waits
3475 * until timeout then returns 0 if DET is stable at 1.
3476 *
3477 * @timeout is further limited by @deadline. The sooner of the
3478 * two is used.
3479 *
3480 * LOCKING:
3481 * Kernel thread context (may sleep)
3482 *
3483 * RETURNS:
3484 * 0 on success, -errno on failure.
3485 */
3486 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3487 unsigned long deadline)
3488 {
3489 unsigned long interval = params[0];
3490 unsigned long duration = params[1];
3491 unsigned long last_jiffies, t;
3492 u32 last, cur;
3493 int rc;
3494
3495 t = ata_deadline(jiffies, params[2]);
3496 if (time_before(t, deadline))
3497 deadline = t;
3498
3499 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3500 return rc;
3501 cur &= 0xf;
3502
3503 last = cur;
3504 last_jiffies = jiffies;
3505
3506 while (1) {
3507 ata_msleep(link->ap, interval);
3508 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3509 return rc;
3510 cur &= 0xf;
3511
3512 /* DET stable? */
3513 if (cur == last) {
3514 if (cur == 1 && time_before(jiffies, deadline))
3515 continue;
3516 if (time_after(jiffies,
3517 ata_deadline(last_jiffies, duration)))
3518 return 0;
3519 continue;
3520 }
3521
3522 /* unstable, start over */
3523 last = cur;
3524 last_jiffies = jiffies;
3525
3526 /* Check deadline. If debouncing failed, return
3527 * -EPIPE to tell upper layer to lower link speed.
3528 */
3529 if (time_after(jiffies, deadline))
3530 return -EPIPE;
3531 }
3532 }
3533
3534 /**
3535 * sata_link_resume - resume SATA link
3536 * @link: ATA link to resume SATA
3537 * @params: timing parameters { interval, duration, timeout } in msec
3538 * @deadline: deadline jiffies for the operation
3539 *
3540 * Resume SATA phy @link and debounce it.
3541 *
3542 * LOCKING:
3543 * Kernel thread context (may sleep)
3544 *
3545 * RETURNS:
3546 * 0 on success, -errno on failure.
3547 */
3548 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3549 unsigned long deadline)
3550 {
3551 int tries = ATA_LINK_RESUME_TRIES;
3552 u32 scontrol, serror;
3553 int rc;
3554
3555 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3556 return rc;
3557
3558 /*
3559 * Writes to SControl sometimes get ignored under certain
3560 * controllers (ata_piix SIDPR). Make sure DET actually is
3561 * cleared.
3562 */
3563 do {
3564 scontrol = (scontrol & 0x0f0) | 0x300;
3565 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3566 return rc;
3567 /*
3568 * Some PHYs react badly if SStatus is pounded
3569 * immediately after resuming. Delay 200ms before
3570 * debouncing.
3571 */
3572 ata_msleep(link->ap, 200);
3573
3574 /* is SControl restored correctly? */
3575 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3576 return rc;
3577 } while ((scontrol & 0xf0f) != 0x300 && --tries);
3578
3579 if ((scontrol & 0xf0f) != 0x300) {
3580 ata_link_warn(link, "failed to resume link (SControl %X)\n",
3581 scontrol);
3582 return 0;
3583 }
3584
3585 if (tries < ATA_LINK_RESUME_TRIES)
3586 ata_link_warn(link, "link resume succeeded after %d retries\n",
3587 ATA_LINK_RESUME_TRIES - tries);
3588
3589 if ((rc = sata_link_debounce(link, params, deadline)))
3590 return rc;
3591
3592 /* clear SError, some PHYs require this even for SRST to work */
3593 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3594 rc = sata_scr_write(link, SCR_ERROR, serror);
3595
3596 return rc != -EINVAL ? rc : 0;
3597 }
3598
3599 /**
3600 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3601 * @link: ATA link to manipulate SControl for
3602 * @policy: LPM policy to configure
3603 * @spm_wakeup: initiate LPM transition to active state
3604 *
3605 * Manipulate the IPM field of the SControl register of @link
3606 * according to @policy. If @policy is ATA_LPM_MAX_POWER and
3607 * @spm_wakeup is %true, the SPM field is manipulated to wake up
3608 * the link. This function also clears PHYRDY_CHG before
3609 * returning.
3610 *
3611 * LOCKING:
3612 * EH context.
3613 *
3614 * RETURNS:
3615 * 0 on success, -errno otherwise.
3616 */
3617 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3618 bool spm_wakeup)
3619 {
3620 struct ata_eh_context *ehc = &link->eh_context;
3621 bool woken_up = false;
3622 u32 scontrol;
3623 int rc;
3624
3625 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3626 if (rc)
3627 return rc;
3628
3629 switch (policy) {
3630 case ATA_LPM_MAX_POWER:
3631 /* disable all LPM transitions */
3632 scontrol |= (0x7 << 8);
3633 /* initiate transition to active state */
3634 if (spm_wakeup) {
3635 scontrol |= (0x4 << 12);
3636 woken_up = true;
3637 }
3638 break;
3639 case ATA_LPM_MED_POWER:
3640 /* allow LPM to PARTIAL */
3641 scontrol &= ~(0x1 << 8);
3642 scontrol |= (0x6 << 8);
3643 break;
3644 case ATA_LPM_MIN_POWER:
3645 if (ata_link_nr_enabled(link) > 0)
3646 /* no restrictions on LPM transitions */
3647 scontrol &= ~(0x7 << 8);
3648 else {
3649 /* empty port, power off */
3650 scontrol &= ~0xf;
3651 scontrol |= (0x1 << 2);
3652 }
3653 break;
3654 default:
3655 WARN_ON(1);
3656 }
3657
3658 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3659 if (rc)
3660 return rc;
3661
3662 /* give the link time to transit out of LPM state */
3663 if (woken_up)
3664 msleep(10);
3665
3666 /* clear PHYRDY_CHG from SError */
3667 ehc->i.serror &= ~SERR_PHYRDY_CHG;
3668 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3669 }
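
/*
 * Field map (editorial, from the SATA spec): in SControl, DET occupies
 * bits 3:0, SPD bits 7:4, IPM bits 11:8 and SPM bits 15:12. In IPM,
 * 0x1 forbids Partial, 0x2 forbids Slumber and 0x4 forbids DevSleep,
 * so ATA_LPM_MAX_POWER writes 0x7 (no transitions at all) and
 * ATA_LPM_MED_POWER leaves only Partial allowed, while
 * ATA_LPM_MIN_POWER on an empty port writes DET = 0x4 to take the PHY
 * offline.
 */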
3670
3671 /**
3672 * ata_std_prereset - prepare for reset
3673 * @link: ATA link to be reset
3674 * @deadline: deadline jiffies for the operation
3675 *
3676 * @link is about to be reset. Initialize it. Failure from
3677 * prereset makes libata abort whole reset sequence and give up
3678 * that port, so prereset should be best-effort. It does its
3679 * best to prepare for reset sequence but if things go wrong, it
3680 * should just whine, not fail.
3681 *
3682 * LOCKING:
3683 * Kernel thread context (may sleep)
3684 *
3685 * RETURNS:
3686 * 0 on success, -errno otherwise.
3687 */
3688 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3689 {
3690 struct ata_port *ap = link->ap;
3691 struct ata_eh_context *ehc = &link->eh_context;
3692 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3693 int rc;
3694
3695 /* if we're about to do hardreset, nothing more to do */
3696 if (ehc->i.action & ATA_EH_HARDRESET)
3697 return 0;
3698
3699 /* if SATA, resume link */
3700 if (ap->flags & ATA_FLAG_SATA) {
3701 rc = sata_link_resume(link, timing, deadline);
3702 /* whine about phy resume failure but proceed */
3703 if (rc && rc != -EOPNOTSUPP)
3704 ata_link_warn(link,
3705 "failed to resume link for reset (errno=%d)\n",
3706 rc);
3707 }
3708
3709 /* no point in trying softreset on offline link */
3710 if (ata_phys_link_offline(link))
3711 ehc->i.action &= ~ATA_EH_SOFTRESET;
3712
3713 return 0;
3714 }
3715
3716 /**
3717 * sata_link_hardreset - reset link via SATA phy reset
3718 * @link: link to reset
3719 * @timing: timing parameters { interval, duration, timeout } in msec
3720 * @deadline: deadline jiffies for the operation
3721 * @online: optional out parameter indicating link onlineness
3722 * @check_ready: optional callback to check link readiness
3723 *
3724 * SATA phy-reset @link using DET bits of SControl register.
3725 * After hardreset, link readiness is waited upon using
3726 * ata_wait_ready() if @check_ready is specified. LLDs are
3727 * allowed to not specify @check_ready and wait themselves after this
3728 * function returns. Device classification is the LLD's
3729 * responsibility.
3730 *
3731 * *@online is set to one iff reset succeeded and @link is online
3732 * after reset.
3733 *
3734 * LOCKING:
3735 * Kernel thread context (may sleep)
3736 *
3737 * RETURNS:
3738 * 0 on success, -errno otherwise.
3739 */
3740 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3741 unsigned long deadline,
3742 bool *online, int (*check_ready)(struct ata_link *))
3743 {
3744 u32 scontrol;
3745 int rc;
3746
3747 DPRINTK("ENTER\n");
3748
3749 if (online)
3750 *online = false;
3751
3752 if (sata_set_spd_needed(link)) {
3753 /* SATA spec says nothing about how to reconfigure
3754 * spd. To be on the safe side, turn off phy during
3755 * reconfiguration. This works for at least ICH7 AHCI
3756 * and Sil3124.
3757 */
3758 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3759 goto out;
3760
3761 scontrol = (scontrol & 0x0f0) | 0x304;
3762
3763 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3764 goto out;
3765
3766 sata_set_spd(link);
3767 }
3768
3769 /* issue phy wake/reset */
3770 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3771 goto out;
3772
3773 scontrol = (scontrol & 0x0f0) | 0x301;
3774
3775 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3776 goto out;
3777
3778 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3779 * 10.4.2 says at least 1 ms.
3780 */
3781 ata_msleep(link->ap, 1);
3782
3783 /* bring link back */
3784 rc = sata_link_resume(link, timing, deadline);
3785 if (rc)
3786 goto out;
3787 /* if link is offline nothing more to do */
3788 if (ata_phys_link_offline(link))
3789 goto out;
3790
3791 /* Link is online. From this point, -ENODEV too is an error. */
3792 if (online)
3793 *online = true;
3794
3795 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3796 /* If PMP is supported, we have to do follow-up SRST.
3797 * Some PMPs don't send D2H Reg FIS after hardreset if
3798 * the first port is empty. Wait only for
3799 * ATA_TMOUT_PMP_SRST_WAIT.
3800 */
3801 if (check_ready) {
3802 unsigned long pmp_deadline;
3803
3804 pmp_deadline = ata_deadline(jiffies,
3805 ATA_TMOUT_PMP_SRST_WAIT);
3806 if (time_after(pmp_deadline, deadline))
3807 pmp_deadline = deadline;
3808 ata_wait_ready(link, pmp_deadline, check_ready);
3809 }
3810 rc = -EAGAIN;
3811 goto out;
3812 }
3813
3814 rc = 0;
3815 if (check_ready)
3816 rc = ata_wait_ready(link, deadline, check_ready);
3817 out:
3818 if (rc && rc != -EAGAIN) {
3819 /* online is set iff link is online && reset succeeded */
3820 if (online)
3821 *online = false;
3822 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
3823 }
3824 DPRINTK("EXIT, rc=%d\n", rc);
3825 return rc;
3826 }
3827
3828 /**
3829 * sata_std_hardreset - COMRESET w/o waiting or classification
3830 * @link: link to reset
3831 * @class: resulting class of attached device
3832 * @deadline: deadline jiffies for the operation
3833 *
3834 * Standard SATA COMRESET w/o waiting or classification.
3835 *
3836 * LOCKING:
3837 * Kernel thread context (may sleep)
3838 *
3839 * RETURNS:
3840 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3841 */
3842 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3843 unsigned long deadline)
3844 {
3845 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3846 bool online;
3847 int rc;
3848
3849 /* do hardreset */
3850 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3851 return online ? -EAGAIN : rc;
3852 }
3853
3854 /**
3855 * ata_std_postreset - standard postreset callback
3856 * @link: the target ata_link
3857 * @classes: classes of attached devices
3858 *
3859 * This function is invoked after a successful reset. Note that
3860 * the device might have been reset more than once using
3861 * different reset methods before postreset is invoked.
3862 *
3863 * LOCKING:
3864 * Kernel thread context (may sleep)
3865 */
3866 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3867 {
3868 u32 serror;
3869
3870 DPRINTK("ENTER\n");
3871
3872 /* reset complete, clear SError */
3873 if (!sata_scr_read(link, SCR_ERROR, &serror))
3874 sata_scr_write(link, SCR_ERROR, serror);
3875
3876 /* print link status */
3877 sata_print_link_status(link);
3878
3879 DPRINTK("EXIT\n");
3880 }
3881
3882 /**
3883 * ata_dev_same_device - Determine whether new ID matches configured device
3884 * @dev: device to compare against
3885 * @new_class: class of the new device
3886 * @new_id: IDENTIFY page of the new device
3887 *
3888 * Compare @new_class and @new_id against @dev and determine
3889 * whether @dev is the device indicated by @new_class and
3890 * @new_id.
3891 *
3892 * LOCKING:
3893 * None.
3894 *
3895 * RETURNS:
3896 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3897 */
3898 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3899 const u16 *new_id)
3900 {
3901 const u16 *old_id = dev->id;
3902 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3903 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3904
3905 if (dev->class != new_class) {
3906 ata_dev_info(dev, "class mismatch %d != %d\n",
3907 dev->class, new_class);
3908 return 0;
3909 }
3910
3911 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3912 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3913 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3914 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3915
3916 if (strcmp(model[0], model[1])) {
3917 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3918 model[0], model[1]);
3919 return 0;
3920 }
3921
3922 if (strcmp(serial[0], serial[1])) {
3923 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3924 serial[0], serial[1]);
3925 return 0;
3926 }
3927
3928 return 1;
3929 }
3930
3931 /**
3932 * ata_dev_reread_id - Re-read IDENTIFY data
3933 * @dev: target ATA device
3934 * @readid_flags: read ID flags
3935 *
3936 * Re-read IDENTIFY page and make sure @dev is still attached to
3937 * the port.
3938 *
3939 * LOCKING:
3940 * Kernel thread context (may sleep)
3941 *
3942 * RETURNS:
3943 * 0 on success, negative errno otherwise
3944 */
3945 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3946 {
3947 unsigned int class = dev->class;
3948 u16 *id = (void *)dev->link->ap->sector_buf;
3949 int rc;
3950
3951 /* read ID data */
3952 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3953 if (rc)
3954 return rc;
3955
3956 /* is the device still there? */
3957 if (!ata_dev_same_device(dev, class, id))
3958 return -ENODEV;
3959
3960 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3961 return 0;
3962 }
3963
3964 /**
3965 * ata_dev_revalidate - Revalidate ATA device
3966 * @dev: device to revalidate
3967 * @new_class: new class code
3968 * @readid_flags: read ID flags
3969 *
3970 * Re-read IDENTIFY page, make sure @dev is still attached to the
3971 * port and reconfigure it according to the new IDENTIFY page.
3972 *
3973 * LOCKING:
3974 * Kernel thread context (may sleep)
3975 *
3976 * RETURNS:
3977 * 0 on success, negative errno otherwise
3978 */
3979 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3980 unsigned int readid_flags)
3981 {
3982 u64 n_sectors = dev->n_sectors;
3983 u64 n_native_sectors = dev->n_native_sectors;
3984 int rc;
3985
3986 if (!ata_dev_enabled(dev))
3987 return -ENODEV;
3988
3989 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3990 if (ata_class_enabled(new_class) &&
3991 new_class != ATA_DEV_ATA &&
3992 new_class != ATA_DEV_ATAPI &&
3993 new_class != ATA_DEV_SEMB) {
3994 ata_dev_info(dev, "class mismatch %u != %u\n",
3995 dev->class, new_class);
3996 rc = -ENODEV;
3997 goto fail;
3998 }
3999
4000 /* re-read ID */
4001 rc = ata_dev_reread_id(dev, readid_flags);
4002 if (rc)
4003 goto fail;
4004
4005 /* configure device according to the new ID */
4006 rc = ata_dev_configure(dev);
4007 if (rc)
4008 goto fail;
4009
4010 /* verify n_sectors hasn't changed */
4011 if (dev->class != ATA_DEV_ATA || !n_sectors ||
4012 dev->n_sectors == n_sectors)
4013 return 0;
4014
4015 /* n_sectors has changed */
4016 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4017 (unsigned long long)n_sectors,
4018 (unsigned long long)dev->n_sectors);
4019
4020 /*
4021 * Something could have caused HPA to be unlocked
4022 * involuntarily. If n_native_sectors hasn't changed and the
4023 * new size matches it, keep the device.
4024 */
4025 if (dev->n_native_sectors == n_native_sectors &&
4026 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4027 ata_dev_warn(dev,
4028 "new n_sectors matches native, probably "
4029 "late HPA unlock, n_sectors updated\n");
4030 /* use the larger n_sectors */
4031 return 0;
4032 }
4033
4034 /*
4035 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
4036 * unlocking HPA in those cases.
4037 *
4038 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4039 */
4040 if (dev->n_native_sectors == n_native_sectors &&
4041 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4042 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4043 ata_dev_warn(dev,
4044 "old n_sectors matches native, probably "
4045 "late HPA lock, will try to unlock HPA\n");
4046 /* try unlocking HPA */
4047 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4048 rc = -EIO;
4049 } else
4050 rc = -ENODEV;
4051
4052 /* restore original n_[native_]sectors and fail */
4053 dev->n_native_sectors = n_native_sectors;
4054 dev->n_sectors = n_sectors;
4055 fail:
4056 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4057 return rc;
4058 }
4059
4060 struct ata_blacklist_entry {
4061 const char *model_num;
4062 const char *model_rev;
4063 unsigned long horkage;
4064 };
4065
4066 static const struct ata_blacklist_entry ata_device_blacklist[] = {
4067 /* Devices with DMA related problems under Linux */
4068 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4069 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4070 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4071 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4072 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4073 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4074 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4075 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4076 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4077 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
4078 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4079 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4080 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4081 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4082 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4083 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
4084 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4085 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4086 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4087 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4088 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4089 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4090 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4091 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4092 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4093 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4094 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4095 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4096 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
4097 /* Odd clown on sil3726/4726 PMPs */
4098 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4099
4100 /* Weird ATAPI devices */
4101 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4102 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4103
4104 /* Devices we expect to fail diagnostics */
4105
4106 /* Devices where NCQ should be avoided */
4107 /* NCQ is slow */
4108 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4109 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4110 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4111 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4112 /* NCQ is broken */
4113 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4114 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4115 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4116 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4117 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
4118
4119 /* Seagate NCQ + FLUSH CACHE firmware bug */
4120 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4121 ATA_HORKAGE_FIRMWARE_WARN },
4122
4123 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4124 ATA_HORKAGE_FIRMWARE_WARN },
4125
4126 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4127 ATA_HORKAGE_FIRMWARE_WARN },
4128
4129 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4130 ATA_HORKAGE_FIRMWARE_WARN },
4131
4132 /* Blacklist entries taken from Silicon Image 3124/3132
4133 Windows driver .inf file - also several Linux problem reports */
4134 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4135 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4136 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4137
4138 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4139 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4140
4141 /* devices which puke on READ_NATIVE_MAX */
4142 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4143 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4144 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4145 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4146
4147 /* this one allows HPA unlocking but fails IOs on the area */
4148 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
4149
4150 /* Devices which report 1 sector over size HPA */
4151 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4152 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4153 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
4154
4155 /* Devices which get the IVB wrong */
4156 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4157 /* Maybe we should just blacklist TSSTcorp... */
4158 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
4159
4160 /* Devices that do not need bridging limits applied */
4161 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4162 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
4163
4164 /* Devices which aren't very happy with higher link speeds */
4165 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
4166 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
4167
4168 /*
4169 * Devices which choke on SETXFER. Applies only if both the
4170 * device and controller are SATA.
4171 */
4172 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
4173 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
4174 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
4175 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4176 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4177
4178 /* End Marker */
4179 { }
4180 };
4181
4182 /**
4183 * glob_match - match a text string against a glob-style pattern
4184 * @text: the string to be examined
4185 * @pattern: the glob-style pattern to be matched against
4186 *
4187 * Either/both of text and pattern can be empty strings.
4188 *
4189 * Match text against a glob-style pattern, with wildcards and simple sets:
4190 *
4191 * ? matches any single character.
4192 * * matches any run of characters.
4193 * [xyz] matches a single character from the set: x, y, or z.
4194 * [a-d] matches a single character from the range: a, b, c, or d.
4195 * [a-d0-9] matches a single character from either range.
4196 *
4197 * The special characters ?, [, -, or * can be matched using a set, e.g. [*].
4198 * Behaviour with malformed patterns is undefined, though generally reasonable.
4199 *
4200 * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx"
4201 *
4202 * This function uses one level of recursion per '*' in pattern.
4203 * Since it calls _nothing_ else, and has _no_ explicit local variables,
4204 * this will not cause stack problems for any reasonable use here.
4205 *
4206 * RETURNS:
4207 * 0 on match, 1 otherwise.
4208 */
4209 static int glob_match(const char *text, const char *pattern)
4210 {
4211 do {
4212 /* Match single character or a '?' wildcard */
4213 if (*text == *pattern || *pattern == '?') {
4214 if (!*pattern++)
4215 return 0; /* End of both strings: match */
4216 } else {
4217 /* Match single char against a '[' bracketed ']' pattern set */
4218 if (!*text || *pattern != '[')
4219 break; /* Not a pattern set */
4220 while (*++pattern && *pattern != ']' && *text != *pattern) {
4221 if (*pattern == '-' && *(pattern - 1) != '[')
4222 if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
4223 ++pattern;
4224 break;
4225 }
4226 }
4227 if (!*pattern || *pattern == ']')
4228 return 1; /* No match */
4229 while (*pattern && *pattern++ != ']');
4230 }
4231 } while (*++text && *pattern);
4232
4233 /* Match any run of chars against a '*' wildcard */
4234 if (*pattern == '*') {
4235 if (!*++pattern)
4236 return 0; /* Match: avoid recursion at end of pattern */
4237 /* Loop to handle additional pattern chars after the wildcard */
4238 while (*text) {
4239 if (glob_match(text, pattern) == 0)
4240 return 0; /* Remainder matched */
4241 ++text; /* Absorb (match) this char and try again */
4242 }
4243 }
4244 if (!*text && !*pattern)
4245 return 0; /* End of both strings: match */
4246 return 1; /* No match */
4247 }
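/*
 * Example (illustrative sketch of how the blacklist patterns above
 * behave; the model strings are made up):
 *
 *	glob_match("ST31500341AS", "ST3*")	== 0	('*' run match)
 *	glob_match("SD16", "SD1[5-9]")		== 0	(range match)
 *	glob_match("SD1x", "SD1?")		== 0	('?' single char)
 *	glob_match("ST31500341AS", "SD1[5-9]")	== 1	(no match)
 */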
4248
4249 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4250 {
4251 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4252 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4253 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4254
4255 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4256 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4257
4258 while (ad->model_num) {
4259 if (!glob_match(model_num, ad->model_num)) {
4260 if (ad->model_rev == NULL)
4261 return ad->horkage;
4262 if (!glob_match(model_rev, ad->model_rev))
4263 return ad->horkage;
4264 }
4265 ad++;
4266 }
4267 return 0;
4268 }
4269
4270 static int ata_dma_blacklisted(const struct ata_device *dev)
4271 {
4272 /* We don't support polling DMA.
4273 * Blacklist DMA for those ATAPI devices with CDB-intr (and use PIO)
4274 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4275 */
4276 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4277 (dev->flags & ATA_DFLAG_CDB_INTR))
4278 return 1;
4279 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4280 }
4281
4282 /**
4283 * ata_is_40wire - check drive side detection
4284 * @dev: device
4285 *
4286 * Perform drive side detection decoding, allowing for device vendors
4287 * who can't follow the documentation.
4288 */
4289
4290 static int ata_is_40wire(struct ata_device *dev)
4291 {
4292 if (dev->horkage & ATA_HORKAGE_IVB)
4293 return ata_drive_40wire_relaxed(dev->id);
4294 return ata_drive_40wire(dev->id);
4295 }
4296
4297 /**
4298 * cable_is_40wire - 40/80/SATA decider
4299 * @ap: port to consider
4300 *
4301 * This function encapsulates the policy for speed management
4302 * in one place. At the moment we don't cache the result but
4303 * there is a good case for setting ap->cbl to the result when
4304 * we are called with unknown cables (and figuring out if it
4305 * impacts hotplug at all).
4306 *
4307 * Return 1 if the cable appears to be 40 wire.
4308 */
4309
4310 static int cable_is_40wire(struct ata_port *ap)
4311 {
4312 struct ata_link *link;
4313 struct ata_device *dev;
4314
4315 /* If the controller thinks we are 40 wire, we are. */
4316 if (ap->cbl == ATA_CBL_PATA40)
4317 return 1;
4318
4319 /* If the controller thinks we are 80 wire, we are. */
4320 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4321 return 0;
4322
4323 /* If the system is known to use a short 40 wire cable (e.g. a
4324 * laptop), then we allow 80 wire modes even if the drive
4325 * isn't sure.
4326 */
4327 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4328 return 0;
4329
4330 /* If the controller doesn't know, we scan.
4331 *
4332 * Note: We look for all 40 wire detects at this point. Any
4333 * 80 wire detect is taken to be 80 wire cable because
4334 * - in many setups only the one drive (slave if present) will
4335 * give a valid detect
4336 * - if you have a non-detect-capable drive you don't want it
4337 * to colour the choice
4338 */
4339 ata_for_each_link(link, ap, EDGE) {
4340 ata_for_each_dev(dev, link, ENABLED) {
4341 if (!ata_is_40wire(dev))
4342 return 0;
4343 }
4344 }
4345 return 1;
4346 }
4347
4348 /**
4349 * ata_dev_xfermask - Compute supported xfermask of the given device
4350 * @dev: Device to compute xfermask for
4351 *
4352 * Compute supported xfermask of @dev and store it in
4353 * dev->*_mask. This function is responsible for applying all
4354 * known limits including host controller limits, device
4355 * blacklist, etc...
4356 *
4357 * LOCKING:
4358 * None.
4359 */
4360 static void ata_dev_xfermask(struct ata_device *dev)
4361 {
4362 struct ata_link *link = dev->link;
4363 struct ata_port *ap = link->ap;
4364 struct ata_host *host = ap->host;
4365 unsigned long xfer_mask;
4366
4367 /* controller modes available */
4368 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4369 ap->mwdma_mask, ap->udma_mask);
4370
4371 /* drive modes available */
4372 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4373 dev->mwdma_mask, dev->udma_mask);
4374 xfer_mask &= ata_id_xfermask(dev->id);
4375
4376 /*
4377 * CFA Advanced TrueIDE timings are not allowed on a shared
4378 * cable
4379 */
4380 if (ata_dev_pair(dev)) {
4381 /* No PIO5 or PIO6 */
4382 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4383 /* No MWDMA3 or MWDMA4 */
4384 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4385 }
4386
4387 if (ata_dma_blacklisted(dev)) {
4388 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4389 ata_dev_warn(dev,
4390 "device is on DMA blacklist, disabling DMA\n");
4391 }
4392
4393 if ((host->flags & ATA_HOST_SIMPLEX) &&
4394 host->simplex_claimed && host->simplex_claimed != ap) {
4395 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4396 ata_dev_warn(dev,
4397 "simplex DMA is claimed by other device, disabling DMA\n");
4398 }
4399
4400 if (ap->flags & ATA_FLAG_NO_IORDY)
4401 xfer_mask &= ata_pio_mask_no_iordy(dev);
4402
4403 if (ap->ops->mode_filter)
4404 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4405
4406 /* Apply cable rule here. Don't apply it early because when
4407 * we handle hot plug the cable type can itself change.
4408 * Check this last so that we know if the transfer rate was
4409 * solely limited by the cable.
4410 * Cables reported as unknown or 80 wire on the host side are
4411 * checked on the drive side as well. Cases where we know a 40 wire
4412 * cable is safely usable for 80 wire modes are not checked here.
4413 */
4414 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4415 /* UDMA/44 or higher would be available */
4416 if (cable_is_40wire(ap)) {
4417 ata_dev_warn(dev,
4418 "limited to UDMA/33 due to 40-wire cable\n");
4419 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4420 }
4421
4422 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4423 &dev->mwdma_mask, &dev->udma_mask);
4424 }
4425
4426 /**
4427 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4428 * @dev: Device to which command will be sent
4429 *
4430 * Issue SET FEATURES - XFER MODE command to device @dev
4431 * on the port it is attached to.
4432 *
4433 * LOCKING:
4434 * PCI/etc. bus probe sem.
4435 *
4436 * RETURNS:
4437 * 0 on success, AC_ERR_* mask otherwise.
4438 */
4439
4440 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4441 {
4442 struct ata_taskfile tf;
4443 unsigned int err_mask;
4444
4445 /* set up set-features taskfile */
4446 DPRINTK("set features - xfer mode\n");
4447
4448 /* Some controllers and ATAPI devices show flaky interrupt
4449 * behavior after setting xfer mode. Use polling instead.
4450 */
4451 ata_tf_init(dev, &tf);
4452 tf.command = ATA_CMD_SET_FEATURES;
4453 tf.feature = SETFEATURES_XFER;
4454 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4455 tf.protocol = ATA_PROT_NODATA;
4456 /* If we are using IORDY we must send the mode setting command */
4457 if (ata_pio_need_iordy(dev))
4458 tf.nsect = dev->xfer_mode;
4459 /* If the device has IORDY and the controller does not - turn it off */
4460 else if (ata_id_has_iordy(dev->id))
4461 tf.nsect = 0x01;
4462 else /* In the ancient relic department - skip all of this */
4463 return 0;
4464
4465 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4466
4467 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4468 return err_mask;
4469 }
4470
4471 /**
4472 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4473 * @dev: Device to which command will be sent
4474 * @enable: Whether to enable or disable the feature
4475 * @feature: feature to set, passed in the sector count field
4476 *
4477 * Issue SET FEATURES - SATA FEATURES command to device @dev
4478 * with the feature value placed in the sector count field.
4479 *
4480 * LOCKING:
4481 * PCI/etc. bus probe sem.
4482 *
4483 * RETURNS:
4484 * 0 on success, AC_ERR_* mask otherwise.
4485 */
4486 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4487 {
4488 struct ata_taskfile tf;
4489 unsigned int err_mask;
4490
4491 /* set up set-features taskfile */
4492 DPRINTK("set features - SATA features\n");
4493
4494 ata_tf_init(dev, &tf);
4495 tf.command = ATA_CMD_SET_FEATURES;
4496 tf.feature = enable;
4497 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4498 tf.protocol = ATA_PROT_NODATA;
4499 tf.nsect = feature;
4500
4501 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4502
4503 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4504 return err_mask;
4505 }
4506 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
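/*
 * Example (illustrative sketch): enabling device-initiated power
 * management with this helper. SETFEATURES_SATA_ENABLE and SATA_DIPM
 * come from <linux/ata.h>; error handling is reduced to a warning.
 *
 *	unsigned int err_mask;
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_DIPM);
 *	if (err_mask)
 *		ata_dev_warn(dev, "failed to enable DIPM, Emask 0x%x\n",
 *			     err_mask);
 */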
4507
4508 /**
4509 * ata_dev_init_params - Issue INIT DEV PARAMS command
4510 * @dev: Device to which command will be sent
4511 * @heads: Number of heads (taskfile parameter)
4512 * @sectors: Number of sectors (taskfile parameter)
4513 *
4514 * LOCKING:
4515 * Kernel thread context (may sleep)
4516 *
4517 * RETURNS:
4518 * 0 on success, AC_ERR_* mask otherwise.
4519 */
4520 static unsigned int ata_dev_init_params(struct ata_device *dev,
4521 u16 heads, u16 sectors)
4522 {
4523 struct ata_taskfile tf;
4524 unsigned int err_mask;
4525
4526 /* Number of sectors per track 1-255. Number of heads 1-16 */
4527 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4528 return AC_ERR_INVALID;
4529
4530 /* set up init dev params taskfile */
4531 DPRINTK("init dev params \n");
4532
4533 ata_tf_init(dev, &tf);
4534 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4535 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4536 tf.protocol = ATA_PROT_NODATA;
4537 tf.nsect = sectors;
4538 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4539
4540 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4541 /* A clean abort indicates an original (pre-standard) or just
4542 out-of-spec drive, and we should continue as we issue the setup
4543 based on the drive-reported working geometry */
4544 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4545 err_mask = 0;
4546
4547 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4548 return err_mask;
4549 }
4550
4551 /**
4552 * ata_sg_clean - Unmap DMA memory associated with command
4553 * @qc: Command containing DMA memory to be released
4554 *
4555 * Unmap all mapped DMA memory associated with this command.
4556 *
4557 * LOCKING:
4558 * spin_lock_irqsave(host lock)
4559 */
4560 void ata_sg_clean(struct ata_queued_cmd *qc)
4561 {
4562 struct ata_port *ap = qc->ap;
4563 struct scatterlist *sg = qc->sg;
4564 int dir = qc->dma_dir;
4565
4566 WARN_ON_ONCE(sg == NULL);
4567
4568 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4569
4570 if (qc->n_elem)
4571 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4572
4573 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4574 qc->sg = NULL;
4575 }
4576
4577 /**
4578 * atapi_check_dma - Check whether ATAPI DMA can be supported
4579 * @qc: Metadata associated with taskfile to check
4580 *
4581 * Allow low-level driver to filter ATA PACKET commands, returning
4582 * a status indicating whether or not it is OK to use DMA for the
4583 * supplied PACKET command.
4584 *
4585 * LOCKING:
4586 * spin_lock_irqsave(host lock)
4587 *
4588 * RETURNS: 0 when ATAPI DMA can be used
4589 * nonzero otherwise
4590 */
4591 int atapi_check_dma(struct ata_queued_cmd *qc)
4592 {
4593 struct ata_port *ap = qc->ap;
4594
4595 /* Don't allow DMA if it isn't a multiple of 16 bytes. Quite a
4596 * few ATAPI devices choke on such DMA requests.
4597 */
4598 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4599 unlikely(qc->nbytes & 15))
4600 return 1;
4601
4602 if (ap->ops->check_atapi_dma)
4603 return ap->ops->check_atapi_dma(qc);
4604
4605 return 0;
4606 }
4607
4608 /**
4609 * ata_std_qc_defer - Check whether a qc needs to be deferred
4610 * @qc: ATA command in question
4611 *
4612 * Non-NCQ commands cannot run with any other command, NCQ or
4613 * not. As the upper layer only knows the queue depth, we are
4614 * responsible for maintaining exclusion. This function checks
4615 * whether a new command @qc can be issued.
4616 *
4617 * LOCKING:
4618 * spin_lock_irqsave(host lock)
4619 *
4620 * RETURNS:
4621 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4622 */
4623 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4624 {
4625 struct ata_link *link = qc->dev->link;
4626
4627 if (qc->tf.protocol == ATA_PROT_NCQ) {
4628 if (!ata_tag_valid(link->active_tag))
4629 return 0;
4630 } else {
4631 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4632 return 0;
4633 }
4634
4635 return ATA_DEFER_LINK;
4636 }
4637
4638 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4639
4640 /**
4641 * ata_sg_init - Associate command with scatter-gather table.
4642 * @qc: Command to be associated
4643 * @sg: Scatter-gather table.
4644 * @n_elem: Number of elements in s/g table.
4645 *
4646 * Initialize the data-related elements of queued_cmd @qc
4647 * to point to a scatter-gather table @sg, containing @n_elem
4648 * elements.
4649 *
4650 * LOCKING:
4651 * spin_lock_irqsave(host lock)
4652 */
4653 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4654 unsigned int n_elem)
4655 {
4656 qc->sg = sg;
4657 qc->n_elem = n_elem;
4658 qc->cursg = qc->sg;
4659 }
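/*
 * Example (illustrative sketch): the SCSI translation path ties a qc
 * to the scatterlist of the originating SCSI command roughly like
 * this:
 *
 *	if (scsi_sg_count(scmd))
 *		ata_sg_init(qc, scsi_sglist(scmd), scsi_sg_count(scmd));
 */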
4660
4661 /**
4662 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4663 * @qc: Command with scatter-gather table to be mapped.
4664 *
4665 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4666 *
4667 * LOCKING:
4668 * spin_lock_irqsave(host lock)
4669 *
4670 * RETURNS:
4671 * Zero on success, negative on error.
4672 *
4673 */
4674 static int ata_sg_setup(struct ata_queued_cmd *qc)
4675 {
4676 struct ata_port *ap = qc->ap;
4677 unsigned int n_elem;
4678
4679 VPRINTK("ENTER, ata%u\n", ap->print_id);
4680
4681 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4682 if (n_elem < 1)
4683 return -1;
4684
4685 DPRINTK("%d sg elements mapped\n", n_elem);
4686 qc->orig_n_elem = qc->n_elem;
4687 qc->n_elem = n_elem;
4688 qc->flags |= ATA_QCFLAG_DMAMAP;
4689
4690 return 0;
4691 }
4692
4693 /**
4694 * swap_buf_le16 - swap halves of 16-bit words in place
4695 * @buf: Buffer to swap
4696 * @buf_words: Number of 16-bit words in buffer.
4697 *
4698 * Swap halves of 16-bit words if needed to convert from
4699 * little-endian byte order to native cpu byte order, or
4700 * vice-versa.
4701 *
4702 * LOCKING:
4703 * Inherited from caller.
4704 */
4705 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4706 {
4707 #ifdef __BIG_ENDIAN
4708 unsigned int i;
4709
4710 for (i = 0; i < buf_words; i++)
4711 buf[i] = le16_to_cpu(buf[i]);
4712 #endif /* __BIG_ENDIAN */
4713 }
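/*
 * Example (illustrative sketch): a PIO IDENTIFY path reads
 * little-endian words from the device and converts them to CPU
 * byte order before parsing (a no-op on little-endian machines).
 *
 *	u16 id[ATA_ID_WORDS];
 *
 *	... transfer IDENTIFY DEVICE data into id[] ...
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */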
4714
4715 /**
4716 * ata_qc_new - Request an available ATA command, for queueing
4717 * @ap: target port
4718 *
4719 * LOCKING:
4720 * None.
4721 */
4722
4723 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4724 {
4725 struct ata_queued_cmd *qc = NULL;
4726 unsigned int i;
4727
4728 /* no command while frozen */
4729 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4730 return NULL;
4731
4732 /* the last tag is reserved for the internal command. */
4733 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4734 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4735 qc = __ata_qc_from_tag(ap, i);
4736 break;
4737 }
4738
4739 if (qc)
4740 qc->tag = i;
4741
4742 return qc;
4743 }
4744
4745 /**
4746 * ata_qc_new_init - Request an available ATA command, and initialize it
4747 * @dev: Device from which we request an available command structure
4748 *
4749 * LOCKING:
4750 * None.
4751 */
4752
4753 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4754 {
4755 struct ata_port *ap = dev->link->ap;
4756 struct ata_queued_cmd *qc;
4757
4758 qc = ata_qc_new(ap);
4759 if (qc) {
4760 qc->scsicmd = NULL;
4761 qc->ap = ap;
4762 qc->dev = dev;
4763
4764 ata_qc_reinit(qc);
4765 }
4766
4767 return qc;
4768 }
4769
4770 /**
4771 * ata_qc_free - free unused ata_queued_cmd
4772 * @qc: Command to complete
4773 *
4774 * Designed to free an unused ata_queued_cmd object
4775 * in case something prevents using it.
4776 *
4777 * LOCKING:
4778 * spin_lock_irqsave(host lock)
4779 */
4780 void ata_qc_free(struct ata_queued_cmd *qc)
4781 {
4782 struct ata_port *ap;
4783 unsigned int tag;
4784
4785 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4786 ap = qc->ap;
4787
4788 qc->flags = 0;
4789 tag = qc->tag;
4790 if (likely(ata_tag_valid(tag))) {
4791 qc->tag = ATA_TAG_POISON;
4792 clear_bit(tag, &ap->qc_allocated);
4793 }
4794 }
4795
4796 void __ata_qc_complete(struct ata_queued_cmd *qc)
4797 {
4798 struct ata_port *ap;
4799 struct ata_link *link;
4800
4801 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4802 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4803 ap = qc->ap;
4804 link = qc->dev->link;
4805
4806 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4807 ata_sg_clean(qc);
4808
4809 /* command should be marked inactive atomically with qc completion */
4810 if (qc->tf.protocol == ATA_PROT_NCQ) {
4811 link->sactive &= ~(1 << qc->tag);
4812 if (!link->sactive)
4813 ap->nr_active_links--;
4814 } else {
4815 link->active_tag = ATA_TAG_POISON;
4816 ap->nr_active_links--;
4817 }
4818
4819 /* clear exclusive status */
4820 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4821 ap->excl_link == link))
4822 ap->excl_link = NULL;
4823
4824 /* atapi: mark qc as inactive to prevent the interrupt handler
4825 * from completing the command twice later, before the error handler
4826 * is called. (when rc != 0 and atapi request sense is needed)
4827 */
4828 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4829 ap->qc_active &= ~(1 << qc->tag);
4830
4831 /* call completion callback */
4832 qc->complete_fn(qc);
4833 }
4834
4835 static void fill_result_tf(struct ata_queued_cmd *qc)
4836 {
4837 struct ata_port *ap = qc->ap;
4838
4839 qc->result_tf.flags = qc->tf.flags;
4840 ap->ops->qc_fill_rtf(qc);
4841 }
4842
4843 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4844 {
4845 struct ata_device *dev = qc->dev;
4846
4847 if (ata_is_nodata(qc->tf.protocol))
4848 return;
4849
4850 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4851 return;
4852
4853 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4854 }
4855
4856 /**
4857 * ata_qc_complete - Complete an active ATA command
4858 * @qc: Command to complete
4859 *
4860 * Indicate to the mid and upper layers that an ATA command has
4861 * completed, with either an ok or not-ok status.
4862 *
4863 * Refrain from calling this function multiple times when
4864 * successfully completing multiple NCQ commands.
4865 * ata_qc_complete_multiple() should be used instead, which will
4866 * properly update IRQ expect state.
4867 *
4868 * LOCKING:
4869 * spin_lock_irqsave(host lock)
4870 */
4871 void ata_qc_complete(struct ata_queued_cmd *qc)
4872 {
4873 struct ata_port *ap = qc->ap;
4874
4875 /* XXX: New EH and old EH use different mechanisms to
4876 * synchronize EH with regular execution path.
4877 *
4878 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4879 * Normal execution path is responsible for not accessing a
4880 * failed qc. libata core enforces the rule by returning NULL
4881 * from ata_qc_from_tag() for failed qcs.
4882 *
4883 * Old EH depends on ata_qc_complete() nullifying completion
4884 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4885 * not synchronize with interrupt handler. Only PIO task is
4886 * taken care of.
4887 */
4888 if (ap->ops->error_handler) {
4889 struct ata_device *dev = qc->dev;
4890 struct ata_eh_info *ehi = &dev->link->eh_info;
4891
4892 if (unlikely(qc->err_mask))
4893 qc->flags |= ATA_QCFLAG_FAILED;
4894
4895 /*
4896 * Finish internal commands without any further processing
4897 * and always with the result TF filled.
4898 */
4899 if (unlikely(ata_tag_internal(qc->tag))) {
4900 fill_result_tf(qc);
4901 __ata_qc_complete(qc);
4902 return;
4903 }
4904
4905 /*
4906 * Non-internal qc has failed. Fill the result TF and
4907 * summon EH.
4908 */
4909 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4910 fill_result_tf(qc);
4911 ata_qc_schedule_eh(qc);
4912 return;
4913 }
4914
4915 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4916
4917 /* read result TF if requested */
4918 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4919 fill_result_tf(qc);
4920
4921 /* Some commands need post-processing after successful
4922 * completion.
4923 */
4924 switch (qc->tf.command) {
4925 case ATA_CMD_SET_FEATURES:
4926 if (qc->tf.feature != SETFEATURES_WC_ON &&
4927 qc->tf.feature != SETFEATURES_WC_OFF)
4928 break;
4929 /* fall through */
4930 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4931 case ATA_CMD_SET_MULTI: /* multi_count changed */
4932 /* revalidate device */
4933 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4934 ata_port_schedule_eh(ap);
4935 break;
4936
4937 case ATA_CMD_SLEEP:
4938 dev->flags |= ATA_DFLAG_SLEEPING;
4939 break;
4940 }
4941
4942 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4943 ata_verify_xfer(qc);
4944
4945 __ata_qc_complete(qc);
4946 } else {
4947 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4948 return;
4949
4950 /* read result TF if failed or requested */
4951 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4952 fill_result_tf(qc);
4953
4954 __ata_qc_complete(qc);
4955 }
4956 }
4957
4958 /**
4959 * ata_qc_complete_multiple - Complete multiple qcs successfully
4960 * @ap: port in question
4961 * @qc_active: new qc_active mask
4962 *
4963 * Complete in-flight commands. This function is meant to be
4964 * called from a low-level driver's interrupt routine to complete
4965 * requests normally. ap->qc_active and @qc_active are compared
4966 * and commands are completed accordingly.
4967 *
4968 * Always use this function when completing multiple NCQ commands
4969 * from IRQ handlers instead of calling ata_qc_complete()
4970 * multiple times to keep IRQ expect status properly in sync.
4971 *
4972 * LOCKING:
4973 * spin_lock_irqsave(host lock)
4974 *
4975 * RETURNS:
4976 * Number of completed commands on success, -errno otherwise.
4977 */
4978 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4979 {
4980 int nr_done = 0;
4981 u32 done_mask;
4982
4983 done_mask = ap->qc_active ^ qc_active;
4984
4985 if (unlikely(done_mask & qc_active)) {
4986 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
4987 ap->qc_active, qc_active);
4988 return -EINVAL;
4989 }
4990
4991 while (done_mask) {
4992 struct ata_queued_cmd *qc;
4993 unsigned int tag = __ffs(done_mask);
4994
4995 qc = ata_qc_from_tag(ap, tag);
4996 if (qc) {
4997 ata_qc_complete(qc);
4998 nr_done++;
4999 }
5000 done_mask &= ~(1 << tag);
5001 }
5002
5003 return nr_done;
5004 }
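/*
 * Example (illustrative sketch): an NCQ-capable LLD's interrupt
 * handler reads which tags the hardware still considers active and
 * passes that mask in; PORT_ACTIVE_TAGS is a hypothetical register.
 *
 *	u32 qc_active = readl(port_mmio + PORT_ACTIVE_TAGS);
 *
 *	ata_qc_complete_multiple(ap, qc_active);
 */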
5005
5006 /**
5007 * ata_qc_issue - issue taskfile to device
5008 * @qc: command to issue to device
5009 *
5010 * Prepare an ATA command for submission to the device.
5011 * This includes mapping the data into a DMA-able
5012 * area, filling in the S/G table, and finally
5013 * writing the taskfile to hardware, starting the command.
5014 *
5015 * LOCKING:
5016 * spin_lock_irqsave(host lock)
5017 */
5018 void ata_qc_issue(struct ata_queued_cmd *qc)
5019 {
5020 struct ata_port *ap = qc->ap;
5021 struct ata_link *link = qc->dev->link;
5022 u8 prot = qc->tf.protocol;
5023
5024 /* Make sure only one non-NCQ command is outstanding. The
5025 * check is skipped for old EH because it reuses active qc to
5026 * request ATAPI sense.
5027 */
5028 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5029
5030 if (ata_is_ncq(prot)) {
5031 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5032
5033 if (!link->sactive)
5034 ap->nr_active_links++;
5035 link->sactive |= 1 << qc->tag;
5036 } else {
5037 WARN_ON_ONCE(link->sactive);
5038
5039 ap->nr_active_links++;
5040 link->active_tag = qc->tag;
5041 }
5042
5043 qc->flags |= ATA_QCFLAG_ACTIVE;
5044 ap->qc_active |= 1 << qc->tag;
5045
5046 /*
5047 * We guarantee to LLDs that they will have at least one
5048 * non-zero sg if the command is a data command.
5049 */
5050 if (WARN_ON_ONCE(ata_is_data(prot) &&
5051 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5052 goto sys_err;
5053
5054 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5055 (ap->flags & ATA_FLAG_PIO_DMA)))
5056 if (ata_sg_setup(qc))
5057 goto sys_err;
5058
5059 /* if device is sleeping, schedule reset and abort the link */
5060 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5061 link->eh_info.action |= ATA_EH_RESET;
5062 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5063 ata_link_abort(link);
5064 return;
5065 }
5066
5067 ap->ops->qc_prep(qc);
5068
5069 qc->err_mask |= ap->ops->qc_issue(qc);
5070 if (unlikely(qc->err_mask))
5071 goto err;
5072 return;
5073
5074 sys_err:
5075 qc->err_mask |= AC_ERR_SYSTEM;
5076 err:
5077 ata_qc_complete(qc);
5078 }
5079
5080 /**
5081 * sata_scr_valid - test whether SCRs are accessible
5082 * @link: ATA link to test SCR accessibility for
5083 *
5084 * Test whether SCRs are accessible for @link.
5085 *
5086 * LOCKING:
5087 * None.
5088 *
5089 * RETURNS:
5090 * 1 if SCRs are accessible, 0 otherwise.
5091 */
5092 int sata_scr_valid(struct ata_link *link)
5093 {
5094 struct ata_port *ap = link->ap;
5095
5096 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5097 }
5098
5099 /**
5100 * sata_scr_read - read SCR register of the specified port
5101 * @link: ATA link to read SCR for
5102 * @reg: SCR to read
5103 * @val: Place to store read value
5104 *
5105 * Read SCR register @reg of @link into *@val. This function is
5106 * guaranteed to succeed if @link is ap->link, the cable type of
5107 * the port is SATA and the port implements ->scr_read.
5108 *
5109 * LOCKING:
5110 * None if @link is ap->link. Kernel thread context otherwise.
5111 *
5112 * RETURNS:
5113 * 0 on success, negative errno on failure.
5114 */
5115 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5116 {
5117 if (ata_is_host_link(link)) {
5118 if (sata_scr_valid(link))
5119 return link->ap->ops->scr_read(link, reg, val);
5120 return -EOPNOTSUPP;
5121 }
5122
5123 return sata_pmp_scr_read(link, reg, val);
5124 }
5125
5126 /**
5127 * sata_scr_write - write SCR register of the specified port
5128 * @link: ATA link to write SCR for
5129 * @reg: SCR to write
5130 * @val: value to write
5131 *
5132 * Write @val to SCR register @reg of @link. This function is
5133 * guaranteed to succeed if @link is ap->link, the cable type of
5134 * the port is SATA and the port implements ->scr_write.
5135 *
5136 * LOCKING:
5137 * None if @link is ap->link. Kernel thread context otherwise.
5138 *
5139 * RETURNS:
5140 * 0 on success, negative errno on failure.
5141 */
5142 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5143 {
5144 if (ata_is_host_link(link)) {
5145 if (sata_scr_valid(link))
5146 return link->ap->ops->scr_write(link, reg, val);
5147 return -EOPNOTSUPP;
5148 }
5149
5150 return sata_pmp_scr_write(link, reg, val);
5151 }
5152
5153 /**
5154 * sata_scr_write_flush - write SCR register of the specified port and flush
5155 * @link: ATA link to write SCR for
5156 * @reg: SCR to write
5157 * @val: value to write
5158 *
5159 * This function is identical to sata_scr_write() except that this
5160 * function performs a flush after writing to the register.
5161 *
5162 * LOCKING:
5163 * None if @link is ap->link. Kernel thread context otherwise.
5164 *
5165 * RETURNS:
5166 * 0 on success, negative errno on failure.
5167 */
5168 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5169 {
5170 if (ata_is_host_link(link)) {
5171 int rc;
5172
5173 if (sata_scr_valid(link)) {
5174 rc = link->ap->ops->scr_write(link, reg, val);
5175 if (rc == 0)
5176 rc = link->ap->ops->scr_read(link, reg, &val);
5177 return rc;
5178 }
5179 return -EOPNOTSUPP;
5180 }
5181
5182 return sata_pmp_scr_write(link, reg, val);
5183 }
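/*
 * Example (illustrative sketch): read-modify-write of SControl with
 * the SCR helpers, here requesting a 1.5 Gbps limit (the SPD field
 * occupies bits 7:4).
 *
 *	u32 scontrol;
 *	int rc;
 *
 *	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
 *	if (rc)
 *		return rc;
 *	scontrol = (scontrol & ~0xf0) | 0x10;
 *	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
 */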
5184
5185 /**
5186 * ata_phys_link_online - test whether the given link is online
5187 * @link: ATA link to test
5188 *
5189 * Test whether @link is online. Note that this function returns
5190 * false if the online status of @link cannot be obtained, so
5191 * ata_link_online(link) != !ata_link_offline(link).
5192 *
5193 * LOCKING:
5194 * None.
5195 *
5196 * RETURNS:
5197 * True if the port online status is available and online.
5198 */
5199 bool ata_phys_link_online(struct ata_link *link)
5200 {
5201 u32 sstatus;
5202
5203 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5204 ata_sstatus_online(sstatus))
5205 return true;
5206 return false;
5207 }
5208
5209 /**
5210 * ata_phys_link_offline - test whether the given link is offline
5211 * @link: ATA link to test
5212 *
5213 * Test whether @link is offline. Note that this function
5214 * returns false if the offline status of @link cannot be obtained, so
5215 * ata_link_online(link) != !ata_link_offline(link).
5216 *
5217 * LOCKING:
5218 * None.
5219 *
5220 * RETURNS:
5221 * True if the port offline status is available and offline.
5222 */
5223 bool ata_phys_link_offline(struct ata_link *link)
5224 {
5225 u32 sstatus;
5226
5227 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5228 !ata_sstatus_online(sstatus))
5229 return true;
5230 return false;
5231 }
5232
5233 /**
5234 * ata_link_online - test whether the given link is online
5235 * @link: ATA link to test
5236 *
5237 * Test whether @link is online. This is identical to
5238 * ata_phys_link_online() when there's no slave link. When
5239 * there's a slave link, this function should only be called on
5240 * the master link and will return true if any of M/S links is
5241 * online.
5242 *
5243 * LOCKING:
5244 * None.
5245 *
5246 * RETURNS:
5247 * True if the port online status is available and online.
5248 */
5249 bool ata_link_online(struct ata_link *link)
5250 {
5251 struct ata_link *slave = link->ap->slave_link;
5252
5253 WARN_ON(link == slave); /* shouldn't be called on slave link */
5254
5255 return ata_phys_link_online(link) ||
5256 (slave && ata_phys_link_online(slave));
5257 }
5258
5259 /**
5260 * ata_link_offline - test whether the given link is offline
5261 * @link: ATA link to test
5262 *
5263 * Test whether @link is offline. This is identical to
5264 * ata_phys_link_offline() when there's no slave link. When
5265 * there's a slave link, this function should only be called on
5266 * the master link and will return true if both M/S links are
5267 * offline.
5268 *
5269 * LOCKING:
5270 * None.
5271 *
5272 * RETURNS:
5273 * True if the port offline status is available and offline.
5274 */
5275 bool ata_link_offline(struct ata_link *link)
5276 {
5277 struct ata_link *slave = link->ap->slave_link;
5278
5279 WARN_ON(link == slave); /* shouldn't be called on slave link */
5280
5281 return ata_phys_link_offline(link) &&
5282 (!slave || ata_phys_link_offline(slave));
5283 }
5284
5285 #ifdef CONFIG_PM
5286 static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5287 unsigned int action, unsigned int ehi_flags,
5288 int *async)
5289 {
5290 struct ata_link *link;
5291 unsigned long flags;
5292 int rc = 0;
5293
5294 /* Previous resume operation might still be in
5295 * progress. Wait for PM_PENDING to clear.
5296 */
5297 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5298 if (async) {
5299 *async = -EAGAIN;
5300 return 0;
5301 }
5302 ata_port_wait_eh(ap);
5303 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5304 }
5305
5306 /* request PM ops to EH */
5307 spin_lock_irqsave(ap->lock, flags);
5308
5309 ap->pm_mesg = mesg;
5310 if (async)
5311 ap->pm_result = async;
5312 else
5313 ap->pm_result = &rc;
5314
5315 ap->pflags |= ATA_PFLAG_PM_PENDING;
5316 ata_for_each_link(link, ap, HOST_FIRST) {
5317 link->eh_info.action |= action;
5318 link->eh_info.flags |= ehi_flags;
5319 }
5320
5321 ata_port_schedule_eh(ap);
5322
5323 spin_unlock_irqrestore(ap->lock, flags);
5324
5325 /* wait and check result */
5326 if (!async) {
5327 ata_port_wait_eh(ap);
5328 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5329 }
5330
5331 return rc;
5332 }
5333
5334 static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async)
5335 {
5336 /*
5337 * On some hardware, device fails to respond after spun down
5338 * for suspend. As the device won't be used before being
5339 * resumed, we don't need to touch the device. Ask EH to skip
5340 * the usual stuff and proceed directly to suspend.
5341 *
5342 * http://thread.gmane.org/gmane.linux.ide/46764
5343 */
5344 unsigned int ehi_flags = ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY |
5345 ATA_EHI_NO_RECOVERY;
5346 return ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
5347 }
5348
5349 static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
5350 {
5351 struct ata_port *ap = to_ata_port(dev);
5352
5353 return __ata_port_suspend_common(ap, mesg, NULL);
5354 }
5355
5356 static int ata_port_suspend(struct device *dev)
5357 {
5358 if (pm_runtime_suspended(dev))
5359 return 0;
5360
5361 return ata_port_suspend_common(dev, PMSG_SUSPEND);
5362 }
5363
5364 static int ata_port_do_freeze(struct device *dev)
5365 {
5366 if (pm_runtime_suspended(dev))
5367 return 0;
5368
5369 return ata_port_suspend_common(dev, PMSG_FREEZE);
5370 }
5371
5372 static int ata_port_poweroff(struct device *dev)
5373 {
5374 return ata_port_suspend_common(dev, PMSG_HIBERNATE);
5375 }
5376
5377 static int __ata_port_resume_common(struct ata_port *ap, pm_message_t mesg,
5378 int *async)
5379 {
5380 int rc;
5381
5382 rc = ata_port_request_pm(ap, mesg, ATA_EH_RESET,
5383 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async);
5384 return rc;
5385 }
5386
5387 static int ata_port_resume_common(struct device *dev, pm_message_t mesg)
5388 {
5389 struct ata_port *ap = to_ata_port(dev);
5390
5391 return __ata_port_resume_common(ap, mesg, NULL);
5392 }
5393
5394 static int ata_port_resume(struct device *dev)
5395 {
5396 int rc;
5397
5398 rc = ata_port_resume_common(dev, PMSG_RESUME);
5399 if (!rc) {
5400 pm_runtime_disable(dev);
5401 pm_runtime_set_active(dev);
5402 pm_runtime_enable(dev);
5403 }
5404
5405 return rc;
5406 }
5407
5408 /*
5409 * For ODDs, the upper layer will poll for media change every few seconds,
5410 * which will make it enter and leave the suspend state every few seconds.
5411 * As each suspend causes a hard/soft reset, the gain from runtime suspend
5412 * is very small and the ODD may malfunction after constantly being reset.
5413 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5414 * ODD is attached to the port.
5415 */
5416 static int ata_port_runtime_idle(struct device *dev)
5417 {
5418 struct ata_port *ap = to_ata_port(dev);
5419 struct ata_link *link;
5420 struct ata_device *adev;
5421
5422 ata_for_each_link(link, ap, HOST_FIRST) {
5423 ata_for_each_dev(adev, link, ENABLED)
5424 if (adev->class == ATA_DEV_ATAPI &&
5425 !zpodd_dev_enabled(adev))
5426 return -EBUSY;
5427 }
5428
5429 return pm_runtime_suspend(dev);
5430 }
5431
5432 static int ata_port_runtime_suspend(struct device *dev)
5433 {
5434 return ata_port_suspend_common(dev, PMSG_AUTO_SUSPEND);
5435 }
5436
5437 static int ata_port_runtime_resume(struct device *dev)
5438 {
5439 return ata_port_resume_common(dev, PMSG_AUTO_RESUME);
5440 }
5441
5442 static const struct dev_pm_ops ata_port_pm_ops = {
5443 .suspend = ata_port_suspend,
5444 .resume = ata_port_resume,
5445 .freeze = ata_port_do_freeze,
5446 .thaw = ata_port_resume,
5447 .poweroff = ata_port_poweroff,
5448 .restore = ata_port_resume,
5449
5450 .runtime_suspend = ata_port_runtime_suspend,
5451 .runtime_resume = ata_port_runtime_resume,
5452 .runtime_idle = ata_port_runtime_idle,
5453 };
5454
5455 /* sas ports don't participate in pm runtime management of ata_ports,
5456 * and need to resume ata devices at the domain level, not the per-port
5457 * level. sas suspend/resume is async to allow parallel port recovery
5458 * since sas has multiple ata_port instances per Scsi_Host.
5459 */
5460 int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
5461 {
5462 return __ata_port_suspend_common(ap, PMSG_SUSPEND, async);
5463 }
5464 EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
5465
5466 int ata_sas_port_async_resume(struct ata_port *ap, int *async)
5467 {
5468 return __ata_port_resume_common(ap, PMSG_RESUME, async);
5469 }
5470 EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
5471
5472
5473 /**
5474 * ata_host_suspend - suspend host
5475 * @host: host to suspend
5476 * @mesg: PM message
5477 *
5478 * Suspend @host. Actual operation is performed by port suspend.
5479 */
5480 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5481 {
5482 host->dev->power.power_state = mesg;
5483 return 0;
5484 }
5485
5486 /**
5487 * ata_host_resume - resume host
5488 * @host: host to resume
5489 *
5490 * Resume @host. Actual operation is performed by port resume.
5491 */
5492 void ata_host_resume(struct ata_host *host)
5493 {
5494 host->dev->power.power_state = PMSG_ON;
5495 }
5496 #endif
5497
5498 struct device_type ata_port_type = {
5499 .name = "ata_port",
5500 #ifdef CONFIG_PM
5501 .pm = &ata_port_pm_ops,
5502 #endif
5503 };
5504
5505 /**
5506 * ata_dev_init - Initialize an ata_device structure
5507 * @dev: Device structure to initialize
5508 *
5509 * Initialize @dev in preparation for probing.
5510 *
5511 * LOCKING:
5512 * Inherited from caller.
5513 */
5514 void ata_dev_init(struct ata_device *dev)
5515 {
5516 struct ata_link *link = ata_dev_phys_link(dev);
5517 struct ata_port *ap = link->ap;
5518 unsigned long flags;
5519
5520 /* SATA spd limit is bound to the attached device, reset together */
5521 link->sata_spd_limit = link->hw_sata_spd_limit;
5522 link->sata_spd = 0;
5523
5524 /* High bits of dev->flags are used to record warm plug
5525 * requests which occur asynchronously. Synchronize using
5526 * host lock.
5527 */
5528 spin_lock_irqsave(ap->lock, flags);
5529 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5530 dev->horkage = 0;
5531 spin_unlock_irqrestore(ap->lock, flags);
5532
5533 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5534 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5535 dev->pio_mask = UINT_MAX;
5536 dev->mwdma_mask = UINT_MAX;
5537 dev->udma_mask = UINT_MAX;
5538 }
5539
5540 /**
5541 * ata_link_init - Initialize an ata_link structure
5542 * @ap: ATA port link is attached to
5543 * @link: Link structure to initialize
5544 * @pmp: Port multiplier port number
5545 *
5546 * Initialize @link.
5547 *
5548 * LOCKING:
5549 * Kernel thread context (may sleep)
5550 */
5551 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5552 {
5553 int i;
5554
5555 /* clear everything except for devices */
5556 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5557 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5558
5559 link->ap = ap;
5560 link->pmp = pmp;
5561 link->active_tag = ATA_TAG_POISON;
5562 link->hw_sata_spd_limit = UINT_MAX;
5563
5564 /* can't use iterator, ap isn't initialized yet */
5565 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5566 struct ata_device *dev = &link->device[i];
5567
5568 dev->link = link;
5569 dev->devno = dev - link->device;
5570 #ifdef CONFIG_ATA_ACPI
5571 dev->gtf_filter = ata_acpi_gtf_filter;
5572 #endif
5573 ata_dev_init(dev);
5574 }
5575 }
5576
5577 /**
5578 * sata_link_init_spd - Initialize link->sata_spd_limit
5579 * @link: Link to configure sata_spd_limit for
5580 *
5581 * Initialize @link->[hw_]sata_spd_limit to the currently
5582 * configured value.
5583 *
5584 * LOCKING:
5585 * Kernel thread context (may sleep).
5586 *
5587 * RETURNS:
5588 * 0 on success, -errno on failure.
5589 */
5590 int sata_link_init_spd(struct ata_link *link)
5591 {
5592 u8 spd;
5593 int rc;
5594
5595 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5596 if (rc)
5597 return rc;
5598
5599 spd = (link->saved_scontrol >> 4) & 0xf;
5600 if (spd)
5601 link->hw_sata_spd_limit &= (1 << spd) - 1;
5602
5603 ata_force_link_limits(link);
5604
5605 link->sata_spd_limit = link->hw_sata_spd_limit;
5606
5607 return 0;
5608 }
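/*
 * Worked example (sketch): saved_scontrol == 0x310 gives spd == 1, so
 * hw_sata_spd_limit is masked to bit 0 only, i.e. 1.5 Gbps. spd == 0
 * means "no limit" and leaves the mask untouched.
 */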
5609
5610 /**
5611 * ata_port_alloc - allocate and initialize basic ATA port resources
5612 * @host: ATA host this allocated port belongs to
5613 *
5614 * Allocate and initialize basic ATA port resources.
5615 *
5616 * RETURNS:
5617 * Allocated ATA port on success, NULL on failure.
5618 *
5619 * LOCKING:
5620 * Inherited from calling layer (may sleep).
5621 */
5622 struct ata_port *ata_port_alloc(struct ata_host *host)
5623 {
5624 struct ata_port *ap;
5625
5626 DPRINTK("ENTER\n");
5627
5628 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5629 if (!ap)
5630 return NULL;
5631
5632 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5633 ap->lock = &host->lock;
5634 ap->print_id = -1;
5635 ap->host = host;
5636 ap->dev = host->dev;
5637
5638 #if defined(ATA_VERBOSE_DEBUG)
5639 /* turn on all debugging levels */
5640 ap->msg_enable = 0x00FF;
5641 #elif defined(ATA_DEBUG)
5642 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5643 #else
5644 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5645 #endif
5646
5647 mutex_init(&ap->scsi_scan_mutex);
5648 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5649 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5650 INIT_LIST_HEAD(&ap->eh_done_q);
5651 init_waitqueue_head(&ap->eh_wait_q);
5652 init_completion(&ap->park_req_pending);
5653 init_timer_deferrable(&ap->fastdrain_timer);
5654 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5655 ap->fastdrain_timer.data = (unsigned long)ap;
5656
5657 ap->cbl = ATA_CBL_NONE;
5658
5659 ata_link_init(ap, &ap->link, 0);
5660
5661 #ifdef ATA_IRQ_TRAP
5662 ap->stats.unhandled_irq = 1;
5663 ap->stats.idle_irq = 1;
5664 #endif
5665 ata_sff_port_init(ap);
5666
5667 return ap;
5668 }
5669
5670 static void ata_host_release(struct device *gendev, void *res)
5671 {
5672 struct ata_host *host = dev_get_drvdata(gendev);
5673 int i;
5674
5675 for (i = 0; i < host->n_ports; i++) {
5676 struct ata_port *ap = host->ports[i];
5677
5678 if (!ap)
5679 continue;
5680
5681 if (ap->scsi_host)
5682 scsi_host_put(ap->scsi_host);
5683
5684 kfree(ap->pmp_link);
5685 kfree(ap->slave_link);
5686 kfree(ap);
5687 host->ports[i] = NULL;
5688 }
5689
5690 dev_set_drvdata(gendev, NULL);
5691 }
5692
5693 /**
5694 * ata_host_alloc - allocate and init basic ATA host resources
5695 * @dev: generic device this host is associated with
5696 * @max_ports: maximum number of ATA ports associated with this host
5697 *
5698 * Allocate and initialize basic ATA host resources. An LLD calls
5699 * this function to allocate a host, initializes it fully, and
5700 * attaches it using ata_host_register().
5701 *
5702 * @max_ports ports are allocated and host->n_ports is
5703 * initialized to @max_ports. The caller is allowed to decrease
5704 * host->n_ports before calling ata_host_register(). The unused
5705 * ports will be automatically freed on registration.
5706 *
5707 * RETURNS:
5708 * Allocated ATA host on success, NULL on failure.
5709 *
5710 * LOCKING:
5711 * Inherited from calling layer (may sleep).
5712 */
5713 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5714 {
5715 struct ata_host *host;
5716 size_t sz;
5717 int i;
5718
5719 DPRINTK("ENTER\n");
5720
5721 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5722 return NULL;
5723
5724 /* alloc a container for our list of ATA ports (buses) */
5725 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5727 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5728 if (!host)
5729 goto err_out;
5730
5731 devres_add(dev, host);
5732 dev_set_drvdata(dev, host);
5733
5734 spin_lock_init(&host->lock);
5735 mutex_init(&host->eh_mutex);
5736 host->dev = dev;
5737 host->n_ports = max_ports;
5738
5739 /* allocate ports bound to this host */
5740 for (i = 0; i < max_ports; i++) {
5741 struct ata_port *ap;
5742
5743 ap = ata_port_alloc(host);
5744 if (!ap)
5745 goto err_out;
5746
5747 ap->port_no = i;
5748 host->ports[i] = ap;
5749 }
5750
5751 devres_remove_group(dev, NULL);
5752 return host;
5753
5754 err_out:
5755 devres_release_group(dev, NULL);
5756 return NULL;
5757 }
5758
5759 /**
5760 * ata_host_alloc_pinfo - alloc host and init with port_info array
5761 * @dev: generic device this host is associated with
5762 * @ppi: array of ATA port_info to initialize host with
5763 * @n_ports: number of ATA ports attached to this host
5764 *
5765 * Allocate ATA host and initialize with info from @ppi. If NULL
5766 * terminated, @ppi may contain fewer entries than @n_ports. The
5767 * last entry will be used for the remaining ports.
5768 *
5769 * RETURNS:
5770 * Allocated ATA host on success, NULL on failure.
5771 *
5772 * LOCKING:
5773 * Inherited from calling layer (may sleep).
5774 */
5775 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5776 const struct ata_port_info * const * ppi,
5777 int n_ports)
5778 {
5779 const struct ata_port_info *pi;
5780 struct ata_host *host;
5781 int i, j;
5782
5783 host = ata_host_alloc(dev, n_ports);
5784 if (!host)
5785 return NULL;
5786
5787 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5788 struct ata_port *ap = host->ports[i];
5789
5790 if (ppi[j])
5791 pi = ppi[j++];
5792
5793 ap->pio_mask = pi->pio_mask;
5794 ap->mwdma_mask = pi->mwdma_mask;
5795 ap->udma_mask = pi->udma_mask;
5796 ap->flags |= pi->flags;
5797 ap->link.flags |= pi->link_flags;
5798 ap->ops = pi->port_ops;
5799
5800 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5801 host->ops = pi->port_ops;
5802 }
5803
5804 return host;
5805 }
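/*
 * Example (illustrative sketch): a minimal PATA LLD allocating a
 * single-port host from a port_info template; my_port_ops and pdev
 * are hypothetical.
 *
 *	static const struct ata_port_info info = {
 *		.flags		= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= ATA_PIO4,
 *		.port_ops	= &my_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
 *
 *	if (!host)
 *		return -ENOMEM;
 */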
5806
5807 /**
5808 * ata_slave_link_init - initialize slave link
5809 * @ap: port to initialize slave link for
5810 *
5811 * Create and initialize slave link for @ap. This enables slave
5812 * link handling on the port.
5813 *
5814 * In libata, a port contains links and a link contains devices.
5815 * There is a single host link but if a PMP is attached to it,
5816 * there can be multiple fan-out links. On SATA, there's usually
5817 * a single device connected to a link but PATA and SATA
5818 * controllers emulating TF based interface can have two - master
5819 * and slave.
5820 *
5821 * However, there are a few controllers which don't fit into this
5822 * abstraction too well - SATA controllers which emulate TF
5823 * interface with both master and slave devices but also have
5824 * separate SCR register sets for each device. These controllers
5825 * need separate links for physical link handling
5826 * (e.g. onlineness, link speed) but should be treated like a
5827 * traditional M/S controller for everything else (e.g. command
5828 * issue, softreset).
5829 *
5830 * slave_link is libata's way of handling this class of
5831 * controllers without impacting core layer too much. For
5832 * anything other than physical link handling, the default host
5833 * link is used for both master and slave. For physical link
5834 * handling, separate @ap->slave_link is used. All dirty details
5835 * are implemented inside libata core layer. From LLD's POV, the
5836 * only difference is that prereset, hardreset and postreset are
5837 * called once more for the slave link, so the reset sequence
5838 * looks like the following.
5839 *
5840 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5841 * softreset(M) -> postreset(M) -> postreset(S)
5842 *
5843 * Note that softreset is called only for the master. Softreset
5844 * resets both M/S by definition, so SRST on master should handle
5845 * both (the standard method will work just fine).
5846 *
5847 * LOCKING:
5848 * Should be called before host is registered.
5849 *
5850 * RETURNS:
5851 * 0 on success, -errno on failure.
5852 */
5853 int ata_slave_link_init(struct ata_port *ap)
5854 {
5855 struct ata_link *link;
5856
5857 WARN_ON(ap->slave_link);
5858 WARN_ON(ap->flags & ATA_FLAG_PMP);
5859
5860 link = kzalloc(sizeof(*link), GFP_KERNEL);
5861 if (!link)
5862 return -ENOMEM;
5863
5864 ata_link_init(ap, link, 1);
5865 ap->slave_link = link;
5866 return 0;
5867 }
5868
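/*
 * Example: an LLD for such a controller sets the slave links up after
 * allocating the host and before registering it.  Sketch only; the
 * surrounding probe context is assumed:
 *
 *	for (i = 0; i < host->n_ports; i++) {
 *		rc = ata_slave_link_init(host->ports[i]);
 *		if (rc)
 *			return rc;
 *	}
 */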
5869 static void ata_host_stop(struct device *gendev, void *res)
5870 {
5871 struct ata_host *host = dev_get_drvdata(gendev);
5872 int i;
5873
5874 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5875
5876 for (i = 0; i < host->n_ports; i++) {
5877 struct ata_port *ap = host->ports[i];
5878
5879 if (ap->ops->port_stop)
5880 ap->ops->port_stop(ap);
5881 }
5882
5883 if (host->ops->host_stop)
5884 host->ops->host_stop(host);
5885 }
5886
5887 /**
5888 * ata_finalize_port_ops - finalize ata_port_operations
5889 * @ops: ata_port_operations to finalize
5890 *
5891 * An ata_port_operations can inherit from another ops and that
5892 * ops can again inherit from another. This can go on as many
5893 * times as necessary as long as there is no loop in the
5894 * inheritance chain.
5895 *
5896 * Ops tables are finalized when the host is started. NULL or
5897 * unspecified entries are inherited from the closest ancestor
5898 * which has the method, and the entry is populated with it.
5899 * After finalization, the ops table directly points to all the
5900 * methods and ->inherits is no longer necessary and cleared.
5901 *
5902 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5903 *
5904 * LOCKING:
5905 * None.
5906 */
5907 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5908 {
5909 static DEFINE_SPINLOCK(lock);
5910 const struct ata_port_operations *cur;
5911 void **begin = (void **)ops;
5912 void **end = (void **)&ops->inherits;
5913 void **pp;
5914
5915 if (!ops || !ops->inherits)
5916 return;
5917
5918 spin_lock(&lock);
5919
5920 for (cur = ops->inherits; cur; cur = cur->inherits) {
5921 void **inherit = (void **)cur;
5922
5923 for (pp = begin; pp < end; pp++, inherit++)
5924 if (!*pp)
5925 *pp = *inherit;
5926 }
5927
5928 for (pp = begin; pp < end; pp++)
5929 if (IS_ERR(*pp))
5930 *pp = NULL;
5931
5932 ops->inherits = NULL;
5933
5934 spin_unlock(&lock);
5935 }
5936
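/*
 * Example: what finalization resolves.  Sketch with placeholder names;
 * foo_port_ops picks up every method of sata_port_ops (and transitively
 * ata_base_port_ops) that it doesn't override, while ATA_OP_NULL forces
 * the inherited ->postreset slot back to NULL:
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= foo_hardreset,
 *		.postreset	= ATA_OP_NULL,
 *	};
 */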
5937 /**
5938 * ata_host_start - start and freeze ports of an ATA host
5939 * @host: ATA host to start ports for
5940 *
5941 * Start and then freeze ports of @host. Started status is
5942 * recorded in host->flags, so this function can be called
5943 * multiple times. Ports are guaranteed to get started only
5944 * once. If host->ops isn't initialized yet, it's set to the
5945 * first non-dummy port ops.
5946 *
5947 * LOCKING:
5948 * Inherited from calling layer (may sleep).
5949 *
5950 * RETURNS:
5951 * 0 if all ports are started successfully, -errno otherwise.
5952 */
5953 int ata_host_start(struct ata_host *host)
5954 {
5955 int have_stop = 0;
5956 void *start_dr = NULL;
5957 int i, rc;
5958
5959 if (host->flags & ATA_HOST_STARTED)
5960 return 0;
5961
5962 ata_finalize_port_ops(host->ops);
5963
5964 for (i = 0; i < host->n_ports; i++) {
5965 struct ata_port *ap = host->ports[i];
5966
5967 ata_finalize_port_ops(ap->ops);
5968
5969 if (!host->ops && !ata_port_is_dummy(ap))
5970 host->ops = ap->ops;
5971
5972 if (ap->ops->port_stop)
5973 have_stop = 1;
5974 }
5975
5976 if (host->ops->host_stop)
5977 have_stop = 1;
5978
5979 if (have_stop) {
5980 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5981 if (!start_dr)
5982 return -ENOMEM;
5983 }
5984
5985 for (i = 0; i < host->n_ports; i++) {
5986 struct ata_port *ap = host->ports[i];
5987
5988 if (ap->ops->port_start) {
5989 rc = ap->ops->port_start(ap);
5990 if (rc) {
5991 if (rc != -ENODEV)
5992 dev_err(host->dev,
5993 "failed to start port %d (errno=%d)\n",
5994 i, rc);
5995 goto err_out;
5996 }
5997 }
5998 ata_eh_freeze_port(ap);
5999 }
6000
6001 if (start_dr)
6002 devres_add(host->dev, start_dr);
6003 host->flags |= ATA_HOST_STARTED;
6004 return 0;
6005
6006 err_out:
6007 while (--i >= 0) {
6008 struct ata_port *ap = host->ports[i];
6009
6010 if (ap->ops->port_stop)
6011 ap->ops->port_stop(ap);
6012 }
6013 devres_free(start_dr);
6014 return rc;
6015 }
6016
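/*
 * Example: drivers which manage their own IRQ or resource setup call
 * this directly instead of going through ata_host_activate().  Sketch,
 * with the resource setup elided:
 *
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	...
 *	return ata_host_register(host, &foo_sht);
 */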
6017 /**
6018 * ata_host_init - Initialize a host struct for sas (ipr, libsas)
6019 * @host: host to initialize
6020 * @dev: device host is attached to
6021 * @ops: port_ops
6022 *
6023 */
6024 void ata_host_init(struct ata_host *host, struct device *dev,
6025 struct ata_port_operations *ops)
6026 {
6027 spin_lock_init(&host->lock);
6028 mutex_init(&host->eh_mutex);
6029 host->dev = dev;
6030 host->ops = ops;
6031 }
6032
6033 void __ata_port_probe(struct ata_port *ap)
6034 {
6035 struct ata_eh_info *ehi = &ap->link.eh_info;
6036 unsigned long flags;
6037
6038 /* kick EH for boot probing */
6039 spin_lock_irqsave(ap->lock, flags);
6040
6041 ehi->probe_mask |= ATA_ALL_DEVICES;
6042 ehi->action |= ATA_EH_RESET;
6043 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6044
6045 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6046 ap->pflags |= ATA_PFLAG_LOADING;
6047 ata_port_schedule_eh(ap);
6048
6049 spin_unlock_irqrestore(ap->lock, flags);
6050 }
6051
6052 int ata_port_probe(struct ata_port *ap)
6053 {
6054 int rc = 0;
6055
6056 if (ap->ops->error_handler) {
6057 __ata_port_probe(ap);
6058 ata_port_wait_eh(ap);
6059 } else {
6060 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6061 rc = ata_bus_probe(ap);
6062 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6063 }
6064 return rc;
6065 }
6066
6068 static void async_port_probe(void *data, async_cookie_t cookie)
6069 {
6070 struct ata_port *ap = data;
6071
6072 /*
6073 * If we're not allowed to scan this host in parallel,
6074 * we need to wait until all previous scans have completed
6075 * before going further.
6076 * Jeff Garzik says this is only within a controller, so we
6077 * don't need to wait for port 0, only for later ports.
6078 */
6079 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6080 async_synchronize_cookie(cookie);
6081
6082 (void)ata_port_probe(ap);
6083
6084 /* in order to keep device order, we need to synchronize at this point */
6085 async_synchronize_cookie(cookie);
6086
6087 ata_scsi_scan_host(ap, 1);
6088 }
6089
6090 /**
6091 * ata_host_register - register initialized ATA host
6092 * @host: ATA host to register
6093 * @sht: template for SCSI host
6094 *
6095 * Register initialized ATA host. @host is allocated using
6096 * ata_host_alloc() and fully initialized by LLD. This function
6097 * starts ports, registers @host with ATA and SCSI layers and
6098 * probes registered devices.
6099 *
6100 * LOCKING:
6101 * Inherited from calling layer (may sleep).
6102 *
6103 * RETURNS:
6104 * 0 on success, -errno otherwise.
6105 */
6106 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6107 {
6108 int i, rc;
6109
6110 /* host must have been started */
6111 if (!(host->flags & ATA_HOST_STARTED)) {
6112 dev_err(host->dev, "BUG: trying to register unstarted host\n");
6113 WARN_ON(1);
6114 return -EINVAL;
6115 }
6116
6117 /* Blow away unused ports. This happens when LLD can't
6118 * determine the exact number of ports to allocate at
6119 * allocation time.
6120 */
6121 for (i = host->n_ports; host->ports[i]; i++)
6122 kfree(host->ports[i]);
6123
6124 /* give ports names */
6125 for (i = 0; i < host->n_ports; i++)
6126 host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6127
6129 /* Create associated sysfs transport objects */
6130 for (i = 0; i < host->n_ports; i++) {
6131 rc = ata_tport_add(host->dev, host->ports[i]);
6132 if (rc)
6133 goto err_tadd;
6135 }
6136
6137 rc = ata_scsi_add_hosts(host, sht);
6138 if (rc)
6139 goto err_tadd;
6140
6141 /* set cable, sata_spd_limit and report */
6142 for (i = 0; i < host->n_ports; i++) {
6143 struct ata_port *ap = host->ports[i];
6144 unsigned long xfer_mask;
6145
6146 /* set SATA cable type if still unset */
6147 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6148 ap->cbl = ATA_CBL_SATA;
6149
6150 /* init sata_spd_limit to the current value */
6151 sata_link_init_spd(&ap->link);
6152 if (ap->slave_link)
6153 sata_link_init_spd(ap->slave_link);
6154
6155 /* print per-port info to dmesg */
6156 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6157 ap->udma_mask);
6158
6159 if (!ata_port_is_dummy(ap)) {
6160 ata_port_info(ap, "%cATA max %s %s\n",
6161 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6162 ata_mode_string(xfer_mask),
6163 ap->link.eh_info.desc);
6164 ata_ehi_clear_desc(&ap->link.eh_info);
6165 } else
6166 ata_port_info(ap, "DUMMY\n");
6167 }
6168
6169 /* perform each probe asynchronously */
6170 for (i = 0; i < host->n_ports; i++) {
6171 struct ata_port *ap = host->ports[i];
6172 async_schedule(async_port_probe, ap);
6173 }
6174
6175 return 0;
6176
6177 err_tadd:
6178 while (--i >= 0)
6179 ata_tport_delete(host->ports[i]);
6181 return rc;
6183 }
6184
6185 /**
6186 * ata_host_activate - start host, request IRQ and register it
6187 * @host: target ATA host
6188 * @irq: IRQ to request
6189 * @irq_handler: irq_handler used when requesting IRQ
6190 * @irq_flags: irq_flags used when requesting IRQ
6191 * @sht: scsi_host_template to use when registering the host
6192 *
6193 * After allocating an ATA host and initializing it, most libata
6194 * LLDs perform three steps to activate the host - start host,
6195 * request IRQ and register it. This helper takes necessasry
6196 * arguments and performs the three steps in one go.
6197 *
6198 * An invalid IRQ skips the IRQ registration and expects the host to
6199 * have set polling mode on the port. In this case, @irq_handler
6200 * should be NULL.
6201 *
6202 * LOCKING:
6203 * Inherited from calling layer (may sleep).
6204 *
6205 * RETURNS:
6206 * 0 on success, -errno otherwise.
6207 */
6208 int ata_host_activate(struct ata_host *host, int irq,
6209 irq_handler_t irq_handler, unsigned long irq_flags,
6210 struct scsi_host_template *sht)
6211 {
6212 int i, rc;
6213
6214 rc = ata_host_start(host);
6215 if (rc)
6216 return rc;
6217
6218 /* Special case for polling mode */
6219 if (!irq) {
6220 WARN_ON(irq_handler);
6221 return ata_host_register(host, sht);
6222 }
6223
6224 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6225 dev_driver_string(host->dev), host);
6226 if (rc)
6227 return rc;
6228
6229 for (i = 0; i < host->n_ports; i++)
6230 ata_port_desc(host->ports[i], "irq %d", irq);
6231
6232 rc = ata_host_register(host, sht);
6233 /* if failed, just free the IRQ and leave ports alone */
6234 if (rc)
6235 devm_free_irq(host->dev, irq, host);
6236
6237 return rc;
6238 }
6239
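/*
 * Example: polling-only activation.  Passing an invalid IRQ (0) with a
 * NULL handler skips devm_request_irq() entirely; sketch assuming the
 * LLD has already put its ports into polling mode:
 *
 *	return ata_host_activate(host, 0, NULL, 0, &foo_sht);
 */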
6240 /**
6241 * ata_port_detach - Detach ATA port in preparation for device removal
6242 * @ap: ATA port to be detached
6243 *
6244 * Detach all ATA devices and the associated SCSI devices of @ap;
6245 * then, remove the associated SCSI host. @ap is guaranteed to
6246 * be quiescent on return from this function.
6247 *
6248 * LOCKING:
6249 * Kernel thread context (may sleep).
6250 */
6251 static void ata_port_detach(struct ata_port *ap)
6252 {
6253 unsigned long flags;
6254
6255 if (!ap->ops->error_handler)
6256 goto skip_eh;
6257
6258 /* tell EH we're leaving & flush EH */
6259 spin_lock_irqsave(ap->lock, flags);
6260 ap->pflags |= ATA_PFLAG_UNLOADING;
6261 ata_port_schedule_eh(ap);
6262 spin_unlock_irqrestore(ap->lock, flags);
6263
6264 /* wait till EH commits suicide */
6265 ata_port_wait_eh(ap);
6266
6267 /* it better be dead now */
6268 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6269
6270 cancel_delayed_work_sync(&ap->hotplug_task);
6271
6272 skip_eh:
6273 if (ap->pmp_link) {
6274 int i;
6275 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6276 ata_tlink_delete(&ap->pmp_link[i]);
6277 }
6278 ata_tport_delete(ap);
6279
6280 /* remove the associated SCSI host */
6281 scsi_remove_host(ap->scsi_host);
6282 }
6283
6284 /**
6285 * ata_host_detach - Detach all ports of an ATA host
6286 * @host: Host to detach
6287 *
6288 * Detach all ports of @host.
6289 *
6290 * LOCKING:
6291 * Kernel thread context (may sleep).
6292 */
6293 void ata_host_detach(struct ata_host *host)
6294 {
6295 int i;
6296
6297 for (i = 0; i < host->n_ports; i++)
6298 ata_port_detach(host->ports[i]);
6299
6300 /* the host is dead now, dissociate ACPI */
6301 ata_acpi_dissociate(host);
6302 }
6303
6304 #ifdef CONFIG_PCI
6305
6306 /**
6307 * ata_pci_remove_one - PCI layer callback for device removal
6308 * @pdev: PCI device that was removed
6309 *
6310 * PCI layer indicates to libata via this hook that hot-unplug or
6311 * module unload event has occurred. Detach all ports. Resource
6312 * release is handled via devres.
6313 *
6314 * LOCKING:
6315 * Inherited from PCI layer (may sleep).
6316 */
6317 void ata_pci_remove_one(struct pci_dev *pdev)
6318 {
6319 struct ata_host *host = pci_get_drvdata(pdev);
6320
6321 ata_host_detach(host);
6322 }
6323
6324 /* move to PCI subsystem */
6325 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6326 {
6327 unsigned long tmp = 0;
6328
6329 switch (bits->width) {
6330 case 1: {
6331 u8 tmp8 = 0;
6332 pci_read_config_byte(pdev, bits->reg, &tmp8);
6333 tmp = tmp8;
6334 break;
6335 }
6336 case 2: {
6337 u16 tmp16 = 0;
6338 pci_read_config_word(pdev, bits->reg, &tmp16);
6339 tmp = tmp16;
6340 break;
6341 }
6342 case 4: {
6343 u32 tmp32 = 0;
6344 pci_read_config_dword(pdev, bits->reg, &tmp32);
6345 tmp = tmp32;
6346 break;
6347 }
6348
6349 default:
6350 return -EINVAL;
6351 }
6352
6353 tmp &= bits->mask;
6354
6355 return (tmp == bits->val) ? 1 : 0;
6356 }
6357
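/*
 * Example: describing a port-enable bit with struct pci_bits (fields:
 * reg, width, mask, val).  The config offset and mask below are made-up
 * placeholders.  A return of 1 means the masked value matched, 0 means
 * it didn't, -EINVAL means the width was not 1, 2 or 4:
 *
 *	static const struct pci_bits foo_enable_bits = { 0x41, 1, 0x80, 0x80 };
 *
 *	if (pci_test_config_bits(pdev, &foo_enable_bits) != 1)
 *		return -ENOENT;
 */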
6358 #ifdef CONFIG_PM
6359 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6360 {
6361 pci_save_state(pdev);
6362 pci_disable_device(pdev);
6363
6364 if (mesg.event & PM_EVENT_SLEEP)
6365 pci_set_power_state(pdev, PCI_D3hot);
6366 }
6367
6368 int ata_pci_device_do_resume(struct pci_dev *pdev)
6369 {
6370 int rc;
6371
6372 pci_set_power_state(pdev, PCI_D0);
6373 pci_restore_state(pdev);
6374
6375 rc = pcim_enable_device(pdev);
6376 if (rc) {
6377 dev_err(&pdev->dev,
6378 "failed to enable device after resume (%d)\n", rc);
6379 return rc;
6380 }
6381
6382 pci_set_master(pdev);
6383 return 0;
6384 }
6385
6386 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6387 {
6388 struct ata_host *host = pci_get_drvdata(pdev);
6389 int rc = 0;
6390
6391 rc = ata_host_suspend(host, mesg);
6392 if (rc)
6393 return rc;
6394
6395 ata_pci_device_do_suspend(pdev, mesg);
6396
6397 return 0;
6398 }
6399
6400 int ata_pci_device_resume(struct pci_dev *pdev)
6401 {
6402 struct ata_host *host = pci_get_drvdata(pdev);
6403 int rc;
6404
6405 rc = ata_pci_device_do_resume(pdev);
6406 if (rc == 0)
6407 ata_host_resume(host);
6408 return rc;
6409 }
6410 #endif /* CONFIG_PM */
6411
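/*
 * Example: the usual pci_driver hookup for these helpers.  Sketch with
 * placeholder foo_* names; only the libata callbacks are real symbols:
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */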
6412 #endif /* CONFIG_PCI */
6413
6414 /**
6415 * ata_platform_remove_one - Platform layer callback for device removal
6416 * @pdev: Platform device that was removed
6417 *
6418 * Platform layer indicates to libata via this hook that hot-unplug or
6419 * module unload event has occurred. Detach all ports. Resource
6420 * release is handled via devres.
6421 *
6422 * LOCKING:
6423 * Inherited from platform layer (may sleep).
6424 */
6425 int ata_platform_remove_one(struct platform_device *pdev)
6426 {
6427 struct ata_host *host = platform_get_drvdata(pdev);
6428
6429 ata_host_detach(host);
6430
6431 return 0;
6432 }
6433
6434 static int __init ata_parse_force_one(char **cur,
6435 struct ata_force_ent *force_ent,
6436 const char **reason)
6437 {
6438 /* FIXME: Currently, there's no way to tag init const data and
6439 * using __initdata causes build failure on some versions of
6440 * gcc. Once __initdataconst is implemented, add const to the
6441 * following structure.
6442 */
6443 static struct ata_force_param force_tbl[] __initdata = {
6444 { "40c", .cbl = ATA_CBL_PATA40 },
6445 { "80c", .cbl = ATA_CBL_PATA80 },
6446 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6447 { "unk", .cbl = ATA_CBL_PATA_UNK },
6448 { "ign", .cbl = ATA_CBL_PATA_IGN },
6449 { "sata", .cbl = ATA_CBL_SATA },
6450 { "1.5Gbps", .spd_limit = 1 },
6451 { "3.0Gbps", .spd_limit = 2 },
6452 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6453 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6454 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6455 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6456 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6457 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6458 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6459 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6460 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6461 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6462 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6463 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6464 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6465 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6466 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6467 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6468 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6469 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6470 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6471 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6472 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6473 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6474 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6475 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6476 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6477 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6478 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6479 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6480 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6481 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6482 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6483 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6484 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6485 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6486 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6487 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6488 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6489 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6490 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6491 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6492 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6493 };
6494 char *start = *cur, *p = *cur;
6495 char *id, *val, *endp;
6496 const struct ata_force_param *match_fp = NULL;
6497 int nr_matches = 0, i;
6498
6499 /* find where this param ends and update *cur */
6500 while (*p != '\0' && *p != ',')
6501 p++;
6502
6503 if (*p == '\0')
6504 *cur = p;
6505 else
6506 *cur = p + 1;
6507
6508 *p = '\0';
6509
6510 /* parse */
6511 p = strchr(start, ':');
6512 if (!p) {
6513 val = strstrip(start);
6514 goto parse_val;
6515 }
6516 *p = '\0';
6517
6518 id = strstrip(start);
6519 val = strstrip(p + 1);
6520
6521 /* parse id */
6522 p = strchr(id, '.');
6523 if (p) {
6524 *p++ = '\0';
6525 force_ent->device = simple_strtoul(p, &endp, 10);
6526 if (p == endp || *endp != '\0') {
6527 *reason = "invalid device";
6528 return -EINVAL;
6529 }
6530 }
6531
6532 force_ent->port = simple_strtoul(id, &endp, 10);
6533 if (id == endp || *endp != '\0') {
6534 *reason = "invalid port/link";
6535 return -EINVAL;
6536 }
6537
6538 parse_val:
6539 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6540 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6541 const struct ata_force_param *fp = &force_tbl[i];
6542
6543 if (strncasecmp(val, fp->name, strlen(val)))
6544 continue;
6545
6546 nr_matches++;
6547 match_fp = fp;
6548
6549 if (strcasecmp(val, fp->name) == 0) {
6550 nr_matches = 1;
6551 break;
6552 }
6553 }
6554
6555 if (!nr_matches) {
6556 *reason = "unknown value";
6557 return -EINVAL;
6558 }
6559 if (nr_matches > 1) {
6560 *reason = "ambiguous value";
6561 return -EINVAL;
6562 }
6563
6564 force_ent->param = *match_fp;
6565
6566 return 0;
6567 }
6568
6569 static void __init ata_parse_force_param(void)
6570 {
6571 int idx = 0, size = 1;
6572 int last_port = -1, last_device = -1;
6573 char *p, *cur, *next;
6574
6575 /* calculate maximum number of params and allocate force_tbl */
6576 for (p = ata_force_param_buf; *p; p++)
6577 if (*p == ',')
6578 size++;
6579
6580 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6581 if (!ata_force_tbl) {
6582 printk(KERN_WARNING "ata: failed to allocate force table, "
6583 "libata.force ignored\n");
6584 return;
6585 }
6586
6587 /* parse and populate the table */
6588 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6589 const char *reason = "";
6590 struct ata_force_ent te = { .port = -1, .device = -1 };
6591
6592 next = cur;
6593 if (ata_parse_force_one(&next, &te, &reason)) {
6594 printk(KERN_WARNING "ata: failed to parse force "
6595 "parameter \"%s\" (%s)\n",
6596 cur, reason);
6597 continue;
6598 }
6599
6600 if (te.port == -1) {
6601 te.port = last_port;
6602 te.device = last_device;
6603 }
6604
6605 ata_force_tbl[idx++] = te;
6606
6607 last_port = te.port;
6608 last_device = te.device;
6609 }
6610
6611 ata_force_tbl_size = idx;
6612 }
6613
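/*
 * Example: boot parameter strings accepted by the parser above.  The
 * values come from force_tbl; IDs take the "port" or "port.device"
 * form handled by ata_parse_force_one():
 *
 *	libata.force=noncq
 *	libata.force=1:1.5Gbps,2.01:udma4
 *
 * The first form applies to every port and device; the second caps
 * port 1 at 1.5Gbps and forces UDMA/66 on device 1 of port 2.
 */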
6614 static int __init ata_init(void)
6615 {
6616 int rc;
6617
6618 ata_parse_force_param();
6619
6620 ata_acpi_register();
6621
6622 rc = ata_sff_init();
6623 if (rc) {
6624 kfree(ata_force_tbl);
6625 return rc;
6626 }
6627
6628 libata_transport_init();
6629 ata_scsi_transport_template = ata_attach_transport();
6630 if (!ata_scsi_transport_template) {
6631 ata_sff_exit();
6632 rc = -ENOMEM;
6633 goto err_out;
6634 }
6635
6636 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6637 return 0;
6638
6639 err_out:
6640 return rc;
6641 }
6642
6643 static void __exit ata_exit(void)
6644 {
6645 ata_release_transport(ata_scsi_transport_template);
6646 libata_transport_exit();
6647 ata_sff_exit();
6648 ata_acpi_unregister();
6649 kfree(ata_force_tbl);
6650 }
6651
6652 subsys_initcall(ata_init);
6653 module_exit(ata_exit);
6654
6655 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6656
6657 int ata_ratelimit(void)
6658 {
6659 return __ratelimit(&ratelimit);
6660 }
6661
6662 /**
6663 * ata_msleep - ATA EH owner aware msleep
6664 * @ap: ATA port to attribute the sleep to
6665 * @msecs: duration to sleep in milliseconds
6666 *
6667 * Sleeps @msecs. If the current task is owner of @ap's EH, the
6668 * ownership is released before going to sleep and reacquired
6669 * after the sleep is complete. IOW, other ports sharing the
6670 * @ap->host will be allowed to own the EH while this task is
6671 * sleeping.
6672 *
6673 * LOCKING:
6674 * Might sleep.
6675 */
6676 void ata_msleep(struct ata_port *ap, unsigned int msecs)
6677 {
6678 bool owns_eh = ap && ap->host->eh_owner == current;
6679
6680 if (owns_eh)
6681 ata_eh_release(ap);
6682
6683 msleep(msecs);
6684
6685 if (owns_eh)
6686 ata_eh_acquire(ap);
6687 }
6688
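/*
 * Example: a delay inside a custom reset method.  Sleeping through
 * ata_msleep() rather than msleep() lets sibling ports on the same
 * host run their EH meanwhile; foo_phy_kick() is a placeholder:
 *
 *	foo_phy_kick(ap);
 *	ata_msleep(ap, 20);
 */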
6689 /**
6690 * ata_wait_register - wait until register value changes
6691 * @ap: ATA port to wait register for, can be NULL
6692 * @reg: IO-mapped register
6693 * @mask: Mask to apply to read register value
6694 * @val: Wait condition
6695 * @interval: polling interval in milliseconds
6696 * @timeout: timeout in milliseconds
6697 *
6698 * Waiting for some bits of register to change is a common
6699 * operation for ATA controllers. This function reads 32bit LE
6700 * IO-mapped register @reg and tests for the following condition.
6701 *
6702 * (ioread32(@reg) & @mask) != @val
6703 *
6704 * If the condition is met, it returns; otherwise, the process is
6705 * repeated after @interval until timeout.
6706 *
6707 * LOCKING:
6708 * Kernel thread context (may sleep)
6709 *
6710 * RETURNS:
6711 * The final register value.
6712 */
6713 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6714 unsigned long interval, unsigned long timeout)
6715 {
6716 unsigned long deadline;
6717 u32 tmp;
6718
6719 tmp = ioread32(reg);
6720
6721 /* Calculate timeout _after_ the first read to make sure
6722 * preceding writes reach the controller before starting to
6723 * eat away the timeout.
6724 */
6725 deadline = ata_deadline(jiffies, timeout);
6726
6727 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6728 ata_msleep(ap, interval);
6729 tmp = ioread32(reg);
6730 }
6731
6732 return tmp;
6733 }
6734
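/*
 * Example: waiting for a busy bit to drop.  FOO_STATUS and FOO_BUSY
 * are placeholders; this polls every 10ms, gives up after 1s and
 * returns the last value read either way:
 *
 *	status = ata_wait_register(ap, mmio + FOO_STATUS, FOO_BUSY,
 *				   FOO_BUSY, 10, 1000);
 *	if (status & FOO_BUSY)
 *		return -EBUSY;
 */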
6735 /*
6736 * Dummy port_ops
6737 */
6738 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6739 {
6740 return AC_ERR_SYSTEM;
6741 }
6742
6743 static void ata_dummy_error_handler(struct ata_port *ap)
6744 {
6745 /* truly dummy */
6746 }
6747
6748 struct ata_port_operations ata_dummy_port_ops = {
6749 .qc_prep = ata_noop_qc_prep,
6750 .qc_issue = ata_dummy_qc_issue,
6751 .error_handler = ata_dummy_error_handler,
6752 .sched_eh = ata_std_sched_eh,
6753 .end_eh = ata_std_end_eh,
6754 };
6755
6756 const struct ata_port_info ata_dummy_port_info = {
6757 .port_ops = &ata_dummy_port_ops,
6758 };
6759
6760 /*
6761 * Utility print functions
6762 */
6763 int ata_port_printk(const struct ata_port *ap, const char *level,
6764 const char *fmt, ...)
6765 {
6766 struct va_format vaf;
6767 va_list args;
6768 int r;
6769
6770 va_start(args, fmt);
6771
6772 vaf.fmt = fmt;
6773 vaf.va = &args;
6774
6775 r = printk("%sata%u: %pV", level, ap->print_id, &vaf);
6776
6777 va_end(args);
6778
6779 return r;
6780 }
6781 EXPORT_SYMBOL(ata_port_printk);
6782
6783 int ata_link_printk(const struct ata_link *link, const char *level,
6784 const char *fmt, ...)
6785 {
6786 struct va_format vaf;
6787 va_list args;
6788 int r;
6789
6790 va_start(args, fmt);
6791
6792 vaf.fmt = fmt;
6793 vaf.va = &args;
6794
6795 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6796 r = printk("%sata%u.%02u: %pV",
6797 level, link->ap->print_id, link->pmp, &vaf);
6798 else
6799 r = printk("%sata%u: %pV",
6800 level, link->ap->print_id, &vaf);
6801
6802 va_end(args);
6803
6804 return r;
6805 }
6806 EXPORT_SYMBOL(ata_link_printk);
6807
6808 int ata_dev_printk(const struct ata_device *dev, const char *level,
6809 const char *fmt, ...)
6810 {
6811 struct va_format vaf;
6812 va_list args;
6813 int r;
6814
6815 va_start(args, fmt);
6816
6817 vaf.fmt = fmt;
6818 vaf.va = &args;
6819
6820 r = printk("%sata%u.%02u: %pV",
6821 level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6822 &vaf);
6823
6824 va_end(args);
6825
6826 return r;
6827 }
6828 EXPORT_SYMBOL(ata_dev_printk);
6829
6830 void ata_print_version(const struct device *dev, const char *version)
6831 {
6832 dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6833 }
6834 EXPORT_SYMBOL(ata_print_version);
6835
6836 /*
6837 * libata is essentially a library of internal helper functions for
6838 * low-level ATA host controller drivers. As such, the API/ABI is
6839 * likely to change as new drivers are added and updated.
6840 * Do not depend on ABI/API stability.
6841 */
6842 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6843 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6844 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6845 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6846 EXPORT_SYMBOL_GPL(sata_port_ops);
6847 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6848 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6849 EXPORT_SYMBOL_GPL(ata_link_next);
6850 EXPORT_SYMBOL_GPL(ata_dev_next);
6851 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6852 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6853 EXPORT_SYMBOL_GPL(ata_host_init);
6854 EXPORT_SYMBOL_GPL(ata_host_alloc);
6855 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6856 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6857 EXPORT_SYMBOL_GPL(ata_host_start);
6858 EXPORT_SYMBOL_GPL(ata_host_register);
6859 EXPORT_SYMBOL_GPL(ata_host_activate);
6860 EXPORT_SYMBOL_GPL(ata_host_detach);
6861 EXPORT_SYMBOL_GPL(ata_sg_init);
6862 EXPORT_SYMBOL_GPL(ata_qc_complete);
6863 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6864 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6865 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6866 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6867 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6868 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6869 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6870 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6871 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6872 EXPORT_SYMBOL_GPL(ata_mode_string);
6873 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6874 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6875 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6876 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6877 EXPORT_SYMBOL_GPL(ata_dev_disable);
6878 EXPORT_SYMBOL_GPL(sata_set_spd);
6879 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6880 EXPORT_SYMBOL_GPL(sata_link_debounce);
6881 EXPORT_SYMBOL_GPL(sata_link_resume);
6882 EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
6883 EXPORT_SYMBOL_GPL(ata_std_prereset);
6884 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6885 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6886 EXPORT_SYMBOL_GPL(ata_std_postreset);
6887 EXPORT_SYMBOL_GPL(ata_dev_classify);
6888 EXPORT_SYMBOL_GPL(ata_dev_pair);
6889 EXPORT_SYMBOL_GPL(ata_ratelimit);
6890 EXPORT_SYMBOL_GPL(ata_msleep);
6891 EXPORT_SYMBOL_GPL(ata_wait_register);
6892 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6893 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6894 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6895 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6896 EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
6897 EXPORT_SYMBOL_GPL(sata_scr_valid);
6898 EXPORT_SYMBOL_GPL(sata_scr_read);
6899 EXPORT_SYMBOL_GPL(sata_scr_write);
6900 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6901 EXPORT_SYMBOL_GPL(ata_link_online);
6902 EXPORT_SYMBOL_GPL(ata_link_offline);
6903 #ifdef CONFIG_PM
6904 EXPORT_SYMBOL_GPL(ata_host_suspend);
6905 EXPORT_SYMBOL_GPL(ata_host_resume);
6906 #endif /* CONFIG_PM */
6907 EXPORT_SYMBOL_GPL(ata_id_string);
6908 EXPORT_SYMBOL_GPL(ata_id_c_string);
6909 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6910 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6911
6912 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6913 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6914 EXPORT_SYMBOL_GPL(ata_timing_compute);
6915 EXPORT_SYMBOL_GPL(ata_timing_merge);
6916 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6917
6918 #ifdef CONFIG_PCI
6919 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6920 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6921 #ifdef CONFIG_PM
6922 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6923 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6924 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6925 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6926 #endif /* CONFIG_PM */
6927 #endif /* CONFIG_PCI */
6928
6929 EXPORT_SYMBOL_GPL(ata_platform_remove_one);
6930
6931 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6932 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6933 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6934 EXPORT_SYMBOL_GPL(ata_port_desc);
6935 #ifdef CONFIG_PCI
6936 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6937 #endif /* CONFIG_PCI */
6938 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6939 EXPORT_SYMBOL_GPL(ata_link_abort);
6940 EXPORT_SYMBOL_GPL(ata_port_abort);
6941 EXPORT_SYMBOL_GPL(ata_port_freeze);
6942 EXPORT_SYMBOL_GPL(sata_async_notification);
6943 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6944 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6945 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6946 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6947 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6948 EXPORT_SYMBOL_GPL(ata_do_eh);
6949 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6950
6951 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6952 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6953 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6954 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6955 EXPORT_SYMBOL_GPL(ata_cable_sata);