/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by: Tejun Heo <tj@kernel.org>
 * Please ALWAYS copy linux-ide@vger.kernel.org
 * on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 * Standards documents from:
 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
 * http://www.sata-io.org (SATA)
 * http://www.compactflash.org (CF)
 * http://www.qic.org (QIC157 - Tape and DSC)
 * http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/glob.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

#define CREATE_TRACE_POINTS
#include <trace/events/libata.h>

#include "libata.h"
#include "libata-transport.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

const struct ata_port_operations ata_base_port_ops = {
	.prereset = ata_std_prereset,
	.postreset = ata_std_postreset,
	.error_handler = ata_std_error_handler,
	.sched_eh = ata_std_sched_eh,
	.end_eh = ata_std_end_eh,
};

const struct ata_port_operations sata_port_ops = {
	.inherits = &ata_base_port_ops,

	.qc_defer = ata_std_qc_defer,
	.hardreset = sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

atomic_t ata_print_id = ATOMIC_INIT(0);

struct ata_force_param {
	const char *name;
	unsigned int cbl;
	int spd_limit;
	unsigned long xfer_mask;
	unsigned int horkage_on;
	unsigned int horkage_off;
	unsigned int lflags;
};

struct ata_force_ent {
	int port;
	int device;
	struct ata_force_param param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
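
/*
 * Example (illustrative, based on the ata_force_cbl() kernel-doc below):
 * booting with "libata.force=1.00:40c,udma4" both forces a 40-wire cable
 * type on the port of device 1.00 and caps that device at UDMA/66; when
 * entries conflict, the last matching entry wins.
 */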

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


static bool ata_sstatus_online(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;
}

/**
 * ata_link_next - link iteration helper
 * @link: the previous link, NULL to start
 * @ap: ATA port containing links to iterate
 * @mode: iteration mode, one of ATA_LITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}

/**
 * ata_dev_next - device iteration helper
 * @dev: the previous device, NULL to start
 * @link: ATA link containing devices to iterate
 * @mode: iteration mode, one of ATA_DITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

check:
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
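
/*
 * Example (illustrative sketch, not part of this file): callers rarely
 * use ata_link_next()/ata_dev_next() directly; the usual idiom is the
 * ata_for_each_link()/ata_for_each_dev() wrappers from <linux/libata.h>
 * built on top of them, e.g. from EH context:
 *
 *	struct ata_link *link;
 *	struct ata_device *dev;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_for_each_dev(dev, link, ENABLED)
 *			ata_dev_dbg(dev, "device is enabled\n");
 */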

/**
 * ata_dev_phys_link - find physical link for a device
 * @dev: ATA device to look up physical link for
 *
 * Look up physical link which @dev is attached to. Note that
 * this is different from @dev->link only when @dev is on slave
 * link. For all other cases, it's the same as @dev->link.
 *
 * LOCKING:
 * Don't care.
 *
 * RETURNS:
 * Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}

/**
 * ata_force_cbl - force cable type according to libata.force
 * @ap: ATA port of interest
 *
 * Force cable type according to libata.force and whine about it.
 * The last entry which has matching port number is used, so it
 * can be specified as part of device force parameters. For
 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 * same effect.
 *
 * LOCKING:
 * EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 * ata_force_link_limits - force link limits according to libata.force
 * @link: ATA link of interest
 *
 * Force link flags and SATA spd limit according to libata.force
 * and whine about it. When only the port part is specified
 * (e.g. 1:), the limit applies to all links connected to both
 * the host link and all fan-out ports connected via PMP. If the
 * device part is specified as 0 (e.g. 1.00:), it specifies the
 * first fan-out link not the host link. Device number 15 always
 * points to the host link whether PMP is attached or not. If the
 * controller has slave link, device number 16 points to it.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}

/**
 * ata_force_xfermask - force xfermask according to libata.force
 * @dev: ATA device of interest
 *
 * Force xfer_mask according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}

/**
 * ata_force_horkage - force horkage according to libata.force
 * @dev: ATA device of interest
 *
 * Force horkage according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}

/**
 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 * @opcode: SCSI opcode
 *
 * Determine ATAPI command type from @opcode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall thru */
	default:
		return ATAPI_MISC;
	}
}

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @pmp: Port multiplier port
 * @is_cmd: This FIS is for command
 * @fis: Buffer into which data will be output
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = tf->auxiliary & 0xff;
	fis[17] = (tf->auxiliary >> 8) & 0xff;
	fis[18] = (tf->auxiliary >> 16) & 0xff;
	fis[19] = (tf->auxiliary >> 24) & 0xff;
}
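
/*
 * Example (illustrative sketch): a SATA driver typically serializes a
 * queued command's taskfile into its 20-byte command FIS area like so:
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
 *
 * afterwards fis[0] is 0x27 (Register - Host to Device) and bit 7 of
 * fis[1] is set because @is_cmd was 1.
 */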

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
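
/*
 * Worked example of the lookup above: an LBA48 FUA write on a
 * DMA-capable device indexes ata_rw_cmds[16 + 4 + 2 + 1], i.e.
 * ATA_CMD_WRITE_FUA_EXT, while a PIO read without multi-count uses
 * ata_rw_cmds[8 + 0 + 0 + 0], i.e. ATA_CMD_PIO_READ.
 */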

/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf. This function can handle all
 * three address formats - LBA, LBA48 and CHS. tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (!dev || tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			sect = 1; /* oh well */
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
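
/*
 * Worked example of the CHS branch above: with dev->heads == 16 and
 * dev->sectors == 63, a taskfile carrying cyl 1, head 2, sect 3 decodes
 * to (1 * 16 + 2) * 63 + 3 - 1 == 1136.
 */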

/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
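
/*
 * Example: the two helpers above are inverses of each other:
 *
 *	unsigned long pio, mwdma, udma;
 *	unsigned long xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *
 * leaves pio == 0x1f, mwdma == 0x07 and udma == 0x3f again.
 */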

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask. Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
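
/*
 * Example: if the highest bit set in an xfer_mask is the UDMA2 bit,
 * ata_xfer_mask2mode() returns XFER_UDMA_2; in the other direction,
 * ata_xfer_mode2mask(XFER_UDMA_2) yields the mask with the UDMA0, UDMA1
 * and UDMA2 bits set, and ata_xfer_mode2shift(XFER_UDMA_2) returns
 * ATA_SHIFT_UDMA.
 */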

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those. It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively. However, the SerialATA
	 * spec has never mentioned using different signatures
	 * for ATA/ATAPI devices. Then, the Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
	 * ATA/ATAPI-7 dropped the descriptions of 0x3c/0xc3 and
	 * 0x69/0x96 shortly thereafter and described them as reserved
	 * for SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature. This is worked around in
	 * ata_dev_read_id().
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
		DPRINTK("found ZAC device by sig\n");
		return ATA_DEV_ZAC;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks. Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null. @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
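
/*
 * Example (illustrative sketch): pulling the model string out of
 * IDENTIFY data, as device configuration code elsewhere in libata does:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 *
 * model then holds a NUL-terminated, space-trimmed product string.
 */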

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

/**
 * ata_read_native_max_address - Read native max address
 * @dev: target device
 * @max_sectors: out parameter for the result native max address
 *
 * Perform an LBA48 or LBA28 native size query upon the device in
 * question.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted by the drive.
 * -EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}

/**
 * ata_set_max_sectors - Set max sectors
 * @dev: target device
 * @new_sectors: new max sectors value to set for the device
 *
 * Set max sectors of @dev to @new_sectors.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted or denied (due to
 * previous non-volatile SET_MAX) by the drive. -EIO on other
 * errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media. The caller must check
 * the drive has the HPA feature set enabled.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				     "HPA detected: current %llu, native %llu\n",
				     (unsigned long long)sectors,
				     (unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				     "native sectors (%llu) is smaller than sectors (%llu)\n",
				     (unsigned long long)native_sectors,
				     (unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			     "HPA unlocked: %llu -> %llu, native %llu\n",
			     (unsigned long long)sectors,
			     (unsigned long long)new_sectors,
			     (unsigned long long)native_sectors);
	}

	return 0;
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x\n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x\n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sgl: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 * @timeout: Timeout in msecs (0 for default)
 *
 * Executes libata internal command with timeout. @tf contains
 * command on entry and result on return. Timeout and error
 * conditions are reported via return value. No recovery action
 * is taken after a command times out. It's the caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None. Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given. This breaks
	 * ata_tag_internal() test for those drivers. Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here. If we lose, the
		 * following test prevents us from completing the qc
		 * twice. If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 * @timeout: Timeout in msecs (0 for default)
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None. Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Don't set IORDY if we're preparing for reset. IORDY may
	 * lead to controller lock up on certain controllers if the
	 * port is not occupied. See bko#11703 for details.
	 */
	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
		return 0;
	/* Controller doesn't support IORDY. Probably a pointless
	 * check as the caller should know this.
	 */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
	if (ata_id_is_cfa(adev->id)
	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 * ata_pio_mask_no_iordy - Return the non IORDY mask
 * @adev: ATA device
 *
 * Compute the highest mode possible if we are not using iordy. Return
 * -1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 * ata_do_dev_read_id - default ID read method
 * @dev: device
 * @tf: proposed taskfile
 * @id: data buffer
 *
 * Issue the identify taskfile and hand back the buffer containing
 * identify data. For some RAID controllers and for pre ATA devices
 * this function is wrapped or replaced by the driver
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, u16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}

/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device. ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 * now we abort if we hit that case.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		/* fall through */
	case ATA_DEV_ATA:
1871 case ATA_DEV_ZAC:
1872 tf.command = ATA_CMD_ID_ATA;
1873 break;
1874 case ATA_DEV_ATAPI:
1875 tf.command = ATA_CMD_ID_ATAPI;
1876 break;
1877 default:
1878 rc = -ENODEV;
1879 reason = "unsupported class";
1880 goto err_out;
1881 }
1882
1883 tf.protocol = ATA_PROT_PIO;
1884
1885 /* Some devices choke if TF registers contain garbage. Make
1886 * sure those are properly initialized.
1887 */
1888 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1889
1890 /* Device presence detection is unreliable on some
1891 * controllers. Always poll IDENTIFY if available.
1892 */
1893 tf.flags |= ATA_TFLAG_POLLING;
1894
1895 if (ap->ops->read_id)
1896 err_mask = ap->ops->read_id(dev, &tf, id);
1897 else
1898 err_mask = ata_do_dev_read_id(dev, &tf, id);
1899
1900 if (err_mask) {
1901 if (err_mask & AC_ERR_NODEV_HINT) {
1902 ata_dev_dbg(dev, "NODEV after polling detection\n");
1903 return -ENOENT;
1904 }
1905
1906 if (is_semb) {
1907 ata_dev_info(dev,
1908 "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1909 /* SEMB is not supported yet */
1910 *p_class = ATA_DEV_SEMB_UNSUP;
1911 return 0;
1912 }
1913
1914 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1915 /* Device or controller might have reported
1916 * the wrong device class. Give a shot at the
1917 * other IDENTIFY if the current one is
1918 * aborted by the device.
1919 */
1920 if (may_fallback) {
1921 may_fallback = 0;
1922
1923 if (class == ATA_DEV_ATA)
1924 class = ATA_DEV_ATAPI;
1925 else
1926 class = ATA_DEV_ATA;
1927 goto retry;
1928 }
1929
1930 /* Control reaches here iff the device aborted
1931 * both flavors of IDENTIFYs which happens
1932 * sometimes with phantom devices.
1933 */
1934 ata_dev_dbg(dev,
1935 "both IDENTIFYs aborted, assuming NODEV\n");
1936 return -ENOENT;
1937 }
1938
1939 rc = -EIO;
1940 reason = "I/O error";
1941 goto err_out;
1942 }
1943
1944 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1945 ata_dev_dbg(dev, "dumping IDENTIFY data, "
1946 "class=%d may_fallback=%d tried_spinup=%d\n",
1947 class, may_fallback, tried_spinup);
1948 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1949 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1950 }
1951
1952 /* Falling back doesn't make sense if ID data was read
1953 * successfully at least once.
1954 */
1955 may_fallback = 0;
1956
1957 swap_buf_le16(id, ATA_ID_WORDS);
1958
1959 /* sanity check */
1960 rc = -EINVAL;
1961 reason = "device reports invalid type";
1962
1963 if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1964 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1965 goto err_out;
1966 if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1967 ata_id_is_ata(id)) {
1968 ata_dev_dbg(dev,
1969 "host indicates ignore ATA devices, ignored\n");
1970 return -ENOENT;
1971 }
1972 } else {
1973 if (ata_id_is_ata(id))
1974 goto err_out;
1975 }
1976
1977 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1978 tried_spinup = 1;
1979 /*
1980 * Drive powered-up in standby mode, and requires a specific
1981 * SET_FEATURES spin-up subcommand before it will accept
1982 * anything other than the original IDENTIFY command.
1983 */
1984 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1985 if (err_mask && id[2] != 0x738c) {
1986 rc = -EIO;
1987 reason = "SPINUP failed";
1988 goto err_out;
1989 }
1990 /*
1991 * If the drive initially returned incomplete IDENTIFY info,
1992 * we now must reissue the IDENTIFY command.
1993 */
1994 if (id[2] == 0x37c8)
1995 goto retry;
1996 }
1997
1998 if ((flags & ATA_READID_POSTRESET) &&
1999 (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
2000 /*
2001 * The exact sequence expected by certain pre-ATA4 drives is:
2002 * SRST RESET
2003 * IDENTIFY (optional in early ATA)
2004 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2005 * anything else..
2006 * Some drives were very specific about that exact sequence.
2007 *
2008 * Note that ATA4 says lba is mandatory so the second check
2009 * should never trigger.
2010 */
2011 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2012 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2013 if (err_mask) {
2014 rc = -EIO;
2015 reason = "INIT_DEV_PARAMS failed";
2016 goto err_out;
2017 }
2018
2019 /* current CHS translation info (id[53-58]) might be
2020 * changed. Reread the identify device info.
2021 */
2022 flags &= ~ATA_READID_POSTRESET;
2023 goto retry;
2024 }
2025 }
2026
2027 *p_class = class;
2028
2029 return 0;
2030
2031 err_out:
2032 if (ata_msg_warn(ap))
2033 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2034 reason, err_mask);
2035 return rc;
2036 }
2037
2038 static int ata_do_link_spd_horkage(struct ata_device *dev)
2039 {
2040 struct ata_link *plink = ata_dev_phys_link(dev);
2041 u32 target, target_limit;
2042
2043 if (!sata_scr_valid(plink))
2044 return 0;
2045
2046 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2047 target = 1;
2048 else
2049 return 0;
2050
2051 target_limit = (1 << target) - 1;
2052
2053 /* if already on stricter limit, no need to push further */
2054 if (plink->sata_spd_limit <= target_limit)
2055 return 0;
2056
2057 plink->sata_spd_limit = target_limit;
2058
2059 /* Request another EH round by returning -EAGAIN if link is
2060 * going faster than the target speed. Forward progress is
2061 * guaranteed by setting sata_spd_limit to target_limit above.
2062 */
2063 if (plink->sata_spd > target) {
2064 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2065 sata_spd_string(target));
2066 return -EAGAIN;
2067 }
2068 return 0;
2069 }
2070
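/* ata_dev_knobble - detect a PATA device sitting behind a SATA bridge.
 * Returns non-zero when the port reports a SATA cable but the device's
 * IDENTIFY data lacks the SATA bits, i.e. a bridged PATA drive that
 * needs the conservative bridge limits applied in ata_dev_configure(),
 * unless the device is known-good via ATA_HORKAGE_BRIDGE_OK.
 */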
2071 static inline u8 ata_dev_knobble(struct ata_device *dev)
2072 {
2073 struct ata_port *ap = dev->link->ap;
2074
2075 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2076 return 0;
2077
2078 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2079 }
2080
2081 static int ata_dev_config_ncq(struct ata_device *dev,
2082 char *desc, size_t desc_sz)
2083 {
2084 struct ata_port *ap = dev->link->ap;
2085 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2086 unsigned int err_mask;
2087 char *aa_desc = "";
2088
2089 if (!ata_id_has_ncq(dev->id)) {
2090 desc[0] = '\0';
2091 return 0;
2092 }
2093 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2094 snprintf(desc, desc_sz, "NCQ (not used)");
2095 return 0;
2096 }
2097 if (ap->flags & ATA_FLAG_NCQ) {
2098 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2099 dev->flags |= ATA_DFLAG_NCQ;
2100 }
2101
2102 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2103 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2104 ata_id_has_fpdma_aa(dev->id)) {
2105 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2106 SATA_FPDMA_AA);
2107 if (err_mask) {
2108 ata_dev_err(dev,
2109 "failed to enable AA (error_mask=0x%x)\n",
2110 err_mask);
2111 if (err_mask != AC_ERR_DEV) {
2112 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2113 return -EIO;
2114 }
2115 } else
2116 aa_desc = ", AA";
2117 }
2118
2119 if (hdepth >= ddepth)
2120 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2121 else
2122 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2123 ddepth, aa_desc);
2124
2125 if ((ap->flags & ATA_FLAG_FPDMA_AUX) &&
2126 ata_id_has_ncq_send_and_recv(dev->id)) {
2127 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2128 0, ap->sector_buf, 1);
2129 if (err_mask) {
2130 ata_dev_dbg(dev,
2131 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2132 err_mask);
2133 } else {
2134 u8 *cmds = dev->ncq_send_recv_cmds;
2135
2136 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2137 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2138
2139 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2140 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2141 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2142 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2143 }
2144 }
2145 }
2146
2147 return 0;
2148 }
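
/*
 * Example of the description built by ata_dev_config_ncq() above
 * (values assumed for illustration): with the host limited to
 * ATA_MAX_QUEUE - 1 == 31 tags and a device reporting a queue depth
 * of 32, hdepth (31) is smaller than ddepth (32), producing the
 * familiar "NCQ (depth 31/32)" string for the probe message.
 */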
2149
2150 static void ata_dev_config_sense_reporting(struct ata_device *dev)
2151 {
2152 unsigned int err_mask;
2153
2154 if (!ata_id_has_sense_reporting(dev->id))
2155 return;
2156
2157 if (ata_id_sense_reporting_enabled(dev->id))
2158 return;
2159
2160 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2161 if (err_mask) {
2162 ata_dev_dbg(dev,
2163 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2164 err_mask);
2165 }
2166 }
2167
2168 /**
2169 * ata_dev_configure - Configure the specified ATA/ATAPI device
2170 * @dev: Target device to configure
2171 *
2172 * Configure @dev according to @dev->id. Generic and low-level
2173 * driver specific fixups are also applied.
2174 *
2175 * LOCKING:
2176 * Kernel thread context (may sleep)
2177 *
2178 * RETURNS:
2179 * 0 on success, -errno otherwise
2180 */
2181 int ata_dev_configure(struct ata_device *dev)
2182 {
2183 struct ata_port *ap = dev->link->ap;
2184 struct ata_eh_context *ehc = &dev->link->eh_context;
2185 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2186 const u16 *id = dev->id;
2187 unsigned long xfer_mask;
2188 unsigned int err_mask;
2189 char revbuf[7]; /* XYZ-99\0 */
2190 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2191 char modelbuf[ATA_ID_PROD_LEN+1];
2192 int rc;
2193
2194 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2195 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2196 return 0;
2197 }
2198
2199 if (ata_msg_probe(ap))
2200 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2201
2202 /* set horkage */
2203 dev->horkage |= ata_dev_blacklisted(dev);
2204 ata_force_horkage(dev);
2205
2206 if (dev->horkage & ATA_HORKAGE_DISABLE) {
2207 ata_dev_info(dev, "unsupported device, disabling\n");
2208 ata_dev_disable(dev);
2209 return 0;
2210 }
2211
2212 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2213 dev->class == ATA_DEV_ATAPI) {
2214 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2215 atapi_enabled ? "not supported with this driver"
2216 : "disabled");
2217 ata_dev_disable(dev);
2218 return 0;
2219 }
2220
2221 rc = ata_do_link_spd_horkage(dev);
2222 if (rc)
2223 return rc;
2224
2225 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2226 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2227 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2228 dev->horkage |= ATA_HORKAGE_NOLPM;
2229
2230 if (dev->horkage & ATA_HORKAGE_NOLPM) {
2231 ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2232 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2233 }
2234
2235 /* let ACPI work its magic */
2236 rc = ata_acpi_on_devcfg(dev);
2237 if (rc)
2238 return rc;
2239
2240 /* massage HPA, do it early as it might change IDENTIFY data */
2241 rc = ata_hpa_resize(dev);
2242 if (rc)
2243 return rc;
2244
2245 /* print device capabilities */
2246 if (ata_msg_probe(ap))
2247 ata_dev_dbg(dev,
2248 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2249 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2250 __func__,
2251 id[49], id[82], id[83], id[84],
2252 id[85], id[86], id[87], id[88]);
2253
2254 /* initialize to-be-configured parameters */
2255 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2256 dev->max_sectors = 0;
2257 dev->cdb_len = 0;
2258 dev->n_sectors = 0;
2259 dev->cylinders = 0;
2260 dev->heads = 0;
2261 dev->sectors = 0;
2262 dev->multi_count = 0;
2263
2264 /*
2265 * common ATA, ATAPI feature tests
2266 */
2267
2268 /* find max transfer mode; for printk only */
2269 xfer_mask = ata_id_xfermask(id);
2270
2271 if (ata_msg_probe(ap))
2272 ata_dump_id(id);
2273
2274 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2275 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2276 sizeof(fwrevbuf));
2277
2278 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2279 sizeof(modelbuf));
2280
2281 /* ATA-specific feature tests */
2282 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2283 if (ata_id_is_cfa(id)) {
2284 /* CPRM may make this media unusable */
2285 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2286 ata_dev_warn(dev,
2287 "supports DRM functions and may not be fully accessible\n");
2288 snprintf(revbuf, 7, "CFA");
2289 } else {
2290 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2291 /* Warn the user if the device has TPM extensions */
2292 if (ata_id_has_tpm(id))
2293 ata_dev_warn(dev,
2294 "supports DRM functions and may not be fully accessible\n");
2295 }
2296
2297 dev->n_sectors = ata_id_n_sectors(id);
2298
2299 /* get current R/W Multiple count setting */
2300 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2301 unsigned int max = dev->id[47] & 0xff;
2302 unsigned int cnt = dev->id[59] & 0xff;
2303 /* only recognize/allow powers of two here */
2304 if (is_power_of_2(max) && is_power_of_2(cnt))
2305 if (cnt <= max)
2306 dev->multi_count = cnt;
2307 }
2308
2309 if (ata_id_has_lba(id)) {
2310 const char *lba_desc;
2311 char ncq_desc[24];
2312
2313 lba_desc = "LBA";
2314 dev->flags |= ATA_DFLAG_LBA;
2315 if (ata_id_has_lba48(id)) {
2316 dev->flags |= ATA_DFLAG_LBA48;
2317 lba_desc = "LBA48";
2318
2319 if (dev->n_sectors >= (1UL << 28) &&
2320 ata_id_has_flush_ext(id))
2321 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2322 }
2323
2324 /* config NCQ */
2325 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2326 if (rc)
2327 return rc;
2328
2329 /* print device info to dmesg */
2330 if (ata_msg_drv(ap) && print_info) {
2331 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2332 revbuf, modelbuf, fwrevbuf,
2333 ata_mode_string(xfer_mask));
2334 ata_dev_info(dev,
2335 "%llu sectors, multi %u: %s %s\n",
2336 (unsigned long long)dev->n_sectors,
2337 dev->multi_count, lba_desc, ncq_desc);
2338 }
2339 } else {
2340 /* CHS */
2341
2342 /* Default translation */
2343 dev->cylinders = id[1];
2344 dev->heads = id[3];
2345 dev->sectors = id[6];
2346
2347 if (ata_id_current_chs_valid(id)) {
2348 /* Current CHS translation is valid. */
2349 dev->cylinders = id[54];
2350 dev->heads = id[55];
2351 dev->sectors = id[56];
2352 }
2353
2354 /* print device info to dmesg */
2355 if (ata_msg_drv(ap) && print_info) {
2356 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2357 revbuf, modelbuf, fwrevbuf,
2358 ata_mode_string(xfer_mask));
2359 ata_dev_info(dev,
2360 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2361 (unsigned long long)dev->n_sectors,
2362 dev->multi_count, dev->cylinders,
2363 dev->heads, dev->sectors);
2364 }
2365 }
2366
2367 /* Check and mark DevSlp capability. Get DevSlp timing variables
2368 * from SATA Settings page of Identify Device Data Log.
2369 */
2370 if (ata_id_has_devslp(dev->id)) {
2371 u8 *sata_setting = ap->sector_buf;
2372 int i, j;
2373
2374 dev->flags |= ATA_DFLAG_DEVSLP;
2375 err_mask = ata_read_log_page(dev,
2376 ATA_LOG_SATA_ID_DEV_DATA,
2377 ATA_LOG_SATA_SETTINGS,
2378 sata_setting,
2379 1);
2380 if (err_mask)
2381 ata_dev_dbg(dev,
2382 "failed to get Identify Device Data, Emask 0x%x\n",
2383 err_mask);
2384 else
2385 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2386 j = ATA_LOG_DEVSLP_OFFSET + i;
2387 dev->devslp_timing[i] = sata_setting[j];
2388 }
2389 }
2390 ata_dev_config_sense_reporting(dev);
2391 dev->cdb_len = 16;
2392 }
2393
2394 /* ATAPI-specific feature tests */
2395 else if (dev->class == ATA_DEV_ATAPI) {
2396 const char *cdb_intr_string = "";
2397 const char *atapi_an_string = "";
2398 const char *dma_dir_string = "";
2399 u32 sntf;
2400
2401 rc = atapi_cdb_len(id);
2402 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2403 if (ata_msg_warn(ap))
2404 ata_dev_warn(dev, "unsupported CDB len\n");
2405 rc = -EINVAL;
2406 goto err_out_nosup;
2407 }
2408 dev->cdb_len = (unsigned int) rc;
2409
2410 /* Enable ATAPI AN if both the host and device have
2411 * the support. If PMP is attached, SNTF is required
2412 * to enable ATAPI AN to discern between PHY status
2413 * changed notifications and ATAPI ANs.
2414 */
2415 if (atapi_an &&
2416 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2417 (!sata_pmp_attached(ap) ||
2418 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2419 /* issue SET feature command to turn this on */
2420 err_mask = ata_dev_set_feature(dev,
2421 SETFEATURES_SATA_ENABLE, SATA_AN);
2422 if (err_mask)
2423 ata_dev_err(dev,
2424 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2425 err_mask);
2426 else {
2427 dev->flags |= ATA_DFLAG_AN;
2428 atapi_an_string = ", ATAPI AN";
2429 }
2430 }
2431
2432 if (ata_id_cdb_intr(dev->id)) {
2433 dev->flags |= ATA_DFLAG_CDB_INTR;
2434 cdb_intr_string = ", CDB intr";
2435 }
2436
2437 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2438 dev->flags |= ATA_DFLAG_DMADIR;
2439 dma_dir_string = ", DMADIR";
2440 }
2441
2442 if (ata_id_has_da(dev->id)) {
2443 dev->flags |= ATA_DFLAG_DA;
2444 zpodd_init(dev);
2445 }
2446
2447 /* print device info to dmesg */
2448 if (ata_msg_drv(ap) && print_info)
2449 ata_dev_info(dev,
2450 "ATAPI: %s, %s, max %s%s%s%s\n",
2451 modelbuf, fwrevbuf,
2452 ata_mode_string(xfer_mask),
2453 cdb_intr_string, atapi_an_string,
2454 dma_dir_string);
2455 }
2456
2457 /* determine max_sectors */
2458 dev->max_sectors = ATA_MAX_SECTORS;
2459 if (dev->flags & ATA_DFLAG_LBA48)
2460 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2461
2462 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2463 200 sectors */
2464 if (ata_dev_knobble(dev)) {
2465 if (ata_msg_drv(ap) && print_info)
2466 ata_dev_info(dev, "applying bridge limits\n");
2467 dev->udma_mask &= ATA_UDMA5;
2468 dev->max_sectors = ATA_MAX_SECTORS;
2469 }
2470
2471 if ((dev->class == ATA_DEV_ATAPI) &&
2472 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2473 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2474 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2475 }
2476
2477 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2478 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2479 dev->max_sectors);
2480
2481 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2482 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2483 dev->max_sectors);
2484
2485 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2486 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2487
2488 if (ap->ops->dev_config)
2489 ap->ops->dev_config(dev);
2490
2491 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2492 /* Let the user know. We don't want to disallow opens for
2493 rescue purposes, or in case the vendor is just a blithering
2494 idiot. Do this after the dev_config call as some controllers
2495 with buggy firmware may want to avoid reporting false device
2496 bugs */
2497
2498 if (print_info) {
2499 ata_dev_warn(dev,
2500 "Drive reports diagnostics failure. This may indicate a drive\n");
2501 ata_dev_warn(dev,
2502 "fault or invalid emulation. Contact drive vendor for information.\n");
2503 }
2504 }
2505
2506 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2507 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2508 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
2509 }
2510
2511 return 0;
2512
2513 err_out_nosup:
2514 if (ata_msg_probe(ap))
2515 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2516 return rc;
2517 }
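
/*
 * Controller-specific fixups run via the ->dev_config hook invoked
 * near the end of ata_dev_configure() above. A minimal sketch (the
 * foo_* name and the 128-sector clamp are assumptions, not a real
 * driver):
 *
 *	static void foo_dev_config(struct ata_device *adev)
 *	{
 *		// e.g. clamp transfer size for a controller erratum
 *		adev->max_sectors = min_t(unsigned int,
 *					  adev->max_sectors, 128);
 *	}
 */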
2518
2519 /**
2520 * ata_cable_40wire - return 40 wire cable type
2521 * @ap: port
2522 *
2523 * Helper method for drivers which want to hardwire 40 wire cable
2524 * detection.
2525 */
2526
2527 int ata_cable_40wire(struct ata_port *ap)
2528 {
2529 return ATA_CBL_PATA40;
2530 }
2531
2532 /**
2533 * ata_cable_80wire - return 80 wire cable type
2534 * @ap: port
2535 *
2536 * Helper method for drivers which want to hardwire 80 wire cable
2537 * detection.
2538 */
2539
2540 int ata_cable_80wire(struct ata_port *ap)
2541 {
2542 return ATA_CBL_PATA80;
2543 }
2544
2545 /**
2546 * ata_cable_unknown - return unknown PATA cable.
2547 * @ap: port
2548 *
2549 * Helper method for drivers which have no PATA cable detection.
2550 */
2551
2552 int ata_cable_unknown(struct ata_port *ap)
2553 {
2554 return ATA_CBL_PATA_UNK;
2555 }
2556
2557 /**
2558 * ata_cable_ignore - return ignored PATA cable.
2559 * @ap: port
2560 *
2561 * Helper method for drivers which don't use cable type to limit
2562 * transfer mode.
2563 */
2564 int ata_cable_ignore(struct ata_port *ap)
2565 {
2566 return ATA_CBL_PATA_IGN;
2567 }
2568
2569 /**
2570 * ata_cable_sata - return SATA cable type
2571 * @ap: port
2572 *
2573 * Helper method for drivers which have SATA cables
2574 */
2575
2576 int ata_cable_sata(struct ata_port *ap)
2577 {
2578 return ATA_CBL_SATA;
2579 }
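
/*
 * Usage sketch for the cable helpers above (illustrative only; the
 * foo_port_ops name is hypothetical): a driver that knows its cable
 * type simply wires one of them into ->cable_detect:
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};
 *
 * The result is then used when limiting transfer modes, see
 * ata_dev_xfermask().
 */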
2580
2581 /**
2582 * ata_bus_probe - Reset and probe ATA bus
2583 * @ap: Bus to probe
2584 *
2585 * Master ATA bus probing function. Initiates a hardware-dependent
2586 * bus reset, then attempts to identify any devices found on
2587 * the bus.
2588 *
2589 * LOCKING:
2590 * PCI/etc. bus probe sem.
2591 *
2592 * RETURNS:
2593 * Zero on success, negative errno otherwise.
2594 */
2595
2596 int ata_bus_probe(struct ata_port *ap)
2597 {
2598 unsigned int classes[ATA_MAX_DEVICES];
2599 int tries[ATA_MAX_DEVICES];
2600 int rc;
2601 struct ata_device *dev;
2602
2603 ata_for_each_dev(dev, &ap->link, ALL)
2604 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2605
2606 retry:
2607 ata_for_each_dev(dev, &ap->link, ALL) {
2608 /* If we issue an SRST then an ATA drive (not ATAPI)
2609 * may change configuration and be in PIO0 timing. If
2610 * we do a hard reset (or are coming from power on)
2611 * this is true for ATA or ATAPI. Until we've set a
2612 * suitable controller mode we should not touch the
2613 * bus as we may be talking too fast.
2614 */
2615 dev->pio_mode = XFER_PIO_0;
2616 dev->dma_mode = 0xff;
2617
2618 /* If the controller has a pio mode setup function
2619 * then use it to set the chipset to rights. Don't
2620 * touch the DMA setup as that will be dealt with when
2621 * configuring devices.
2622 */
2623 if (ap->ops->set_piomode)
2624 ap->ops->set_piomode(ap, dev);
2625 }
2626
2627 /* reset and determine device classes */
2628 ap->ops->phy_reset(ap);
2629
2630 ata_for_each_dev(dev, &ap->link, ALL) {
2631 if (dev->class != ATA_DEV_UNKNOWN)
2632 classes[dev->devno] = dev->class;
2633 else
2634 classes[dev->devno] = ATA_DEV_NONE;
2635
2636 dev->class = ATA_DEV_UNKNOWN;
2637 }
2638
2639 /* read IDENTIFY page and configure devices. We have to do the identify
2640 specific sequence bass-ackwards so that PDIAG- is released by
2641 the slave device */
2642
2643 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2644 if (tries[dev->devno])
2645 dev->class = classes[dev->devno];
2646
2647 if (!ata_dev_enabled(dev))
2648 continue;
2649
2650 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2651 dev->id);
2652 if (rc)
2653 goto fail;
2654 }
2655
2656 /* Now ask for the cable type as PDIAG- should have been released */
2657 if (ap->ops->cable_detect)
2658 ap->cbl = ap->ops->cable_detect(ap);
2659
2660 /* We may have SATA bridge glue hiding here irrespective of
2661 * the reported cable types and sensed types. When SATA
2662 * drives indicate we have a bridge, we don't know which end
2663 * of the link the bridge is on, which is a problem.
2664 */
2665 ata_for_each_dev(dev, &ap->link, ENABLED)
2666 if (ata_id_is_sata(dev->id))
2667 ap->cbl = ATA_CBL_SATA;
2668
2669 /* After the identify sequence we can now set up the devices. We do
2670 this in the normal order so that the user doesn't get confused */
2671
2672 ata_for_each_dev(dev, &ap->link, ENABLED) {
2673 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2674 rc = ata_dev_configure(dev);
2675 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2676 if (rc)
2677 goto fail;
2678 }
2679
2680 /* configure transfer mode */
2681 rc = ata_set_mode(&ap->link, &dev);
2682 if (rc)
2683 goto fail;
2684
2685 ata_for_each_dev(dev, &ap->link, ENABLED)
2686 return 0;
2687
2688 return -ENODEV;
2689
2690 fail:
2691 tries[dev->devno]--;
2692
2693 switch (rc) {
2694 case -EINVAL:
2695 /* eeek, something went very wrong, give up */
2696 tries[dev->devno] = 0;
2697 break;
2698
2699 case -ENODEV:
2700 /* give it just one more chance, then fall through to -EIO handling */
2701 tries[dev->devno] = min(tries[dev->devno], 1);
2702 case -EIO:
2703 if (tries[dev->devno] == 1) {
2704 /* This is the last chance, better to slow
2705 * down than lose it.
2706 */
2707 sata_down_spd_limit(&ap->link, 0);
2708 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2709 }
2710 }
2711
2712 if (!tries[dev->devno])
2713 ata_dev_disable(dev);
2714
2715 goto retry;
2716 }
2717
2718 /**
2719 * sata_print_link_status - Print SATA link status
2720 * @link: SATA link to printk link status about
2721 *
2722 * This function prints link speed and status of a SATA link.
2723 *
2724 * LOCKING:
2725 * None.
2726 */
2727 static void sata_print_link_status(struct ata_link *link)
2728 {
2729 u32 sstatus, scontrol, tmp;
2730
2731 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2732 return;
2733 sata_scr_read(link, SCR_CONTROL, &scontrol);
2734
2735 if (ata_phys_link_online(link)) {
2736 tmp = (sstatus >> 4) & 0xf;
2737 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2738 sata_spd_string(tmp), sstatus, scontrol);
2739 } else {
2740 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2741 sstatus, scontrol);
2742 }
2743 }
2744
2745 /**
2746 * ata_dev_pair - return other device on cable
2747 * @adev: device
2748 *
2749 * Obtain the other device on the same cable; if no such device
2750 * is present, NULL is returned.
2751 */
2752
2753 struct ata_device *ata_dev_pair(struct ata_device *adev)
2754 {
2755 struct ata_link *link = adev->link;
2756 struct ata_device *pair = &link->device[1 - adev->devno];
2757 if (!ata_dev_enabled(pair))
2758 return NULL;
2759 return pair;
2760 }
2761
2762 /**
2763 * sata_down_spd_limit - adjust SATA spd limit downward
2764 * @link: Link to adjust SATA spd limit for
2765 * @spd_limit: Additional limit
2766 *
2767 * Adjust SATA spd limit of @link downward. Note that this
2768 * function only adjusts the limit. The change must be applied
2769 * using sata_set_spd().
2770 *
2771 * If @spd_limit is non-zero, the speed is limited to a value equal
2772 * to or lower than @spd_limit if such a speed is supported. If
2773 * @spd_limit is slower than any supported speed, only the lowest
2774 * supported speed is allowed.
2775 *
2776 * LOCKING:
2777 * Inherited from caller.
2778 *
2779 * RETURNS:
2780 * 0 on success, negative errno on failure
2781 */
2782 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2783 {
2784 u32 sstatus, spd, mask;
2785 int rc, bit;
2786
2787 if (!sata_scr_valid(link))
2788 return -EOPNOTSUPP;
2789
2790 /* If SCR can be read, use it to determine the current SPD.
2791 * If not, use cached value in link->sata_spd.
2792 */
2793 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2794 if (rc == 0 && ata_sstatus_online(sstatus))
2795 spd = (sstatus >> 4) & 0xf;
2796 else
2797 spd = link->sata_spd;
2798
2799 mask = link->sata_spd_limit;
2800 if (mask <= 1)
2801 return -EINVAL;
2802
2803 /* unconditionally mask off the highest bit */
2804 bit = fls(mask) - 1;
2805 mask &= ~(1 << bit);
2806
2807 /* Mask off all speeds higher than or equal to the current
2808 * one. Force 1.5Gbps if current SPD is not available.
2809 */
2810 if (spd > 1)
2811 mask &= (1 << (spd - 1)) - 1;
2812 else
2813 mask &= 1;
2814
2815 /* were we already at the bottom? */
2816 if (!mask)
2817 return -EINVAL;
2818
2819 if (spd_limit) {
2820 if (mask & ((1 << spd_limit) - 1))
2821 mask &= (1 << spd_limit) - 1;
2822 else {
2823 bit = ffs(mask) - 1;
2824 mask = 1 << bit;
2825 }
2826 }
2827
2828 link->sata_spd_limit = mask;
2829
2830 ata_link_warn(link, "limiting SATA link speed to %s\n",
2831 sata_spd_string(fls(mask)));
2832
2833 return 0;
2834 }
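
/*
 * Worked example for sata_down_spd_limit() (values assumed): with
 * sata_spd_limit == 0x7 (1.5, 3.0 and 6.0 Gbps all allowed) and the
 * link running at 6.0 Gbps (spd == 3), the top bit is cleared
 * (mask == 0x3) and masking off speeds >= the current one keeps 0x3,
 * so the new limit becomes 3.0 Gbps; a further call would leave only
 * 0x1, i.e. 1.5 Gbps, after which -EINVAL is returned.
 */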
2835
2836 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2837 {
2838 struct ata_link *host_link = &link->ap->link;
2839 u32 limit, target, spd;
2840
2841 limit = link->sata_spd_limit;
2842
2843 /* Don't configure downstream link faster than upstream link.
2844 * It doesn't speed up anything and some PMPs choke on such
2845 * configuration.
2846 */
2847 if (!ata_is_host_link(link) && host_link->sata_spd)
2848 limit &= (1 << host_link->sata_spd) - 1;
2849
2850 if (limit == UINT_MAX)
2851 target = 0;
2852 else
2853 target = fls(limit);
2854
2855 spd = (*scontrol >> 4) & 0xf;
2856 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2857
2858 return spd != target;
2859 }
2860
2861 /**
2862 * sata_set_spd_needed - is SATA spd configuration needed
2863 * @link: Link in question
2864 *
2865 * Test whether the spd limit in SControl matches
2866 * @link->sata_spd_limit. This function is used to determine
2867 * whether hardreset is necessary to apply SATA spd
2868 * configuration.
2869 *
2870 * LOCKING:
2871 * Inherited from caller.
2872 *
2873 * RETURNS:
2874 * 1 if SATA spd configuration is needed, 0 otherwise.
2875 */
2876 static int sata_set_spd_needed(struct ata_link *link)
2877 {
2878 u32 scontrol;
2879
2880 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2881 return 1;
2882
2883 return __sata_set_spd_needed(link, &scontrol);
2884 }
2885
2886 /**
2887 * sata_set_spd - set SATA spd according to spd limit
2888 * @link: Link to set SATA spd for
2889 *
2890 * Set SATA spd of @link according to sata_spd_limit.
2891 *
2892 * LOCKING:
2893 * Inherited from caller.
2894 *
2895 * RETURNS:
2896 * 0 if spd doesn't need to be changed, 1 if spd has been
2897 * changed. Negative errno if SCR registers are inaccessible.
2898 */
2899 int sata_set_spd(struct ata_link *link)
2900 {
2901 u32 scontrol;
2902 int rc;
2903
2904 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2905 return rc;
2906
2907 if (!__sata_set_spd_needed(link, &scontrol))
2908 return 0;
2909
2910 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2911 return rc;
2912
2913 return 1;
2914 }
2915
2916 /*
2917 * This mode timing computation functionality is ported over from
2918 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2919 */
2920 /*
2921 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2922 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2923 * for UDMA6, which is currently supported only by Maxtor drives.
2924 *
2925 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2926 */
2927
2928 static const struct ata_timing ata_timing[] = {
2929 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
2930 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
2931 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
2932 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
2933 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
2934 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
2935 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
2936 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
2937
2938 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
2939 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
2940 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
2941
2942 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
2943 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
2944 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
2945 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
2946 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
2947
2948 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2949 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
2950 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
2951 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
2952 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
2953 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
2954 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
2955 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
2956
2957 { 0xFF }
2958 };
2959
2960 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2961 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
2962
2963 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2964 {
2965 q->setup = EZ(t->setup * 1000, T);
2966 q->act8b = EZ(t->act8b * 1000, T);
2967 q->rec8b = EZ(t->rec8b * 1000, T);
2968 q->cyc8b = EZ(t->cyc8b * 1000, T);
2969 q->active = EZ(t->active * 1000, T);
2970 q->recover = EZ(t->recover * 1000, T);
2971 q->dmack_hold = EZ(t->dmack_hold * 1000, T);
2972 q->cycle = EZ(t->cycle * 1000, T);
2973 q->udma = EZ(t->udma * 1000, UT);
2974 }
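
/*
 * Quantization example (numbers assumed for illustration): T and UT
 * are bus clock periods in picoseconds, while the ata_timing fields
 * are in nanoseconds, hence the "* 1000" above. On a 33 MHz bus
 * (T == 30000 ps), a 70 ns active time quantizes to
 * ENOUGH(70 * 1000, 30000) == 3 clock cycles.
 */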
2975
2976 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2977 struct ata_timing *m, unsigned int what)
2978 {
2979 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2980 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2981 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2982 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2983 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2984 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2985 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
2986 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2987 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2988 }
2989
2990 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2991 {
2992 const struct ata_timing *t = ata_timing;
2993
2994 while (xfer_mode > t->mode)
2995 t++;
2996
2997 if (xfer_mode == t->mode)
2998 return t;
2999
3000 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3001 __func__, xfer_mode);
3002
3003 return NULL;
3004 }
3005
3006 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3007 struct ata_timing *t, int T, int UT)
3008 {
3009 const u16 *id = adev->id;
3010 const struct ata_timing *s;
3011 struct ata_timing p;
3012
3013 /*
3014 * Find the mode.
3015 */
3016
3017 if (!(s = ata_timing_find_mode(speed)))
3018 return -EINVAL;
3019
3020 memcpy(t, s, sizeof(*s));
3021
3022 /*
3023 * If the drive is an EIDE drive, it can tell us it needs extended
3024 * PIO/MW_DMA cycle timing.
3025 */
3026
3027 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
3028 memset(&p, 0, sizeof(p));
3029
3030 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
3031 if (speed <= XFER_PIO_2)
3032 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3033 else if ((speed <= XFER_PIO_4) ||
3034 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3035 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3036 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3037 p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3038
3039 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3040 }
3041
3042 /*
3043 * Convert the timing to bus clock counts.
3044 */
3045
3046 ata_timing_quantize(t, t, T, UT);
3047
3048 /*
3049 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3050 * S.M.A.R.T. and some other commands. We have to ensure that the
3051 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3052 */
3053
3054 if (speed > XFER_PIO_6) {
3055 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3056 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3057 }
3058
3059 /*
3060 * Lengthen active & recovery time so that cycle time is correct.
3061 */
3062
3063 if (t->act8b + t->rec8b < t->cyc8b) {
3064 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3065 t->rec8b = t->cyc8b - t->act8b;
3066 }
3067
3068 if (t->active + t->recover < t->cycle) {
3069 t->active += (t->cycle - (t->active + t->recover)) / 2;
3070 t->recover = t->cycle - t->active;
3071 }
3072
3073 /* In a few cases quantisation may produce enough errors to
3074 leave t->cycle too low for the sum of active and recovery;
3075 if so, we must correct this. */
3076 if (t->active + t->recover > t->cycle)
3077 t->cycle = t->active + t->recover;
3078
3079 return 0;
3080 }
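
/*
 * A minimal ->set_piomode sketch built on ata_timing_compute() (the
 * foo_* names and the 30000/1000 ps clock periods are assumptions,
 * not a real driver):
 *
 *	static void foo_set_piomode(struct ata_port *ap,
 *				    struct ata_device *adev)
 *	{
 *		struct ata_timing t;
 *
 *		if (ata_timing_compute(adev, adev->pio_mode, &t,
 *				       30000, 1000))
 *			return;
 *		// program t.setup / t.active / t.recover into the chip
 *	}
 */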
3081
3082 /**
3083 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3084 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3085 * @cycle: cycle duration in ns
3086 *
3087 * Return matching xfer mode for @cycle. The returned mode is of
3088 * the transfer type specified by @xfer_shift. If @cycle is too
3089 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3090 * than the fastest known mode, the fasted mode is returned.
3091 *
3092 * LOCKING:
3093 * None.
3094 *
3095 * RETURNS:
3096 * Matching xfer_mode, 0xff if no match found.
3097 */
3098 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3099 {
3100 u8 base_mode = 0xff, last_mode = 0xff;
3101 const struct ata_xfer_ent *ent;
3102 const struct ata_timing *t;
3103
3104 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3105 if (ent->shift == xfer_shift)
3106 base_mode = ent->base;
3107
3108 for (t = ata_timing_find_mode(base_mode);
3109 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3110 unsigned short this_cycle;
3111
3112 switch (xfer_shift) {
3113 case ATA_SHIFT_PIO:
3114 case ATA_SHIFT_MWDMA:
3115 this_cycle = t->cycle;
3116 break;
3117 case ATA_SHIFT_UDMA:
3118 this_cycle = t->udma;
3119 break;
3120 default:
3121 return 0xff;
3122 }
3123
3124 if (cycle > this_cycle)
3125 break;
3126
3127 last_mode = t->mode;
3128 }
3129
3130 return last_mode;
3131 }
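
/*
 * Example (inputs assumed): ata_timing_cycle2mode(ATA_SHIFT_UDMA, 120)
 * walks the UDMA entries of ata_timing[] and returns XFER_UDMA_0
 * (cycle 120 ns); asking for an impossibly fast 10 ns cycle returns
 * the fastest known mode, XFER_UDMA_6, as documented above.
 */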
3132
3133 /**
3134 * ata_down_xfermask_limit - adjust dev xfer masks downward
3135 * @dev: Device to adjust xfer masks
3136 * @sel: ATA_DNXFER_* selector
3137 *
3138 * Adjust xfer masks of @dev downward. Note that this function
3139 * does not apply the change. Invoking ata_set_mode() afterwards
3140 * will apply the limit.
3141 *
3142 * LOCKING:
3143 * Inherited from caller.
3144 *
3145 * RETURNS:
3146 * 0 on success, negative errno on failure
3147 */
3148 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3149 {
3150 char buf[32];
3151 unsigned long orig_mask, xfer_mask;
3152 unsigned long pio_mask, mwdma_mask, udma_mask;
3153 int quiet, highbit;
3154
3155 quiet = !!(sel & ATA_DNXFER_QUIET);
3156 sel &= ~ATA_DNXFER_QUIET;
3157
3158 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3159 dev->mwdma_mask,
3160 dev->udma_mask);
3161 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3162
3163 switch (sel) {
3164 case ATA_DNXFER_PIO:
3165 highbit = fls(pio_mask) - 1;
3166 pio_mask &= ~(1 << highbit);
3167 break;
3168
3169 case ATA_DNXFER_DMA:
3170 if (udma_mask) {
3171 highbit = fls(udma_mask) - 1;
3172 udma_mask &= ~(1 << highbit);
3173 if (!udma_mask)
3174 return -ENOENT;
3175 } else if (mwdma_mask) {
3176 highbit = fls(mwdma_mask) - 1;
3177 mwdma_mask &= ~(1 << highbit);
3178 if (!mwdma_mask)
3179 return -ENOENT;
3180 }
3181 break;
3182
3183 case ATA_DNXFER_40C:
3184 udma_mask &= ATA_UDMA_MASK_40C;
3185 break;
3186
3187 case ATA_DNXFER_FORCE_PIO0:
3188 pio_mask &= 1; /* fall through */
3189 case ATA_DNXFER_FORCE_PIO:
3190 mwdma_mask = 0;
3191 udma_mask = 0;
3192 break;
3193
3194 default:
3195 BUG();
3196 }
3197
3198 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3199
3200 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3201 return -ENOENT;
3202
3203 if (!quiet) {
3204 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3205 snprintf(buf, sizeof(buf), "%s:%s",
3206 ata_mode_string(xfer_mask),
3207 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3208 else
3209 snprintf(buf, sizeof(buf), "%s",
3210 ata_mode_string(xfer_mask));
3211
3212 ata_dev_warn(dev, "limiting speed to %s\n", buf);
3213 }
3214
3215 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3216 &dev->udma_mask);
3217
3218 return 0;
3219 }
3220
3221 static int ata_dev_set_mode(struct ata_device *dev)
3222 {
3223 struct ata_port *ap = dev->link->ap;
3224 struct ata_eh_context *ehc = &dev->link->eh_context;
3225 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3226 const char *dev_err_whine = "";
3227 int ign_dev_err = 0;
3228 unsigned int err_mask = 0;
3229 int rc;
3230
3231 dev->flags &= ~ATA_DFLAG_PIO;
3232 if (dev->xfer_shift == ATA_SHIFT_PIO)
3233 dev->flags |= ATA_DFLAG_PIO;
3234
3235 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3236 dev_err_whine = " (SET_XFERMODE skipped)";
3237 else {
3238 if (nosetxfer)
3239 ata_dev_warn(dev,
3240 "NOSETXFER but PATA detected - can't "
3241 "skip SETXFER, might malfunction\n");
3242 err_mask = ata_dev_set_xfermode(dev);
3243 }
3244
3245 if (err_mask & ~AC_ERR_DEV)
3246 goto fail;
3247
3248 /* revalidate */
3249 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3250 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3251 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3252 if (rc)
3253 return rc;
3254
3255 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3256 /* Old CFA may refuse this command, which is just fine */
3257 if (ata_id_is_cfa(dev->id))
3258 ign_dev_err = 1;
3259 /* Catch several broken garbage emulations plus some
3260 pre-ATA devices */
3261 if (ata_id_major_version(dev->id) == 0 &&
3262 dev->pio_mode <= XFER_PIO_2)
3263 ign_dev_err = 1;
3264 /* Some very old devices and some bad newer ones fail
3265 any kind of SET_XFERMODE request but support PIO0-2
3266 timings and no IORDY */
3267 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3268 ign_dev_err = 1;
3269 }
3270 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3271 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3272 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3273 dev->dma_mode == XFER_MW_DMA_0 &&
3274 (dev->id[63] >> 8) & 1)
3275 ign_dev_err = 1;
3276
3277 /* if the device is actually configured correctly, ignore dev err */
3278 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3279 ign_dev_err = 1;
3280
3281 if (err_mask & AC_ERR_DEV) {
3282 if (!ign_dev_err)
3283 goto fail;
3284 else
3285 dev_err_whine = " (device error ignored)";
3286 }
3287
3288 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3289 dev->xfer_shift, (int)dev->xfer_mode);
3290
3291 ata_dev_info(dev, "configured for %s%s\n",
3292 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3293 dev_err_whine);
3294
3295 return 0;
3296
3297 fail:
3298 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3299 return -EIO;
3300 }
3301
3302 /**
3303 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3304 * @link: link on which timings will be programmed
3305 * @r_failed_dev: out parameter for failed device
3306 *
3307 * Standard implementation of the function used to tune and set
3308 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3309 * ata_dev_set_mode() fails, pointer to the failing device is
3310 * returned in @r_failed_dev.
3311 *
3312 * LOCKING:
3313 * PCI/etc. bus probe sem.
3314 *
3315 * RETURNS:
3316 * 0 on success, negative errno otherwise
3317 */
3318
3319 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3320 {
3321 struct ata_port *ap = link->ap;
3322 struct ata_device *dev;
3323 int rc = 0, used_dma = 0, found = 0;
3324
3325 /* step 1: calculate xfer_mask */
3326 ata_for_each_dev(dev, link, ENABLED) {
3327 unsigned long pio_mask, dma_mask;
3328 unsigned int mode_mask;
3329
3330 mode_mask = ATA_DMA_MASK_ATA;
3331 if (dev->class == ATA_DEV_ATAPI)
3332 mode_mask = ATA_DMA_MASK_ATAPI;
3333 else if (ata_id_is_cfa(dev->id))
3334 mode_mask = ATA_DMA_MASK_CFA;
3335
3336 ata_dev_xfermask(dev);
3337 ata_force_xfermask(dev);
3338
3339 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3340
3341 if (libata_dma_mask & mode_mask)
3342 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3343 dev->udma_mask);
3344 else
3345 dma_mask = 0;
3346
3347 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3348 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3349
3350 found = 1;
3351 if (ata_dma_enabled(dev))
3352 used_dma = 1;
3353 }
3354 if (!found)
3355 goto out;
3356
3357 /* step 2: always set host PIO timings */
3358 ata_for_each_dev(dev, link, ENABLED) {
3359 if (dev->pio_mode == 0xff) {
3360 ata_dev_warn(dev, "no PIO support\n");
3361 rc = -EINVAL;
3362 goto out;
3363 }
3364
3365 dev->xfer_mode = dev->pio_mode;
3366 dev->xfer_shift = ATA_SHIFT_PIO;
3367 if (ap->ops->set_piomode)
3368 ap->ops->set_piomode(ap, dev);
3369 }
3370
3371 /* step 3: set host DMA timings */
3372 ata_for_each_dev(dev, link, ENABLED) {
3373 if (!ata_dma_enabled(dev))
3374 continue;
3375
3376 dev->xfer_mode = dev->dma_mode;
3377 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3378 if (ap->ops->set_dmamode)
3379 ap->ops->set_dmamode(ap, dev);
3380 }
3381
3382 /* step 4: update devices' xfer mode */
3383 ata_for_each_dev(dev, link, ENABLED) {
3384 rc = ata_dev_set_mode(dev);
3385 if (rc)
3386 goto out;
3387 }
3388
3389 /* Record simplex status. If we selected DMA then the other
3390 * host channels are not permitted to do so.
3391 */
3392 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3393 ap->host->simplex_claimed = ap;
3394
3395 out:
3396 if (rc)
3397 *r_failed_dev = dev;
3398 return rc;
3399 }
3400
3401 /**
3402 * ata_wait_ready - wait for link to become ready
3403 * @link: link to be waited on
3404 * @deadline: deadline jiffies for the operation
3405 * @check_ready: callback to check link readiness
3406 *
3407 * Wait for @link to become ready. @check_ready should return
3408 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3409 * link doesn't seem to be occupied, other errno for other error
3410 * conditions.
3411 *
3412 * Transient -ENODEV conditions are allowed for
3413 * ATA_TMOUT_FF_WAIT.
3414 *
3415 * LOCKING:
3416 * EH context.
3417 *
3418 * RETURNS:
3419 * 0 if @link is ready before @deadline; otherwise, -errno.
3420 */
3421 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3422 int (*check_ready)(struct ata_link *link))
3423 {
3424 unsigned long start = jiffies;
3425 unsigned long nodev_deadline;
3426 int warned = 0;
3427
3428 /* choose which 0xff timeout to use, read comment in libata.h */
3429 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3430 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3431 else
3432 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3433
3434 /* Slave readiness can't be tested separately from master. On
3435 * M/S emulation configuration, this function should be called
3436 * only on the master and it will handle both master and slave.
3437 */
3438 WARN_ON(link == link->ap->slave_link);
3439
3440 if (time_after(nodev_deadline, deadline))
3441 nodev_deadline = deadline;
3442
3443 while (1) {
3444 unsigned long now = jiffies;
3445 int ready, tmp;
3446
3447 ready = tmp = check_ready(link);
3448 if (ready > 0)
3449 return 0;
3450
3451 /*
3452 * -ENODEV could be transient. Ignore -ENODEV if link
3453 * is online. Also, some SATA devices take a long
3454 * time to clear 0xff after reset. Wait for
3455 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3456 * offline.
3457 *
3458 * Note that some PATA controllers (pata_ali) explode
3459 * if status register is read more than once when
3460 * there's no device attached.
3461 */
3462 if (ready == -ENODEV) {
3463 if (ata_link_online(link))
3464 ready = 0;
3465 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3466 !ata_link_offline(link) &&
3467 time_before(now, nodev_deadline))
3468 ready = 0;
3469 }
3470
3471 if (ready)
3472 return ready;
3473 if (time_after(now, deadline))
3474 return -EBUSY;
3475
3476 if (!warned && time_after(now, start + 5 * HZ) &&
3477 (deadline - now > 3 * HZ)) {
3478 ata_link_warn(link,
3479 "link is slow to respond, please be patient "
3480 "(ready=%d)\n", tmp);
3481 warned = 1;
3482 }
3483
3484 ata_msleep(link->ap, 50);
3485 }
3486 }
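
/*
 * Sketch of a @check_ready callback honoring the convention described
 * above (illustrative; foo_mmio() and FOO_STATUS are hypothetical,
 * ata_check_ready() is the libata helper for decoding a status byte):
 *
 *	static int foo_check_ready(struct ata_link *link)
 *	{
 *		u8 status = readb(foo_mmio(link->ap) + FOO_STATUS);
 *
 *		return ata_check_ready(status);
 *	}
 */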
3487
3488 /**
3489 * ata_wait_after_reset - wait for link to become ready after reset
3490 * @link: link to be waited on
3491 * @deadline: deadline jiffies for the operation
3492 * @check_ready: callback to check link readiness
3493 *
3494 * Wait for @link to become ready after reset.
3495 *
3496 * LOCKING:
3497 * EH context.
3498 *
3499 * RETURNS:
3500 * 0 if @link is ready before @deadline; otherwise, -errno.
3501 */
3502 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3503 int (*check_ready)(struct ata_link *link))
3504 {
3505 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3506
3507 return ata_wait_ready(link, deadline, check_ready);
3508 }
3509
3510 /**
3511 * sata_link_debounce - debounce SATA phy status
3512 * @link: ATA link to debounce SATA phy status for
3513 * @params: timing parameters { interval, duration, timeout } in msec
3514 * @deadline: deadline jiffies for the operation
3515 *
3516 * Make sure SStatus of @link reaches a stable state, determined by
3517 * holding the same value where DET is not 1 for @duration polled
3518 * every @interval, before @timeout. The timeout constrains the
3519 * beginning of the stable state. Because DET gets stuck at 1 on
3520 * some controllers after hot unplugging, this function waits
3521 * until the timeout and then returns 0 if DET is stable at 1.
3522 *
3523 * @timeout is further limited by @deadline. The sooner of the
3524 * two is used.
3525 *
3526 * LOCKING:
3527 * Kernel thread context (may sleep)
3528 *
3529 * RETURNS:
3530 * 0 on success, -errno on failure.
3531 */
3532 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3533 unsigned long deadline)
3534 {
3535 unsigned long interval = params[0];
3536 unsigned long duration = params[1];
3537 unsigned long last_jiffies, t;
3538 u32 last, cur;
3539 int rc;
3540
3541 t = ata_deadline(jiffies, params[2]);
3542 if (time_before(t, deadline))
3543 deadline = t;
3544
3545 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3546 return rc;
3547 cur &= 0xf;
3548
3549 last = cur;
3550 last_jiffies = jiffies;
3551
3552 while (1) {
3553 ata_msleep(link->ap, interval);
3554 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3555 return rc;
3556 cur &= 0xf;
3557
3558 /* DET stable? */
3559 if (cur == last) {
3560 if (cur == 1 && time_before(jiffies, deadline))
3561 continue;
3562 if (time_after(jiffies,
3563 ata_deadline(last_jiffies, duration)))
3564 return 0;
3565 continue;
3566 }
3567
3568 /* unstable, start over */
3569 last = cur;
3570 last_jiffies = jiffies;
3571
3572 /* Check deadline. If debouncing failed, return
3573 * -EPIPE to tell upper layer to lower link speed.
3574 */
3575 if (time_after(jiffies, deadline))
3576 return -EPIPE;
3577 }
3578 }
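
/*
 * Parameter example (using the { interval, duration, timeout } layout
 * documented above): with params of { 5, 100, 2000 }, SStatus is
 * sampled every 5 ms and must hold a stable DET value for 100 ms,
 * with the whole wait capped at 2000 ms (and further by @deadline).
 */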
3579
3580 /**
3581 * sata_link_resume - resume SATA link
3582 * @link: ATA link to resume SATA
3583 * @params: timing parameters { interval, duration, timeout } in msec
3584 * @deadline: deadline jiffies for the operation
3585 *
3586 * Resume SATA phy @link and debounce it.
3587 *
3588 * LOCKING:
3589 * Kernel thread context (may sleep)
3590 *
3591 * RETURNS:
3592 * 0 on success, -errno on failure.
3593 */
3594 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3595 unsigned long deadline)
3596 {
3597 int tries = ATA_LINK_RESUME_TRIES;
3598 u32 scontrol, serror;
3599 int rc;
3600
3601 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3602 return rc;
3603
3604 /*
3605 * Writes to SControl sometimes get ignored under certain
3606 * controllers (ata_piix SIDPR). Make sure DET actually is
3607 * cleared.
3608 */
3609 do {
3610 scontrol = (scontrol & 0x0f0) | 0x300;
3611 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3612 return rc;
3613 /*
3614 * Some PHYs react badly if SStatus is pounded
3615 * immediately after resuming. Delay 200ms before
3616 * debouncing.
3617 */
3618 ata_msleep(link->ap, 200);
3619
3620 /* is SControl restored correctly? */
3621 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3622 return rc;
3623 } while ((scontrol & 0xf0f) != 0x300 && --tries);
3624
3625 if ((scontrol & 0xf0f) != 0x300) {
3626 ata_link_warn(link, "failed to resume link (SControl %X)\n",
3627 scontrol);
3628 return 0;
3629 }
3630
3631 if (tries < ATA_LINK_RESUME_TRIES)
3632 ata_link_warn(link, "link resume succeeded after %d retries\n",
3633 ATA_LINK_RESUME_TRIES - tries);
3634
3635 if ((rc = sata_link_debounce(link, params, deadline)))
3636 return rc;
3637
3638 /* clear SError, some PHYs require this even for SRST to work */
3639 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3640 rc = sata_scr_write(link, SCR_ERROR, serror);
3641
3642 return rc != -EINVAL ? rc : 0;
3643 }
3644
3645 /**
3646 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3647 * @link: ATA link to manipulate SControl for
3648 * @policy: LPM policy to configure
3649 * @spm_wakeup: initiate LPM transition to active state
3650 *
3651 * Manipulate the IPM field of the SControl register of @link
3652 * according to @policy. If @policy is ATA_LPM_MAX_POWER and
3653 * @spm_wakeup is %true, the SPM field is manipulated to wake up
3654 * the link. This function also clears PHYRDY_CHG before
3655 * returning.
3656 *
3657 * LOCKING:
3658 * EH context.
3659 *
3660 * RETURNS:
3661 * 0 on success, -errno otherwise.
3662 */
3663 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3664 bool spm_wakeup)
3665 {
3666 struct ata_eh_context *ehc = &link->eh_context;
3667 bool woken_up = false;
3668 u32 scontrol;
3669 int rc;
3670
3671 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3672 if (rc)
3673 return rc;
3674
3675 switch (policy) {
3676 case ATA_LPM_MAX_POWER:
3677 /* disable all LPM transitions */
3678 scontrol |= (0x7 << 8);
3679 /* initiate transition to active state */
3680 if (spm_wakeup) {
3681 scontrol |= (0x4 << 12);
3682 woken_up = true;
3683 }
3684 break;
3685 case ATA_LPM_MED_POWER:
3686 /* allow LPM to PARTIAL */
3687 scontrol &= ~(0x1 << 8);
3688 scontrol |= (0x6 << 8);
3689 break;
3690 case ATA_LPM_MIN_POWER:
3691 if (ata_link_nr_enabled(link) > 0)
3692 /* no restrictions on LPM transitions */
3693 scontrol &= ~(0x7 << 8);
3694 else {
3695 /* empty port, power off */
3696 scontrol &= ~0xf;
3697 scontrol |= (0x1 << 2);
3698 }
3699 break;
3700 default:
3701 WARN_ON(1);
3702 }
3703
3704 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3705 if (rc)
3706 return rc;
3707
3708 /* give the link time to transit out of LPM state */
3709 if (woken_up)
3710 msleep(10);
3711
3712 /* clear PHYRDY_CHG from SError */
3713 ehc->i.serror &= ~SERR_PHYRDY_CHG;
3714 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3715 }
3716
3717 /**
3718 * ata_std_prereset - prepare for reset
3719 * @link: ATA link to be reset
3720 * @deadline: deadline jiffies for the operation
3721 *
3722 * @link is about to be reset. Initialize it. Failure from
3723 * prereset makes libata abort whole reset sequence and give up
3724 * that port, so prereset should be best-effort. It does its
3725 * best to prepare for reset sequence but if things go wrong, it
3726 * should just whine, not fail.
3727 *
3728 * LOCKING:
3729 * Kernel thread context (may sleep)
3730 *
3731 * RETURNS:
3732 * 0 on success, -errno otherwise.
3733 */
3734 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3735 {
3736 struct ata_port *ap = link->ap;
3737 struct ata_eh_context *ehc = &link->eh_context;
3738 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3739 int rc;
3740
3741 /* if we're about to do hardreset, nothing more to do */
3742 if (ehc->i.action & ATA_EH_HARDRESET)
3743 return 0;
3744
3745 /* if SATA, resume link */
3746 if (ap->flags & ATA_FLAG_SATA) {
3747 rc = sata_link_resume(link, timing, deadline);
3748 /* whine about phy resume failure but proceed */
3749 if (rc && rc != -EOPNOTSUPP)
3750 ata_link_warn(link,
3751 "failed to resume link for reset (errno=%d)\n",
3752 rc);
3753 }
3754
3755 /* no point in trying softreset on offline link */
3756 if (ata_phys_link_offline(link))
3757 ehc->i.action &= ~ATA_EH_SOFTRESET;
3758
3759 return 0;
3760 }
3761
3762 /**
3763 * sata_link_hardreset - reset link via SATA phy reset
3764 * @link: link to reset
3765 * @timing: timing parameters { interval, duration, timeout } in msec
3766 * @deadline: deadline jiffies for the operation
3767 * @online: optional out parameter indicating link onlineness
3768 * @check_ready: optional callback to check link readiness
3769 *
3770 * SATA phy-reset @link using DET bits of SControl register.
3771 * After hardreset, link readiness is waited upon using
3772 * ata_wait_ready() if @check_ready is specified. LLDs are
3773 * allowed to omit @check_ready and wait by themselves after this
3774 * function returns. Device classification is the LLD's
3775 * responsibility.
3776 *
3777 * *@online is set to one iff reset succeeded and @link is online
3778 * after reset.
3779 *
3780 * LOCKING:
3781 * Kernel thread context (may sleep)
3782 *
3783 * RETURNS:
3784 * 0 on success, -errno otherwise.
3785 */
3786 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3787 unsigned long deadline,
3788 bool *online, int (*check_ready)(struct ata_link *))
3789 {
3790 u32 scontrol;
3791 int rc;
3792
3793 DPRINTK("ENTER\n");
3794
3795 if (online)
3796 *online = false;
3797
3798 if (sata_set_spd_needed(link)) {
3799 /* SATA spec says nothing about how to reconfigure
3800 * spd. To be on the safe side, turn off phy during
3801 * reconfiguration. This works for at least ICH7 AHCI
3802 * and Sil3124.
3803 */
3804 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3805 goto out;
3806
3807 scontrol = (scontrol & 0x0f0) | 0x304;
3808
3809 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3810 goto out;
3811
3812 sata_set_spd(link);
3813 }
3814
3815 /* issue phy wake/reset */
3816 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3817 goto out;
3818
3819 scontrol = (scontrol & 0x0f0) | 0x301;
3820
3821 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3822 goto out;
3823
3824 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3825 * 10.4.2 says at least 1 ms.
3826 */
3827 ata_msleep(link->ap, 1);
3828
3829 /* bring link back */
3830 rc = sata_link_resume(link, timing, deadline);
3831 if (rc)
3832 goto out;
3833 /* if link is offline nothing more to do */
3834 if (ata_phys_link_offline(link))
3835 goto out;
3836
3837 /* Link is online. From this point, -ENODEV too is an error. */
3838 if (online)
3839 *online = true;
3840
3841 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3842 /* If PMP is supported, we have to do follow-up SRST.
3843 * Some PMPs don't send D2H Reg FIS after hardreset if
3844 * the first port is empty. Wait only for
3845 * ATA_TMOUT_PMP_SRST_WAIT.
3846 */
3847 if (check_ready) {
3848 unsigned long pmp_deadline;
3849
3850 pmp_deadline = ata_deadline(jiffies,
3851 ATA_TMOUT_PMP_SRST_WAIT);
3852 if (time_after(pmp_deadline, deadline))
3853 pmp_deadline = deadline;
3854 ata_wait_ready(link, pmp_deadline, check_ready);
3855 }
3856 rc = -EAGAIN;
3857 goto out;
3858 }
3859
3860 rc = 0;
3861 if (check_ready)
3862 rc = ata_wait_ready(link, deadline, check_ready);
3863 out:
3864 if (rc && rc != -EAGAIN) {
3865 /* online is set iff link is online && reset succeeded */
3866 if (online)
3867 *online = false;
3868 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
3869 }
3870 DPRINTK("EXIT, rc=%d\n", rc);
3871 return rc;
3872 }
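
/*
 * Sketch of an LLD hardreset built on sata_link_hardreset() with a
 * readiness callback (foo_hardreset and foo_check_ready are
 * hypothetical; classification is left to the caller as noted above):
 *
 *	static int foo_hardreset(struct ata_link *link, unsigned int *class,
 *				 unsigned long deadline)
 *	{
 *		const unsigned long *timing =
 *				sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *
 *		return sata_link_hardreset(link, timing, deadline,
 *					   &online, foo_check_ready);
 *	}
 */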
3873
3874 /**
3875 * sata_std_hardreset - COMRESET w/o waiting or classification
3876 * @link: link to reset
3877 * @class: resulting class of attached device
3878 * @deadline: deadline jiffies for the operation
3879 *
3880 * Standard SATA COMRESET w/o waiting or classification.
3881 *
3882 * LOCKING:
3883 * Kernel thread context (may sleep)
3884 *
3885 * RETURNS:
3886 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3887 */
3888 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3889 unsigned long deadline)
3890 {
3891 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3892 bool online;
3893 int rc;
3894
3895 /* do hardreset */
3896 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3897 return online ? -EAGAIN : rc;
3898 }
3899
3900 /**
3901 * ata_std_postreset - standard postreset callback
3902 * @link: the target ata_link
3903 * @classes: classes of attached devices
3904 *
3905 * This function is invoked after a successful reset. Note that
3906 * the device might have been reset more than once using
3907 * different reset methods before postreset is invoked.
3908 *
3909 * LOCKING:
3910 * Kernel thread context (may sleep)
3911 */
3912 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3913 {
3914 u32 serror;
3915
3916 DPRINTK("ENTER\n");
3917
3918 /* reset complete, clear SError */
3919 if (!sata_scr_read(link, SCR_ERROR, &serror))
3920 sata_scr_write(link, SCR_ERROR, serror);
3921
3922 /* print link status */
3923 sata_print_link_status(link);
3924
3925 DPRINTK("EXIT\n");
3926 }
3927
3928 /**
3929 * ata_dev_same_device - Determine whether new ID matches configured device
3930 * @dev: device to compare against
3931 * @new_class: class of the new device
3932 * @new_id: IDENTIFY page of the new device
3933 *
3934 * Compare @new_class and @new_id against @dev and determine
3935 * whether @dev is the device indicated by @new_class and
3936 * @new_id.
3937 *
3938 * LOCKING:
3939 * None.
3940 *
3941 * RETURNS:
3942 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3943 */
3944 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3945 const u16 *new_id)
3946 {
3947 const u16 *old_id = dev->id;
3948 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3949 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3950
3951 if (dev->class != new_class) {
3952 ata_dev_info(dev, "class mismatch %d != %d\n",
3953 dev->class, new_class);
3954 return 0;
3955 }
3956
3957 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3958 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3959 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3960 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3961
3962 if (strcmp(model[0], model[1])) {
3963 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3964 model[0], model[1]);
3965 return 0;
3966 }
3967
3968 if (strcmp(serial[0], serial[1])) {
3969 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3970 serial[0], serial[1]);
3971 return 0;
3972 }
3973
3974 return 1;
3975 }
3976
3977 /**
3978 * ata_dev_reread_id - Re-read IDENTIFY data
3979 * @dev: target ATA device
3980 * @readid_flags: read ID flags
3981 *
3982 * Re-read IDENTIFY page and make sure @dev is still attached to
3983 * the port.
3984 *
3985 * LOCKING:
3986 * Kernel thread context (may sleep)
3987 *
3988 * RETURNS:
3989 * 0 on success, negative errno otherwise
3990 */
3991 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3992 {
3993 unsigned int class = dev->class;
3994 u16 *id = (void *)dev->link->ap->sector_buf;
3995 int rc;
3996
3997 /* read ID data */
3998 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3999 if (rc)
4000 return rc;
4001
4002 /* is the device still there? */
4003 if (!ata_dev_same_device(dev, class, id))
4004 return -ENODEV;
4005
4006 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4007 return 0;
4008 }
4009
4010 /**
4011 * ata_dev_revalidate - Revalidate ATA device
4012 * @dev: device to revalidate
4013 * @new_class: new class code
4014 * @readid_flags: read ID flags
4015 *
4016 * Re-read IDENTIFY page, make sure @dev is still attached to the
4017 * port and reconfigure it according to the new IDENTIFY page.
4018 *
4019 * LOCKING:
4020 * Kernel thread context (may sleep)
4021 *
4022 * RETURNS:
4023 * 0 on success, negative errno otherwise
4024 */
4025 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4026 unsigned int readid_flags)
4027 {
4028 u64 n_sectors = dev->n_sectors;
4029 u64 n_native_sectors = dev->n_native_sectors;
4030 int rc;
4031
4032 if (!ata_dev_enabled(dev))
4033 return -ENODEV;
4034
4035 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4036 if (ata_class_enabled(new_class) &&
4037 new_class != ATA_DEV_ATA &&
4038 new_class != ATA_DEV_ATAPI &&
4039 new_class != ATA_DEV_ZAC &&
4040 new_class != ATA_DEV_SEMB) {
4041 ata_dev_info(dev, "class mismatch %u != %u\n",
4042 dev->class, new_class);
4043 rc = -ENODEV;
4044 goto fail;
4045 }
4046
4047 /* re-read ID */
4048 rc = ata_dev_reread_id(dev, readid_flags);
4049 if (rc)
4050 goto fail;
4051
4052 /* configure device according to the new ID */
4053 rc = ata_dev_configure(dev);
4054 if (rc)
4055 goto fail;
4056
4057 /* verify n_sectors hasn't changed */
4058 if (dev->class != ATA_DEV_ATA || !n_sectors ||
4059 dev->n_sectors == n_sectors)
4060 return 0;
4061
4062 /* n_sectors has changed */
4063 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4064 (unsigned long long)n_sectors,
4065 (unsigned long long)dev->n_sectors);
4066
4067 /*
4068 * Something could have caused HPA to be unlocked
4069 * involuntarily. If n_native_sectors hasn't changed and the
4070 * new size matches it, keep the device.
4071 */
4072 if (dev->n_native_sectors == n_native_sectors &&
4073 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4074 ata_dev_warn(dev,
4075 "new n_sectors matches native, probably "
4076 "late HPA unlock, n_sectors updated\n");
4077 /* use the larger n_sectors */
4078 return 0;
4079 }
4080
4081 /*
4082 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
4083 * unlocking HPA in those cases.
4084 *
4085 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4086 */
4087 if (dev->n_native_sectors == n_native_sectors &&
4088 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4089 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4090 ata_dev_warn(dev,
4091 "old n_sectors matches native, probably "
4092 "late HPA lock, will try to unlock HPA\n");
4093 /* try unlocking HPA */
4094 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4095 rc = -EIO;
4096 } else
4097 rc = -ENODEV;
4098
4099 /* restore original n_[native_]sectors and fail */
4100 dev->n_native_sectors = n_native_sectors;
4101 dev->n_sectors = n_sectors;
4102 fail:
4103 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4104 return rc;
4105 }
4106
4107 struct ata_blacklist_entry {
4108 const char *model_num;
4109 const char *model_rev;
4110 unsigned long horkage;
4111 };
4112
4113 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4114 /* Devices with DMA related problems under Linux */
4115 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4116 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4117 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4118 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4119 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4120 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4121 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4122 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4123 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4124 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
4125 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4126 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4127 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4128 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4129 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4130 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
4131 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4132 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4133 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4134 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4135 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4136 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4137 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4138 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4139 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4140 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4141 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4142 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4143 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
4144 /* Odd clown on sil3726/4726 PMPs */
4145 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4146
4147 /* Weird ATAPI devices */
4148 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4149 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4150 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4151 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4152
4153 /*
4154 * Causes silent data corruption with higher max sects.
4155 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4156 */
4157 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
4158
4159 /* Devices we expect to fail diagnostics */
4160
4161 /* Devices where NCQ should be avoided */
4162 /* NCQ is slow */
4163 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4164 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4165 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4166 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4167 /* NCQ is broken */
4168 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4169 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4170 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4171 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4172 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
4173
4174 /* Seagate NCQ + FLUSH CACHE firmware bug */
4175 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4176 ATA_HORKAGE_FIRMWARE_WARN },
4177
4178 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4179 ATA_HORKAGE_FIRMWARE_WARN },
4180
4181 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4182 ATA_HORKAGE_FIRMWARE_WARN },
4183
4184 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4185 ATA_HORKAGE_FIRMWARE_WARN },
4186
4187 /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
4188 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4189 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4190 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
4191
4192 /* Blacklist entries taken from Silicon Image 3124/3132
4193 Windows driver .inf file - also several Linux problem reports */
4194 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4195 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4196 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4197
4198 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4199 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4200
4201 /* devices which puke on READ_NATIVE_MAX */
4202 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4203 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4204 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4205 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4206
4207 /* this one allows HPA unlocking but fails IOs on the area */
4208 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
4209
4210 /* Devices which report 1 sector over size HPA */
4211 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4212 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4213 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
4214
4215 /* Devices which get the IVB wrong */
4216 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4217 /* Maybe we should just blacklist TSSTcorp... */
4218 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
4219
4220 /* Devices that do not need bridging limits applied */
4221 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4222 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
4223
4224 /* Devices which aren't very happy with higher link speeds */
4225 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
4226 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
4227
4228 /*
4229 * Devices which choke on SETXFER. Applies only if both the
4230 * device and controller are SATA.
4231 */
4232 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
4233 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
4234 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
4235 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4236 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4237
4238 /* devices that don't properly handle queued TRIM commands */
4239 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4240 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4241 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4242 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4243 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4244 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4245 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4246 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4247 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4248 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4249 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4250 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4251
4252 /* devices that don't properly handle TRIM commands */
4253 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
4254
4255 /*
4256 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4257 * (Return Zero After Trim) flags in the ATA Command Set are
4258 * unreliable in the sense that they only define what happens if
4259 * the device successfully executed the DSM TRIM command. TRIM
4260 * is only advisory, however, and the device is free to silently
4261 * ignore all or parts of the request.
4262 *
4263 * Whitelist drives that are known to reliably return zeroes
4264 * after TRIM.
4265 */
4266
4267 /*
4268 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4269 * that model before whitelisting all other intel SSDs.
4270 */
4271 { "INTEL*SSDSC2MH*", NULL, 0, },
4272
4273 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4274 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4275 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4276 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4277 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4278 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4279 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4280
4281 /*
4282 * Some WD SATA-I drives spin up and down erratically when the link
4283 	 * is put into the slumber mode.  We don't have a full list of the
4284 	 * affected devices.  Disable LPM if the device matches one of the
4285 	 * known prefixes and is SATA-I.  As a side effect, LPM partial is
4286 * lost too.
4287 *
4288 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4289 */
4290 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4291 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4292 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4293 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4294 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4295 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4296 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4297
4298 /* End Marker */
4299 { }
4300 };
4301
4302 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4303 {
4304 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4305 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4306 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4307
4308 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4309 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4310
4311 while (ad->model_num) {
4312 if (glob_match(ad->model_num, model_num)) {
4313 if (ad->model_rev == NULL)
4314 return ad->horkage;
4315 if (glob_match(ad->model_rev, model_rev))
4316 return ad->horkage;
4317 }
4318 ad++;
4319 }
4320 return 0;
4321 }
4322
4323 static int ata_dma_blacklisted(const struct ata_device *dev)
4324 {
4325 /* We don't support polling DMA.
4326 	 * Blacklist DMA for those ATAPI devices with CDB-intr (and use PIO)
4327 	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4328 */
4329 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4330 (dev->flags & ATA_DFLAG_CDB_INTR))
4331 return 1;
4332 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4333 }
4334
4335 /**
4336 * ata_is_40wire - check drive side detection
4337 * @dev: device
4338 *
4339 * Perform drive side detection decoding, allowing for device vendors
4340 * who can't follow the documentation.
4341 */
4342
4343 static int ata_is_40wire(struct ata_device *dev)
4344 {
4345 if (dev->horkage & ATA_HORKAGE_IVB)
4346 return ata_drive_40wire_relaxed(dev->id);
4347 return ata_drive_40wire(dev->id);
4348 }
4349
4350 /**
4351 * cable_is_40wire - 40/80/SATA decider
4352 * @ap: port to consider
4353 *
4354 * This function encapsulates the policy for speed management
4355 * in one place. At the moment we don't cache the result but
4356 * there is a good case for setting ap->cbl to the result when
4357 * we are called with unknown cables (and figuring out if it
4358 * impacts hotplug at all).
4359 *
4360 * Return 1 if the cable appears to be 40 wire.
4361 */
4362
4363 static int cable_is_40wire(struct ata_port *ap)
4364 {
4365 struct ata_link *link;
4366 struct ata_device *dev;
4367
4368 /* If the controller thinks we are 40 wire, we are. */
4369 if (ap->cbl == ATA_CBL_PATA40)
4370 return 1;
4371
4372 /* If the controller thinks we are 80 wire, we are. */
4373 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4374 return 0;
4375
4376 	/* If the system is known to be 40 wire short cable (e.g.
4377 * laptop), then we allow 80 wire modes even if the drive
4378 * isn't sure.
4379 */
4380 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4381 return 0;
4382
4383 /* If the controller doesn't know, we scan.
4384 *
4385 * Note: We look for all 40 wire detects at this point. Any
4386 * 80 wire detect is taken to be 80 wire cable because
4387 * - in many setups only the one drive (slave if present) will
4388 * give a valid detect
4389 	 *	  - if you have a non-detect-capable drive you don't want it
4390 * to colour the choice
4391 */
4392 ata_for_each_link(link, ap, EDGE) {
4393 ata_for_each_dev(dev, link, ENABLED) {
4394 if (!ata_is_40wire(dev))
4395 return 0;
4396 }
4397 }
4398 return 1;
4399 }
4400
4401 /**
4402 * ata_dev_xfermask - Compute supported xfermask of the given device
4403 * @dev: Device to compute xfermask for
4404 *
4405 * Compute supported xfermask of @dev and store it in
4406 * dev->*_mask. This function is responsible for applying all
4407 * known limits including host controller limits, device
4408 * blacklist, etc...
4409 *
4410 * LOCKING:
4411 * None.
4412 */
4413 static void ata_dev_xfermask(struct ata_device *dev)
4414 {
4415 struct ata_link *link = dev->link;
4416 struct ata_port *ap = link->ap;
4417 struct ata_host *host = ap->host;
4418 unsigned long xfer_mask;
4419
4420 /* controller modes available */
4421 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4422 ap->mwdma_mask, ap->udma_mask);
4423
4424 /* drive modes available */
4425 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4426 dev->mwdma_mask, dev->udma_mask);
4427 xfer_mask &= ata_id_xfermask(dev->id);
4428
4429 /*
4430 * CFA Advanced TrueIDE timings are not allowed on a shared
4431 * cable
4432 */
4433 if (ata_dev_pair(dev)) {
4434 /* No PIO5 or PIO6 */
4435 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4436 		/* No MWDMA3 or MWDMA4 */
4437 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4438 }
4439
4440 if (ata_dma_blacklisted(dev)) {
4441 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4442 ata_dev_warn(dev,
4443 "device is on DMA blacklist, disabling DMA\n");
4444 }
4445
4446 if ((host->flags & ATA_HOST_SIMPLEX) &&
4447 host->simplex_claimed && host->simplex_claimed != ap) {
4448 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4449 ata_dev_warn(dev,
4450 "simplex DMA is claimed by other device, disabling DMA\n");
4451 }
4452
4453 if (ap->flags & ATA_FLAG_NO_IORDY)
4454 xfer_mask &= ata_pio_mask_no_iordy(dev);
4455
4456 if (ap->ops->mode_filter)
4457 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4458
4459 /* Apply cable rule here. Don't apply it early because when
4460 * we handle hot plug the cable type can itself change.
4461 * Check this last so that we know if the transfer rate was
4462 * solely limited by the cable.
4463 * Unknown or 80 wire cables reported host side are checked
4464 	 * drive side as well.  Cases where we know a 40-wire cable
4465 * is used safely for 80 are not checked here.
4466 */
4467 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4468 /* UDMA/44 or higher would be available */
4469 if (cable_is_40wire(ap)) {
4470 ata_dev_warn(dev,
4471 "limited to UDMA/33 due to 40-wire cable\n");
4472 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4473 }
4474
4475 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4476 &dev->mwdma_mask, &dev->udma_mask);
4477 }
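/*
 * A minimal sketch of the mask plumbing above: per-type masks are
 * packed into one xfer_mask, filtered, then unpacked back into the
 * device.  The UDMA filter shown is illustrative only.
 *
 *	unsigned long xfer_mask;
 *
 *	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
 *				      dev->udma_mask);
 *	xfer_mask &= ~ATA_MASK_UDMA;		(e.g. drop all UDMA modes)
 *	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
 *			    &dev->udma_mask);
 */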
4478
4479 /**
4480 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4481 * @dev: Device to which command will be sent
4482 *
4483 * Issue SET FEATURES - XFER MODE command to device @dev
4484  *	on its port.
4485 *
4486 * LOCKING:
4487 * PCI/etc. bus probe sem.
4488 *
4489 * RETURNS:
4490 * 0 on success, AC_ERR_* mask otherwise.
4491 */
4492
4493 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4494 {
4495 struct ata_taskfile tf;
4496 unsigned int err_mask;
4497
4498 /* set up set-features taskfile */
4499 DPRINTK("set features - xfer mode\n");
4500
4501 /* Some controllers and ATAPI devices show flaky interrupt
4502 * behavior after setting xfer mode. Use polling instead.
4503 */
4504 ata_tf_init(dev, &tf);
4505 tf.command = ATA_CMD_SET_FEATURES;
4506 tf.feature = SETFEATURES_XFER;
4507 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4508 tf.protocol = ATA_PROT_NODATA;
4509 /* If we are using IORDY we must send the mode setting command */
4510 if (ata_pio_need_iordy(dev))
4511 tf.nsect = dev->xfer_mode;
4512 /* If the device has IORDY and the controller does not - turn it off */
4513 else if (ata_id_has_iordy(dev->id))
4514 tf.nsect = 0x01;
4515 else /* In the ancient relic department - skip all of this */
4516 return 0;
4517
4518 /* On some disks, this command causes spin-up, so we need longer timeout */
4519 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4520
4521 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4522 return err_mask;
4523 }
4524
4525 /**
4526 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4527 * @dev: Device to which command will be sent
4528  *	@enable: Subcommand selecting whether to enable or disable the feature
4529  *	@feature: The feature to set, passed in the sector count field
4530  *
4531  *	Issue SET FEATURES - SATA FEATURES command to device @dev,
4532  *	with @feature written to the sector count field.
4533 *
4534 * LOCKING:
4535 * PCI/etc. bus probe sem.
4536 *
4537 * RETURNS:
4538 * 0 on success, AC_ERR_* mask otherwise.
4539 */
4540 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4541 {
4542 struct ata_taskfile tf;
4543 unsigned int err_mask;
4544
4545 /* set up set-features taskfile */
4546 DPRINTK("set features - SATA features\n");
4547
4548 ata_tf_init(dev, &tf);
4549 tf.command = ATA_CMD_SET_FEATURES;
4550 tf.feature = enable;
4551 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4552 tf.protocol = ATA_PROT_NODATA;
4553 tf.nsect = feature;
4554
4555 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4556
4557 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4558 return err_mask;
4559 }
4560 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
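/*
 * Usage sketch: libata uses this helper to toggle SATA features such
 * as asynchronous notification, e.g. (constants from <linux/ata.h>):
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_AN);
 */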
4561
4562 /**
4563 * ata_dev_init_params - Issue INIT DEV PARAMS command
4564 * @dev: Device to which command will be sent
4565 * @heads: Number of heads (taskfile parameter)
4566 * @sectors: Number of sectors (taskfile parameter)
4567 *
4568 * LOCKING:
4569 * Kernel thread context (may sleep)
4570 *
4571 * RETURNS:
4572 * 0 on success, AC_ERR_* mask otherwise.
4573 */
4574 static unsigned int ata_dev_init_params(struct ata_device *dev,
4575 u16 heads, u16 sectors)
4576 {
4577 struct ata_taskfile tf;
4578 unsigned int err_mask;
4579
4580 /* Number of sectors per track 1-255. Number of heads 1-16 */
4581 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4582 return AC_ERR_INVALID;
4583
4584 /* set up init dev params taskfile */
4585 DPRINTK("init dev params \n");
4586
4587 ata_tf_init(dev, &tf);
4588 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4589 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4590 tf.protocol = ATA_PROT_NODATA;
4591 tf.nsect = sectors;
4592 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4593
4594 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4595 	/* A clean abort indicates an original or just-out-of-spec drive;
4596 	   continue, since we issue the setup based on the working
4597 	   geometry reported by the drive. */
4598 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4599 err_mask = 0;
4600
4601 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4602 return err_mask;
4603 }
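/*
 * Usage sketch: the geometry arguments come from the drive's own
 * IDENTIFY data; the ID-read retry path passes word 3 (heads) and
 * word 6 (sectors per track):
 *
 *	err_mask = ata_dev_init_params(dev, id[3], id[6]);
 */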
4604
4605 /**
4606 * ata_sg_clean - Unmap DMA memory associated with command
4607 * @qc: Command containing DMA memory to be released
4608 *
4609 * Unmap all mapped DMA memory associated with this command.
4610 *
4611 * LOCKING:
4612 * spin_lock_irqsave(host lock)
4613 */
4614 void ata_sg_clean(struct ata_queued_cmd *qc)
4615 {
4616 struct ata_port *ap = qc->ap;
4617 struct scatterlist *sg = qc->sg;
4618 int dir = qc->dma_dir;
4619
4620 WARN_ON_ONCE(sg == NULL);
4621
4622 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4623
4624 if (qc->n_elem)
4625 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4626
4627 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4628 qc->sg = NULL;
4629 }
4630
4631 /**
4632 * atapi_check_dma - Check whether ATAPI DMA can be supported
4633 * @qc: Metadata associated with taskfile to check
4634 *
4635 * Allow low-level driver to filter ATA PACKET commands, returning
4636 * a status indicating whether or not it is OK to use DMA for the
4637 * supplied PACKET command.
4638 *
4639 * LOCKING:
4640 * spin_lock_irqsave(host lock)
4641 *
4642 * RETURNS: 0 when ATAPI DMA can be used
4643 * nonzero otherwise
4644 */
4645 int atapi_check_dma(struct ata_queued_cmd *qc)
4646 {
4647 struct ata_port *ap = qc->ap;
4648
4649 	/* Don't allow DMA if the transfer isn't a multiple of 16 bytes.  Quite a
4650 * few ATAPI devices choke on such DMA requests.
4651 */
4652 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4653 unlikely(qc->nbytes & 15))
4654 return 1;
4655
4656 if (ap->ops->check_atapi_dma)
4657 return ap->ops->check_atapi_dma(qc);
4658
4659 return 0;
4660 }
4661
4662 /**
4663 * ata_std_qc_defer - Check whether a qc needs to be deferred
4664 * @qc: ATA command in question
4665 *
4666 * Non-NCQ commands cannot run with any other command, NCQ or
4667  *	not.  As the upper layer only knows the queue depth, we are
4668 * responsible for maintaining exclusion. This function checks
4669 * whether a new command @qc can be issued.
4670 *
4671 * LOCKING:
4672 * spin_lock_irqsave(host lock)
4673 *
4674 * RETURNS:
4675 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4676 */
4677 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4678 {
4679 struct ata_link *link = qc->dev->link;
4680
4681 if (qc->tf.protocol == ATA_PROT_NCQ) {
4682 if (!ata_tag_valid(link->active_tag))
4683 return 0;
4684 } else {
4685 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4686 return 0;
4687 }
4688
4689 return ATA_DEFER_LINK;
4690 }
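/*
 * Concretely, a sketch of the rule above: with NCQ commands in flight
 * (link->sactive != 0), another NCQ command may issue but a non-NCQ
 * command is deferred; once a non-NCQ command is active (active_tag
 * valid), everything else is deferred until it completes.
 */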
4691
4692 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4693
4694 /**
4695 * ata_sg_init - Associate command with scatter-gather table.
4696 * @qc: Command to be associated
4697 * @sg: Scatter-gather table.
4698 * @n_elem: Number of elements in s/g table.
4699 *
4700 * Initialize the data-related elements of queued_cmd @qc
4701 * to point to a scatter-gather table @sg, containing @n_elem
4702 * elements.
4703 *
4704 * LOCKING:
4705 * spin_lock_irqsave(host lock)
4706 */
4707 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4708 unsigned int n_elem)
4709 {
4710 qc->sg = sg;
4711 qc->n_elem = n_elem;
4712 qc->cursg = qc->sg;
4713 }
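/*
 * Usage sketch: the SCSI translation layer associates a qc with the
 * scatterlist of the originating SCSI command roughly like this
 * (scmd is the struct scsi_cmnd being translated):
 *
 *	ata_sg_init(qc, scsi_sglist(scmd), scsi_sg_count(scmd));
 *	qc->dma_dir = scmd->sc_data_direction;
 */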
4714
4715 /**
4716 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4717 * @qc: Command with scatter-gather table to be mapped.
4718 *
4719 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4720 *
4721 * LOCKING:
4722 * spin_lock_irqsave(host lock)
4723 *
4724 * RETURNS:
4725 * Zero on success, negative on error.
4726 *
4727 */
4728 static int ata_sg_setup(struct ata_queued_cmd *qc)
4729 {
4730 struct ata_port *ap = qc->ap;
4731 unsigned int n_elem;
4732
4733 VPRINTK("ENTER, ata%u\n", ap->print_id);
4734
4735 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4736 if (n_elem < 1)
4737 return -1;
4738
4739 DPRINTK("%d sg elements mapped\n", n_elem);
4740 qc->orig_n_elem = qc->n_elem;
4741 qc->n_elem = n_elem;
4742 qc->flags |= ATA_QCFLAG_DMAMAP;
4743
4744 return 0;
4745 }
4746
4747 /**
4748 * swap_buf_le16 - swap halves of 16-bit words in place
4749 * @buf: Buffer to swap
4750 * @buf_words: Number of 16-bit words in buffer.
4751 *
4752 * Swap halves of 16-bit words if needed to convert from
4753 * little-endian byte order to native cpu byte order, or
4754 * vice-versa.
4755 *
4756 * LOCKING:
4757 * Inherited from caller.
4758 */
4759 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4760 {
4761 #ifdef __BIG_ENDIAN
4762 unsigned int i;
4763
4764 for (i = 0; i < buf_words; i++)
4765 buf[i] = le16_to_cpu(buf[i]);
4766 #endif /* __BIG_ENDIAN */
4767 }
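/*
 * Usage sketch: IDENTIFY data arrives as little-endian 16-bit words,
 * so PIO-in paths swap the buffer on big-endian hosts before parsing:
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */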
4768
4769 /**
4770 * ata_qc_new_init - Request an available ATA command, and initialize it
4771 * @dev: Device from whom we request an available command structure
4772 *
4773 * LOCKING:
4774 * None.
4775 */
4776
4777 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
4778 {
4779 struct ata_port *ap = dev->link->ap;
4780 struct ata_queued_cmd *qc;
4781
4782 /* no command while frozen */
4783 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4784 return NULL;
4785
4786 /* libsas case */
4787 if (ap->flags & ATA_FLAG_SAS_HOST) {
4788 tag = ata_sas_allocate_tag(ap);
4789 if (tag < 0)
4790 return NULL;
4791 }
4792
4793 qc = __ata_qc_from_tag(ap, tag);
4794 qc->tag = tag;
4795 qc->scsicmd = NULL;
4796 qc->ap = ap;
4797 qc->dev = dev;
4798
4799 ata_qc_reinit(qc);
4800
4801 return qc;
4802 }
4803
4804 /**
4805 * ata_qc_free - free unused ata_queued_cmd
4806 * @qc: Command to complete
4807 *
4808 * Designed to free unused ata_queued_cmd object
4809 * in case something prevents using it.
4810 *
4811 * LOCKING:
4812 * spin_lock_irqsave(host lock)
4813 */
4814 void ata_qc_free(struct ata_queued_cmd *qc)
4815 {
4816 struct ata_port *ap;
4817 unsigned int tag;
4818
4819 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4820 ap = qc->ap;
4821
4822 qc->flags = 0;
4823 tag = qc->tag;
4824 if (likely(ata_tag_valid(tag))) {
4825 qc->tag = ATA_TAG_POISON;
4826 if (ap->flags & ATA_FLAG_SAS_HOST)
4827 ata_sas_free_tag(tag, ap);
4828 }
4829 }
4830
4831 void __ata_qc_complete(struct ata_queued_cmd *qc)
4832 {
4833 struct ata_port *ap;
4834 struct ata_link *link;
4835
4836 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4837 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4838 ap = qc->ap;
4839 link = qc->dev->link;
4840
4841 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4842 ata_sg_clean(qc);
4843
4844 /* command should be marked inactive atomically with qc completion */
4845 if (qc->tf.protocol == ATA_PROT_NCQ) {
4846 link->sactive &= ~(1 << qc->tag);
4847 if (!link->sactive)
4848 ap->nr_active_links--;
4849 } else {
4850 link->active_tag = ATA_TAG_POISON;
4851 ap->nr_active_links--;
4852 }
4853
4854 /* clear exclusive status */
4855 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4856 ap->excl_link == link))
4857 ap->excl_link = NULL;
4858
4859 /* atapi: mark qc as inactive to prevent the interrupt handler
4860 * from completing the command twice later, before the error handler
4861 * is called. (when rc != 0 and atapi request sense is needed)
4862 */
4863 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4864 ap->qc_active &= ~(1 << qc->tag);
4865
4866 /* call completion callback */
4867 qc->complete_fn(qc);
4868 }
4869
4870 static void fill_result_tf(struct ata_queued_cmd *qc)
4871 {
4872 struct ata_port *ap = qc->ap;
4873
4874 qc->result_tf.flags = qc->tf.flags;
4875 ap->ops->qc_fill_rtf(qc);
4876 }
4877
4878 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4879 {
4880 struct ata_device *dev = qc->dev;
4881
4882 if (ata_is_nodata(qc->tf.protocol))
4883 return;
4884
4885 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4886 return;
4887
4888 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4889 }
4890
4891 /**
4892 * ata_qc_complete - Complete an active ATA command
4893 * @qc: Command to complete
4894 *
4895 * Indicate to the mid and upper layers that an ATA command has
4896 * completed, with either an ok or not-ok status.
4897 *
4898 * Refrain from calling this function multiple times when
4899 * successfully completing multiple NCQ commands.
4900 * ata_qc_complete_multiple() should be used instead, which will
4901 * properly update IRQ expect state.
4902 *
4903 * LOCKING:
4904 * spin_lock_irqsave(host lock)
4905 */
4906 void ata_qc_complete(struct ata_queued_cmd *qc)
4907 {
4908 struct ata_port *ap = qc->ap;
4909
4910 /* XXX: New EH and old EH use different mechanisms to
4911 * synchronize EH with regular execution path.
4912 *
4913 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4914 * Normal execution path is responsible for not accessing a
4915 * failed qc. libata core enforces the rule by returning NULL
4916 * from ata_qc_from_tag() for failed qcs.
4917 *
4918 * Old EH depends on ata_qc_complete() nullifying completion
4919 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4920 * not synchronize with interrupt handler. Only PIO task is
4921 * taken care of.
4922 */
4923 if (ap->ops->error_handler) {
4924 struct ata_device *dev = qc->dev;
4925 struct ata_eh_info *ehi = &dev->link->eh_info;
4926
4927 if (unlikely(qc->err_mask))
4928 qc->flags |= ATA_QCFLAG_FAILED;
4929
4930 /*
4931 * Finish internal commands without any further processing
4932 * and always with the result TF filled.
4933 */
4934 if (unlikely(ata_tag_internal(qc->tag))) {
4935 fill_result_tf(qc);
4936 trace_ata_qc_complete_internal(qc);
4937 __ata_qc_complete(qc);
4938 return;
4939 }
4940
4941 /*
4942 * Non-internal qc has failed. Fill the result TF and
4943 * summon EH.
4944 */
4945 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4946 fill_result_tf(qc);
4947 trace_ata_qc_complete_failed(qc);
4948 ata_qc_schedule_eh(qc);
4949 return;
4950 }
4951
4952 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4953
4954 /* read result TF if requested */
4955 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4956 fill_result_tf(qc);
4957
4958 trace_ata_qc_complete_done(qc);
4959 /* Some commands need post-processing after successful
4960 * completion.
4961 */
4962 switch (qc->tf.command) {
4963 case ATA_CMD_SET_FEATURES:
4964 if (qc->tf.feature != SETFEATURES_WC_ON &&
4965 qc->tf.feature != SETFEATURES_WC_OFF)
4966 break;
4967 /* fall through */
4968 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4969 case ATA_CMD_SET_MULTI: /* multi_count changed */
4970 /* revalidate device */
4971 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4972 ata_port_schedule_eh(ap);
4973 break;
4974
4975 case ATA_CMD_SLEEP:
4976 dev->flags |= ATA_DFLAG_SLEEPING;
4977 break;
4978 }
4979
4980 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4981 ata_verify_xfer(qc);
4982
4983 __ata_qc_complete(qc);
4984 } else {
4985 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4986 return;
4987
4988 /* read result TF if failed or requested */
4989 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4990 fill_result_tf(qc);
4991
4992 __ata_qc_complete(qc);
4993 }
4994 }
4995
4996 /**
4997 * ata_qc_complete_multiple - Complete multiple qcs successfully
4998 * @ap: port in question
4999 * @qc_active: new qc_active mask
5000 *
5001  *	Complete in-flight commands.  This function is meant to be
5002  *	called from the low-level driver's interrupt routine to complete
5003  *	requests normally.  ap->qc_active and @qc_active are compared
5004 * and commands are completed accordingly.
5005 *
5006 * Always use this function when completing multiple NCQ commands
5007 * from IRQ handlers instead of calling ata_qc_complete()
5008 * multiple times to keep IRQ expect status properly in sync.
5009 *
5010 * LOCKING:
5011 * spin_lock_irqsave(host lock)
5012 *
5013 * RETURNS:
5014 * Number of completed commands on success, -errno otherwise.
5015 */
5016 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5017 {
5018 int nr_done = 0;
5019 u32 done_mask;
5020
5021 done_mask = ap->qc_active ^ qc_active;
5022
5023 if (unlikely(done_mask & qc_active)) {
5024 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5025 ap->qc_active, qc_active);
5026 return -EINVAL;
5027 }
5028
5029 while (done_mask) {
5030 struct ata_queued_cmd *qc;
5031 unsigned int tag = __ffs(done_mask);
5032
5033 qc = ata_qc_from_tag(ap, tag);
5034 if (qc) {
5035 ata_qc_complete(qc);
5036 nr_done++;
5037 }
5038 done_mask &= ~(1 << tag);
5039 }
5040
5041 return nr_done;
5042 }
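/*
 * A minimal interrupt-path sketch, assuming the controller exposes the
 * set of still-active tags in a register (my_read_active_tags() is a
 * hypothetical accessor; the host lock must already be held):
 *
 *	u32 qc_active = my_read_active_tags(ap);
 *
 *	ata_qc_complete_multiple(ap, qc_active);
 */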
5043
5044 /**
5045 * ata_qc_issue - issue taskfile to device
5046 * @qc: command to issue to device
5047 *
5048  *	Prepare an ATA command for submission to the device.
5049 * This includes mapping the data into a DMA-able
5050 * area, filling in the S/G table, and finally
5051 * writing the taskfile to hardware, starting the command.
5052 *
5053 * LOCKING:
5054 * spin_lock_irqsave(host lock)
5055 */
5056 void ata_qc_issue(struct ata_queued_cmd *qc)
5057 {
5058 struct ata_port *ap = qc->ap;
5059 struct ata_link *link = qc->dev->link;
5060 u8 prot = qc->tf.protocol;
5061
5062 /* Make sure only one non-NCQ command is outstanding. The
5063 * check is skipped for old EH because it reuses active qc to
5064 * request ATAPI sense.
5065 */
5066 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5067
5068 if (ata_is_ncq(prot)) {
5069 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5070
5071 if (!link->sactive)
5072 ap->nr_active_links++;
5073 link->sactive |= 1 << qc->tag;
5074 } else {
5075 WARN_ON_ONCE(link->sactive);
5076
5077 ap->nr_active_links++;
5078 link->active_tag = qc->tag;
5079 }
5080
5081 qc->flags |= ATA_QCFLAG_ACTIVE;
5082 ap->qc_active |= 1 << qc->tag;
5083
5084 /*
5085 * We guarantee to LLDs that they will have at least one
5086 * non-zero sg if the command is a data command.
5087 */
5088 if (WARN_ON_ONCE(ata_is_data(prot) &&
5089 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5090 goto sys_err;
5091
5092 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5093 (ap->flags & ATA_FLAG_PIO_DMA)))
5094 if (ata_sg_setup(qc))
5095 goto sys_err;
5096
5097 /* if device is sleeping, schedule reset and abort the link */
5098 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5099 link->eh_info.action |= ATA_EH_RESET;
5100 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5101 ata_link_abort(link);
5102 return;
5103 }
5104
5105 ap->ops->qc_prep(qc);
5106 trace_ata_qc_issue(qc);
5107 qc->err_mask |= ap->ops->qc_issue(qc);
5108 if (unlikely(qc->err_mask))
5109 goto err;
5110 return;
5111
5112 sys_err:
5113 qc->err_mask |= AC_ERR_SYSTEM;
5114 err:
5115 ata_qc_complete(qc);
5116 }
5117
5118 /**
5119 * sata_scr_valid - test whether SCRs are accessible
5120 * @link: ATA link to test SCR accessibility for
5121 *
5122 * Test whether SCRs are accessible for @link.
5123 *
5124 * LOCKING:
5125 * None.
5126 *
5127 * RETURNS:
5128 * 1 if SCRs are accessible, 0 otherwise.
5129 */
5130 int sata_scr_valid(struct ata_link *link)
5131 {
5132 struct ata_port *ap = link->ap;
5133
5134 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5135 }
5136
5137 /**
5138 * sata_scr_read - read SCR register of the specified port
5139 * @link: ATA link to read SCR for
5140 * @reg: SCR to read
5141 * @val: Place to store read value
5142 *
5143 * Read SCR register @reg of @link into *@val. This function is
5144 * guaranteed to succeed if @link is ap->link, the cable type of
5145 * the port is SATA and the port implements ->scr_read.
5146 *
5147 * LOCKING:
5148 * None if @link is ap->link. Kernel thread context otherwise.
5149 *
5150 * RETURNS:
5151 * 0 on success, negative errno on failure.
5152 */
5153 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5154 {
5155 if (ata_is_host_link(link)) {
5156 if (sata_scr_valid(link))
5157 return link->ap->ops->scr_read(link, reg, val);
5158 return -EOPNOTSUPP;
5159 }
5160
5161 return sata_pmp_scr_read(link, reg, val);
5162 }
5163
5164 /**
5165 * sata_scr_write - write SCR register of the specified port
5166 * @link: ATA link to write SCR for
5167 * @reg: SCR to write
5168 * @val: value to write
5169 *
5170 * Write @val to SCR register @reg of @link. This function is
5171 * guaranteed to succeed if @link is ap->link, the cable type of
5172 * the port is SATA and the port implements ->scr_read.
5173 *
5174 * LOCKING:
5175 * None if @link is ap->link. Kernel thread context otherwise.
5176 *
5177 * RETURNS:
5178 * 0 on success, negative errno on failure.
5179 */
5180 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5181 {
5182 if (ata_is_host_link(link)) {
5183 if (sata_scr_valid(link))
5184 return link->ap->ops->scr_write(link, reg, val);
5185 return -EOPNOTSUPP;
5186 }
5187
5188 return sata_pmp_scr_write(link, reg, val);
5189 }
5190
5191 /**
5192 * sata_scr_write_flush - write SCR register of the specified port and flush
5193 * @link: ATA link to write SCR for
5194 * @reg: SCR to write
5195 * @val: value to write
5196 *
5197 * This function is identical to sata_scr_write() except that this
5198 * function performs flush after writing to the register.
5199 *
5200 * LOCKING:
5201 * None if @link is ap->link. Kernel thread context otherwise.
5202 *
5203 * RETURNS:
5204 * 0 on success, negative errno on failure.
5205 */
5206 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5207 {
5208 if (ata_is_host_link(link)) {
5209 int rc;
5210
5211 if (sata_scr_valid(link)) {
5212 rc = link->ap->ops->scr_write(link, reg, val);
5213 if (rc == 0)
5214 rc = link->ap->ops->scr_read(link, reg, &val);
5215 return rc;
5216 }
5217 return -EOPNOTSUPP;
5218 }
5219
5220 return sata_pmp_scr_write(link, reg, val);
5221 }
5222
5223 /**
5224 * ata_phys_link_online - test whether the given link is online
5225 * @link: ATA link to test
5226 *
5227 * Test whether @link is online. Note that this function returns
5228 * 0 if online status of @link cannot be obtained, so
5229 * ata_link_online(link) != !ata_link_offline(link).
5230 *
5231 * LOCKING:
5232 * None.
5233 *
5234 * RETURNS:
5235 * True if the port online status is available and online.
5236 */
5237 bool ata_phys_link_online(struct ata_link *link)
5238 {
5239 u32 sstatus;
5240
5241 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5242 ata_sstatus_online(sstatus))
5243 return true;
5244 return false;
5245 }
5246
5247 /**
5248 * ata_phys_link_offline - test whether the given link is offline
5249 * @link: ATA link to test
5250 *
5251 * Test whether @link is offline. Note that this function
5252 * returns 0 if offline status of @link cannot be obtained, so
5253 * ata_link_online(link) != !ata_link_offline(link).
5254 *
5255 * LOCKING:
5256 * None.
5257 *
5258 * RETURNS:
5259 * True if the port offline status is available and offline.
5260 */
5261 bool ata_phys_link_offline(struct ata_link *link)
5262 {
5263 u32 sstatus;
5264
5265 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5266 !ata_sstatus_online(sstatus))
5267 return true;
5268 return false;
5269 }
5270
5271 /**
5272 * ata_link_online - test whether the given link is online
5273 * @link: ATA link to test
5274 *
5275 * Test whether @link is online. This is identical to
5276 * ata_phys_link_online() when there's no slave link. When
5277 * there's a slave link, this function should only be called on
5278 * the master link and will return true if any of M/S links is
5279 * online.
5280 *
5281 * LOCKING:
5282 * None.
5283 *
5284 * RETURNS:
5285 * True if the port online status is available and online.
5286 */
5287 bool ata_link_online(struct ata_link *link)
5288 {
5289 struct ata_link *slave = link->ap->slave_link;
5290
5291 WARN_ON(link == slave); /* shouldn't be called on slave link */
5292
5293 return ata_phys_link_online(link) ||
5294 (slave && ata_phys_link_online(slave));
5295 }
5296
5297 /**
5298 * ata_link_offline - test whether the given link is offline
5299 * @link: ATA link to test
5300 *
5301 * Test whether @link is offline. This is identical to
5302 * ata_phys_link_offline() when there's no slave link. When
5303 * there's a slave link, this function should only be called on
5304 * the master link and will return true if both M/S links are
5305 * offline.
5306 *
5307 * LOCKING:
5308 * None.
5309 *
5310 * RETURNS:
5311 * True if the port offline status is available and offline.
5312 */
5313 bool ata_link_offline(struct ata_link *link)
5314 {
5315 struct ata_link *slave = link->ap->slave_link;
5316
5317 WARN_ON(link == slave); /* shouldn't be called on slave link */
5318
5319 return ata_phys_link_offline(link) &&
5320 (!slave || ata_phys_link_offline(slave));
5321 }
5322
5323 #ifdef CONFIG_PM
5324 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5325 unsigned int action, unsigned int ehi_flags,
5326 bool async)
5327 {
5328 struct ata_link *link;
5329 unsigned long flags;
5330
5331 /* Previous resume operation might still be in
5332 * progress. Wait for PM_PENDING to clear.
5333 */
5334 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5335 ata_port_wait_eh(ap);
5336 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5337 }
5338
5339 /* request PM ops to EH */
5340 spin_lock_irqsave(ap->lock, flags);
5341
5342 ap->pm_mesg = mesg;
5343 ap->pflags |= ATA_PFLAG_PM_PENDING;
5344 ata_for_each_link(link, ap, HOST_FIRST) {
5345 link->eh_info.action |= action;
5346 link->eh_info.flags |= ehi_flags;
5347 }
5348
5349 ata_port_schedule_eh(ap);
5350
5351 spin_unlock_irqrestore(ap->lock, flags);
5352
5353 if (!async) {
5354 ata_port_wait_eh(ap);
5355 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5356 }
5357 }
5358
5359 /*
5360  * On some hardware, a device fails to respond after being spun down for suspend.  As
5361 * the device won't be used before being resumed, we don't need to touch the
5362 * device. Ask EH to skip the usual stuff and proceed directly to suspend.
5363 *
5364 * http://thread.gmane.org/gmane.linux.ide/46764
5365 */
5366 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5367 | ATA_EHI_NO_AUTOPSY
5368 | ATA_EHI_NO_RECOVERY;
5369
5370 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5371 {
5372 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5373 }
5374
5375 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5376 {
5377 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5378 }
5379
5380 static int ata_port_pm_suspend(struct device *dev)
5381 {
5382 struct ata_port *ap = to_ata_port(dev);
5383
5384 if (pm_runtime_suspended(dev))
5385 return 0;
5386
5387 ata_port_suspend(ap, PMSG_SUSPEND);
5388 return 0;
5389 }
5390
5391 static int ata_port_pm_freeze(struct device *dev)
5392 {
5393 struct ata_port *ap = to_ata_port(dev);
5394
5395 if (pm_runtime_suspended(dev))
5396 return 0;
5397
5398 ata_port_suspend(ap, PMSG_FREEZE);
5399 return 0;
5400 }
5401
5402 static int ata_port_pm_poweroff(struct device *dev)
5403 {
5404 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5405 return 0;
5406 }
5407
5408 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5409 | ATA_EHI_QUIET;
5410
5411 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5412 {
5413 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5414 }
5415
5416 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5417 {
5418 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5419 }
5420
5421 static int ata_port_pm_resume(struct device *dev)
5422 {
5423 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5424 pm_runtime_disable(dev);
5425 pm_runtime_set_active(dev);
5426 pm_runtime_enable(dev);
5427 return 0;
5428 }
5429
5430 /*
5431 * For ODDs, the upper layer will poll for media change every few seconds,
5432 * which will make it enter and leave suspend state every few seconds. And
5433  * as each suspend will cause a hard/soft reset, the gain from runtime suspend
5434  * is very small and the ODD may malfunction after constantly being reset.
5435  * So the idle callback here will not proceed to suspend if a non-ZPODD-capable
5436 * ODD is attached to the port.
5437 */
5438 static int ata_port_runtime_idle(struct device *dev)
5439 {
5440 struct ata_port *ap = to_ata_port(dev);
5441 struct ata_link *link;
5442 struct ata_device *adev;
5443
5444 ata_for_each_link(link, ap, HOST_FIRST) {
5445 ata_for_each_dev(adev, link, ENABLED)
5446 if (adev->class == ATA_DEV_ATAPI &&
5447 !zpodd_dev_enabled(adev))
5448 return -EBUSY;
5449 }
5450
5451 return 0;
5452 }
5453
5454 static int ata_port_runtime_suspend(struct device *dev)
5455 {
5456 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5457 return 0;
5458 }
5459
5460 static int ata_port_runtime_resume(struct device *dev)
5461 {
5462 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5463 return 0;
5464 }
5465
5466 static const struct dev_pm_ops ata_port_pm_ops = {
5467 .suspend = ata_port_pm_suspend,
5468 .resume = ata_port_pm_resume,
5469 .freeze = ata_port_pm_freeze,
5470 .thaw = ata_port_pm_resume,
5471 .poweroff = ata_port_pm_poweroff,
5472 .restore = ata_port_pm_resume,
5473
5474 .runtime_suspend = ata_port_runtime_suspend,
5475 .runtime_resume = ata_port_runtime_resume,
5476 .runtime_idle = ata_port_runtime_idle,
5477 };
5478
5479 /* sas ports don't participate in pm runtime management of ata_ports,
5480 * and need to resume ata devices at the domain level, not the per-port
5481 * level. sas suspend/resume is async to allow parallel port recovery
5482 * since sas has multiple ata_port instances per Scsi_Host.
5483 */
5484 void ata_sas_port_suspend(struct ata_port *ap)
5485 {
5486 ata_port_suspend_async(ap, PMSG_SUSPEND);
5487 }
5488 EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5489
5490 void ata_sas_port_resume(struct ata_port *ap)
5491 {
5492 ata_port_resume_async(ap, PMSG_RESUME);
5493 }
5494 EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5495
5496 /**
5497 * ata_host_suspend - suspend host
5498 * @host: host to suspend
5499 * @mesg: PM message
5500 *
5501 * Suspend @host. Actual operation is performed by port suspend.
5502 */
5503 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5504 {
5505 host->dev->power.power_state = mesg;
5506 return 0;
5507 }
5508
5509 /**
5510 * ata_host_resume - resume host
5511 * @host: host to resume
5512 *
5513 * Resume @host. Actual operation is performed by port resume.
5514 */
5515 void ata_host_resume(struct ata_host *host)
5516 {
5517 host->dev->power.power_state = PMSG_ON;
5518 }
5519 #endif
5520
5521 struct device_type ata_port_type = {
5522 .name = "ata_port",
5523 #ifdef CONFIG_PM
5524 .pm = &ata_port_pm_ops,
5525 #endif
5526 };
5527
5528 /**
5529 * ata_dev_init - Initialize an ata_device structure
5530 * @dev: Device structure to initialize
5531 *
5532 * Initialize @dev in preparation for probing.
5533 *
5534 * LOCKING:
5535 * Inherited from caller.
5536 */
5537 void ata_dev_init(struct ata_device *dev)
5538 {
5539 struct ata_link *link = ata_dev_phys_link(dev);
5540 struct ata_port *ap = link->ap;
5541 unsigned long flags;
5542
5543 /* SATA spd limit is bound to the attached device, reset together */
5544 link->sata_spd_limit = link->hw_sata_spd_limit;
5545 link->sata_spd = 0;
5546
5547 /* High bits of dev->flags are used to record warm plug
5548 * requests which occur asynchronously. Synchronize using
5549 * host lock.
5550 */
5551 spin_lock_irqsave(ap->lock, flags);
5552 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5553 dev->horkage = 0;
5554 spin_unlock_irqrestore(ap->lock, flags);
5555
5556 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5557 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5558 dev->pio_mask = UINT_MAX;
5559 dev->mwdma_mask = UINT_MAX;
5560 dev->udma_mask = UINT_MAX;
5561 }
5562
5563 /**
5564 * ata_link_init - Initialize an ata_link structure
5565 * @ap: ATA port link is attached to
5566 * @link: Link structure to initialize
5567 * @pmp: Port multiplier port number
5568 *
5569 * Initialize @link.
5570 *
5571 * LOCKING:
5572 * Kernel thread context (may sleep)
5573 */
5574 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5575 {
5576 int i;
5577
5578 /* clear everything except for devices */
5579 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5580 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5581
5582 link->ap = ap;
5583 link->pmp = pmp;
5584 link->active_tag = ATA_TAG_POISON;
5585 link->hw_sata_spd_limit = UINT_MAX;
5586
5587 /* can't use iterator, ap isn't initialized yet */
5588 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5589 struct ata_device *dev = &link->device[i];
5590
5591 dev->link = link;
5592 dev->devno = dev - link->device;
5593 #ifdef CONFIG_ATA_ACPI
5594 dev->gtf_filter = ata_acpi_gtf_filter;
5595 #endif
5596 ata_dev_init(dev);
5597 }
5598 }
5599
5600 /**
5601 * sata_link_init_spd - Initialize link->sata_spd_limit
5602 * @link: Link to configure sata_spd_limit for
5603 *
5604 * Initialize @link->[hw_]sata_spd_limit to the currently
5605 * configured value.
5606 *
5607 * LOCKING:
5608 * Kernel thread context (may sleep).
5609 *
5610 * RETURNS:
5611 * 0 on success, -errno on failure.
5612 */
5613 int sata_link_init_spd(struct ata_link *link)
5614 {
5615 u8 spd;
5616 int rc;
5617
5618 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5619 if (rc)
5620 return rc;
5621
5622 spd = (link->saved_scontrol >> 4) & 0xf;
5623 if (spd)
5624 link->hw_sata_spd_limit &= (1 << spd) - 1;
5625
5626 ata_force_link_limits(link);
5627
5628 link->sata_spd_limit = link->hw_sata_spd_limit;
5629
5630 return 0;
5631 }
5632
5633 /**
5634 * ata_port_alloc - allocate and initialize basic ATA port resources
5635 * @host: ATA host this allocated port belongs to
5636 *
5637 * Allocate and initialize basic ATA port resources.
5638 *
5639 * RETURNS:
5640  *	Allocated ATA port on success, NULL on failure.
5641 *
5642 * LOCKING:
5643 * Inherited from calling layer (may sleep).
5644 */
5645 struct ata_port *ata_port_alloc(struct ata_host *host)
5646 {
5647 struct ata_port *ap;
5648
5649 DPRINTK("ENTER\n");
5650
5651 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5652 if (!ap)
5653 return NULL;
5654
5655 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5656 ap->lock = &host->lock;
5657 ap->print_id = -1;
5658 ap->local_port_no = -1;
5659 ap->host = host;
5660 ap->dev = host->dev;
5661
5662 #if defined(ATA_VERBOSE_DEBUG)
5663 /* turn on all debugging levels */
5664 ap->msg_enable = 0x00FF;
5665 #elif defined(ATA_DEBUG)
5666 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5667 #else
5668 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5669 #endif
5670
5671 mutex_init(&ap->scsi_scan_mutex);
5672 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5673 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5674 INIT_LIST_HEAD(&ap->eh_done_q);
5675 init_waitqueue_head(&ap->eh_wait_q);
5676 init_completion(&ap->park_req_pending);
5677 init_timer_deferrable(&ap->fastdrain_timer);
5678 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5679 ap->fastdrain_timer.data = (unsigned long)ap;
5680
5681 ap->cbl = ATA_CBL_NONE;
5682
5683 ata_link_init(ap, &ap->link, 0);
5684
5685 #ifdef ATA_IRQ_TRAP
5686 ap->stats.unhandled_irq = 1;
5687 ap->stats.idle_irq = 1;
5688 #endif
5689 ata_sff_port_init(ap);
5690
5691 return ap;
5692 }
5693
5694 static void ata_host_release(struct device *gendev, void *res)
5695 {
5696 struct ata_host *host = dev_get_drvdata(gendev);
5697 int i;
5698
5699 for (i = 0; i < host->n_ports; i++) {
5700 struct ata_port *ap = host->ports[i];
5701
5702 if (!ap)
5703 continue;
5704
5705 if (ap->scsi_host)
5706 scsi_host_put(ap->scsi_host);
5707
5708 kfree(ap->pmp_link);
5709 kfree(ap->slave_link);
5710 kfree(ap);
5711 host->ports[i] = NULL;
5712 }
5713
5714 dev_set_drvdata(gendev, NULL);
5715 }
5716
5717 /**
5718 * ata_host_alloc - allocate and init basic ATA host resources
5719 * @dev: generic device this host is associated with
5720 * @max_ports: maximum number of ATA ports associated with this host
5721 *
5722  *	Allocate and initialize basic ATA host resources.  An LLD calls
5723  *	this function to allocate a host, initializes it fully and then
5724  *	attaches it using ata_host_register().
5725 *
5726 * @max_ports ports are allocated and host->n_ports is
5727 * initialized to @max_ports. The caller is allowed to decrease
5728 * host->n_ports before calling ata_host_register(). The unused
5729 * ports will be automatically freed on registration.
5730 *
5731 * RETURNS:
5732  *	Allocated ATA host on success, NULL on failure.
5733 *
5734 * LOCKING:
5735 * Inherited from calling layer (may sleep).
5736 */
5737 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5738 {
5739 struct ata_host *host;
5740 size_t sz;
5741 int i;
5742
5743 DPRINTK("ENTER\n");
5744
5745 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5746 return NULL;
5747
5748 /* alloc a container for our list of ATA ports (buses) */
5749 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5751 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5752 if (!host)
5753 goto err_out;
5754
5755 devres_add(dev, host);
5756 dev_set_drvdata(dev, host);
5757
5758 spin_lock_init(&host->lock);
5759 mutex_init(&host->eh_mutex);
5760 host->dev = dev;
5761 host->n_ports = max_ports;
5762
5763 /* allocate ports bound to this host */
5764 for (i = 0; i < max_ports; i++) {
5765 struct ata_port *ap;
5766
5767 ap = ata_port_alloc(host);
5768 if (!ap)
5769 goto err_out;
5770
5771 ap->port_no = i;
5772 host->ports[i] = ap;
5773 }
5774
5775 devres_remove_group(dev, NULL);
5776 return host;
5777
5778 err_out:
5779 devres_release_group(dev, NULL);
5780 return NULL;
5781 }
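
/*
 * Example: a minimal sketch (not built) of how an LLD might pair
 * ata_host_alloc() with ata_host_start() and ata_host_register().
 * The foo_* names and the port count are hypothetical.
 */
#if 0
static int foo_init_host(struct device *dev)
{
	struct ata_host *host;
	int i, rc;

	host = ata_host_alloc(dev, 2);
	if (!host)
		return -ENOMEM;

	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->ops = &foo_port_ops;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* n_ports may be lowered here; ata_host_register() frees the rest */
	return ata_host_register(host, &foo_sht);
}
#endif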
5782
5783 /**
5784 * ata_host_alloc_pinfo - alloc host and init with port_info array
5785 * @dev: generic device this host is associated with
5786 * @ppi: array of ATA port_info to initialize host with
5787 * @n_ports: number of ATA ports attached to this host
5788 *
5789 * Allocate ATA host and initialize with info from @ppi. If NULL
5790 * terminated, @ppi may contain fewer entries than @n_ports. The
5791 * last entry will be used for the remaining ports.
5792 *
5793 * RETURNS:
5794  *	Allocated ATA host on success, NULL on failure.
5795 *
5796 * LOCKING:
5797 * Inherited from calling layer (may sleep).
5798 */
5799 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5800 const struct ata_port_info * const * ppi,
5801 int n_ports)
5802 {
5803 const struct ata_port_info *pi;
5804 struct ata_host *host;
5805 int i, j;
5806
5807 host = ata_host_alloc(dev, n_ports);
5808 if (!host)
5809 return NULL;
5810
5811 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5812 struct ata_port *ap = host->ports[i];
5813
5814 if (ppi[j])
5815 pi = ppi[j++];
5816
5817 ap->pio_mask = pi->pio_mask;
5818 ap->mwdma_mask = pi->mwdma_mask;
5819 ap->udma_mask = pi->udma_mask;
5820 ap->flags |= pi->flags;
5821 ap->link.flags |= pi->link_flags;
5822 ap->ops = pi->port_ops;
5823
5824 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5825 host->ops = pi->port_ops;
5826 }
5827
5828 return host;
5829 }
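
/*
 * Example: a sketch of the NULL-terminated @ppi convention.  With a
 * single real entry, every port inherits it.  foo_port_info is
 * hypothetical.
 */
#if 0
static int foo_probe(struct device *dev)
{
	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
	struct ata_host *host;

	/* both ports get their masks, flags and ops from foo_port_info */
	host = ata_host_alloc_pinfo(dev, ppi, 2);
	return host ? 0 : -ENOMEM;
}
#endif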
5830
5831 /**
5832 * ata_slave_link_init - initialize slave link
5833 * @ap: port to initialize slave link for
5834 *
5835 * Create and initialize slave link for @ap. This enables slave
5836 * link handling on the port.
5837 *
5838 * In libata, a port contains links and a link contains devices.
5839  *	There is a single host link, but if a PMP is attached to it,
5840  *	there can be multiple fan-out links.  On SATA, there's usually
5841  *	a single device connected to a link, but PATA and SATA
5842  *	controllers emulating a TF based interface can have two - master
5843  *	and slave.
5844 *
5845  *	However, a few controllers don't fit this abstraction well -
5846  *	SATA controllers which emulate a TF interface with both master
5847  *	and slave devices but also have
5848 * separate SCR register sets for each device. These controllers
5849 * need separate links for physical link handling
5850 * (e.g. onlineness, link speed) but should be treated like a
5851 * traditional M/S controller for everything else (e.g. command
5852 * issue, softreset).
5853 *
5854 * slave_link is libata's way of handling this class of
5855 * controllers without impacting core layer too much. For
5856 * anything other than physical link handling, the default host
5857 * link is used for both master and slave. For physical link
5858 * handling, separate @ap->slave_link is used. All dirty details
5859 * are implemented inside libata core layer. From LLD's POV, the
5860 * only difference is that prereset, hardreset and postreset are
5861 * called once more for the slave link, so the reset sequence
5862 * looks like the following.
5863 *
5864 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5865 * softreset(M) -> postreset(M) -> postreset(S)
5866 *
5867 * Note that softreset is called only for the master. Softreset
5868 * resets both M/S by definition, so SRST on master should handle
5869 * both (the standard method will work just fine).
5870 *
5871 * LOCKING:
5872 * Should be called before host is registered.
5873 *
5874 * RETURNS:
5875 * 0 on success, -errno on failure.
5876 */
5877 int ata_slave_link_init(struct ata_port *ap)
5878 {
5879 struct ata_link *link;
5880
5881 WARN_ON(ap->slave_link);
5882 WARN_ON(ap->flags & ATA_FLAG_PMP);
5883
5884 link = kzalloc(sizeof(*link), GFP_KERNEL);
5885 if (!link)
5886 return -ENOMEM;
5887
5888 ata_link_init(ap, link, 1);
5889 ap->slave_link = link;
5890 return 0;
5891 }
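
/*
 * Example: a sketch of enabling slave links on a hypothetical host
 * that emulates M/S but has per-device SCRs; called after allocation
 * and before ata_host_register().
 */
#if 0
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_slave_link_init(host->ports[i]);
		if (rc)
			return rc;
	}
#endif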
5892
5893 static void ata_host_stop(struct device *gendev, void *res)
5894 {
5895 struct ata_host *host = dev_get_drvdata(gendev);
5896 int i;
5897
5898 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5899
5900 for (i = 0; i < host->n_ports; i++) {
5901 struct ata_port *ap = host->ports[i];
5902
5903 if (ap->ops->port_stop)
5904 ap->ops->port_stop(ap);
5905 }
5906
5907 if (host->ops->host_stop)
5908 host->ops->host_stop(host);
5909 }
5910
5911 /**
5912 * ata_finalize_port_ops - finalize ata_port_operations
5913 * @ops: ata_port_operations to finalize
5914 *
5915 * An ata_port_operations can inherit from another ops and that
5916 * ops can again inherit from another. This can go on as many
5917 * times as necessary as long as there is no loop in the
5918 * inheritance chain.
5919 *
5920 * Ops tables are finalized when the host is started. NULL or
5921  *	unspecified entries are inherited from the closest ancestor
5922 * which has the method and the entry is populated with it.
5923 * After finalization, the ops table directly points to all the
5924  *	methods and ->inherits is no longer necessary and is cleared.
5925 *
5926 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5927 *
5928 * LOCKING:
5929 * None.
5930 */
5931 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5932 {
5933 static DEFINE_SPINLOCK(lock);
5934 const struct ata_port_operations *cur;
5935 void **begin = (void **)ops;
5936 void **end = (void **)&ops->inherits;
5937 void **pp;
5938
5939 if (!ops || !ops->inherits)
5940 return;
5941
5942 spin_lock(&lock);
5943
5944 for (cur = ops->inherits; cur; cur = cur->inherits) {
5945 void **inherit = (void **)cur;
5946
5947 for (pp = begin; pp < end; pp++, inherit++)
5948 if (!*pp)
5949 *pp = *inherit;
5950 }
5951
5952 for (pp = begin; pp < end; pp++)
5953 if (IS_ERR(*pp))
5954 *pp = NULL;
5955
5956 ops->inherits = NULL;
5957
5958 spin_unlock(&lock);
5959 }
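
/*
 * Example: a sketch of the inheritance this function flattens.  Any
 * slot left NULL in foo_port_ops (hypothetical) is filled from
 * sata_port_ops and its ancestors; ATA_OP_NULL pins a slot to NULL.
 */
#if 0
static struct ata_port_operations foo_port_ops = {
	.inherits	= &sata_port_ops,
	.qc_issue	= foo_qc_issue,		/* overrides the ancestors */
	.hardreset	= ATA_OP_NULL,		/* masks sata_std_hardreset */
};
#endif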
5960
5961 /**
5962 * ata_host_start - start and freeze ports of an ATA host
5963 * @host: ATA host to start ports for
5964 *
5965 * Start and then freeze ports of @host. Started status is
5966 * recorded in host->flags, so this function can be called
5967 * multiple times. Ports are guaranteed to get started only
5968  *	once.  If host->ops isn't initialized yet, it is set to the
5969 * first non-dummy port ops.
5970 *
5971 * LOCKING:
5972 * Inherited from calling layer (may sleep).
5973 *
5974 * RETURNS:
5975 * 0 if all ports are started successfully, -errno otherwise.
5976 */
5977 int ata_host_start(struct ata_host *host)
5978 {
5979 int have_stop = 0;
5980 void *start_dr = NULL;
5981 int i, rc;
5982
5983 if (host->flags & ATA_HOST_STARTED)
5984 return 0;
5985
5986 ata_finalize_port_ops(host->ops);
5987
5988 for (i = 0; i < host->n_ports; i++) {
5989 struct ata_port *ap = host->ports[i];
5990
5991 ata_finalize_port_ops(ap->ops);
5992
5993 if (!host->ops && !ata_port_is_dummy(ap))
5994 host->ops = ap->ops;
5995
5996 if (ap->ops->port_stop)
5997 have_stop = 1;
5998 }
5999
6000 if (host->ops->host_stop)
6001 have_stop = 1;
6002
6003 if (have_stop) {
6004 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6005 if (!start_dr)
6006 return -ENOMEM;
6007 }
6008
6009 for (i = 0; i < host->n_ports; i++) {
6010 struct ata_port *ap = host->ports[i];
6011
6012 if (ap->ops->port_start) {
6013 rc = ap->ops->port_start(ap);
6014 if (rc) {
6015 if (rc != -ENODEV)
6016 dev_err(host->dev,
6017 "failed to start port %d (errno=%d)\n",
6018 i, rc);
6019 goto err_out;
6020 }
6021 }
6022 ata_eh_freeze_port(ap);
6023 }
6024
6025 if (start_dr)
6026 devres_add(host->dev, start_dr);
6027 host->flags |= ATA_HOST_STARTED;
6028 return 0;
6029
6030 err_out:
6031 while (--i >= 0) {
6032 struct ata_port *ap = host->ports[i];
6033
6034 if (ap->ops->port_stop)
6035 ap->ops->port_stop(ap);
6036 }
6037 devres_free(start_dr);
6038 return rc;
6039 }
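
/*
 * Example: a sketch for drivers that cannot use ata_host_activate(),
 * e.g. because they need more than one IRQ.  foo_sht and the IRQ
 * setup step are hypothetical.
 */
#if 0
	rc = ata_host_start(host);
	if (rc)
		return rc;
	/* request IRQ(s) and wire up handlers here */
	return ata_host_register(host, &foo_sht);
#endif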
6040
6041 /**
6042  *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
6043 * @host: host to initialize
6044 * @dev: device host is attached to
6045 * @ops: port_ops
6046 *
6047 */
6048 void ata_host_init(struct ata_host *host, struct device *dev,
6049 struct ata_port_operations *ops)
6050 {
6051 spin_lock_init(&host->lock);
6052 mutex_init(&host->eh_mutex);
6053 host->n_tags = ATA_MAX_QUEUE - 1;
6054 host->dev = dev;
6055 host->ops = ops;
6056 }
6057
6058 void __ata_port_probe(struct ata_port *ap)
6059 {
6060 struct ata_eh_info *ehi = &ap->link.eh_info;
6061 unsigned long flags;
6062
6063 /* kick EH for boot probing */
6064 spin_lock_irqsave(ap->lock, flags);
6065
6066 ehi->probe_mask |= ATA_ALL_DEVICES;
6067 ehi->action |= ATA_EH_RESET;
6068 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6069
6070 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6071 ap->pflags |= ATA_PFLAG_LOADING;
6072 ata_port_schedule_eh(ap);
6073
6074 spin_unlock_irqrestore(ap->lock, flags);
6075 }
6076
6077 int ata_port_probe(struct ata_port *ap)
6078 {
6079 int rc = 0;
6080
6081 if (ap->ops->error_handler) {
6082 __ata_port_probe(ap);
6083 ata_port_wait_eh(ap);
6084 } else {
6085 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6086 rc = ata_bus_probe(ap);
6087 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6088 }
6089 return rc;
6090 }
6091
6092
6093 static void async_port_probe(void *data, async_cookie_t cookie)
6094 {
6095 struct ata_port *ap = data;
6096
6097 /*
6098 * If we're not allowed to scan this host in parallel,
6099 * we need to wait until all previous scans have completed
6100 * before going further.
6101 * Jeff Garzik says this is only within a controller, so we
6102 * don't need to wait for port 0, only for later ports.
6103 */
6104 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6105 async_synchronize_cookie(cookie);
6106
6107 (void)ata_port_probe(ap);
6108
6109 /* in order to keep device order, we need to synchronize at this point */
6110 async_synchronize_cookie(cookie);
6111
6112 ata_scsi_scan_host(ap, 1);
6113 }
6114
6115 /**
6116 * ata_host_register - register initialized ATA host
6117 * @host: ATA host to register
6118 * @sht: template for SCSI host
6119 *
6120 * Register initialized ATA host. @host is allocated using
6121 * ata_host_alloc() and fully initialized by LLD. This function
6122 * starts ports, registers @host with ATA and SCSI layers and
6123  *	probes registered devices.
6124 *
6125 * LOCKING:
6126 * Inherited from calling layer (may sleep).
6127 *
6128 * RETURNS:
6129 * 0 on success, -errno otherwise.
6130 */
6131 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6132 {
6133 int i, rc;
6134
6135 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6136
6137 /* host must have been started */
6138 if (!(host->flags & ATA_HOST_STARTED)) {
6139 dev_err(host->dev, "BUG: trying to register unstarted host\n");
6140 WARN_ON(1);
6141 return -EINVAL;
6142 }
6143
6144 /* Blow away unused ports. This happens when LLD can't
6145 * determine the exact number of ports to allocate at
6146 * allocation time.
6147 */
6148 for (i = host->n_ports; host->ports[i]; i++)
6149 kfree(host->ports[i]);
6150
6151 /* give ports names and add SCSI hosts */
6152 for (i = 0; i < host->n_ports; i++) {
6153 host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6154 host->ports[i]->local_port_no = i + 1;
6155 }
6156
6157 /* Create associated sysfs transport objects */
6158 for (i = 0; i < host->n_ports; i++) {
6159 		rc = ata_tport_add(host->dev, host->ports[i]);
6160 		if (rc)
6161 			goto err_tadd;
6163 }
6164
6165 rc = ata_scsi_add_hosts(host, sht);
6166 if (rc)
6167 goto err_tadd;
6168
6169 /* set cable, sata_spd_limit and report */
6170 for (i = 0; i < host->n_ports; i++) {
6171 struct ata_port *ap = host->ports[i];
6172 unsigned long xfer_mask;
6173
6174 /* set SATA cable type if still unset */
6175 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6176 ap->cbl = ATA_CBL_SATA;
6177
6178 /* init sata_spd_limit to the current value */
6179 sata_link_init_spd(&ap->link);
6180 if (ap->slave_link)
6181 sata_link_init_spd(ap->slave_link);
6182
6183 /* print per-port info to dmesg */
6184 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6185 ap->udma_mask);
6186
6187 if (!ata_port_is_dummy(ap)) {
6188 ata_port_info(ap, "%cATA max %s %s\n",
6189 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6190 ata_mode_string(xfer_mask),
6191 ap->link.eh_info.desc);
6192 ata_ehi_clear_desc(&ap->link.eh_info);
6193 } else
6194 ata_port_info(ap, "DUMMY\n");
6195 }
6196
6197 /* perform each probe asynchronously */
6198 for (i = 0; i < host->n_ports; i++) {
6199 struct ata_port *ap = host->ports[i];
6200 async_schedule(async_port_probe, ap);
6201 }
6202
6203 return 0;
6204
6205 err_tadd:
6206 	while (--i >= 0)
6207 		ata_tport_delete(host->ports[i]);
6209 	return rc;
6211 }
6212
6213 /**
6214 * ata_host_activate - start host, request IRQ and register it
6215 * @host: target ATA host
6216 * @irq: IRQ to request
6217 * @irq_handler: irq_handler used when requesting IRQ
6218 * @irq_flags: irq_flags used when requesting IRQ
6219 * @sht: scsi_host_template to use when registering the host
6220 *
6221 * After allocating an ATA host and initializing it, most libata
6222 * LLDs perform three steps to activate the host - start host,
6223  *	request IRQ and register it.  This helper takes the necessary
6224 * arguments and performs the three steps in one go.
6225 *
6226 * An invalid IRQ skips the IRQ registration and expects the host to
6227 * have set polling mode on the port. In this case, @irq_handler
6228 * should be NULL.
6229 *
6230 * LOCKING:
6231 * Inherited from calling layer (may sleep).
6232 *
6233 * RETURNS:
6234 * 0 on success, -errno otherwise.
6235 */
6236 int ata_host_activate(struct ata_host *host, int irq,
6237 irq_handler_t irq_handler, unsigned long irq_flags,
6238 struct scsi_host_template *sht)
6239 {
6240 int i, rc;
6241
6242 rc = ata_host_start(host);
6243 if (rc)
6244 return rc;
6245
6246 /* Special case for polling mode */
6247 if (!irq) {
6248 WARN_ON(irq_handler);
6249 return ata_host_register(host, sht);
6250 }
6251
6252 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6253 dev_name(host->dev), host);
6254 if (rc)
6255 return rc;
6256
6257 for (i = 0; i < host->n_ports; i++)
6258 ata_port_desc(host->ports[i], "irq %d", irq);
6259
6260 rc = ata_host_register(host, sht);
6261 /* if failed, just free the IRQ and leave ports alone */
6262 if (rc)
6263 devm_free_irq(host->dev, irq, host);
6264
6265 return rc;
6266 }
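
/*
 * Example: a polling-only controller (e.g. ATA_FLAG_PIO_POLLING set
 * on its ports) skips IRQ registration by passing irq == 0 and a NULL
 * handler.  foo_sht is hypothetical.
 */
#if 0
	rc = ata_host_activate(host, 0, NULL, 0, &foo_sht);
#endif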
6267
6268 /**
6269  *	ata_port_detach - Detach ATA port in preparation for device removal
6270 * @ap: ATA port to be detached
6271 *
6272 * Detach all ATA devices and the associated SCSI devices of @ap;
6273 * then, remove the associated SCSI host. @ap is guaranteed to
6274 * be quiescent on return from this function.
6275 *
6276 * LOCKING:
6277 * Kernel thread context (may sleep).
6278 */
6279 static void ata_port_detach(struct ata_port *ap)
6280 {
6281 unsigned long flags;
6282 struct ata_link *link;
6283 struct ata_device *dev;
6284
6285 if (!ap->ops->error_handler)
6286 goto skip_eh;
6287
6288 /* tell EH we're leaving & flush EH */
6289 spin_lock_irqsave(ap->lock, flags);
6290 ap->pflags |= ATA_PFLAG_UNLOADING;
6291 ata_port_schedule_eh(ap);
6292 spin_unlock_irqrestore(ap->lock, flags);
6293
6294 /* wait till EH commits suicide */
6295 ata_port_wait_eh(ap);
6296
6297 /* it better be dead now */
6298 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6299
6300 cancel_delayed_work_sync(&ap->hotplug_task);
6301
6302 skip_eh:
6303 /* clean up zpodd on port removal */
6304 ata_for_each_link(link, ap, HOST_FIRST) {
6305 ata_for_each_dev(dev, link, ALL) {
6306 if (zpodd_dev_enabled(dev))
6307 zpodd_exit(dev);
6308 }
6309 }
6310 if (ap->pmp_link) {
6311 int i;
6312 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6313 ata_tlink_delete(&ap->pmp_link[i]);
6314 }
6315 /* remove the associated SCSI host */
6316 scsi_remove_host(ap->scsi_host);
6317 ata_tport_delete(ap);
6318 }
6319
6320 /**
6321 * ata_host_detach - Detach all ports of an ATA host
6322 * @host: Host to detach
6323 *
6324 * Detach all ports of @host.
6325 *
6326 * LOCKING:
6327 * Kernel thread context (may sleep).
6328 */
6329 void ata_host_detach(struct ata_host *host)
6330 {
6331 int i;
6332
6333 for (i = 0; i < host->n_ports; i++)
6334 ata_port_detach(host->ports[i]);
6335
6336 /* the host is dead now, dissociate ACPI */
6337 ata_acpi_dissociate(host);
6338 }
6339
6340 #ifdef CONFIG_PCI
6341
6342 /**
6343 * ata_pci_remove_one - PCI layer callback for device removal
6344 * @pdev: PCI device that was removed
6345 *
6346  *	PCI layer indicates to libata via this hook that a hot-unplug or
6347  *	module unload event has occurred.  Detach all ports.  Resource
6348 * release is handled via devres.
6349 *
6350 * LOCKING:
6351 * Inherited from PCI layer (may sleep).
6352 */
6353 void ata_pci_remove_one(struct pci_dev *pdev)
6354 {
6355 struct ata_host *host = pci_get_drvdata(pdev);
6356
6357 ata_host_detach(host);
6358 }
6359
6360 /* move to PCI subsystem */
6361 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6362 {
6363 unsigned long tmp = 0;
6364
6365 switch (bits->width) {
6366 case 1: {
6367 u8 tmp8 = 0;
6368 pci_read_config_byte(pdev, bits->reg, &tmp8);
6369 tmp = tmp8;
6370 break;
6371 }
6372 case 2: {
6373 u16 tmp16 = 0;
6374 pci_read_config_word(pdev, bits->reg, &tmp16);
6375 tmp = tmp16;
6376 break;
6377 }
6378 case 4: {
6379 u32 tmp32 = 0;
6380 pci_read_config_dword(pdev, bits->reg, &tmp32);
6381 tmp = tmp32;
6382 break;
6383 }
6384
6385 default:
6386 return -EINVAL;
6387 }
6388
6389 tmp &= bits->mask;
6390
6391 return (tmp == bits->val) ? 1 : 0;
6392 }
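
/*
 * Example: a sketch of the usual pci_test_config_bits() caller, a
 * prereset method that skips probing a disabled channel.  The foo_*
 * names and the config-space offsets/masks are hypothetical.
 */
#if 0
static int foo_prereset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits foo_enable_bits[] = {
		{ 0x41, 1 /* width in bytes */, 0x80, 0x80 },	/* primary */
		{ 0x43, 1, 0x80, 0x80 },			/* secondary */
	};
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
		return -ENOENT;	/* channel disabled, skip */

	return ata_std_prereset(link, deadline);
}
#endif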
6393
6394 #ifdef CONFIG_PM
6395 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6396 {
6397 pci_save_state(pdev);
6398 pci_disable_device(pdev);
6399
6400 if (mesg.event & PM_EVENT_SLEEP)
6401 pci_set_power_state(pdev, PCI_D3hot);
6402 }
6403
6404 int ata_pci_device_do_resume(struct pci_dev *pdev)
6405 {
6406 int rc;
6407
6408 pci_set_power_state(pdev, PCI_D0);
6409 pci_restore_state(pdev);
6410
6411 rc = pcim_enable_device(pdev);
6412 if (rc) {
6413 dev_err(&pdev->dev,
6414 "failed to enable device after resume (%d)\n", rc);
6415 return rc;
6416 }
6417
6418 pci_set_master(pdev);
6419 return 0;
6420 }
6421
6422 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6423 {
6424 struct ata_host *host = pci_get_drvdata(pdev);
6425 int rc = 0;
6426
6427 rc = ata_host_suspend(host, mesg);
6428 if (rc)
6429 return rc;
6430
6431 ata_pci_device_do_suspend(pdev, mesg);
6432
6433 return 0;
6434 }
6435
6436 int ata_pci_device_resume(struct pci_dev *pdev)
6437 {
6438 struct ata_host *host = pci_get_drvdata(pdev);
6439 int rc;
6440
6441 rc = ata_pci_device_do_resume(pdev);
6442 if (rc == 0)
6443 ata_host_resume(host);
6444 return rc;
6445 }
6446 #endif /* CONFIG_PM */
6447
6448 #endif /* CONFIG_PCI */
6449
6450 /**
6451 * ata_platform_remove_one - Platform layer callback for device removal
6452 * @pdev: Platform device that was removed
6453 *
6454  *	Platform layer indicates to libata via this hook that a hot-unplug
6455  *	or module unload event has occurred.  Detach all ports.  Resource
6456 * release is handled via devres.
6457 *
6458 * LOCKING:
6459 * Inherited from platform layer (may sleep).
6460 */
6461 int ata_platform_remove_one(struct platform_device *pdev)
6462 {
6463 struct ata_host *host = platform_get_drvdata(pdev);
6464
6465 ata_host_detach(host);
6466
6467 return 0;
6468 }
6469
6470 static int __init ata_parse_force_one(char **cur,
6471 struct ata_force_ent *force_ent,
6472 const char **reason)
6473 {
6474 static const struct ata_force_param force_tbl[] __initconst = {
6475 { "40c", .cbl = ATA_CBL_PATA40 },
6476 { "80c", .cbl = ATA_CBL_PATA80 },
6477 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6478 { "unk", .cbl = ATA_CBL_PATA_UNK },
6479 { "ign", .cbl = ATA_CBL_PATA_IGN },
6480 { "sata", .cbl = ATA_CBL_SATA },
6481 { "1.5Gbps", .spd_limit = 1 },
6482 { "3.0Gbps", .spd_limit = 2 },
6483 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6484 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6485 { "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM },
6486 { "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM },
6487 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6488 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6489 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6490 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6491 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6492 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6493 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6494 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6495 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6496 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6497 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6498 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6499 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6500 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6501 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6502 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6503 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6504 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6505 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6506 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6507 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6508 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6509 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6510 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6511 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6512 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6513 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6514 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6515 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6516 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6517 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6518 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6519 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6520 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6521 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6522 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6523 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6524 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6525 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6526 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
6527 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
6528 };
6529 char *start = *cur, *p = *cur;
6530 char *id, *val, *endp;
6531 const struct ata_force_param *match_fp = NULL;
6532 int nr_matches = 0, i;
6533
6534 /* find where this param ends and update *cur */
6535 while (*p != '\0' && *p != ',')
6536 p++;
6537
6538 if (*p == '\0')
6539 *cur = p;
6540 else
6541 *cur = p + 1;
6542
6543 *p = '\0';
6544
6545 /* parse */
6546 p = strchr(start, ':');
6547 if (!p) {
6548 val = strstrip(start);
6549 goto parse_val;
6550 }
6551 *p = '\0';
6552
6553 id = strstrip(start);
6554 val = strstrip(p + 1);
6555
6556 /* parse id */
6557 p = strchr(id, '.');
6558 if (p) {
6559 *p++ = '\0';
6560 force_ent->device = simple_strtoul(p, &endp, 10);
6561 if (p == endp || *endp != '\0') {
6562 *reason = "invalid device";
6563 return -EINVAL;
6564 }
6565 }
6566
6567 force_ent->port = simple_strtoul(id, &endp, 10);
6568 	if (id == endp || *endp != '\0') {
6569 *reason = "invalid port/link";
6570 return -EINVAL;
6571 }
6572
6573 parse_val:
6574 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6575 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6576 const struct ata_force_param *fp = &force_tbl[i];
6577
6578 if (strncasecmp(val, fp->name, strlen(val)))
6579 continue;
6580
6581 nr_matches++;
6582 match_fp = fp;
6583
6584 if (strcasecmp(val, fp->name) == 0) {
6585 nr_matches = 1;
6586 break;
6587 }
6588 }
6589
6590 if (!nr_matches) {
6591 *reason = "unknown value";
6592 return -EINVAL;
6593 }
6594 if (nr_matches > 1) {
6595 *reason = "ambigious value";
6596 return -EINVAL;
6597 }
6598
6599 force_ent->param = *match_fp;
6600
6601 return 0;
6602 }
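
/*
 * Example force strings this parser accepts (the format is a comma
 * separated list of "[ID:]VAL" with ID being PORT[.DEVICE]):
 *
 *	libata.force=1.5Gbps			applies to all ports
 *	libata.force=2:noncq			port 2 only
 *	libata.force=2.01:disable,3:norst	port 2 device 1, then port 3
 */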
6603
6604 static void __init ata_parse_force_param(void)
6605 {
6606 int idx = 0, size = 1;
6607 int last_port = -1, last_device = -1;
6608 char *p, *cur, *next;
6609
6610 /* calculate maximum number of params and allocate force_tbl */
6611 for (p = ata_force_param_buf; *p; p++)
6612 if (*p == ',')
6613 size++;
6614
6615 	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
6616 if (!ata_force_tbl) {
6617 printk(KERN_WARNING "ata: failed to extend force table, "
6618 "libata.force ignored\n");
6619 return;
6620 }
6621
6622 /* parse and populate the table */
6623 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6624 const char *reason = "";
6625 struct ata_force_ent te = { .port = -1, .device = -1 };
6626
6627 next = cur;
6628 if (ata_parse_force_one(&next, &te, &reason)) {
6629 printk(KERN_WARNING "ata: failed to parse force "
6630 "parameter \"%s\" (%s)\n",
6631 cur, reason);
6632 continue;
6633 }
6634
6635 if (te.port == -1) {
6636 te.port = last_port;
6637 te.device = last_device;
6638 }
6639
6640 ata_force_tbl[idx++] = te;
6641
6642 last_port = te.port;
6643 last_device = te.device;
6644 }
6645
6646 ata_force_tbl_size = idx;
6647 }
6648
6649 static int __init ata_init(void)
6650 {
6651 int rc;
6652
6653 ata_parse_force_param();
6654
6655 rc = ata_sff_init();
6656 if (rc) {
6657 kfree(ata_force_tbl);
6658 return rc;
6659 }
6660
6661 libata_transport_init();
6662 ata_scsi_transport_template = ata_attach_transport();
6663 if (!ata_scsi_transport_template) {
6664 ata_sff_exit();
6665 rc = -ENOMEM;
6666 goto err_out;
6667 }
6668
6669 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6670 return 0;
6671
6672 err_out:
6673 return rc;
6674 }
6675
6676 static void __exit ata_exit(void)
6677 {
6678 ata_release_transport(ata_scsi_transport_template);
6679 libata_transport_exit();
6680 ata_sff_exit();
6681 kfree(ata_force_tbl);
6682 }
6683
6684 subsys_initcall(ata_init);
6685 module_exit(ata_exit);
6686
6687 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6688
6689 int ata_ratelimit(void)
6690 {
6691 return __ratelimit(&ratelimit);
6692 }
6693
6694 /**
6695 * ata_msleep - ATA EH owner aware msleep
6696 * @ap: ATA port to attribute the sleep to
6697 * @msecs: duration to sleep in milliseconds
6698 *
6699  *	Sleeps @msecs.  If the current task is the owner of @ap's EH, the
6700 * ownership is released before going to sleep and reacquired
6701 * after the sleep is complete. IOW, other ports sharing the
6702 * @ap->host will be allowed to own the EH while this task is
6703 * sleeping.
6704 *
6705 * LOCKING:
6706 * Might sleep.
6707 */
6708 void ata_msleep(struct ata_port *ap, unsigned int msecs)
6709 {
6710 bool owns_eh = ap && ap->host->eh_owner == current;
6711
6712 if (owns_eh)
6713 ata_eh_release(ap);
6714
6715 msleep(msecs);
6716
6717 if (owns_eh)
6718 ata_eh_acquire(ap);
6719 }
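
/*
 * Example: LLD polling loops sleep via ata_msleep() so that sibling
 * ports can run EH in the meantime.  A sketch; foo_status() is a
 * hypothetical status accessor.
 */
#if 0
	while (foo_status(ap) & ATA_BUSY)
		ata_msleep(ap, 10);
#endif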
6720
6721 /**
6722 * ata_wait_register - wait until register value changes
6723 * @ap: ATA port to wait register for, can be NULL
6724 * @reg: IO-mapped register
6725 * @mask: Mask to apply to read register value
6726 * @val: Wait condition
6727 * @interval: polling interval in milliseconds
6728 * @timeout: timeout in milliseconds
6729 *
6730  *	Waiting for some bits of a register to change is a common
6731  *	operation for ATA controllers.  This function reads the 32-bit LE
6732  *	IO-mapped register @reg and tests for the following condition.
6733  *
6734  *	(*@reg & @mask) != @val
6735  *
6736  *	If the condition is met, it returns; otherwise, the process is
6737  *	repeated after @interval until timeout.
6738 *
6739 * LOCKING:
6740 * Kernel thread context (may sleep)
6741 *
6742 * RETURNS:
6743 * The final register value.
6744 */
6745 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6746 unsigned long interval, unsigned long timeout)
6747 {
6748 unsigned long deadline;
6749 u32 tmp;
6750
6751 tmp = ioread32(reg);
6752
6753 /* Calculate timeout _after_ the first read to make sure
6754 * preceding writes reach the controller before starting to
6755 * eat away the timeout.
6756 */
6757 deadline = ata_deadline(jiffies, timeout);
6758
6759 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6760 ata_msleep(ap, interval);
6761 tmp = ioread32(reg);
6762 }
6763
6764 return tmp;
6765 }
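
/*
 * Example: a sketch of waiting up to a second for a BSY-like bit to
 * clear in a 32-bit status register, polling every 10ms.  foo_status
 * (an ioremapped register) is hypothetical.
 */
#if 0
	tmp = ata_wait_register(ap, foo_status, ATA_BUSY, ATA_BUSY, 10, 1000);
	if (tmp & ATA_BUSY)
		return -EBUSY;	/* still busy after the 1000ms timeout */
#endif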
6766
6767 /**
6768 * sata_lpm_ignore_phy_events - test if PHY event should be ignored
6769 * @link: Link receiving the event
6770 *
6771  *	Test whether the received PHY event should be ignored.
6772 *
6773 * LOCKING:
6774  *	None.
6775 *
6776 * RETURNS:
6777 * True if the event has to be ignored.
6778 */
6779 bool sata_lpm_ignore_phy_events(struct ata_link *link)
6780 {
6781 unsigned long lpm_timeout = link->last_lpm_change +
6782 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
6783
6784 /* if LPM is enabled, PHYRDY doesn't mean anything */
6785 if (link->lpm_policy > ATA_LPM_MAX_POWER)
6786 return true;
6787
6788 /* ignore the first PHY event after the LPM policy changed
6789 	 * as it might be spurious
6790 */
6791 if ((link->flags & ATA_LFLAG_CHANGED) &&
6792 time_before(jiffies, lpm_timeout))
6793 return true;
6794
6795 return false;
6796 }
6797 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
6798
6799 /*
6800 * Dummy port_ops
6801 */
6802 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6803 {
6804 return AC_ERR_SYSTEM;
6805 }
6806
6807 static void ata_dummy_error_handler(struct ata_port *ap)
6808 {
6809 /* truly dummy */
6810 }
6811
6812 struct ata_port_operations ata_dummy_port_ops = {
6813 .qc_prep = ata_noop_qc_prep,
6814 .qc_issue = ata_dummy_qc_issue,
6815 .error_handler = ata_dummy_error_handler,
6816 .sched_eh = ata_std_sched_eh,
6817 .end_eh = ata_std_end_eh,
6818 };
6819
6820 const struct ata_port_info ata_dummy_port_info = {
6821 .port_ops = &ata_dummy_port_ops,
6822 };
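
/*
 * Example: a controller whose second channel is missing can publish
 * it as a dummy port; commands issued there fail with AC_ERR_SYSTEM.
 * foo_port_info is hypothetical.
 */
#if 0
static const struct ata_port_info *ppi[] = {
	&foo_port_info,
	&ata_dummy_port_info,
};
#endif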
6823
6824 /*
6825 * Utility print functions
6826 */
6827 void ata_port_printk(const struct ata_port *ap, const char *level,
6828 const char *fmt, ...)
6829 {
6830 struct va_format vaf;
6831 va_list args;
6832
6833 va_start(args, fmt);
6834
6835 vaf.fmt = fmt;
6836 vaf.va = &args;
6837
6838 printk("%sata%u: %pV", level, ap->print_id, &vaf);
6839
6840 va_end(args);
6841 }
6842 EXPORT_SYMBOL(ata_port_printk);
6843
6844 void ata_link_printk(const struct ata_link *link, const char *level,
6845 const char *fmt, ...)
6846 {
6847 struct va_format vaf;
6848 va_list args;
6849
6850 va_start(args, fmt);
6851
6852 vaf.fmt = fmt;
6853 vaf.va = &args;
6854
6855 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6856 printk("%sata%u.%02u: %pV",
6857 level, link->ap->print_id, link->pmp, &vaf);
6858 else
6859 printk("%sata%u: %pV",
6860 level, link->ap->print_id, &vaf);
6861
6862 va_end(args);
6863 }
6864 EXPORT_SYMBOL(ata_link_printk);
6865
6866 void ata_dev_printk(const struct ata_device *dev, const char *level,
6867 const char *fmt, ...)
6868 {
6869 struct va_format vaf;
6870 va_list args;
6871
6872 va_start(args, fmt);
6873
6874 vaf.fmt = fmt;
6875 vaf.va = &args;
6876
6877 printk("%sata%u.%02u: %pV",
6878 level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6879 &vaf);
6880
6881 va_end(args);
6882 }
6883 EXPORT_SYMBOL(ata_dev_printk);
6884
6885 void ata_print_version(const struct device *dev, const char *version)
6886 {
6887 dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6888 }
6889 EXPORT_SYMBOL(ata_print_version);
6890
6891 /*
6892 * libata is essentially a library of internal helper functions for
6893 * low-level ATA host controller drivers. As such, the API/ABI is
6894 * likely to change as new drivers are added and updated.
6895 * Do not depend on ABI/API stability.
6896 */
6897 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6898 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6899 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6900 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6901 EXPORT_SYMBOL_GPL(sata_port_ops);
6902 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6903 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6904 EXPORT_SYMBOL_GPL(ata_link_next);
6905 EXPORT_SYMBOL_GPL(ata_dev_next);
6906 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6907 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6908 EXPORT_SYMBOL_GPL(ata_host_init);
6909 EXPORT_SYMBOL_GPL(ata_host_alloc);
6910 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6911 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6912 EXPORT_SYMBOL_GPL(ata_host_start);
6913 EXPORT_SYMBOL_GPL(ata_host_register);
6914 EXPORT_SYMBOL_GPL(ata_host_activate);
6915 EXPORT_SYMBOL_GPL(ata_host_detach);
6916 EXPORT_SYMBOL_GPL(ata_sg_init);
6917 EXPORT_SYMBOL_GPL(ata_qc_complete);
6918 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6919 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6920 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6921 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6922 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6923 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6924 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6925 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6926 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6927 EXPORT_SYMBOL_GPL(ata_mode_string);
6928 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6929 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6930 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6931 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6932 EXPORT_SYMBOL_GPL(ata_dev_disable);
6933 EXPORT_SYMBOL_GPL(sata_set_spd);
6934 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6935 EXPORT_SYMBOL_GPL(sata_link_debounce);
6936 EXPORT_SYMBOL_GPL(sata_link_resume);
6937 EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
6938 EXPORT_SYMBOL_GPL(ata_std_prereset);
6939 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6940 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6941 EXPORT_SYMBOL_GPL(ata_std_postreset);
6942 EXPORT_SYMBOL_GPL(ata_dev_classify);
6943 EXPORT_SYMBOL_GPL(ata_dev_pair);
6944 EXPORT_SYMBOL_GPL(ata_ratelimit);
6945 EXPORT_SYMBOL_GPL(ata_msleep);
6946 EXPORT_SYMBOL_GPL(ata_wait_register);
6947 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6948 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6949 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6950 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6951 EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
6952 EXPORT_SYMBOL_GPL(sata_scr_valid);
6953 EXPORT_SYMBOL_GPL(sata_scr_read);
6954 EXPORT_SYMBOL_GPL(sata_scr_write);
6955 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6956 EXPORT_SYMBOL_GPL(ata_link_online);
6957 EXPORT_SYMBOL_GPL(ata_link_offline);
6958 #ifdef CONFIG_PM
6959 EXPORT_SYMBOL_GPL(ata_host_suspend);
6960 EXPORT_SYMBOL_GPL(ata_host_resume);
6961 #endif /* CONFIG_PM */
6962 EXPORT_SYMBOL_GPL(ata_id_string);
6963 EXPORT_SYMBOL_GPL(ata_id_c_string);
6964 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6965 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6966
6967 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6968 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6969 EXPORT_SYMBOL_GPL(ata_timing_compute);
6970 EXPORT_SYMBOL_GPL(ata_timing_merge);
6971 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6972
6973 #ifdef CONFIG_PCI
6974 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6975 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6976 #ifdef CONFIG_PM
6977 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6978 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6979 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6980 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6981 #endif /* CONFIG_PM */
6982 #endif /* CONFIG_PCI */
6983
6984 EXPORT_SYMBOL_GPL(ata_platform_remove_one);
6985
6986 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6987 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6988 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6989 EXPORT_SYMBOL_GPL(ata_port_desc);
6990 #ifdef CONFIG_PCI
6991 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6992 #endif /* CONFIG_PCI */
6993 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6994 EXPORT_SYMBOL_GPL(ata_link_abort);
6995 EXPORT_SYMBOL_GPL(ata_port_abort);
6996 EXPORT_SYMBOL_GPL(ata_port_freeze);
6997 EXPORT_SYMBOL_GPL(sata_async_notification);
6998 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6999 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7000 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7001 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7002 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
7003 EXPORT_SYMBOL_GPL(ata_do_eh);
7004 EXPORT_SYMBOL_GPL(ata_std_error_handler);
7005
7006 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7007 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7008 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7009 EXPORT_SYMBOL_GPL(ata_cable_ignore);
7010 EXPORT_SYMBOL_GPL(ata_cable_sata);