libata: add support for ATA_16 on ATAPI
[deliverable/linux.git] / drivers / ata / libata-core.c
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/list.h>
40 #include <linux/mm.h>
41 #include <linux/highmem.h>
42 #include <linux/spinlock.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/timer.h>
46 #include <linux/interrupt.h>
47 #include <linux/completion.h>
48 #include <linux/suspend.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/scatterlist.h>
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_host.h>
55 #include <linux/libata.h>
56 #include <asm/io.h>
57 #include <asm/semaphore.h>
58 #include <asm/byteorder.h>
59
60 #include "libata.h"
61
62 #define DRV_VERSION "2.21" /* must be exactly four chars */
63
64
65 /* debounce timing parameters in msecs { interval, duration, timeout } */
66 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
67 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
68 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
69
70 static unsigned int ata_dev_init_params(struct ata_device *dev,
71 u16 heads, u16 sectors);
72 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73 static void ata_dev_xfermask(struct ata_device *dev);
74 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
75
76 unsigned int ata_print_id = 1;
77 static struct workqueue_struct *ata_wq;
78
79 struct workqueue_struct *ata_aux_wq;
80
81 int atapi_enabled = 1;
82 module_param(atapi_enabled, int, 0444);
83 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84
85 int atapi_dmadir = 0;
86 module_param(atapi_dmadir, int, 0444);
87 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88
89 int atapi_passthru16 = 1;
90 module_param(atapi_passthru16, int, 0444);
91 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
92
93 int libata_fua = 0;
94 module_param_named(fua, libata_fua, int, 0444);
95 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
96
97 static int ata_ignore_hpa = 0;
98 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
99 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
100
101 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
102 module_param(ata_probe_timeout, int, 0444);
103 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
104
105 int libata_noacpi = 1;
106 module_param_named(noacpi, libata_noacpi, int, 0444);
107 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
108
109 MODULE_AUTHOR("Jeff Garzik");
110 MODULE_DESCRIPTION("Library module for ATA devices");
111 MODULE_LICENSE("GPL");
112 MODULE_VERSION(DRV_VERSION);
113
114
115 /**
116 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
117 * @tf: Taskfile to convert
118 * @pmp: Port multiplier port
119 * @is_cmd: This FIS is for command
120 * @fis: Buffer into which data will output
121 *
122 * Converts a standard ATA taskfile to a Serial ATA
123 * FIS structure (Register - Host to Device).
124 *
125 * LOCKING:
126 * Inherited from caller.
127 */
128 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
129 {
130 fis[0] = 0x27; /* Register - Host to Device FIS */
131 fis[1] = pmp & 0xf; /* Port multiplier number*/
132 if (is_cmd)
133 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
134
135 fis[2] = tf->command;
136 fis[3] = tf->feature;
137
138 fis[4] = tf->lbal;
139 fis[5] = tf->lbam;
140 fis[6] = tf->lbah;
141 fis[7] = tf->device;
142
143 fis[8] = tf->hob_lbal;
144 fis[9] = tf->hob_lbam;
145 fis[10] = tf->hob_lbah;
146 fis[11] = tf->hob_feature;
147
148 fis[12] = tf->nsect;
149 fis[13] = tf->hob_nsect;
150 fis[14] = 0;
151 fis[15] = tf->ctl;
152
153 fis[16] = 0;
154 fis[17] = 0;
155 fis[18] = 0;
156 fis[19] = 0;
157 }
158
159 /**
160 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
161 * @fis: Buffer from which data will be input
162 * @tf: Taskfile to output
163 *
164 * Converts a serial ATA FIS structure to a standard ATA taskfile.
165 *
166 * LOCKING:
167 * Inherited from caller.
168 */
169
170 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
171 {
172 tf->command = fis[2]; /* status */
173 tf->feature = fis[3]; /* error */
174
175 tf->lbal = fis[4];
176 tf->lbam = fis[5];
177 tf->lbah = fis[6];
178 tf->device = fis[7];
179
180 tf->hob_lbal = fis[8];
181 tf->hob_lbam = fis[9];
182 tf->hob_lbah = fis[10];
183
184 tf->nsect = fis[12];
185 tf->hob_nsect = fis[13];
186 }
187
188 static const u8 ata_rw_cmds[] = {
189 /* pio multi */
190 ATA_CMD_READ_MULTI,
191 ATA_CMD_WRITE_MULTI,
192 ATA_CMD_READ_MULTI_EXT,
193 ATA_CMD_WRITE_MULTI_EXT,
194 0,
195 0,
196 0,
197 ATA_CMD_WRITE_MULTI_FUA_EXT,
198 /* pio */
199 ATA_CMD_PIO_READ,
200 ATA_CMD_PIO_WRITE,
201 ATA_CMD_PIO_READ_EXT,
202 ATA_CMD_PIO_WRITE_EXT,
203 0,
204 0,
205 0,
206 0,
207 /* dma */
208 ATA_CMD_READ,
209 ATA_CMD_WRITE,
210 ATA_CMD_READ_EXT,
211 ATA_CMD_WRITE_EXT,
212 0,
213 0,
214 0,
215 ATA_CMD_WRITE_FUA_EXT
216 };
217
218 /**
219 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
220 * @tf: command to examine and configure
221 * @dev: device tf belongs to
222 *
223 * Examine the device configuration and tf->flags to calculate
224 * the proper read/write commands and protocol to use.
225 *
226 * LOCKING:
227 * caller.
228 */
229 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
230 {
231 u8 cmd;
232
233 int index, fua, lba48, write;
234
235 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
236 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
237 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
238
239 if (dev->flags & ATA_DFLAG_PIO) {
240 tf->protocol = ATA_PROT_PIO;
241 index = dev->multi_count ? 0 : 8;
242 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
243 /* Unable to use DMA due to host limitation */
244 tf->protocol = ATA_PROT_PIO;
245 index = dev->multi_count ? 0 : 8;
246 } else {
247 tf->protocol = ATA_PROT_DMA;
248 index = 16;
249 }
250
251 cmd = ata_rw_cmds[index + fua + lba48 + write];
252 if (cmd) {
253 tf->command = cmd;
254 return 0;
255 }
256 return -1;
257 }
258
259 /**
260 * ata_tf_read_block - Read block address from ATA taskfile
261 * @tf: ATA taskfile of interest
262 * @dev: ATA device @tf belongs to
263 *
264 * LOCKING:
265 * None.
266 *
267 * Read block address from @tf. This function can handle all
268 * three address formats - LBA, LBA48 and CHS. tf->protocol and
269 * flags select the address format to use.
270 *
271 * RETURNS:
272 * Block address read from @tf.
273 */
274 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
275 {
276 u64 block = 0;
277
278 if (tf->flags & ATA_TFLAG_LBA) {
279 if (tf->flags & ATA_TFLAG_LBA48) {
280 block |= (u64)tf->hob_lbah << 40;
281 block |= (u64)tf->hob_lbam << 32;
282 block |= tf->hob_lbal << 24;
283 } else
284 block |= (tf->device & 0xf) << 24;
285
286 block |= tf->lbah << 16;
287 block |= tf->lbam << 8;
288 block |= tf->lbal;
289 } else {
290 u32 cyl, head, sect;
291
292 cyl = tf->lbam | (tf->lbah << 8);
293 head = tf->device & 0xf;
294 sect = tf->lbal;
295
296 block = (cyl * dev->heads + head) * dev->sectors + sect;
297 }
298
299 return block;
300 }
301
302 /**
303 * ata_build_rw_tf - Build ATA taskfile for given read/write request
304 * @tf: Target ATA taskfile
305 * @dev: ATA device @tf belongs to
306 * @block: Block address
307 * @n_block: Number of blocks
308 * @tf_flags: RW/FUA etc...
309 * @tag: tag
310 *
311 * LOCKING:
312 * None.
313 *
314 * Build ATA taskfile @tf for read/write request described by
315 * @block, @n_block, @tf_flags and @tag on @dev.
316 *
317 * RETURNS:
318 *
319 * 0 on success, -ERANGE if the request is too large for @dev,
320 * -EINVAL if the request is invalid.
321 */
322 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
323 u64 block, u32 n_block, unsigned int tf_flags,
324 unsigned int tag)
325 {
326 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
327 tf->flags |= tf_flags;
328
329 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
330 /* yay, NCQ */
331 if (!lba_48_ok(block, n_block))
332 return -ERANGE;
333
334 tf->protocol = ATA_PROT_NCQ;
335 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
336
337 if (tf->flags & ATA_TFLAG_WRITE)
338 tf->command = ATA_CMD_FPDMA_WRITE;
339 else
340 tf->command = ATA_CMD_FPDMA_READ;
341
342 tf->nsect = tag << 3;
343 tf->hob_feature = (n_block >> 8) & 0xff;
344 tf->feature = n_block & 0xff;
345
346 tf->hob_lbah = (block >> 40) & 0xff;
347 tf->hob_lbam = (block >> 32) & 0xff;
348 tf->hob_lbal = (block >> 24) & 0xff;
349 tf->lbah = (block >> 16) & 0xff;
350 tf->lbam = (block >> 8) & 0xff;
351 tf->lbal = block & 0xff;
352
353 tf->device = 1 << 6;
354 if (tf->flags & ATA_TFLAG_FUA)
355 tf->device |= 1 << 7;
356 } else if (dev->flags & ATA_DFLAG_LBA) {
357 tf->flags |= ATA_TFLAG_LBA;
358
359 if (lba_28_ok(block, n_block)) {
360 /* use LBA28 */
361 tf->device |= (block >> 24) & 0xf;
362 } else if (lba_48_ok(block, n_block)) {
363 if (!(dev->flags & ATA_DFLAG_LBA48))
364 return -ERANGE;
365
366 /* use LBA48 */
367 tf->flags |= ATA_TFLAG_LBA48;
368
369 tf->hob_nsect = (n_block >> 8) & 0xff;
370
371 tf->hob_lbah = (block >> 40) & 0xff;
372 tf->hob_lbam = (block >> 32) & 0xff;
373 tf->hob_lbal = (block >> 24) & 0xff;
374 } else
375 /* request too large even for LBA48 */
376 return -ERANGE;
377
378 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
379 return -EINVAL;
380
381 tf->nsect = n_block & 0xff;
382
383 tf->lbah = (block >> 16) & 0xff;
384 tf->lbam = (block >> 8) & 0xff;
385 tf->lbal = block & 0xff;
386
387 tf->device |= ATA_LBA;
388 } else {
389 /* CHS */
390 u32 sect, head, cyl, track;
391
392 /* The request -may- be too large for CHS addressing. */
393 if (!lba_28_ok(block, n_block))
394 return -ERANGE;
395
396 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
397 return -EINVAL;
398
399 /* Convert LBA to CHS */
400 track = (u32)block / dev->sectors;
401 cyl = track / dev->heads;
402 head = track % dev->heads;
403 sect = (u32)block % dev->sectors + 1;
404
405 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
406 (u32)block, track, cyl, head, sect);
407
408 /* Check whether the converted CHS can fit.
409 Cylinder: 0-65535
410 Head: 0-15
411 Sector: 1-255*/
412 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
413 return -ERANGE;
414
415 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
416 tf->lbal = sect;
417 tf->lbam = cyl;
418 tf->lbah = cyl >> 8;
419 tf->device |= head;
420 }
421
422 return 0;
423 }
424
425 /**
426 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
427 * @pio_mask: pio_mask
428 * @mwdma_mask: mwdma_mask
429 * @udma_mask: udma_mask
430 *
431 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
432 * unsigned int xfer_mask.
433 *
434 * LOCKING:
435 * None.
436 *
437 * RETURNS:
438 * Packed xfer_mask.
439 */
440 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
441 unsigned int mwdma_mask,
442 unsigned int udma_mask)
443 {
444 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
445 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
446 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
447 }
448
449 /**
450 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
451 * @xfer_mask: xfer_mask to unpack
452 * @pio_mask: resulting pio_mask
453 * @mwdma_mask: resulting mwdma_mask
454 * @udma_mask: resulting udma_mask
455 *
456 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
457 * Any NULL distination masks will be ignored.
458 */
459 static void ata_unpack_xfermask(unsigned int xfer_mask,
460 unsigned int *pio_mask,
461 unsigned int *mwdma_mask,
462 unsigned int *udma_mask)
463 {
464 if (pio_mask)
465 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
466 if (mwdma_mask)
467 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
468 if (udma_mask)
469 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
470 }
471
472 static const struct ata_xfer_ent {
473 int shift, bits;
474 u8 base;
475 } ata_xfer_tbl[] = {
476 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
477 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
478 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
479 { -1, },
480 };
481
482 /**
483 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
484 * @xfer_mask: xfer_mask of interest
485 *
486 * Return matching XFER_* value for @xfer_mask. Only the highest
487 * bit of @xfer_mask is considered.
488 *
489 * LOCKING:
490 * None.
491 *
492 * RETURNS:
493 * Matching XFER_* value, 0 if no match found.
494 */
495 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
496 {
497 int highbit = fls(xfer_mask) - 1;
498 const struct ata_xfer_ent *ent;
499
500 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
501 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
502 return ent->base + highbit - ent->shift;
503 return 0;
504 }
505
506 /**
507 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
508 * @xfer_mode: XFER_* of interest
509 *
510 * Return matching xfer_mask for @xfer_mode.
511 *
512 * LOCKING:
513 * None.
514 *
515 * RETURNS:
516 * Matching xfer_mask, 0 if no match found.
517 */
518 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
519 {
520 const struct ata_xfer_ent *ent;
521
522 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
523 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
524 return 1 << (ent->shift + xfer_mode - ent->base);
525 return 0;
526 }
527
528 /**
529 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
530 * @xfer_mode: XFER_* of interest
531 *
532 * Return matching xfer_shift for @xfer_mode.
533 *
534 * LOCKING:
535 * None.
536 *
537 * RETURNS:
538 * Matching xfer_shift, -1 if no match found.
539 */
540 static int ata_xfer_mode2shift(unsigned int xfer_mode)
541 {
542 const struct ata_xfer_ent *ent;
543
544 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
545 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
546 return ent->shift;
547 return -1;
548 }
549
550 /**
551 * ata_mode_string - convert xfer_mask to string
552 * @xfer_mask: mask of bits supported; only highest bit counts.
553 *
554 * Determine string which represents the highest speed
555 * (highest bit in @modemask).
556 *
557 * LOCKING:
558 * None.
559 *
560 * RETURNS:
561 * Constant C string representing highest speed listed in
562 * @mode_mask, or the constant C string "<n/a>".
563 */
564 static const char *ata_mode_string(unsigned int xfer_mask)
565 {
566 static const char * const xfer_mode_str[] = {
567 "PIO0",
568 "PIO1",
569 "PIO2",
570 "PIO3",
571 "PIO4",
572 "PIO5",
573 "PIO6",
574 "MWDMA0",
575 "MWDMA1",
576 "MWDMA2",
577 "MWDMA3",
578 "MWDMA4",
579 "UDMA/16",
580 "UDMA/25",
581 "UDMA/33",
582 "UDMA/44",
583 "UDMA/66",
584 "UDMA/100",
585 "UDMA/133",
586 "UDMA7",
587 };
588 int highbit;
589
590 highbit = fls(xfer_mask) - 1;
591 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
592 return xfer_mode_str[highbit];
593 return "<n/a>";
594 }
595
596 static const char *sata_spd_string(unsigned int spd)
597 {
598 static const char * const spd_str[] = {
599 "1.5 Gbps",
600 "3.0 Gbps",
601 };
602
603 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
604 return "<unknown>";
605 return spd_str[spd - 1];
606 }
607
608 void ata_dev_disable(struct ata_device *dev)
609 {
610 if (ata_dev_enabled(dev)) {
611 if (ata_msg_drv(dev->link->ap))
612 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
613 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
614 ATA_DNXFER_QUIET);
615 dev->class++;
616 }
617 }
618
619 /**
620 * ata_devchk - PATA device presence detection
621 * @ap: ATA channel to examine
622 * @device: Device to examine (starting at zero)
623 *
624 * This technique was originally described in
625 * Hale Landis's ATADRVR (www.ata-atapi.com), and
626 * later found its way into the ATA/ATAPI spec.
627 *
628 * Write a pattern to the ATA shadow registers,
629 * and if a device is present, it will respond by
630 * correctly storing and echoing back the
631 * ATA shadow register contents.
632 *
633 * LOCKING:
634 * caller.
635 */
636
637 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
638 {
639 struct ata_ioports *ioaddr = &ap->ioaddr;
640 u8 nsect, lbal;
641
642 ap->ops->dev_select(ap, device);
643
644 iowrite8(0x55, ioaddr->nsect_addr);
645 iowrite8(0xaa, ioaddr->lbal_addr);
646
647 iowrite8(0xaa, ioaddr->nsect_addr);
648 iowrite8(0x55, ioaddr->lbal_addr);
649
650 iowrite8(0x55, ioaddr->nsect_addr);
651 iowrite8(0xaa, ioaddr->lbal_addr);
652
653 nsect = ioread8(ioaddr->nsect_addr);
654 lbal = ioread8(ioaddr->lbal_addr);
655
656 if ((nsect == 0x55) && (lbal == 0xaa))
657 return 1; /* we found a device */
658
659 return 0; /* nothing found */
660 }
661
662 /**
663 * ata_dev_classify - determine device type based on ATA-spec signature
664 * @tf: ATA taskfile register set for device to be identified
665 *
666 * Determine from taskfile register contents whether a device is
667 * ATA or ATAPI, as per "Signature and persistence" section
668 * of ATA/PI spec (volume 1, sect 5.14).
669 *
670 * LOCKING:
671 * None.
672 *
673 * RETURNS:
674 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
675 * the event of failure.
676 */
677
678 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
679 {
680 /* Apple's open source Darwin code hints that some devices only
681 * put a proper signature into the LBA mid/high registers,
682 * So, we only check those. It's sufficient for uniqueness.
683 */
684
685 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
686 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
687 DPRINTK("found ATA device by sig\n");
688 return ATA_DEV_ATA;
689 }
690
691 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
692 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
693 DPRINTK("found ATAPI device by sig\n");
694 return ATA_DEV_ATAPI;
695 }
696
697 DPRINTK("unknown device\n");
698 return ATA_DEV_UNKNOWN;
699 }
700
701 /**
702 * ata_dev_try_classify - Parse returned ATA device signature
703 * @ap: ATA channel to examine
704 * @device: Device to examine (starting at zero)
705 * @r_err: Value of error register on completion
706 *
707 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
708 * an ATA/ATAPI-defined set of values is placed in the ATA
709 * shadow registers, indicating the results of device detection
710 * and diagnostics.
711 *
712 * Select the ATA device, and read the values from the ATA shadow
713 * registers. Then parse according to the Error register value,
714 * and the spec-defined values examined by ata_dev_classify().
715 *
716 * LOCKING:
717 * caller.
718 *
719 * RETURNS:
720 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
721 */
722
723 unsigned int
724 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
725 {
726 struct ata_taskfile tf;
727 unsigned int class;
728 u8 err;
729
730 ap->ops->dev_select(ap, device);
731
732 memset(&tf, 0, sizeof(tf));
733
734 ap->ops->tf_read(ap, &tf);
735 err = tf.feature;
736 if (r_err)
737 *r_err = err;
738
739 /* see if device passed diags: if master then continue and warn later */
740 if (err == 0 && device == 0)
741 /* diagnostic fail : do nothing _YET_ */
742 ap->link.device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
743 else if (err == 1)
744 /* do nothing */ ;
745 else if ((device == 0) && (err == 0x81))
746 /* do nothing */ ;
747 else
748 return ATA_DEV_NONE;
749
750 /* determine if device is ATA or ATAPI */
751 class = ata_dev_classify(&tf);
752
753 if (class == ATA_DEV_UNKNOWN)
754 return ATA_DEV_NONE;
755 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
756 return ATA_DEV_NONE;
757 return class;
758 }
759
760 /**
761 * ata_id_string - Convert IDENTIFY DEVICE page into string
762 * @id: IDENTIFY DEVICE results we will examine
763 * @s: string into which data is output
764 * @ofs: offset into identify device page
765 * @len: length of string to return. must be an even number.
766 *
767 * The strings in the IDENTIFY DEVICE page are broken up into
768 * 16-bit chunks. Run through the string, and output each
769 * 8-bit chunk linearly, regardless of platform.
770 *
771 * LOCKING:
772 * caller.
773 */
774
775 void ata_id_string(const u16 *id, unsigned char *s,
776 unsigned int ofs, unsigned int len)
777 {
778 unsigned int c;
779
780 while (len > 0) {
781 c = id[ofs] >> 8;
782 *s = c;
783 s++;
784
785 c = id[ofs] & 0xff;
786 *s = c;
787 s++;
788
789 ofs++;
790 len -= 2;
791 }
792 }
793
794 /**
795 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
796 * @id: IDENTIFY DEVICE results we will examine
797 * @s: string into which data is output
798 * @ofs: offset into identify device page
799 * @len: length of string to return. must be an odd number.
800 *
801 * This function is identical to ata_id_string except that it
802 * trims trailing spaces and terminates the resulting string with
803 * null. @len must be actual maximum length (even number) + 1.
804 *
805 * LOCKING:
806 * caller.
807 */
808 void ata_id_c_string(const u16 *id, unsigned char *s,
809 unsigned int ofs, unsigned int len)
810 {
811 unsigned char *p;
812
813 WARN_ON(!(len & 1));
814
815 ata_id_string(id, s, ofs, len - 1);
816
817 p = s + strnlen(s, len - 1);
818 while (p > s && p[-1] == ' ')
819 p--;
820 *p = '\0';
821 }
822
823 static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
824 {
825 u64 sectors = 0;
826
827 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
828 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
829 sectors |= (tf->hob_lbal & 0xff) << 24;
830 sectors |= (tf->lbah & 0xff) << 16;
831 sectors |= (tf->lbam & 0xff) << 8;
832 sectors |= (tf->lbal & 0xff);
833
834 return ++sectors;
835 }
836
837 static u64 ata_tf_to_lba(struct ata_taskfile *tf)
838 {
839 u64 sectors = 0;
840
841 sectors |= (tf->device & 0x0f) << 24;
842 sectors |= (tf->lbah & 0xff) << 16;
843 sectors |= (tf->lbam & 0xff) << 8;
844 sectors |= (tf->lbal & 0xff);
845
846 return ++sectors;
847 }
848
849 /**
850 * ata_read_native_max_address_ext - LBA48 native max query
851 * @dev: Device to query
852 *
853 * Perform an LBA48 size query upon the device in question. Return the
854 * actual LBA48 size or zero if the command fails.
855 */
856
857 static u64 ata_read_native_max_address_ext(struct ata_device *dev)
858 {
859 unsigned int err;
860 struct ata_taskfile tf;
861
862 ata_tf_init(dev, &tf);
863
864 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
865 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
866 tf.protocol |= ATA_PROT_NODATA;
867 tf.device |= 0x40;
868
869 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
870 if (err)
871 return 0;
872
873 return ata_tf_to_lba48(&tf);
874 }
875
876 /**
877 * ata_read_native_max_address - LBA28 native max query
878 * @dev: Device to query
879 *
880 * Performa an LBA28 size query upon the device in question. Return the
881 * actual LBA28 size or zero if the command fails.
882 */
883
884 static u64 ata_read_native_max_address(struct ata_device *dev)
885 {
886 unsigned int err;
887 struct ata_taskfile tf;
888
889 ata_tf_init(dev, &tf);
890
891 tf.command = ATA_CMD_READ_NATIVE_MAX;
892 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
893 tf.protocol |= ATA_PROT_NODATA;
894 tf.device |= 0x40;
895
896 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
897 if (err)
898 return 0;
899
900 return ata_tf_to_lba(&tf);
901 }
902
903 /**
904 * ata_set_native_max_address_ext - LBA48 native max set
905 * @dev: Device to query
906 * @new_sectors: new max sectors value to set for the device
907 *
908 * Perform an LBA48 size set max upon the device in question. Return the
909 * actual LBA48 size or zero if the command fails.
910 */
911
912 static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
913 {
914 unsigned int err;
915 struct ata_taskfile tf;
916
917 new_sectors--;
918
919 ata_tf_init(dev, &tf);
920
921 tf.command = ATA_CMD_SET_MAX_EXT;
922 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
923 tf.protocol |= ATA_PROT_NODATA;
924 tf.device |= 0x40;
925
926 tf.lbal = (new_sectors >> 0) & 0xff;
927 tf.lbam = (new_sectors >> 8) & 0xff;
928 tf.lbah = (new_sectors >> 16) & 0xff;
929
930 tf.hob_lbal = (new_sectors >> 24) & 0xff;
931 tf.hob_lbam = (new_sectors >> 32) & 0xff;
932 tf.hob_lbah = (new_sectors >> 40) & 0xff;
933
934 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
935 if (err)
936 return 0;
937
938 return ata_tf_to_lba48(&tf);
939 }
940
941 /**
942 * ata_set_native_max_address - LBA28 native max set
943 * @dev: Device to query
944 * @new_sectors: new max sectors value to set for the device
945 *
946 * Perform an LBA28 size set max upon the device in question. Return the
947 * actual LBA28 size or zero if the command fails.
948 */
949
950 static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
951 {
952 unsigned int err;
953 struct ata_taskfile tf;
954
955 new_sectors--;
956
957 ata_tf_init(dev, &tf);
958
959 tf.command = ATA_CMD_SET_MAX;
960 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
961 tf.protocol |= ATA_PROT_NODATA;
962
963 tf.lbal = (new_sectors >> 0) & 0xff;
964 tf.lbam = (new_sectors >> 8) & 0xff;
965 tf.lbah = (new_sectors >> 16) & 0xff;
966 tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;
967
968 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
969 if (err)
970 return 0;
971
972 return ata_tf_to_lba(&tf);
973 }
974
975 /**
976 * ata_hpa_resize - Resize a device with an HPA set
977 * @dev: Device to resize
978 *
979 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
980 * it if required to the full size of the media. The caller must check
981 * the drive has the HPA feature set enabled.
982 */
983
984 static u64 ata_hpa_resize(struct ata_device *dev)
985 {
986 u64 sectors = dev->n_sectors;
987 u64 hpa_sectors;
988
989 if (ata_id_has_lba48(dev->id))
990 hpa_sectors = ata_read_native_max_address_ext(dev);
991 else
992 hpa_sectors = ata_read_native_max_address(dev);
993
994 if (hpa_sectors > sectors) {
995 ata_dev_printk(dev, KERN_INFO,
996 "Host Protected Area detected:\n"
997 "\tcurrent size: %lld sectors\n"
998 "\tnative size: %lld sectors\n",
999 (long long)sectors, (long long)hpa_sectors);
1000
1001 if (ata_ignore_hpa) {
1002 if (ata_id_has_lba48(dev->id))
1003 hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
1004 else
1005 hpa_sectors = ata_set_native_max_address(dev,
1006 hpa_sectors);
1007
1008 if (hpa_sectors) {
1009 ata_dev_printk(dev, KERN_INFO, "native size "
1010 "increased to %lld sectors\n",
1011 (long long)hpa_sectors);
1012 return hpa_sectors;
1013 }
1014 }
1015 } else if (hpa_sectors < sectors)
1016 ata_dev_printk(dev, KERN_WARNING, "%s 1: hpa sectors (%lld) "
1017 "is smaller than sectors (%lld)\n", __FUNCTION__,
1018 (long long)hpa_sectors, (long long)sectors);
1019
1020 return sectors;
1021 }
1022
1023 static u64 ata_id_n_sectors(const u16 *id)
1024 {
1025 if (ata_id_has_lba(id)) {
1026 if (ata_id_has_lba48(id))
1027 return ata_id_u64(id, 100);
1028 else
1029 return ata_id_u32(id, 60);
1030 } else {
1031 if (ata_id_current_chs_valid(id))
1032 return ata_id_u32(id, 57);
1033 else
1034 return id[1] * id[3] * id[6];
1035 }
1036 }
1037
1038 /**
1039 * ata_id_to_dma_mode - Identify DMA mode from id block
1040 * @dev: device to identify
1041 * @unknown: mode to assume if we cannot tell
1042 *
1043 * Set up the timing values for the device based upon the identify
1044 * reported values for the DMA mode. This function is used by drivers
1045 * which rely upon firmware configured modes, but wish to report the
1046 * mode correctly when possible.
1047 *
1048 * In addition we emit similarly formatted messages to the default
1049 * ata_dev_set_mode handler, in order to provide consistency of
1050 * presentation.
1051 */
1052
1053 void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1054 {
1055 unsigned int mask;
1056 u8 mode;
1057
1058 /* Pack the DMA modes */
1059 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1060 if (dev->id[53] & 0x04)
1061 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1062
1063 /* Select the mode in use */
1064 mode = ata_xfer_mask2mode(mask);
1065
1066 if (mode != 0) {
1067 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1068 ata_mode_string(mask));
1069 } else {
1070 /* SWDMA perhaps ? */
1071 mode = unknown;
1072 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1073 }
1074
1075 /* Configure the device reporting */
1076 dev->xfer_mode = mode;
1077 dev->xfer_shift = ata_xfer_mode2shift(mode);
1078 }
1079
1080 /**
1081 * ata_noop_dev_select - Select device 0/1 on ATA bus
1082 * @ap: ATA channel to manipulate
1083 * @device: ATA device (numbered from zero) to select
1084 *
1085 * This function performs no actual function.
1086 *
1087 * May be used as the dev_select() entry in ata_port_operations.
1088 *
1089 * LOCKING:
1090 * caller.
1091 */
1092 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
1093 {
1094 }
1095
1096
1097 /**
1098 * ata_std_dev_select - Select device 0/1 on ATA bus
1099 * @ap: ATA channel to manipulate
1100 * @device: ATA device (numbered from zero) to select
1101 *
1102 * Use the method defined in the ATA specification to
1103 * make either device 0, or device 1, active on the
1104 * ATA channel. Works with both PIO and MMIO.
1105 *
1106 * May be used as the dev_select() entry in ata_port_operations.
1107 *
1108 * LOCKING:
1109 * caller.
1110 */
1111
1112 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1113 {
1114 u8 tmp;
1115
1116 if (device == 0)
1117 tmp = ATA_DEVICE_OBS;
1118 else
1119 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1120
1121 iowrite8(tmp, ap->ioaddr.device_addr);
1122 ata_pause(ap); /* needed; also flushes, for mmio */
1123 }
1124
1125 /**
1126 * ata_dev_select - Select device 0/1 on ATA bus
1127 * @ap: ATA channel to manipulate
1128 * @device: ATA device (numbered from zero) to select
1129 * @wait: non-zero to wait for Status register BSY bit to clear
1130 * @can_sleep: non-zero if context allows sleeping
1131 *
1132 * Use the method defined in the ATA specification to
1133 * make either device 0, or device 1, active on the
1134 * ATA channel.
1135 *
1136 * This is a high-level version of ata_std_dev_select(),
1137 * which additionally provides the services of inserting
1138 * the proper pauses and status polling, where needed.
1139 *
1140 * LOCKING:
1141 * caller.
1142 */
1143
1144 void ata_dev_select(struct ata_port *ap, unsigned int device,
1145 unsigned int wait, unsigned int can_sleep)
1146 {
1147 if (ata_msg_probe(ap))
1148 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1149 "device %u, wait %u\n", device, wait);
1150
1151 if (wait)
1152 ata_wait_idle(ap);
1153
1154 ap->ops->dev_select(ap, device);
1155
1156 if (wait) {
1157 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1158 msleep(150);
1159 ata_wait_idle(ap);
1160 }
1161 }
1162
1163 /**
1164 * ata_dump_id - IDENTIFY DEVICE info debugging output
1165 * @id: IDENTIFY DEVICE page to dump
1166 *
1167 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1168 * page.
1169 *
1170 * LOCKING:
1171 * caller.
1172 */
1173
1174 static inline void ata_dump_id(const u16 *id)
1175 {
1176 DPRINTK("49==0x%04x "
1177 "53==0x%04x "
1178 "63==0x%04x "
1179 "64==0x%04x "
1180 "75==0x%04x \n",
1181 id[49],
1182 id[53],
1183 id[63],
1184 id[64],
1185 id[75]);
1186 DPRINTK("80==0x%04x "
1187 "81==0x%04x "
1188 "82==0x%04x "
1189 "83==0x%04x "
1190 "84==0x%04x \n",
1191 id[80],
1192 id[81],
1193 id[82],
1194 id[83],
1195 id[84]);
1196 DPRINTK("88==0x%04x "
1197 "93==0x%04x\n",
1198 id[88],
1199 id[93]);
1200 }
1201
1202 /**
1203 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1204 * @id: IDENTIFY data to compute xfer mask from
1205 *
1206 * Compute the xfermask for this device. This is not as trivial
1207 * as it seems if we must consider early devices correctly.
1208 *
1209 * FIXME: pre IDE drive timing (do we care ?).
1210 *
1211 * LOCKING:
1212 * None.
1213 *
1214 * RETURNS:
1215 * Computed xfermask
1216 */
1217 static unsigned int ata_id_xfermask(const u16 *id)
1218 {
1219 unsigned int pio_mask, mwdma_mask, udma_mask;
1220
1221 /* Usual case. Word 53 indicates word 64 is valid */
1222 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1223 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1224 pio_mask <<= 3;
1225 pio_mask |= 0x7;
1226 } else {
1227 /* If word 64 isn't valid then Word 51 high byte holds
1228 * the PIO timing number for the maximum. Turn it into
1229 * a mask.
1230 */
1231 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1232 if (mode < 5) /* Valid PIO range */
1233 pio_mask = (2 << mode) - 1;
1234 else
1235 pio_mask = 1;
1236
1237 /* But wait.. there's more. Design your standards by
1238 * committee and you too can get a free iordy field to
1239 * process. However its the speeds not the modes that
1240 * are supported... Note drivers using the timing API
1241 * will get this right anyway
1242 */
1243 }
1244
1245 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1246
1247 if (ata_id_is_cfa(id)) {
1248 /*
1249 * Process compact flash extended modes
1250 */
1251 int pio = id[163] & 0x7;
1252 int dma = (id[163] >> 3) & 7;
1253
1254 if (pio)
1255 pio_mask |= (1 << 5);
1256 if (pio > 1)
1257 pio_mask |= (1 << 6);
1258 if (dma)
1259 mwdma_mask |= (1 << 3);
1260 if (dma > 1)
1261 mwdma_mask |= (1 << 4);
1262 }
1263
1264 udma_mask = 0;
1265 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1266 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1267
1268 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1269 }
1270
1271 /**
1272 * ata_port_queue_task - Queue port_task
1273 * @ap: The ata_port to queue port_task for
1274 * @fn: workqueue function to be scheduled
1275 * @data: data for @fn to use
1276 * @delay: delay time for workqueue function
1277 *
1278 * Schedule @fn(@data) for execution after @delay jiffies using
1279 * port_task. There is one port_task per port and it's the
1280 * user(low level driver)'s responsibility to make sure that only
1281 * one task is active at any given time.
1282 *
1283 * libata core layer takes care of synchronization between
1284 * port_task and EH. ata_port_queue_task() may be ignored for EH
1285 * synchronization.
1286 *
1287 * LOCKING:
1288 * Inherited from caller.
1289 */
1290 void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1291 unsigned long delay)
1292 {
1293 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1294 ap->port_task_data = data;
1295
1296 /* may fail if ata_port_flush_task() in progress */
1297 queue_delayed_work(ata_wq, &ap->port_task, delay);
1298 }
1299
1300 /**
1301 * ata_port_flush_task - Flush port_task
1302 * @ap: The ata_port to flush port_task for
1303 *
1304 * After this function completes, port_task is guranteed not to
1305 * be running or scheduled.
1306 *
1307 * LOCKING:
1308 * Kernel thread context (may sleep)
1309 */
1310 void ata_port_flush_task(struct ata_port *ap)
1311 {
1312 DPRINTK("ENTER\n");
1313
1314 cancel_rearming_delayed_work(&ap->port_task);
1315
1316 if (ata_msg_ctl(ap))
1317 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1318 }
1319
1320 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1321 {
1322 struct completion *waiting = qc->private_data;
1323
1324 complete(waiting);
1325 }
1326
1327 /**
1328 * ata_exec_internal_sg - execute libata internal command
1329 * @dev: Device to which the command is sent
1330 * @tf: Taskfile registers for the command and the result
1331 * @cdb: CDB for packet command
1332 * @dma_dir: Data tranfer direction of the command
1333 * @sg: sg list for the data buffer of the command
1334 * @n_elem: Number of sg entries
1335 *
1336 * Executes libata internal command with timeout. @tf contains
1337 * command on entry and result on return. Timeout and error
1338 * conditions are reported via return value. No recovery action
1339 * is taken after a command times out. It's caller's duty to
1340 * clean up after timeout.
1341 *
1342 * LOCKING:
1343 * None. Should be called with kernel context, might sleep.
1344 *
1345 * RETURNS:
1346 * Zero on success, AC_ERR_* mask on failure
1347 */
1348 unsigned ata_exec_internal_sg(struct ata_device *dev,
1349 struct ata_taskfile *tf, const u8 *cdb,
1350 int dma_dir, struct scatterlist *sg,
1351 unsigned int n_elem)
1352 {
1353 struct ata_link *link = dev->link;
1354 struct ata_port *ap = link->ap;
1355 u8 command = tf->command;
1356 struct ata_queued_cmd *qc;
1357 unsigned int tag, preempted_tag;
1358 u32 preempted_sactive, preempted_qc_active;
1359 DECLARE_COMPLETION_ONSTACK(wait);
1360 unsigned long flags;
1361 unsigned int err_mask;
1362 int rc;
1363
1364 spin_lock_irqsave(ap->lock, flags);
1365
1366 /* no internal command while frozen */
1367 if (ap->pflags & ATA_PFLAG_FROZEN) {
1368 spin_unlock_irqrestore(ap->lock, flags);
1369 return AC_ERR_SYSTEM;
1370 }
1371
1372 /* initialize internal qc */
1373
1374 /* XXX: Tag 0 is used for drivers with legacy EH as some
1375 * drivers choke if any other tag is given. This breaks
1376 * ata_tag_internal() test for those drivers. Don't use new
1377 * EH stuff without converting to it.
1378 */
1379 if (ap->ops->error_handler)
1380 tag = ATA_TAG_INTERNAL;
1381 else
1382 tag = 0;
1383
1384 if (test_and_set_bit(tag, &ap->qc_allocated))
1385 BUG();
1386 qc = __ata_qc_from_tag(ap, tag);
1387
1388 qc->tag = tag;
1389 qc->scsicmd = NULL;
1390 qc->ap = ap;
1391 qc->dev = dev;
1392 ata_qc_reinit(qc);
1393
1394 preempted_tag = link->active_tag;
1395 preempted_sactive = link->sactive;
1396 preempted_qc_active = ap->qc_active;
1397 link->active_tag = ATA_TAG_POISON;
1398 link->sactive = 0;
1399 ap->qc_active = 0;
1400
1401 /* prepare & issue qc */
1402 qc->tf = *tf;
1403 if (cdb)
1404 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1405 qc->flags |= ATA_QCFLAG_RESULT_TF;
1406 qc->dma_dir = dma_dir;
1407 if (dma_dir != DMA_NONE) {
1408 unsigned int i, buflen = 0;
1409
1410 for (i = 0; i < n_elem; i++)
1411 buflen += sg[i].length;
1412
1413 ata_sg_init(qc, sg, n_elem);
1414 qc->nbytes = buflen;
1415 }
1416
1417 qc->private_data = &wait;
1418 qc->complete_fn = ata_qc_complete_internal;
1419
1420 ata_qc_issue(qc);
1421
1422 spin_unlock_irqrestore(ap->lock, flags);
1423
1424 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1425
1426 ata_port_flush_task(ap);
1427
1428 if (!rc) {
1429 spin_lock_irqsave(ap->lock, flags);
1430
1431 /* We're racing with irq here. If we lose, the
1432 * following test prevents us from completing the qc
1433 * twice. If we win, the port is frozen and will be
1434 * cleaned up by ->post_internal_cmd().
1435 */
1436 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1437 qc->err_mask |= AC_ERR_TIMEOUT;
1438
1439 if (ap->ops->error_handler)
1440 ata_port_freeze(ap);
1441 else
1442 ata_qc_complete(qc);
1443
1444 if (ata_msg_warn(ap))
1445 ata_dev_printk(dev, KERN_WARNING,
1446 "qc timeout (cmd 0x%x)\n", command);
1447 }
1448
1449 spin_unlock_irqrestore(ap->lock, flags);
1450 }
1451
1452 /* do post_internal_cmd */
1453 if (ap->ops->post_internal_cmd)
1454 ap->ops->post_internal_cmd(qc);
1455
1456 /* perform minimal error analysis */
1457 if (qc->flags & ATA_QCFLAG_FAILED) {
1458 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1459 qc->err_mask |= AC_ERR_DEV;
1460
1461 if (!qc->err_mask)
1462 qc->err_mask |= AC_ERR_OTHER;
1463
1464 if (qc->err_mask & ~AC_ERR_OTHER)
1465 qc->err_mask &= ~AC_ERR_OTHER;
1466 }
1467
1468 /* finish up */
1469 spin_lock_irqsave(ap->lock, flags);
1470
1471 *tf = qc->result_tf;
1472 err_mask = qc->err_mask;
1473
1474 ata_qc_free(qc);
1475 link->active_tag = preempted_tag;
1476 link->sactive = preempted_sactive;
1477 ap->qc_active = preempted_qc_active;
1478
1479 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1480 * Until those drivers are fixed, we detect the condition
1481 * here, fail the command with AC_ERR_SYSTEM and reenable the
1482 * port.
1483 *
1484 * Note that this doesn't change any behavior as internal
1485 * command failure results in disabling the device in the
1486 * higher layer for LLDDs without new reset/EH callbacks.
1487 *
1488 * Kill the following code as soon as those drivers are fixed.
1489 */
1490 if (ap->flags & ATA_FLAG_DISABLED) {
1491 err_mask |= AC_ERR_SYSTEM;
1492 ata_port_probe(ap);
1493 }
1494
1495 spin_unlock_irqrestore(ap->lock, flags);
1496
1497 return err_mask;
1498 }
1499
1500 /**
1501 * ata_exec_internal - execute libata internal command
1502 * @dev: Device to which the command is sent
1503 * @tf: Taskfile registers for the command and the result
1504 * @cdb: CDB for packet command
1505 * @dma_dir: Data tranfer direction of the command
1506 * @buf: Data buffer of the command
1507 * @buflen: Length of data buffer
1508 *
1509 * Wrapper around ata_exec_internal_sg() which takes simple
1510 * buffer instead of sg list.
1511 *
1512 * LOCKING:
1513 * None. Should be called with kernel context, might sleep.
1514 *
1515 * RETURNS:
1516 * Zero on success, AC_ERR_* mask on failure
1517 */
1518 unsigned ata_exec_internal(struct ata_device *dev,
1519 struct ata_taskfile *tf, const u8 *cdb,
1520 int dma_dir, void *buf, unsigned int buflen)
1521 {
1522 struct scatterlist *psg = NULL, sg;
1523 unsigned int n_elem = 0;
1524
1525 if (dma_dir != DMA_NONE) {
1526 WARN_ON(!buf);
1527 sg_init_one(&sg, buf, buflen);
1528 psg = &sg;
1529 n_elem++;
1530 }
1531
1532 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
1533 }
1534
1535 /**
1536 * ata_do_simple_cmd - execute simple internal command
1537 * @dev: Device to which the command is sent
1538 * @cmd: Opcode to execute
1539 *
1540 * Execute a 'simple' command, that only consists of the opcode
1541 * 'cmd' itself, without filling any other registers
1542 *
1543 * LOCKING:
1544 * Kernel thread context (may sleep).
1545 *
1546 * RETURNS:
1547 * Zero on success, AC_ERR_* mask on failure
1548 */
1549 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1550 {
1551 struct ata_taskfile tf;
1552
1553 ata_tf_init(dev, &tf);
1554
1555 tf.command = cmd;
1556 tf.flags |= ATA_TFLAG_DEVICE;
1557 tf.protocol = ATA_PROT_NODATA;
1558
1559 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1560 }
1561
1562 /**
1563 * ata_pio_need_iordy - check if iordy needed
1564 * @adev: ATA device
1565 *
1566 * Check if the current speed of the device requires IORDY. Used
1567 * by various controllers for chip configuration.
1568 */
1569
1570 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1571 {
1572 /* Controller doesn't support IORDY. Probably a pointless check
1573 as the caller should know this */
1574 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1575 return 0;
1576 /* PIO3 and higher it is mandatory */
1577 if (adev->pio_mode > XFER_PIO_2)
1578 return 1;
1579 /* We turn it on when possible */
1580 if (ata_id_has_iordy(adev->id))
1581 return 1;
1582 return 0;
1583 }
1584
1585 /**
1586 * ata_pio_mask_no_iordy - Return the non IORDY mask
1587 * @adev: ATA device
1588 *
1589 * Compute the highest mode possible if we are not using iordy. Return
1590 * -1 if no iordy mode is available.
1591 */
1592
1593 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1594 {
1595 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1596 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1597 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1598 /* Is the speed faster than the drive allows non IORDY ? */
1599 if (pio) {
1600 /* This is cycle times not frequency - watch the logic! */
1601 if (pio > 240) /* PIO2 is 240nS per cycle */
1602 return 3 << ATA_SHIFT_PIO;
1603 return 7 << ATA_SHIFT_PIO;
1604 }
1605 }
1606 return 3 << ATA_SHIFT_PIO;
1607 }
1608
1609 /**
1610 * ata_dev_read_id - Read ID data from the specified device
1611 * @dev: target device
1612 * @p_class: pointer to class of the target device (may be changed)
1613 * @flags: ATA_READID_* flags
1614 * @id: buffer to read IDENTIFY data into
1615 *
1616 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1617 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1618 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1619 * for pre-ATA4 drives.
1620 *
1621 * LOCKING:
1622 * Kernel thread context (may sleep)
1623 *
1624 * RETURNS:
1625 * 0 on success, -errno otherwise.
1626 */
1627 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1628 unsigned int flags, u16 *id)
1629 {
1630 struct ata_port *ap = dev->link->ap;
1631 unsigned int class = *p_class;
1632 struct ata_taskfile tf;
1633 unsigned int err_mask = 0;
1634 const char *reason;
1635 int may_fallback = 1, tried_spinup = 0;
1636 int rc;
1637
1638 if (ata_msg_ctl(ap))
1639 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1640
1641 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1642 retry:
1643 ata_tf_init(dev, &tf);
1644
1645 switch (class) {
1646 case ATA_DEV_ATA:
1647 tf.command = ATA_CMD_ID_ATA;
1648 break;
1649 case ATA_DEV_ATAPI:
1650 tf.command = ATA_CMD_ID_ATAPI;
1651 break;
1652 default:
1653 rc = -ENODEV;
1654 reason = "unsupported class";
1655 goto err_out;
1656 }
1657
1658 tf.protocol = ATA_PROT_PIO;
1659
1660 /* Some devices choke if TF registers contain garbage. Make
1661 * sure those are properly initialized.
1662 */
1663 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1664
1665 /* Device presence detection is unreliable on some
1666 * controllers. Always poll IDENTIFY if available.
1667 */
1668 tf.flags |= ATA_TFLAG_POLLING;
1669
1670 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1671 id, sizeof(id[0]) * ATA_ID_WORDS);
1672 if (err_mask) {
1673 if (err_mask & AC_ERR_NODEV_HINT) {
1674 DPRINTK("ata%u.%d: NODEV after polling detection\n",
1675 ap->print_id, dev->devno);
1676 return -ENOENT;
1677 }
1678
1679 /* Device or controller might have reported the wrong
1680 * device class. Give a shot at the other IDENTIFY if
1681 * the current one is aborted by the device.
1682 */
1683 if (may_fallback &&
1684 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1685 may_fallback = 0;
1686
1687 if (class == ATA_DEV_ATA)
1688 class = ATA_DEV_ATAPI;
1689 else
1690 class = ATA_DEV_ATA;
1691 goto retry;
1692 }
1693
1694 rc = -EIO;
1695 reason = "I/O error";
1696 goto err_out;
1697 }
1698
1699 /* Falling back doesn't make sense if ID data was read
1700 * successfully at least once.
1701 */
1702 may_fallback = 0;
1703
1704 swap_buf_le16(id, ATA_ID_WORDS);
1705
1706 /* sanity check */
1707 rc = -EINVAL;
1708 reason = "device reports invalid type";
1709
1710 if (class == ATA_DEV_ATA) {
1711 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1712 goto err_out;
1713 } else {
1714 if (ata_id_is_ata(id))
1715 goto err_out;
1716 }
1717
1718 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1719 tried_spinup = 1;
1720 /*
1721 * Drive powered-up in standby mode, and requires a specific
1722 * SET_FEATURES spin-up subcommand before it will accept
1723 * anything other than the original IDENTIFY command.
1724 */
1725 ata_tf_init(dev, &tf);
1726 tf.command = ATA_CMD_SET_FEATURES;
1727 tf.feature = SETFEATURES_SPINUP;
1728 tf.protocol = ATA_PROT_NODATA;
1729 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1730 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1731 if (err_mask && id[2] != 0x738c) {
1732 rc = -EIO;
1733 reason = "SPINUP failed";
1734 goto err_out;
1735 }
1736 /*
1737 * If the drive initially returned incomplete IDENTIFY info,
1738 * we now must reissue the IDENTIFY command.
1739 */
1740 if (id[2] == 0x37c8)
1741 goto retry;
1742 }
1743
1744 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1745 /*
1746 * The exact sequence expected by certain pre-ATA4 drives is:
1747 * SRST RESET
1748 * IDENTIFY
1749 * INITIALIZE DEVICE PARAMETERS
1750 * anything else..
1751 * Some drives were very specific about that exact sequence.
1752 */
1753 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1754 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1755 if (err_mask) {
1756 rc = -EIO;
1757 reason = "INIT_DEV_PARAMS failed";
1758 goto err_out;
1759 }
1760
1761 /* current CHS translation info (id[53-58]) might be
1762 * changed. reread the identify device info.
1763 */
1764 flags &= ~ATA_READID_POSTRESET;
1765 goto retry;
1766 }
1767 }
1768
1769 *p_class = class;
1770
1771 return 0;
1772
1773 err_out:
1774 if (ata_msg_warn(ap))
1775 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1776 "(%s, err_mask=0x%x)\n", reason, err_mask);
1777 return rc;
1778 }
1779
1780 static inline u8 ata_dev_knobble(struct ata_device *dev)
1781 {
1782 struct ata_port *ap = dev->link->ap;
1783 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1784 }
1785
1786 static void ata_dev_config_ncq(struct ata_device *dev,
1787 char *desc, size_t desc_sz)
1788 {
1789 struct ata_port *ap = dev->link->ap;
1790 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1791
1792 if (!ata_id_has_ncq(dev->id)) {
1793 desc[0] = '\0';
1794 return;
1795 }
1796 if (dev->horkage & ATA_HORKAGE_NONCQ) {
1797 snprintf(desc, desc_sz, "NCQ (not used)");
1798 return;
1799 }
1800 if (ap->flags & ATA_FLAG_NCQ) {
1801 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1802 dev->flags |= ATA_DFLAG_NCQ;
1803 }
1804
1805 if (hdepth >= ddepth)
1806 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1807 else
1808 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1809 }
1810
1811 /**
1812 * ata_dev_configure - Configure the specified ATA/ATAPI device
1813 * @dev: Target device to configure
1814 *
1815 * Configure @dev according to @dev->id. Generic and low-level
1816 * driver specific fixups are also applied.
1817 *
1818 * LOCKING:
1819 * Kernel thread context (may sleep)
1820 *
1821 * RETURNS:
1822 * 0 on success, -errno otherwise
1823 */
1824 int ata_dev_configure(struct ata_device *dev)
1825 {
1826 struct ata_port *ap = dev->link->ap;
1827 struct ata_eh_context *ehc = &dev->link->eh_context;
1828 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1829 const u16 *id = dev->id;
1830 unsigned int xfer_mask;
1831 char revbuf[7]; /* XYZ-99\0 */
1832 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1833 char modelbuf[ATA_ID_PROD_LEN+1];
1834 int rc;
1835
1836 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1837 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1838 __FUNCTION__);
1839 return 0;
1840 }
1841
1842 if (ata_msg_probe(ap))
1843 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1844
1845 /* set horkage */
1846 dev->horkage |= ata_dev_blacklisted(dev);
1847
1848 /* let ACPI work its magic */
1849 rc = ata_acpi_on_devcfg(dev);
1850 if (rc)
1851 return rc;
1852
1853 /* print device capabilities */
1854 if (ata_msg_probe(ap))
1855 ata_dev_printk(dev, KERN_DEBUG,
1856 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1857 "85:%04x 86:%04x 87:%04x 88:%04x\n",
1858 __FUNCTION__,
1859 id[49], id[82], id[83], id[84],
1860 id[85], id[86], id[87], id[88]);
1861
1862 /* initialize to-be-configured parameters */
1863 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1864 dev->max_sectors = 0;
1865 dev->cdb_len = 0;
1866 dev->n_sectors = 0;
1867 dev->cylinders = 0;
1868 dev->heads = 0;
1869 dev->sectors = 0;
1870
1871 /*
1872 * common ATA, ATAPI feature tests
1873 */
1874
1875 /* find max transfer mode; for printk only */
1876 xfer_mask = ata_id_xfermask(id);
1877
1878 if (ata_msg_probe(ap))
1879 ata_dump_id(id);
1880
1881 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1882 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1883 sizeof(fwrevbuf));
1884
1885 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1886 sizeof(modelbuf));
1887
1888 /* ATA-specific feature tests */
1889 if (dev->class == ATA_DEV_ATA) {
1890 if (ata_id_is_cfa(id)) {
1891 if (id[162] & 1) /* CPRM may make this media unusable */
1892 ata_dev_printk(dev, KERN_WARNING,
1893 "supports DRM functions and may "
1894 "not be fully accessable.\n");
1895 snprintf(revbuf, 7, "CFA");
1896 }
1897 else
1898 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1899
1900 dev->n_sectors = ata_id_n_sectors(id);
1901
1902 if (dev->id[59] & 0x100)
1903 dev->multi_count = dev->id[59] & 0xff;
1904
1905 if (ata_id_has_lba(id)) {
1906 const char *lba_desc;
1907 char ncq_desc[20];
1908
1909 lba_desc = "LBA";
1910 dev->flags |= ATA_DFLAG_LBA;
1911 if (ata_id_has_lba48(id)) {
1912 dev->flags |= ATA_DFLAG_LBA48;
1913 lba_desc = "LBA48";
1914
1915 if (dev->n_sectors >= (1UL << 28) &&
1916 ata_id_has_flush_ext(id))
1917 dev->flags |= ATA_DFLAG_FLUSH_EXT;
1918 }
1919
1920 if (!(dev->horkage & ATA_HORKAGE_BROKEN_HPA) &&
1921 ata_id_hpa_enabled(dev->id))
1922 dev->n_sectors = ata_hpa_resize(dev);
1923
1924 /* config NCQ */
1925 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1926
1927 /* print device info to dmesg */
1928 if (ata_msg_drv(ap) && print_info) {
1929 ata_dev_printk(dev, KERN_INFO,
1930 "%s: %s, %s, max %s\n",
1931 revbuf, modelbuf, fwrevbuf,
1932 ata_mode_string(xfer_mask));
1933 ata_dev_printk(dev, KERN_INFO,
1934 "%Lu sectors, multi %u: %s %s\n",
1935 (unsigned long long)dev->n_sectors,
1936 dev->multi_count, lba_desc, ncq_desc);
1937 }
1938 } else {
1939 /* CHS */
1940
1941 /* Default translation */
1942 dev->cylinders = id[1];
1943 dev->heads = id[3];
1944 dev->sectors = id[6];
1945
1946 if (ata_id_current_chs_valid(id)) {
1947 /* Current CHS translation is valid. */
1948 dev->cylinders = id[54];
1949 dev->heads = id[55];
1950 dev->sectors = id[56];
1951 }
1952
1953 /* print device info to dmesg */
1954 if (ata_msg_drv(ap) && print_info) {
1955 ata_dev_printk(dev, KERN_INFO,
1956 "%s: %s, %s, max %s\n",
1957 revbuf, modelbuf, fwrevbuf,
1958 ata_mode_string(xfer_mask));
1959 ata_dev_printk(dev, KERN_INFO,
1960 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
1961 (unsigned long long)dev->n_sectors,
1962 dev->multi_count, dev->cylinders,
1963 dev->heads, dev->sectors);
1964 }
1965 }
1966
1967 dev->cdb_len = 16;
1968 }
1969
1970 /* ATAPI-specific feature tests */
1971 else if (dev->class == ATA_DEV_ATAPI) {
1972 char *cdb_intr_string = "";
1973
1974 rc = atapi_cdb_len(id);
1975 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1976 if (ata_msg_warn(ap))
1977 ata_dev_printk(dev, KERN_WARNING,
1978 "unsupported CDB len\n");
1979 rc = -EINVAL;
1980 goto err_out_nosup;
1981 }
1982 dev->cdb_len = (unsigned int) rc;
1983
1984 if (ata_id_cdb_intr(dev->id)) {
1985 dev->flags |= ATA_DFLAG_CDB_INTR;
1986 cdb_intr_string = ", CDB intr";
1987 }
1988
1989 /* print device info to dmesg */
1990 if (ata_msg_drv(ap) && print_info)
1991 ata_dev_printk(dev, KERN_INFO,
1992 "ATAPI: %s, %s, max %s%s\n",
1993 modelbuf, fwrevbuf,
1994 ata_mode_string(xfer_mask),
1995 cdb_intr_string);
1996 }
1997
1998 /* determine max_sectors */
1999 dev->max_sectors = ATA_MAX_SECTORS;
2000 if (dev->flags & ATA_DFLAG_LBA48)
2001 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2002
2003 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2004 /* Let the user know. We don't want to disallow opens for
2005 rescue purposes, or in case the vendor is just a blithering
2006 idiot */
2007 if (print_info) {
2008 ata_dev_printk(dev, KERN_WARNING,
2009 "Drive reports diagnostics failure. This may indicate a drive\n");
2010 ata_dev_printk(dev, KERN_WARNING,
2011 "fault or invalid emulation. Contact drive vendor for information.\n");
2012 }
2013 }
2014
2015 /* limit bridge transfers to udma5, 200 sectors */
2016 if (ata_dev_knobble(dev)) {
2017 if (ata_msg_drv(ap) && print_info)
2018 ata_dev_printk(dev, KERN_INFO,
2019 "applying bridge limits\n");
2020 dev->udma_mask &= ATA_UDMA5;
2021 dev->max_sectors = ATA_MAX_SECTORS;
2022 }
2023
2024 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2025 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2026 dev->max_sectors);
2027
2028 if (ap->ops->dev_config)
2029 ap->ops->dev_config(dev);
2030
2031 if (ata_msg_probe(ap))
2032 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2033 __FUNCTION__, ata_chk_status(ap));
2034 return 0;
2035
2036 err_out_nosup:
2037 if (ata_msg_probe(ap))
2038 ata_dev_printk(dev, KERN_DEBUG,
2039 "%s: EXIT, err\n", __FUNCTION__);
2040 return rc;
2041 }
2042
2043 /**
2044 * ata_cable_40wire - return 40 wire cable type
2045 * @ap: port
2046 *
2047 * Helper method for drivers which want to hardwire 40 wire cable
2048 * detection.
2049 */
2050
2051 int ata_cable_40wire(struct ata_port *ap)
2052 {
2053 return ATA_CBL_PATA40;
2054 }
2055
2056 /**
2057 * ata_cable_80wire - return 80 wire cable type
2058 * @ap: port
2059 *
2060 * Helper method for drivers which want to hardwire 80 wire cable
2061 * detection.
2062 */
2063
2064 int ata_cable_80wire(struct ata_port *ap)
2065 {
2066 return ATA_CBL_PATA80;
2067 }
2068
2069 /**
2070 * ata_cable_unknown - return unknown PATA cable.
2071 * @ap: port
2072 *
2073 * Helper method for drivers which have no PATA cable detection.
2074 */
2075
2076 int ata_cable_unknown(struct ata_port *ap)
2077 {
2078 return ATA_CBL_PATA_UNK;
2079 }
2080
2081 /**
2082 * ata_cable_sata - return SATA cable type
2083 * @ap: port
2084 *
2085 * Helper method for drivers which have SATA cables
2086 */
2087
2088 int ata_cable_sata(struct ata_port *ap)
2089 {
2090 return ATA_CBL_SATA;
2091 }
2092
2093 /**
2094 * ata_bus_probe - Reset and probe ATA bus
2095 * @ap: Bus to probe
2096 *
2097 * Master ATA bus probing function. Initiates a hardware-dependent
2098 * bus reset, then attempts to identify any devices found on
2099 * the bus.
2100 *
2101 * LOCKING:
2102 * PCI/etc. bus probe sem.
2103 *
2104 * RETURNS:
2105 * Zero on success, negative errno otherwise.
2106 */
2107
2108 int ata_bus_probe(struct ata_port *ap)
2109 {
2110 unsigned int classes[ATA_MAX_DEVICES];
2111 int tries[ATA_MAX_DEVICES];
2112 int rc;
2113 struct ata_device *dev;
2114
2115 ata_port_probe(ap);
2116
2117 ata_link_for_each_dev(dev, &ap->link)
2118 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2119
2120 retry:
2121 /* reset and determine device classes */
2122 ap->ops->phy_reset(ap);
2123
2124 ata_link_for_each_dev(dev, &ap->link) {
2125 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2126 dev->class != ATA_DEV_UNKNOWN)
2127 classes[dev->devno] = dev->class;
2128 else
2129 classes[dev->devno] = ATA_DEV_NONE;
2130
2131 dev->class = ATA_DEV_UNKNOWN;
2132 }
2133
2134 ata_port_probe(ap);
2135
2136 /* after the reset the device state is PIO 0 and the controller
2137 state is undefined. Record the mode */
2138
2139 ata_link_for_each_dev(dev, &ap->link)
2140 dev->pio_mode = XFER_PIO_0;
2141
2142 /* read IDENTIFY page and configure devices. We have to do the identify
2143 specific sequence bass-ackwards so that PDIAG- is released by
2144 the slave device */
2145
2146 ata_link_for_each_dev(dev, &ap->link) {
2147 if (tries[dev->devno])
2148 dev->class = classes[dev->devno];
2149
2150 if (!ata_dev_enabled(dev))
2151 continue;
2152
2153 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2154 dev->id);
2155 if (rc)
2156 goto fail;
2157 }
2158
2159 /* Now ask for the cable type as PDIAG- should have been released */
2160 if (ap->ops->cable_detect)
2161 ap->cbl = ap->ops->cable_detect(ap);
2162
2163 /* After the identify sequence we can now set up the devices. We do
2164 this in the normal order so that the user doesn't get confused */
2165
2166 ata_link_for_each_dev(dev, &ap->link) {
2167 if (!ata_dev_enabled(dev))
2168 continue;
2169
2170 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2171 rc = ata_dev_configure(dev);
2172 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2173 if (rc)
2174 goto fail;
2175 }
2176
2177 /* configure transfer mode */
2178 rc = ata_set_mode(&ap->link, &dev);
2179 if (rc)
2180 goto fail;
2181
2182 ata_link_for_each_dev(dev, &ap->link)
2183 if (ata_dev_enabled(dev))
2184 return 0;
2185
2186 /* no device present, disable port */
2187 ata_port_disable(ap);
2188 ap->ops->port_disable(ap);
2189 return -ENODEV;
2190
2191 fail:
2192 tries[dev->devno]--;
2193
2194 switch (rc) {
2195 case -EINVAL:
2196 /* eeek, something went very wrong, give up */
2197 tries[dev->devno] = 0;
2198 break;
2199
2200 case -ENODEV:
2201 /* give it just one more chance */
2202 tries[dev->devno] = min(tries[dev->devno], 1);
2203 case -EIO:
2204 if (tries[dev->devno] == 1) {
2205 /* This is the last chance, better to slow
2206 * down than lose it.
2207 */
2208 sata_down_spd_limit(&ap->link);
2209 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2210 }
2211 }
2212
2213 if (!tries[dev->devno])
2214 ata_dev_disable(dev);
2215
2216 goto retry;
2217 }
2218
2219 /**
2220 * ata_port_probe - Mark port as enabled
2221 * @ap: Port for which we indicate enablement
2222 *
2223 * Modify @ap data structure such that the system
2224 * thinks that the entire port is enabled.
2225 *
2226 * LOCKING: host lock, or some other form of
2227 * serialization.
2228 */
2229
2230 void ata_port_probe(struct ata_port *ap)
2231 {
2232 ap->flags &= ~ATA_FLAG_DISABLED;
2233 }
2234
2235 /**
2236 * sata_print_link_status - Print SATA link status
2237 * @link: SATA link to printk link status about
2238 *
2239 * This function prints link speed and status of a SATA link.
2240 *
2241 * LOCKING:
2242 * None.
2243 */
2244 void sata_print_link_status(struct ata_link *link)
2245 {
2246 u32 sstatus, scontrol, tmp;
2247
2248 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2249 return;
2250 sata_scr_read(link, SCR_CONTROL, &scontrol);
2251
2252 if (ata_link_online(link)) {
2253 tmp = (sstatus >> 4) & 0xf;
2254 ata_link_printk(link, KERN_INFO,
2255 "SATA link up %s (SStatus %X SControl %X)\n",
2256 sata_spd_string(tmp), sstatus, scontrol);
2257 } else {
2258 ata_link_printk(link, KERN_INFO,
2259 "SATA link down (SStatus %X SControl %X)\n",
2260 sstatus, scontrol);
2261 }
2262 }
2263
2264 /**
2265 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2266 * @ap: SATA port associated with target SATA PHY.
2267 *
2268 * This function issues commands to standard SATA Sxxx
2269 * PHY registers, to wake up the phy (and device), and
2270 * clear any reset condition.
2271 *
2272 * LOCKING:
2273 * PCI/etc. bus probe sem.
2274 *
2275 */
2276 void __sata_phy_reset(struct ata_port *ap)
2277 {
2278 struct ata_link *link = &ap->link;
2279 unsigned long timeout = jiffies + (HZ * 5);
2280 u32 sstatus;
2281
2282 if (ap->flags & ATA_FLAG_SATA_RESET) {
2283 /* issue phy wake/reset */
2284 sata_scr_write_flush(link, SCR_CONTROL, 0x301);
2285 /* Couldn't find anything in SATA I/II specs, but
2286 * AHCI-1.1 10.4.2 says at least 1 ms. */
2287 mdelay(1);
2288 }
2289 /* phy wake/clear reset */
2290 sata_scr_write_flush(link, SCR_CONTROL, 0x300);
2291
2292 /* wait for phy to become ready, if necessary */
2293 do {
2294 msleep(200);
2295 sata_scr_read(link, SCR_STATUS, &sstatus);
2296 if ((sstatus & 0xf) != 1)
2297 break;
2298 } while (time_before(jiffies, timeout));
2299
2300 /* print link status */
2301 sata_print_link_status(link);
2302
2303 /* TODO: phy layer with polling, timeouts, etc. */
2304 if (!ata_link_offline(link))
2305 ata_port_probe(ap);
2306 else
2307 ata_port_disable(ap);
2308
2309 if (ap->flags & ATA_FLAG_DISABLED)
2310 return;
2311
2312 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2313 ata_port_disable(ap);
2314 return;
2315 }
2316
2317 ap->cbl = ATA_CBL_SATA;
2318 }
2319
2320 /**
2321 * sata_phy_reset - Reset SATA bus.
2322 * @ap: SATA port associated with target SATA PHY.
2323 *
2324 * This function resets the SATA bus, and then probes
2325 * the bus for devices.
2326 *
2327 * LOCKING:
2328 * PCI/etc. bus probe sem.
2329 *
2330 */
2331 void sata_phy_reset(struct ata_port *ap)
2332 {
2333 __sata_phy_reset(ap);
2334 if (ap->flags & ATA_FLAG_DISABLED)
2335 return;
2336 ata_bus_reset(ap);
2337 }
2338
2339 /**
2340 * ata_dev_pair - return other device on cable
2341 * @adev: device
2342 *
2343 * Obtain the other device on the same cable, or if none is
2344 * present NULL is returned
2345 */
2346
2347 struct ata_device *ata_dev_pair(struct ata_device *adev)
2348 {
2349 struct ata_link *link = adev->link;
2350 struct ata_device *pair = &link->device[1 - adev->devno];
2351 if (!ata_dev_enabled(pair))
2352 return NULL;
2353 return pair;
2354 }
2355
2356 /**
2357 * ata_port_disable - Disable port.
2358 * @ap: Port to be disabled.
2359 *
2360 * Modify @ap data structure such that the system
2361 * thinks that the entire port is disabled, and should
2362 * never attempt to probe or communicate with devices
2363 * on this port.
2364 *
2365 * LOCKING: host lock, or some other form of
2366 * serialization.
2367 */
2368
2369 void ata_port_disable(struct ata_port *ap)
2370 {
2371 ap->link.device[0].class = ATA_DEV_NONE;
2372 ap->link.device[1].class = ATA_DEV_NONE;
2373 ap->flags |= ATA_FLAG_DISABLED;
2374 }
2375
2376 /**
2377 * sata_down_spd_limit - adjust SATA spd limit downward
2378 * @link: Link to adjust SATA spd limit for
2379 *
2380 * Adjust SATA spd limit of @link downward. Note that this
2381 * function only adjusts the limit. The change must be applied
2382 * using sata_set_spd().
2383 *
2384 * LOCKING:
2385 * Inherited from caller.
2386 *
2387 * RETURNS:
2388 * 0 on success, negative errno on failure
2389 */
2390 int sata_down_spd_limit(struct ata_link *link)
2391 {
2392 u32 sstatus, spd, mask;
2393 int rc, highbit;
2394
2395 if (!sata_scr_valid(link))
2396 return -EOPNOTSUPP;
2397
2398 /* If SCR can be read, use it to determine the current SPD.
2399 * If not, use cached value in link->sata_spd.
2400 */
2401 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2402 if (rc == 0)
2403 spd = (sstatus >> 4) & 0xf;
2404 else
2405 spd = link->sata_spd;
2406
2407 mask = link->sata_spd_limit;
2408 if (mask <= 1)
2409 return -EINVAL;
2410
2411 /* unconditionally mask off the highest bit */
2412 highbit = fls(mask) - 1;
2413 mask &= ~(1 << highbit);
2414
2415 /* Mask off all speeds higher than or equal to the current
2416 * one. Force 1.5Gbps if current SPD is not available.
2417 */
2418 if (spd > 1)
2419 mask &= (1 << (spd - 1)) - 1;
2420 else
2421 mask &= 1;
2422
2423 /* were we already at the bottom? */
2424 if (!mask)
2425 return -EINVAL;
2426
2427 link->sata_spd_limit = mask;
2428
2429 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2430 sata_spd_string(fls(mask)));
2431
2432 return 0;
2433 }
2434
2435 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2436 {
2437 u32 spd, limit;
2438
2439 if (link->sata_spd_limit == UINT_MAX)
2440 limit = 0;
2441 else
2442 limit = fls(link->sata_spd_limit);
2443
2444 spd = (*scontrol >> 4) & 0xf;
2445 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2446
2447 return spd != limit;
2448 }
2449
2450 /**
2451 * sata_set_spd_needed - is SATA spd configuration needed
2452 * @link: Link in question
2453 *
2454 * Test whether the spd limit in SControl matches
2455 * @link->sata_spd_limit. This function is used to determine
2456 * whether hardreset is necessary to apply SATA spd
2457 * configuration.
2458 *
2459 * LOCKING:
2460 * Inherited from caller.
2461 *
2462 * RETURNS:
2463 * 1 if SATA spd configuration is needed, 0 otherwise.
2464 */
2465 int sata_set_spd_needed(struct ata_link *link)
2466 {
2467 u32 scontrol;
2468
2469 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2470 return 0;
2471
2472 return __sata_set_spd_needed(link, &scontrol);
2473 }
2474
2475 /**
2476 * sata_set_spd - set SATA spd according to spd limit
2477 * @link: Link to set SATA spd for
2478 *
2479 * Set SATA spd of @link according to sata_spd_limit.
2480 *
2481 * LOCKING:
2482 * Inherited from caller.
2483 *
2484 * RETURNS:
2485 * 0 if spd doesn't need to be changed, 1 if spd has been
2486 * changed. Negative errno if SCR registers are inaccessible.
2487 */
2488 int sata_set_spd(struct ata_link *link)
2489 {
2490 u32 scontrol;
2491 int rc;
2492
2493 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2494 return rc;
2495
2496 if (!__sata_set_spd_needed(link, &scontrol))
2497 return 0;
2498
2499 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2500 return rc;
2501
2502 return 1;
2503 }
2504
2505 /*
2506 * This mode timing computation functionality is ported over from
2507 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2508 */
2509 /*
2510 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2511 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2512 * for UDMA6, which is currently supported only by Maxtor drives.
2513 *
2514 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2515 */
2516
2517 static const struct ata_timing ata_timing[] = {
2518
2519 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2520 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2521 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2522 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2523
2524 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2525 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2526 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2527 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2528 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2529
2530 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2531
2532 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2533 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2534 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2535
2536 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2537 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2538 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2539
2540 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2541 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2542 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2543 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2544
2545 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2546 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2547 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2548
2549 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2550
2551 { 0xFF }
2552 };
2553
2554 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2555 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2556
2557 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2558 {
2559 q->setup = EZ(t->setup * 1000, T);
2560 q->act8b = EZ(t->act8b * 1000, T);
2561 q->rec8b = EZ(t->rec8b * 1000, T);
2562 q->cyc8b = EZ(t->cyc8b * 1000, T);
2563 q->active = EZ(t->active * 1000, T);
2564 q->recover = EZ(t->recover * 1000, T);
2565 q->cycle = EZ(t->cycle * 1000, T);
2566 q->udma = EZ(t->udma * 1000, UT);
2567 }
2568
2569 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2570 struct ata_timing *m, unsigned int what)
2571 {
2572 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2573 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2574 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2575 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2576 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2577 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2578 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2579 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2580 }
2581
2582 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2583 {
2584 const struct ata_timing *t;
2585
2586 for (t = ata_timing; t->mode != speed; t++)
2587 if (t->mode == 0xFF)
2588 return NULL;
2589 return t;
2590 }
2591
2592 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2593 struct ata_timing *t, int T, int UT)
2594 {
2595 const struct ata_timing *s;
2596 struct ata_timing p;
2597
2598 /*
2599 * Find the mode.
2600 */
2601
2602 if (!(s = ata_timing_find_mode(speed)))
2603 return -EINVAL;
2604
2605 memcpy(t, s, sizeof(*s));
2606
2607 /*
2608 * If the drive is an EIDE drive, it can tell us it needs extended
2609 * PIO/MW_DMA cycle timing.
2610 */
2611
2612 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2613 memset(&p, 0, sizeof(p));
2614 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2615 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2616 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2617 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2618 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2619 }
2620 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2621 }
2622
2623 /*
2624 * Convert the timing to bus clock counts.
2625 */
2626
2627 ata_timing_quantize(t, t, T, UT);
2628
2629 /*
2630 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2631 * S.M.A.R.T * and some other commands. We have to ensure that the
2632 * DMA cycle timing is slower/equal than the fastest PIO timing.
2633 */
2634
2635 if (speed > XFER_PIO_6) {
2636 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2637 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2638 }
2639
2640 /*
2641 * Lengthen active & recovery time so that cycle time is correct.
2642 */
2643
2644 if (t->act8b + t->rec8b < t->cyc8b) {
2645 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2646 t->rec8b = t->cyc8b - t->act8b;
2647 }
2648
2649 if (t->active + t->recover < t->cycle) {
2650 t->active += (t->cycle - (t->active + t->recover)) / 2;
2651 t->recover = t->cycle - t->active;
2652 }
2653
2654 /* In a few cases quantisation may produce enough errors to
2655 leave t->cycle too low for the sum of active and recovery
2656 if so we must correct this */
2657 if (t->active + t->recover > t->cycle)
2658 t->cycle = t->active + t->recover;
2659
2660 return 0;
2661 }
2662
2663 /**
2664 * ata_down_xfermask_limit - adjust dev xfer masks downward
2665 * @dev: Device to adjust xfer masks
2666 * @sel: ATA_DNXFER_* selector
2667 *
2668 * Adjust xfer masks of @dev downward. Note that this function
2669 * does not apply the change. Invoking ata_set_mode() afterwards
2670 * will apply the limit.
2671 *
2672 * LOCKING:
2673 * Inherited from caller.
2674 *
2675 * RETURNS:
2676 * 0 on success, negative errno on failure
2677 */
2678 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2679 {
2680 char buf[32];
2681 unsigned int orig_mask, xfer_mask;
2682 unsigned int pio_mask, mwdma_mask, udma_mask;
2683 int quiet, highbit;
2684
2685 quiet = !!(sel & ATA_DNXFER_QUIET);
2686 sel &= ~ATA_DNXFER_QUIET;
2687
2688 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2689 dev->mwdma_mask,
2690 dev->udma_mask);
2691 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2692
2693 switch (sel) {
2694 case ATA_DNXFER_PIO:
2695 highbit = fls(pio_mask) - 1;
2696 pio_mask &= ~(1 << highbit);
2697 break;
2698
2699 case ATA_DNXFER_DMA:
2700 if (udma_mask) {
2701 highbit = fls(udma_mask) - 1;
2702 udma_mask &= ~(1 << highbit);
2703 if (!udma_mask)
2704 return -ENOENT;
2705 } else if (mwdma_mask) {
2706 highbit = fls(mwdma_mask) - 1;
2707 mwdma_mask &= ~(1 << highbit);
2708 if (!mwdma_mask)
2709 return -ENOENT;
2710 }
2711 break;
2712
2713 case ATA_DNXFER_40C:
2714 udma_mask &= ATA_UDMA_MASK_40C;
2715 break;
2716
2717 case ATA_DNXFER_FORCE_PIO0:
2718 pio_mask &= 1;
2719 case ATA_DNXFER_FORCE_PIO:
2720 mwdma_mask = 0;
2721 udma_mask = 0;
2722 break;
2723
2724 default:
2725 BUG();
2726 }
2727
2728 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2729
2730 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2731 return -ENOENT;
2732
2733 if (!quiet) {
2734 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2735 snprintf(buf, sizeof(buf), "%s:%s",
2736 ata_mode_string(xfer_mask),
2737 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2738 else
2739 snprintf(buf, sizeof(buf), "%s",
2740 ata_mode_string(xfer_mask));
2741
2742 ata_dev_printk(dev, KERN_WARNING,
2743 "limiting speed to %s\n", buf);
2744 }
2745
2746 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2747 &dev->udma_mask);
2748
2749 return 0;
2750 }
2751
2752 static int ata_dev_set_mode(struct ata_device *dev)
2753 {
2754 struct ata_eh_context *ehc = &dev->link->eh_context;
2755 unsigned int err_mask;
2756 int rc;
2757
2758 dev->flags &= ~ATA_DFLAG_PIO;
2759 if (dev->xfer_shift == ATA_SHIFT_PIO)
2760 dev->flags |= ATA_DFLAG_PIO;
2761
2762 err_mask = ata_dev_set_xfermode(dev);
2763 /* Old CFA may refuse this command, which is just fine */
2764 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2765 err_mask &= ~AC_ERR_DEV;
2766 /* Some very old devices and some bad newer ones fail any kind of
2767 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
2768 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
2769 dev->pio_mode <= XFER_PIO_2)
2770 err_mask &= ~AC_ERR_DEV;
2771 if (err_mask) {
2772 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2773 "(err_mask=0x%x)\n", err_mask);
2774 return -EIO;
2775 }
2776
2777 ehc->i.flags |= ATA_EHI_POST_SETMODE;
2778 rc = ata_dev_revalidate(dev, 0);
2779 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2780 if (rc)
2781 return rc;
2782
2783 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2784 dev->xfer_shift, (int)dev->xfer_mode);
2785
2786 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2787 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2788 return 0;
2789 }
2790
2791 /**
2792 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
2793 * @link: link on which timings will be programmed
2794 * @r_failed_dev: out paramter for failed device
2795 *
2796 * Standard implementation of the function used to tune and set
2797 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2798 * ata_dev_set_mode() fails, pointer to the failing device is
2799 * returned in @r_failed_dev.
2800 *
2801 * LOCKING:
2802 * PCI/etc. bus probe sem.
2803 *
2804 * RETURNS:
2805 * 0 on success, negative errno otherwise
2806 */
2807
2808 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2809 {
2810 struct ata_port *ap = link->ap;
2811 struct ata_device *dev;
2812 int rc = 0, used_dma = 0, found = 0;
2813
2814 /* step 1: calculate xfer_mask */
2815 ata_link_for_each_dev(dev, link) {
2816 unsigned int pio_mask, dma_mask;
2817
2818 if (!ata_dev_enabled(dev))
2819 continue;
2820
2821 ata_dev_xfermask(dev);
2822
2823 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2824 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2825 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2826 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2827
2828 found = 1;
2829 if (dev->dma_mode)
2830 used_dma = 1;
2831 }
2832 if (!found)
2833 goto out;
2834
2835 /* step 2: always set host PIO timings */
2836 ata_link_for_each_dev(dev, link) {
2837 if (!ata_dev_enabled(dev))
2838 continue;
2839
2840 if (!dev->pio_mode) {
2841 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2842 rc = -EINVAL;
2843 goto out;
2844 }
2845
2846 dev->xfer_mode = dev->pio_mode;
2847 dev->xfer_shift = ATA_SHIFT_PIO;
2848 if (ap->ops->set_piomode)
2849 ap->ops->set_piomode(ap, dev);
2850 }
2851
2852 /* step 3: set host DMA timings */
2853 ata_link_for_each_dev(dev, link) {
2854 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2855 continue;
2856
2857 dev->xfer_mode = dev->dma_mode;
2858 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2859 if (ap->ops->set_dmamode)
2860 ap->ops->set_dmamode(ap, dev);
2861 }
2862
2863 /* step 4: update devices' xfer mode */
2864 ata_link_for_each_dev(dev, link) {
2865 /* don't update suspended devices' xfer mode */
2866 if (!ata_dev_enabled(dev))
2867 continue;
2868
2869 rc = ata_dev_set_mode(dev);
2870 if (rc)
2871 goto out;
2872 }
2873
2874 /* Record simplex status. If we selected DMA then the other
2875 * host channels are not permitted to do so.
2876 */
2877 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2878 ap->host->simplex_claimed = ap;
2879
2880 out:
2881 if (rc)
2882 *r_failed_dev = dev;
2883 return rc;
2884 }
2885
2886 /**
2887 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2888 * @link: link on which timings will be programmed
2889 * @r_failed_dev: out paramter for failed device
2890 *
2891 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2892 * ata_set_mode() fails, pointer to the failing device is
2893 * returned in @r_failed_dev.
2894 *
2895 * LOCKING:
2896 * PCI/etc. bus probe sem.
2897 *
2898 * RETURNS:
2899 * 0 on success, negative errno otherwise
2900 */
2901 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2902 {
2903 struct ata_port *ap = link->ap;
2904
2905 /* has private set_mode? */
2906 if (ap->ops->set_mode)
2907 return ap->ops->set_mode(link, r_failed_dev);
2908 return ata_do_set_mode(link, r_failed_dev);
2909 }
2910
2911 /**
2912 * ata_tf_to_host - issue ATA taskfile to host controller
2913 * @ap: port to which command is being issued
2914 * @tf: ATA taskfile register set
2915 *
2916 * Issues ATA taskfile register set to ATA host controller,
2917 * with proper synchronization with interrupt handler and
2918 * other threads.
2919 *
2920 * LOCKING:
2921 * spin_lock_irqsave(host lock)
2922 */
2923
2924 static inline void ata_tf_to_host(struct ata_port *ap,
2925 const struct ata_taskfile *tf)
2926 {
2927 ap->ops->tf_load(ap, tf);
2928 ap->ops->exec_command(ap, tf);
2929 }
2930
2931 /**
2932 * ata_busy_sleep - sleep until BSY clears, or timeout
2933 * @ap: port containing status register to be polled
2934 * @tmout_pat: impatience timeout
2935 * @tmout: overall timeout
2936 *
2937 * Sleep until ATA Status register bit BSY clears,
2938 * or a timeout occurs.
2939 *
2940 * LOCKING:
2941 * Kernel thread context (may sleep).
2942 *
2943 * RETURNS:
2944 * 0 on success, -errno otherwise.
2945 */
2946 int ata_busy_sleep(struct ata_port *ap,
2947 unsigned long tmout_pat, unsigned long tmout)
2948 {
2949 unsigned long timer_start, timeout;
2950 u8 status;
2951
2952 status = ata_busy_wait(ap, ATA_BUSY, 300);
2953 timer_start = jiffies;
2954 timeout = timer_start + tmout_pat;
2955 while (status != 0xff && (status & ATA_BUSY) &&
2956 time_before(jiffies, timeout)) {
2957 msleep(50);
2958 status = ata_busy_wait(ap, ATA_BUSY, 3);
2959 }
2960
2961 if (status != 0xff && (status & ATA_BUSY))
2962 ata_port_printk(ap, KERN_WARNING,
2963 "port is slow to respond, please be patient "
2964 "(Status 0x%x)\n", status);
2965
2966 timeout = timer_start + tmout;
2967 while (status != 0xff && (status & ATA_BUSY) &&
2968 time_before(jiffies, timeout)) {
2969 msleep(50);
2970 status = ata_chk_status(ap);
2971 }
2972
2973 if (status == 0xff)
2974 return -ENODEV;
2975
2976 if (status & ATA_BUSY) {
2977 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2978 "(%lu secs, Status 0x%x)\n",
2979 tmout / HZ, status);
2980 return -EBUSY;
2981 }
2982
2983 return 0;
2984 }
2985
2986 /**
2987 * ata_wait_ready - sleep until BSY clears, or timeout
2988 * @ap: port containing status register to be polled
2989 * @deadline: deadline jiffies for the operation
2990 *
2991 * Sleep until ATA Status register bit BSY clears, or timeout
2992 * occurs.
2993 *
2994 * LOCKING:
2995 * Kernel thread context (may sleep).
2996 *
2997 * RETURNS:
2998 * 0 on success, -errno otherwise.
2999 */
3000 int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3001 {
3002 unsigned long start = jiffies;
3003 int warned = 0;
3004
3005 while (1) {
3006 u8 status = ata_chk_status(ap);
3007 unsigned long now = jiffies;
3008
3009 if (!(status & ATA_BUSY))
3010 return 0;
3011 if (!ata_link_online(&ap->link) && status == 0xff)
3012 return -ENODEV;
3013 if (time_after(now, deadline))
3014 return -EBUSY;
3015
3016 if (!warned && time_after(now, start + 5 * HZ) &&
3017 (deadline - now > 3 * HZ)) {
3018 ata_port_printk(ap, KERN_WARNING,
3019 "port is slow to respond, please be patient "
3020 "(Status 0x%x)\n", status);
3021 warned = 1;
3022 }
3023
3024 msleep(50);
3025 }
3026 }
3027
3028 static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3029 unsigned long deadline)
3030 {
3031 struct ata_ioports *ioaddr = &ap->ioaddr;
3032 unsigned int dev0 = devmask & (1 << 0);
3033 unsigned int dev1 = devmask & (1 << 1);
3034 int rc, ret = 0;
3035
3036 /* if device 0 was found in ata_devchk, wait for its
3037 * BSY bit to clear
3038 */
3039 if (dev0) {
3040 rc = ata_wait_ready(ap, deadline);
3041 if (rc) {
3042 if (rc != -ENODEV)
3043 return rc;
3044 ret = rc;
3045 }
3046 }
3047
3048 /* if device 1 was found in ata_devchk, wait for register
3049 * access briefly, then wait for BSY to clear.
3050 */
3051 if (dev1) {
3052 int i;
3053
3054 ap->ops->dev_select(ap, 1);
3055
3056 /* Wait for register access. Some ATAPI devices fail
3057 * to set nsect/lbal after reset, so don't waste too
3058 * much time on it. We're gonna wait for !BSY anyway.
3059 */
3060 for (i = 0; i < 2; i++) {
3061 u8 nsect, lbal;
3062
3063 nsect = ioread8(ioaddr->nsect_addr);
3064 lbal = ioread8(ioaddr->lbal_addr);
3065 if ((nsect == 1) && (lbal == 1))
3066 break;
3067 msleep(50); /* give drive a breather */
3068 }
3069
3070 rc = ata_wait_ready(ap, deadline);
3071 if (rc) {
3072 if (rc != -ENODEV)
3073 return rc;
3074 ret = rc;
3075 }
3076 }
3077
3078 /* is all this really necessary? */
3079 ap->ops->dev_select(ap, 0);
3080 if (dev1)
3081 ap->ops->dev_select(ap, 1);
3082 if (dev0)
3083 ap->ops->dev_select(ap, 0);
3084
3085 return ret;
3086 }
3087
3088 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3089 unsigned long deadline)
3090 {
3091 struct ata_ioports *ioaddr = &ap->ioaddr;
3092
3093 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3094
3095 /* software reset. causes dev0 to be selected */
3096 iowrite8(ap->ctl, ioaddr->ctl_addr);
3097 udelay(20); /* FIXME: flush */
3098 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3099 udelay(20); /* FIXME: flush */
3100 iowrite8(ap->ctl, ioaddr->ctl_addr);
3101
3102 /* spec mandates ">= 2ms" before checking status.
3103 * We wait 150ms, because that was the magic delay used for
3104 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3105 * between when the ATA command register is written, and then
3106 * status is checked. Because waiting for "a while" before
3107 * checking status is fine, post SRST, we perform this magic
3108 * delay here as well.
3109 *
3110 * Old drivers/ide uses the 2mS rule and then waits for ready
3111 */
3112 msleep(150);
3113
3114 /* Before we perform post reset processing we want to see if
3115 * the bus shows 0xFF because the odd clown forgets the D7
3116 * pulldown resistor.
3117 */
3118 if (ata_check_status(ap) == 0xFF)
3119 return -ENODEV;
3120
3121 return ata_bus_post_reset(ap, devmask, deadline);
3122 }
3123
3124 /**
3125 * ata_bus_reset - reset host port and associated ATA channel
3126 * @ap: port to reset
3127 *
3128 * This is typically the first time we actually start issuing
3129 * commands to the ATA channel. We wait for BSY to clear, then
3130 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3131 * result. Determine what devices, if any, are on the channel
3132 * by looking at the device 0/1 error register. Look at the signature
3133 * stored in each device's taskfile registers, to determine if
3134 * the device is ATA or ATAPI.
3135 *
3136 * LOCKING:
3137 * PCI/etc. bus probe sem.
3138 * Obtains host lock.
3139 *
3140 * SIDE EFFECTS:
3141 * Sets ATA_FLAG_DISABLED if bus reset fails.
3142 */
3143
3144 void ata_bus_reset(struct ata_port *ap)
3145 {
3146 struct ata_device *device = ap->link.device;
3147 struct ata_ioports *ioaddr = &ap->ioaddr;
3148 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3149 u8 err;
3150 unsigned int dev0, dev1 = 0, devmask = 0;
3151 int rc;
3152
3153 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3154
3155 /* determine if device 0/1 are present */
3156 if (ap->flags & ATA_FLAG_SATA_RESET)
3157 dev0 = 1;
3158 else {
3159 dev0 = ata_devchk(ap, 0);
3160 if (slave_possible)
3161 dev1 = ata_devchk(ap, 1);
3162 }
3163
3164 if (dev0)
3165 devmask |= (1 << 0);
3166 if (dev1)
3167 devmask |= (1 << 1);
3168
3169 /* select device 0 again */
3170 ap->ops->dev_select(ap, 0);
3171
3172 /* issue bus reset */
3173 if (ap->flags & ATA_FLAG_SRST) {
3174 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3175 if (rc && rc != -ENODEV)
3176 goto err_out;
3177 }
3178
3179 /*
3180 * determine by signature whether we have ATA or ATAPI devices
3181 */
3182 device[0].class = ata_dev_try_classify(ap, 0, &err);
3183 if ((slave_possible) && (err != 0x81))
3184 device[1].class = ata_dev_try_classify(ap, 1, &err);
3185
3186 /* is double-select really necessary? */
3187 if (device[1].class != ATA_DEV_NONE)
3188 ap->ops->dev_select(ap, 1);
3189 if (device[0].class != ATA_DEV_NONE)
3190 ap->ops->dev_select(ap, 0);
3191
3192 /* if no devices were detected, disable this port */
3193 if ((device[0].class == ATA_DEV_NONE) &&
3194 (device[1].class == ATA_DEV_NONE))
3195 goto err_out;
3196
3197 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3198 /* set up device control for ATA_FLAG_SATA_RESET */
3199 iowrite8(ap->ctl, ioaddr->ctl_addr);
3200 }
3201
3202 DPRINTK("EXIT\n");
3203 return;
3204
3205 err_out:
3206 ata_port_printk(ap, KERN_ERR, "disabling port\n");
3207 ap->ops->port_disable(ap);
3208
3209 DPRINTK("EXIT\n");
3210 }
3211
3212 /**
3213 * sata_link_debounce - debounce SATA phy status
3214 * @link: ATA link to debounce SATA phy status for
3215 * @params: timing parameters { interval, duratinon, timeout } in msec
3216 * @deadline: deadline jiffies for the operation
3217 *
3218 * Make sure SStatus of @link reaches stable state, determined by
3219 * holding the same value where DET is not 1 for @duration polled
3220 * every @interval, before @timeout. Timeout constraints the
3221 * beginning of the stable state. Because DET gets stuck at 1 on
3222 * some controllers after hot unplugging, this functions waits
3223 * until timeout then returns 0 if DET is stable at 1.
3224 *
3225 * @timeout is further limited by @deadline. The sooner of the
3226 * two is used.
3227 *
3228 * LOCKING:
3229 * Kernel thread context (may sleep)
3230 *
3231 * RETURNS:
3232 * 0 on success, -errno on failure.
3233 */
3234 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3235 unsigned long deadline)
3236 {
3237 unsigned long interval_msec = params[0];
3238 unsigned long duration = msecs_to_jiffies(params[1]);
3239 unsigned long last_jiffies, t;
3240 u32 last, cur;
3241 int rc;
3242
3243 t = jiffies + msecs_to_jiffies(params[2]);
3244 if (time_before(t, deadline))
3245 deadline = t;
3246
3247 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3248 return rc;
3249 cur &= 0xf;
3250
3251 last = cur;
3252 last_jiffies = jiffies;
3253
3254 while (1) {
3255 msleep(interval_msec);
3256 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3257 return rc;
3258 cur &= 0xf;
3259
3260 /* DET stable? */
3261 if (cur == last) {
3262 if (cur == 1 && time_before(jiffies, deadline))
3263 continue;
3264 if (time_after(jiffies, last_jiffies + duration))
3265 return 0;
3266 continue;
3267 }
3268
3269 /* unstable, start over */
3270 last = cur;
3271 last_jiffies = jiffies;
3272
3273 /* Check deadline. If debouncing failed, return
3274 * -EPIPE to tell upper layer to lower link speed.
3275 */
3276 if (time_after(jiffies, deadline))
3277 return -EPIPE;
3278 }
3279 }
3280
3281 /**
3282 * sata_link_resume - resume SATA link
3283 * @link: ATA link to resume SATA
3284 * @params: timing parameters { interval, duratinon, timeout } in msec
3285 * @deadline: deadline jiffies for the operation
3286 *
3287 * Resume SATA phy @link and debounce it.
3288 *
3289 * LOCKING:
3290 * Kernel thread context (may sleep)
3291 *
3292 * RETURNS:
3293 * 0 on success, -errno on failure.
3294 */
3295 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3296 unsigned long deadline)
3297 {
3298 u32 scontrol;
3299 int rc;
3300
3301 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3302 return rc;
3303
3304 scontrol = (scontrol & 0x0f0) | 0x300;
3305
3306 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3307 return rc;
3308
3309 /* Some PHYs react badly if SStatus is pounded immediately
3310 * after resuming. Delay 200ms before debouncing.
3311 */
3312 msleep(200);
3313
3314 return sata_link_debounce(link, params, deadline);
3315 }
3316
3317 /**
3318 * ata_std_prereset - prepare for reset
3319 * @link: ATA link to be reset
3320 * @deadline: deadline jiffies for the operation
3321 *
3322 * @link is about to be reset. Initialize it. Failure from
3323 * prereset makes libata abort whole reset sequence and give up
3324 * that port, so prereset should be best-effort. It does its
3325 * best to prepare for reset sequence but if things go wrong, it
3326 * should just whine, not fail.
3327 *
3328 * LOCKING:
3329 * Kernel thread context (may sleep)
3330 *
3331 * RETURNS:
3332 * 0 on success, -errno otherwise.
3333 */
3334 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3335 {
3336 struct ata_port *ap = link->ap;
3337 struct ata_eh_context *ehc = &link->eh_context;
3338 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3339 int rc;
3340
3341 /* handle link resume */
3342 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3343 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3344 ehc->i.action |= ATA_EH_HARDRESET;
3345
3346 /* if we're about to do hardreset, nothing more to do */
3347 if (ehc->i.action & ATA_EH_HARDRESET)
3348 return 0;
3349
3350 /* if SATA, resume link */
3351 if (ap->flags & ATA_FLAG_SATA) {
3352 rc = sata_link_resume(link, timing, deadline);
3353 /* whine about phy resume failure but proceed */
3354 if (rc && rc != -EOPNOTSUPP)
3355 ata_link_printk(link, KERN_WARNING, "failed to resume "
3356 "link for reset (errno=%d)\n", rc);
3357 }
3358
3359 /* Wait for !BSY if the controller can wait for the first D2H
3360 * Reg FIS and we don't know that no device is attached.
3361 */
3362 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3363 rc = ata_wait_ready(ap, deadline);
3364 if (rc && rc != -ENODEV) {
3365 ata_link_printk(link, KERN_WARNING, "device not ready "
3366 "(errno=%d), forcing hardreset\n", rc);
3367 ehc->i.action |= ATA_EH_HARDRESET;
3368 }
3369 }
3370
3371 return 0;
3372 }
3373
3374 /**
3375 * ata_std_softreset - reset host port via ATA SRST
3376 * @link: ATA link to reset
3377 * @classes: resulting classes of attached devices
3378 * @deadline: deadline jiffies for the operation
3379 *
3380 * Reset host port using ATA SRST.
3381 *
3382 * LOCKING:
3383 * Kernel thread context (may sleep)
3384 *
3385 * RETURNS:
3386 * 0 on success, -errno otherwise.
3387 */
3388 int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3389 unsigned long deadline)
3390 {
3391 struct ata_port *ap = link->ap;
3392 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3393 unsigned int devmask = 0;
3394 int rc;
3395 u8 err;
3396
3397 DPRINTK("ENTER\n");
3398
3399 if (ata_link_offline(link)) {
3400 classes[0] = ATA_DEV_NONE;
3401 goto out;
3402 }
3403
3404 /* determine if device 0/1 are present */
3405 if (ata_devchk(ap, 0))
3406 devmask |= (1 << 0);
3407 if (slave_possible && ata_devchk(ap, 1))
3408 devmask |= (1 << 1);
3409
3410 /* select device 0 again */
3411 ap->ops->dev_select(ap, 0);
3412
3413 /* issue bus reset */
3414 DPRINTK("about to softreset, devmask=%x\n", devmask);
3415 rc = ata_bus_softreset(ap, devmask, deadline);
3416 /* if link is occupied, -ENODEV too is an error */
3417 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3418 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3419 return rc;
3420 }
3421
3422 /* determine by signature whether we have ATA or ATAPI devices */
3423 classes[0] = ata_dev_try_classify(ap, 0, &err);
3424 if (slave_possible && err != 0x81)
3425 classes[1] = ata_dev_try_classify(ap, 1, &err);
3426
3427 out:
3428 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3429 return 0;
3430 }
3431
3432 /**
3433 * sata_link_hardreset - reset link via SATA phy reset
3434 * @link: link to reset
3435 * @timing: timing parameters { interval, duratinon, timeout } in msec
3436 * @deadline: deadline jiffies for the operation
3437 *
3438 * SATA phy-reset @link using DET bits of SControl register.
3439 *
3440 * LOCKING:
3441 * Kernel thread context (may sleep)
3442 *
3443 * RETURNS:
3444 * 0 on success, -errno otherwise.
3445 */
3446 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3447 unsigned long deadline)
3448 {
3449 u32 scontrol;
3450 int rc;
3451
3452 DPRINTK("ENTER\n");
3453
3454 if (sata_set_spd_needed(link)) {
3455 /* SATA spec says nothing about how to reconfigure
3456 * spd. To be on the safe side, turn off phy during
3457 * reconfiguration. This works for at least ICH7 AHCI
3458 * and Sil3124.
3459 */
3460 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3461 goto out;
3462
3463 scontrol = (scontrol & 0x0f0) | 0x304;
3464
3465 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3466 goto out;
3467
3468 sata_set_spd(link);
3469 }
3470
3471 /* issue phy wake/reset */
3472 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3473 goto out;
3474
3475 scontrol = (scontrol & 0x0f0) | 0x301;
3476
3477 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3478 goto out;
3479
3480 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3481 * 10.4.2 says at least 1 ms.
3482 */
3483 msleep(1);
3484
3485 /* bring link back */
3486 rc = sata_link_resume(link, timing, deadline);
3487 out:
3488 DPRINTK("EXIT, rc=%d\n", rc);
3489 return rc;
3490 }
3491
3492 /**
3493 * sata_std_hardreset - reset host port via SATA phy reset
3494 * @link: link to reset
3495 * @class: resulting class of attached device
3496 * @deadline: deadline jiffies for the operation
3497 *
3498 * SATA phy-reset host port using DET bits of SControl register,
3499 * wait for !BSY and classify the attached device.
3500 *
3501 * LOCKING:
3502 * Kernel thread context (may sleep)
3503 *
3504 * RETURNS:
3505 * 0 on success, -errno otherwise.
3506 */
3507 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3508 unsigned long deadline)
3509 {
3510 struct ata_port *ap = link->ap;
3511 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3512 int rc;
3513
3514 DPRINTK("ENTER\n");
3515
3516 /* do hardreset */
3517 rc = sata_link_hardreset(link, timing, deadline);
3518 if (rc) {
3519 ata_link_printk(link, KERN_ERR,
3520 "COMRESET failed (errno=%d)\n", rc);
3521 return rc;
3522 }
3523
3524 /* TODO: phy layer with polling, timeouts, etc. */
3525 if (ata_link_offline(link)) {
3526 *class = ATA_DEV_NONE;
3527 DPRINTK("EXIT, link offline\n");
3528 return 0;
3529 }
3530
3531 /* wait a while before checking status, see SRST for more info */
3532 msleep(150);
3533
3534 rc = ata_wait_ready(ap, deadline);
3535 /* link occupied, -ENODEV too is an error */
3536 if (rc) {
3537 ata_link_printk(link, KERN_ERR,
3538 "COMRESET failed (errno=%d)\n", rc);
3539 return rc;
3540 }
3541
3542 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3543
3544 *class = ata_dev_try_classify(ap, 0, NULL);
3545
3546 DPRINTK("EXIT, class=%u\n", *class);
3547 return 0;
3548 }
3549
3550 /**
3551 * ata_std_postreset - standard postreset callback
3552 * @link: the target ata_link
3553 * @classes: classes of attached devices
3554 *
3555 * This function is invoked after a successful reset. Note that
3556 * the device might have been reset more than once using
3557 * different reset methods before postreset is invoked.
3558 *
3559 * LOCKING:
3560 * Kernel thread context (may sleep)
3561 */
3562 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3563 {
3564 struct ata_port *ap = link->ap;
3565 u32 serror;
3566
3567 DPRINTK("ENTER\n");
3568
3569 /* print link status */
3570 sata_print_link_status(link);
3571
3572 /* clear SError */
3573 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3574 sata_scr_write(link, SCR_ERROR, serror);
3575
3576 /* is double-select really necessary? */
3577 if (classes[0] != ATA_DEV_NONE)
3578 ap->ops->dev_select(ap, 1);
3579 if (classes[1] != ATA_DEV_NONE)
3580 ap->ops->dev_select(ap, 0);
3581
3582 /* bail out if no device is present */
3583 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3584 DPRINTK("EXIT, no device\n");
3585 return;
3586 }
3587
3588 /* set up device control */
3589 if (ap->ioaddr.ctl_addr)
3590 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3591
3592 DPRINTK("EXIT\n");
3593 }
3594
3595 /**
3596 * ata_dev_same_device - Determine whether new ID matches configured device
3597 * @dev: device to compare against
3598 * @new_class: class of the new device
3599 * @new_id: IDENTIFY page of the new device
3600 *
3601 * Compare @new_class and @new_id against @dev and determine
3602 * whether @dev is the device indicated by @new_class and
3603 * @new_id.
3604 *
3605 * LOCKING:
3606 * None.
3607 *
3608 * RETURNS:
3609 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3610 */
3611 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3612 const u16 *new_id)
3613 {
3614 const u16 *old_id = dev->id;
3615 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3616 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3617
3618 if (dev->class != new_class) {
3619 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3620 dev->class, new_class);
3621 return 0;
3622 }
3623
3624 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3625 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3626 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3627 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3628
3629 if (strcmp(model[0], model[1])) {
3630 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3631 "'%s' != '%s'\n", model[0], model[1]);
3632 return 0;
3633 }
3634
3635 if (strcmp(serial[0], serial[1])) {
3636 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3637 "'%s' != '%s'\n", serial[0], serial[1]);
3638 return 0;
3639 }
3640
3641 return 1;
3642 }
3643
3644 /**
3645 * ata_dev_reread_id - Re-read IDENTIFY data
3646 * @dev: target ATA device
3647 * @readid_flags: read ID flags
3648 *
3649 * Re-read IDENTIFY page and make sure @dev is still attached to
3650 * the port.
3651 *
3652 * LOCKING:
3653 * Kernel thread context (may sleep)
3654 *
3655 * RETURNS:
3656 * 0 on success, negative errno otherwise
3657 */
3658 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3659 {
3660 unsigned int class = dev->class;
3661 u16 *id = (void *)dev->link->ap->sector_buf;
3662 int rc;
3663
3664 /* read ID data */
3665 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3666 if (rc)
3667 return rc;
3668
3669 /* is the device still there? */
3670 if (!ata_dev_same_device(dev, class, id))
3671 return -ENODEV;
3672
3673 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3674 return 0;
3675 }
3676
3677 /**
3678 * ata_dev_revalidate - Revalidate ATA device
3679 * @dev: device to revalidate
3680 * @readid_flags: read ID flags
3681 *
3682 * Re-read IDENTIFY page, make sure @dev is still attached to the
3683 * port and reconfigure it according to the new IDENTIFY page.
3684 *
3685 * LOCKING:
3686 * Kernel thread context (may sleep)
3687 *
3688 * RETURNS:
3689 * 0 on success, negative errno otherwise
3690 */
3691 int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3692 {
3693 u64 n_sectors = dev->n_sectors;
3694 int rc;
3695
3696 if (!ata_dev_enabled(dev))
3697 return -ENODEV;
3698
3699 /* re-read ID */
3700 rc = ata_dev_reread_id(dev, readid_flags);
3701 if (rc)
3702 goto fail;
3703
3704 /* configure device according to the new ID */
3705 rc = ata_dev_configure(dev);
3706 if (rc)
3707 goto fail;
3708
3709 /* verify n_sectors hasn't changed */
3710 if (dev->class == ATA_DEV_ATA && n_sectors &&
3711 dev->n_sectors != n_sectors) {
3712 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3713 "%llu != %llu\n",
3714 (unsigned long long)n_sectors,
3715 (unsigned long long)dev->n_sectors);
3716
3717 /* restore original n_sectors */
3718 dev->n_sectors = n_sectors;
3719
3720 rc = -ENODEV;
3721 goto fail;
3722 }
3723
3724 return 0;
3725
3726 fail:
3727 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3728 return rc;
3729 }
3730
3731 struct ata_blacklist_entry {
3732 const char *model_num;
3733 const char *model_rev;
3734 unsigned long horkage;
3735 };
3736
3737 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3738 /* Devices with DMA related problems under Linux */
3739 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3740 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3741 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3742 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3743 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3744 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3745 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3746 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3747 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3748 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3749 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3750 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3751 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3752 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3753 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3754 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3755 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3756 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3757 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3758 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3759 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3760 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3761 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3762 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3763 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3764 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3765 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3766 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3767 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3768 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3769 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
3770 { "IOMEGA ZIP 250 ATAPI Floppy",
3771 NULL, ATA_HORKAGE_NODMA },
3772
3773 /* Weird ATAPI devices */
3774 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
3775
3776 /* Devices we expect to fail diagnostics */
3777
3778 /* Devices where NCQ should be avoided */
3779 /* NCQ is slow */
3780 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3781 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3782 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3783 /* NCQ is broken */
3784 { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
3785 { "Maxtor 6B200M0", "BANC1BM0", ATA_HORKAGE_NONCQ },
3786 { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ },
3787 { "Maxtor 7B250S0", "BANC1B70", ATA_HORKAGE_NONCQ, },
3788 { "Maxtor 7B300S0", "BANC1B70", ATA_HORKAGE_NONCQ },
3789 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
3790 { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
3791 ATA_HORKAGE_NONCQ },
3792 /* NCQ hard hangs device under heavier load, needs hard power cycle */
3793 { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
3794 /* Blacklist entries taken from Silicon Image 3124/3132
3795 Windows driver .inf file - also several Linux problem reports */
3796 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3797 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3798 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
3799 /* Drives which do spurious command completion */
3800 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
3801 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
3802 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
3803 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
3804 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
3805 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
3806 { "ST3160812AS", "3.AD", ATA_HORKAGE_NONCQ, },
3807 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
3808
3809 /* devices which puke on READ_NATIVE_MAX */
3810 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3811 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3812 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3813 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
3814
3815 /* End Marker */
3816 { }
3817 };
3818
3819 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3820 {
3821 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3822 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3823 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3824
3825 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3826 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3827
3828 while (ad->model_num) {
3829 if (!strcmp(ad->model_num, model_num)) {
3830 if (ad->model_rev == NULL)
3831 return ad->horkage;
3832 if (!strcmp(ad->model_rev, model_rev))
3833 return ad->horkage;
3834 }
3835 ad++;
3836 }
3837 return 0;
3838 }
3839
3840 static int ata_dma_blacklisted(const struct ata_device *dev)
3841 {
3842 /* We don't support polling DMA.
3843 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3844 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3845 */
3846 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
3847 (dev->flags & ATA_DFLAG_CDB_INTR))
3848 return 1;
3849 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
3850 }
3851
3852 /**
3853 * ata_dev_xfermask - Compute supported xfermask of the given device
3854 * @dev: Device to compute xfermask for
3855 *
3856 * Compute supported xfermask of @dev and store it in
3857 * dev->*_mask. This function is responsible for applying all
3858 * known limits including host controller limits, device
3859 * blacklist, etc...
3860 *
3861 * LOCKING:
3862 * None.
3863 */
3864 static void ata_dev_xfermask(struct ata_device *dev)
3865 {
3866 struct ata_link *link = dev->link;
3867 struct ata_port *ap = link->ap;
3868 struct ata_host *host = ap->host;
3869 unsigned long xfer_mask;
3870
3871 /* controller modes available */
3872 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3873 ap->mwdma_mask, ap->udma_mask);
3874
3875 /* drive modes available */
3876 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3877 dev->mwdma_mask, dev->udma_mask);
3878 xfer_mask &= ata_id_xfermask(dev->id);
3879
3880 /*
3881 * CFA Advanced TrueIDE timings are not allowed on a shared
3882 * cable
3883 */
3884 if (ata_dev_pair(dev)) {
3885 /* No PIO5 or PIO6 */
3886 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3887 /* No MWDMA3 or MWDMA 4 */
3888 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3889 }
3890
3891 if (ata_dma_blacklisted(dev)) {
3892 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3893 ata_dev_printk(dev, KERN_WARNING,
3894 "device is on DMA blacklist, disabling DMA\n");
3895 }
3896
3897 if ((host->flags & ATA_HOST_SIMPLEX) &&
3898 host->simplex_claimed && host->simplex_claimed != ap) {
3899 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3900 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3901 "other device, disabling DMA\n");
3902 }
3903
3904 if (ap->flags & ATA_FLAG_NO_IORDY)
3905 xfer_mask &= ata_pio_mask_no_iordy(dev);
3906
3907 if (ap->ops->mode_filter)
3908 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
3909
3910 /* Apply cable rule here. Don't apply it early because when
3911 * we handle hot plug the cable type can itself change.
3912 * Check this last so that we know if the transfer rate was
3913 * solely limited by the cable.
3914 * Unknown or 80 wire cables reported host side are checked
3915 * drive side as well. Cases where we know a 40wire cable
3916 * is used safely for 80 are not checked here.
3917 */
3918 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3919 /* UDMA/44 or higher would be available */
3920 if((ap->cbl == ATA_CBL_PATA40) ||
3921 (ata_drive_40wire(dev->id) &&
3922 (ap->cbl == ATA_CBL_PATA_UNK ||
3923 ap->cbl == ATA_CBL_PATA80))) {
3924 ata_dev_printk(dev, KERN_WARNING,
3925 "limited to UDMA/33 due to 40-wire cable\n");
3926 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3927 }
3928
3929 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3930 &dev->mwdma_mask, &dev->udma_mask);
3931 }
3932
3933 /**
3934 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3935 * @dev: Device to which command will be sent
3936 *
3937 * Issue SET FEATURES - XFER MODE command to device @dev
3938 * on port @ap.
3939 *
3940 * LOCKING:
3941 * PCI/etc. bus probe sem.
3942 *
3943 * RETURNS:
3944 * 0 on success, AC_ERR_* mask otherwise.
3945 */
3946
3947 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3948 {
3949 struct ata_taskfile tf;
3950 unsigned int err_mask;
3951
3952 /* set up set-features taskfile */
3953 DPRINTK("set features - xfer mode\n");
3954
3955 /* Some controllers and ATAPI devices show flaky interrupt
3956 * behavior after setting xfer mode. Use polling instead.
3957 */
3958 ata_tf_init(dev, &tf);
3959 tf.command = ATA_CMD_SET_FEATURES;
3960 tf.feature = SETFEATURES_XFER;
3961 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
3962 tf.protocol = ATA_PROT_NODATA;
3963 tf.nsect = dev->xfer_mode;
3964
3965 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3966
3967 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3968 return err_mask;
3969 }
3970
3971 /**
3972 * ata_dev_init_params - Issue INIT DEV PARAMS command
3973 * @dev: Device to which command will be sent
3974 * @heads: Number of heads (taskfile parameter)
3975 * @sectors: Number of sectors (taskfile parameter)
3976 *
3977 * LOCKING:
3978 * Kernel thread context (may sleep)
3979 *
3980 * RETURNS:
3981 * 0 on success, AC_ERR_* mask otherwise.
3982 */
3983 static unsigned int ata_dev_init_params(struct ata_device *dev,
3984 u16 heads, u16 sectors)
3985 {
3986 struct ata_taskfile tf;
3987 unsigned int err_mask;
3988
3989 /* Number of sectors per track 1-255. Number of heads 1-16 */
3990 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3991 return AC_ERR_INVALID;
3992
3993 /* set up init dev params taskfile */
3994 DPRINTK("init dev params \n");
3995
3996 ata_tf_init(dev, &tf);
3997 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3998 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3999 tf.protocol = ATA_PROT_NODATA;
4000 tf.nsect = sectors;
4001 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4002
4003 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4004 /* A clean abort indicates an original or just out of spec drive
4005 and we should continue as we issue the setup based on the
4006 drive reported working geometry */
4007 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4008 err_mask = 0;
4009
4010 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4011 return err_mask;
4012 }
4013
4014 /**
4015 * ata_sg_clean - Unmap DMA memory associated with command
4016 * @qc: Command containing DMA memory to be released
4017 *
4018 * Unmap all mapped DMA memory associated with this command.
4019 *
4020 * LOCKING:
4021 * spin_lock_irqsave(host lock)
4022 */
4023 void ata_sg_clean(struct ata_queued_cmd *qc)
4024 {
4025 struct ata_port *ap = qc->ap;
4026 struct scatterlist *sg = qc->__sg;
4027 int dir = qc->dma_dir;
4028 void *pad_buf = NULL;
4029
4030 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4031 WARN_ON(sg == NULL);
4032
4033 if (qc->flags & ATA_QCFLAG_SINGLE)
4034 WARN_ON(qc->n_elem > 1);
4035
4036 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4037
4038 /* if we padded the buffer out to 32-bit bound, and data
4039 * xfer direction is from-device, we must copy from the
4040 * pad buffer back into the supplied buffer
4041 */
4042 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4043 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4044
4045 if (qc->flags & ATA_QCFLAG_SG) {
4046 if (qc->n_elem)
4047 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4048 /* restore last sg */
4049 sg[qc->orig_n_elem - 1].length += qc->pad_len;
4050 if (pad_buf) {
4051 struct scatterlist *psg = &qc->pad_sgent;
4052 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4053 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4054 kunmap_atomic(addr, KM_IRQ0);
4055 }
4056 } else {
4057 if (qc->n_elem)
4058 dma_unmap_single(ap->dev,
4059 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4060 dir);
4061 /* restore sg */
4062 sg->length += qc->pad_len;
4063 if (pad_buf)
4064 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4065 pad_buf, qc->pad_len);
4066 }
4067
4068 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4069 qc->__sg = NULL;
4070 }
4071
4072 /**
4073 * ata_fill_sg - Fill PCI IDE PRD table
4074 * @qc: Metadata associated with taskfile to be transferred
4075 *
4076 * Fill PCI IDE PRD (scatter-gather) table with segments
4077 * associated with the current disk command.
4078 *
4079 * LOCKING:
4080 * spin_lock_irqsave(host lock)
4081 *
4082 */
4083 static void ata_fill_sg(struct ata_queued_cmd *qc)
4084 {
4085 struct ata_port *ap = qc->ap;
4086 struct scatterlist *sg;
4087 unsigned int idx;
4088
4089 WARN_ON(qc->__sg == NULL);
4090 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4091
4092 idx = 0;
4093 ata_for_each_sg(sg, qc) {
4094 u32 addr, offset;
4095 u32 sg_len, len;
4096
4097 /* determine if physical DMA addr spans 64K boundary.
4098 * Note h/w doesn't support 64-bit, so we unconditionally
4099 * truncate dma_addr_t to u32.
4100 */
4101 addr = (u32) sg_dma_address(sg);
4102 sg_len = sg_dma_len(sg);
4103
4104 while (sg_len) {
4105 offset = addr & 0xffff;
4106 len = sg_len;
4107 if ((offset + sg_len) > 0x10000)
4108 len = 0x10000 - offset;
4109
4110 ap->prd[idx].addr = cpu_to_le32(addr);
4111 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4112 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4113
4114 idx++;
4115 sg_len -= len;
4116 addr += len;
4117 }
4118 }
4119
4120 if (idx)
4121 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4122 }
4123
4124 /**
4125 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4126 * @qc: Metadata associated with taskfile to be transferred
4127 *
4128 * Fill PCI IDE PRD (scatter-gather) table with segments
4129 * associated with the current disk command. Perform the fill
4130 * so that we avoid writing any length 64K records for
4131 * controllers that don't follow the spec.
4132 *
4133 * LOCKING:
4134 * spin_lock_irqsave(host lock)
4135 *
4136 */
4137 static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4138 {
4139 struct ata_port *ap = qc->ap;
4140 struct scatterlist *sg;
4141 unsigned int idx;
4142
4143 WARN_ON(qc->__sg == NULL);
4144 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4145
4146 idx = 0;
4147 ata_for_each_sg(sg, qc) {
4148 u32 addr, offset;
4149 u32 sg_len, len, blen;
4150
4151 /* determine if physical DMA addr spans 64K boundary.
4152 * Note h/w doesn't support 64-bit, so we unconditionally
4153 * truncate dma_addr_t to u32.
4154 */
4155 addr = (u32) sg_dma_address(sg);
4156 sg_len = sg_dma_len(sg);
4157
4158 while (sg_len) {
4159 offset = addr & 0xffff;
4160 len = sg_len;
4161 if ((offset + sg_len) > 0x10000)
4162 len = 0x10000 - offset;
4163
4164 blen = len & 0xffff;
4165 ap->prd[idx].addr = cpu_to_le32(addr);
4166 if (blen == 0) {
4167 /* Some PATA chipsets like the CS5530 can't
4168 cope with 0x0000 meaning 64K as the spec says */
4169 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4170 blen = 0x8000;
4171 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4172 }
4173 ap->prd[idx].flags_len = cpu_to_le32(blen);
4174 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4175
4176 idx++;
4177 sg_len -= len;
4178 addr += len;
4179 }
4180 }
4181
4182 if (idx)
4183 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4184 }
4185
4186 /**
4187 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4188 * @qc: Metadata associated with taskfile to check
4189 *
4190 * Allow low-level driver to filter ATA PACKET commands, returning
4191 * a status indicating whether or not it is OK to use DMA for the
4192 * supplied PACKET command.
4193 *
4194 * LOCKING:
4195 * spin_lock_irqsave(host lock)
4196 *
4197 * RETURNS: 0 when ATAPI DMA can be used
4198 * nonzero otherwise
4199 */
4200 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4201 {
4202 struct ata_port *ap = qc->ap;
4203
4204 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4205 * few ATAPI devices choke on such DMA requests.
4206 */
4207 if (unlikely(qc->nbytes & 15))
4208 return 1;
4209
4210 if (ap->ops->check_atapi_dma)
4211 return ap->ops->check_atapi_dma(qc);
4212
4213 return 0;
4214 }
4215
4216 /**
4217 * ata_qc_prep - Prepare taskfile for submission
4218 * @qc: Metadata associated with taskfile to be prepared
4219 *
4220 * Prepare ATA taskfile for submission.
4221 *
4222 * LOCKING:
4223 * spin_lock_irqsave(host lock)
4224 */
4225 void ata_qc_prep(struct ata_queued_cmd *qc)
4226 {
4227 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4228 return;
4229
4230 ata_fill_sg(qc);
4231 }
4232
4233 /**
4234 * ata_dumb_qc_prep - Prepare taskfile for submission
4235 * @qc: Metadata associated with taskfile to be prepared
4236 *
4237 * Prepare ATA taskfile for submission.
4238 *
4239 * LOCKING:
4240 * spin_lock_irqsave(host lock)
4241 */
4242 void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4243 {
4244 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4245 return;
4246
4247 ata_fill_sg_dumb(qc);
4248 }
4249
4250 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4251
4252 /**
4253 * ata_sg_init_one - Associate command with memory buffer
4254 * @qc: Command to be associated
4255 * @buf: Memory buffer
4256 * @buflen: Length of memory buffer, in bytes.
4257 *
4258 * Initialize the data-related elements of queued_cmd @qc
4259 * to point to a single memory buffer, @buf of byte length @buflen.
4260 *
4261 * LOCKING:
4262 * spin_lock_irqsave(host lock)
4263 */
4264
4265 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4266 {
4267 qc->flags |= ATA_QCFLAG_SINGLE;
4268
4269 qc->__sg = &qc->sgent;
4270 qc->n_elem = 1;
4271 qc->orig_n_elem = 1;
4272 qc->buf_virt = buf;
4273 qc->nbytes = buflen;
4274
4275 sg_init_one(&qc->sgent, buf, buflen);
4276 }
4277
4278 /**
4279 * ata_sg_init - Associate command with scatter-gather table.
4280 * @qc: Command to be associated
4281 * @sg: Scatter-gather table.
4282 * @n_elem: Number of elements in s/g table.
4283 *
4284 * Initialize the data-related elements of queued_cmd @qc
4285 * to point to a scatter-gather table @sg, containing @n_elem
4286 * elements.
4287 *
4288 * LOCKING:
4289 * spin_lock_irqsave(host lock)
4290 */
4291
4292 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4293 unsigned int n_elem)
4294 {
4295 qc->flags |= ATA_QCFLAG_SG;
4296 qc->__sg = sg;
4297 qc->n_elem = n_elem;
4298 qc->orig_n_elem = n_elem;
4299 }
4300
4301 /**
4302 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4303 * @qc: Command with memory buffer to be mapped.
4304 *
4305 * DMA-map the memory buffer associated with queued_cmd @qc.
4306 *
4307 * LOCKING:
4308 * spin_lock_irqsave(host lock)
4309 *
4310 * RETURNS:
4311 * Zero on success, negative on error.
4312 */
4313
4314 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4315 {
4316 struct ata_port *ap = qc->ap;
4317 int dir = qc->dma_dir;
4318 struct scatterlist *sg = qc->__sg;
4319 dma_addr_t dma_address;
4320 int trim_sg = 0;
4321
4322 /* we must lengthen transfers to end on a 32-bit boundary */
4323 qc->pad_len = sg->length & 3;
4324 if (qc->pad_len) {
4325 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4326 struct scatterlist *psg = &qc->pad_sgent;
4327
4328 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4329
4330 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4331
4332 if (qc->tf.flags & ATA_TFLAG_WRITE)
4333 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4334 qc->pad_len);
4335
4336 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4337 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4338 /* trim sg */
4339 sg->length -= qc->pad_len;
4340 if (sg->length == 0)
4341 trim_sg = 1;
4342
4343 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4344 sg->length, qc->pad_len);
4345 }
4346
4347 if (trim_sg) {
4348 qc->n_elem--;
4349 goto skip_map;
4350 }
4351
4352 dma_address = dma_map_single(ap->dev, qc->buf_virt,
4353 sg->length, dir);
4354 if (dma_mapping_error(dma_address)) {
4355 /* restore sg */
4356 sg->length += qc->pad_len;
4357 return -1;
4358 }
4359
4360 sg_dma_address(sg) = dma_address;
4361 sg_dma_len(sg) = sg->length;
4362
4363 skip_map:
4364 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4365 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4366
4367 return 0;
4368 }
4369
4370 /**
4371 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4372 * @qc: Command with scatter-gather table to be mapped.
4373 *
4374 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4375 *
4376 * LOCKING:
4377 * spin_lock_irqsave(host lock)
4378 *
4379 * RETURNS:
4380 * Zero on success, negative on error.
4381 *
4382 */
4383
4384 static int ata_sg_setup(struct ata_queued_cmd *qc)
4385 {
4386 struct ata_port *ap = qc->ap;
4387 struct scatterlist *sg = qc->__sg;
4388 struct scatterlist *lsg = &sg[qc->n_elem - 1];
4389 int n_elem, pre_n_elem, dir, trim_sg = 0;
4390
4391 VPRINTK("ENTER, ata%u\n", ap->print_id);
4392 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4393
4394 /* we must lengthen transfers to end on a 32-bit boundary */
4395 qc->pad_len = lsg->length & 3;
4396 if (qc->pad_len) {
4397 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4398 struct scatterlist *psg = &qc->pad_sgent;
4399 unsigned int offset;
4400
4401 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4402
4403 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4404
4405 /*
4406 * psg->page/offset are used to copy to-be-written
4407 * data in this function or read data in ata_sg_clean.
4408 */
4409 offset = lsg->offset + lsg->length - qc->pad_len;
4410 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4411 psg->offset = offset_in_page(offset);
4412
4413 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4414 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4415 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4416 kunmap_atomic(addr, KM_IRQ0);
4417 }
4418
4419 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4420 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4421 /* trim last sg */
4422 lsg->length -= qc->pad_len;
4423 if (lsg->length == 0)
4424 trim_sg = 1;
4425
4426 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4427 qc->n_elem - 1, lsg->length, qc->pad_len);
4428 }
4429
4430 pre_n_elem = qc->n_elem;
4431 if (trim_sg && pre_n_elem)
4432 pre_n_elem--;
4433
4434 if (!pre_n_elem) {
4435 n_elem = 0;
4436 goto skip_map;
4437 }
4438
4439 dir = qc->dma_dir;
4440 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4441 if (n_elem < 1) {
4442 /* restore last sg */
4443 lsg->length += qc->pad_len;
4444 return -1;
4445 }
4446
4447 DPRINTK("%d sg elements mapped\n", n_elem);
4448
4449 skip_map:
4450 qc->n_elem = n_elem;
4451
4452 return 0;
4453 }
4454
4455 /**
4456 * swap_buf_le16 - swap halves of 16-bit words in place
4457 * @buf: Buffer to swap
4458 * @buf_words: Number of 16-bit words in buffer.
4459 *
4460 * Swap halves of 16-bit words if needed to convert from
4461 * little-endian byte order to native cpu byte order, or
4462 * vice-versa.
4463 *
4464 * LOCKING:
4465 * Inherited from caller.
4466 */
4467 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4468 {
4469 #ifdef __BIG_ENDIAN
4470 unsigned int i;
4471
4472 for (i = 0; i < buf_words; i++)
4473 buf[i] = le16_to_cpu(buf[i]);
4474 #endif /* __BIG_ENDIAN */
4475 }
4476
4477 /**
4478 * ata_data_xfer - Transfer data by PIO
4479 * @adev: device to target
4480 * @buf: data buffer
4481 * @buflen: buffer length
4482 * @write_data: read/write
4483 *
4484 * Transfer data from/to the device data register by PIO.
4485 *
4486 * LOCKING:
4487 * Inherited from caller.
4488 */
4489 void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4490 unsigned int buflen, int write_data)
4491 {
4492 struct ata_port *ap = adev->link->ap;
4493 unsigned int words = buflen >> 1;
4494
4495 /* Transfer multiple of 2 bytes */
4496 if (write_data)
4497 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4498 else
4499 ioread16_rep(ap->ioaddr.data_addr, buf, words);
4500
4501 /* Transfer trailing 1 byte, if any. */
4502 if (unlikely(buflen & 0x01)) {
4503 u16 align_buf[1] = { 0 };
4504 unsigned char *trailing_buf = buf + buflen - 1;
4505
4506 if (write_data) {
4507 memcpy(align_buf, trailing_buf, 1);
4508 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4509 } else {
4510 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4511 memcpy(trailing_buf, align_buf, 1);
4512 }
4513 }
4514 }
4515
4516 /**
4517 * ata_data_xfer_noirq - Transfer data by PIO
4518 * @adev: device to target
4519 * @buf: data buffer
4520 * @buflen: buffer length
4521 * @write_data: read/write
4522 *
4523 * Transfer data from/to the device data register by PIO. Do the
4524 * transfer with interrupts disabled.
4525 *
4526 * LOCKING:
4527 * Inherited from caller.
4528 */
4529 void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4530 unsigned int buflen, int write_data)
4531 {
4532 unsigned long flags;
4533 local_irq_save(flags);
4534 ata_data_xfer(adev, buf, buflen, write_data);
4535 local_irq_restore(flags);
4536 }
4537
4538
4539 /**
4540 * ata_pio_sector - Transfer a sector of data.
4541 * @qc: Command on going
4542 *
4543 * Transfer qc->sect_size bytes of data from/to the ATA device.
4544 *
4545 * LOCKING:
4546 * Inherited from caller.
4547 */
4548
4549 static void ata_pio_sector(struct ata_queued_cmd *qc)
4550 {
4551 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4552 struct scatterlist *sg = qc->__sg;
4553 struct ata_port *ap = qc->ap;
4554 struct page *page;
4555 unsigned int offset;
4556 unsigned char *buf;
4557
4558 if (qc->curbytes == qc->nbytes - qc->sect_size)
4559 ap->hsm_task_state = HSM_ST_LAST;
4560
4561 page = sg[qc->cursg].page;
4562 offset = sg[qc->cursg].offset + qc->cursg_ofs;
4563
4564 /* get the current page and offset */
4565 page = nth_page(page, (offset >> PAGE_SHIFT));
4566 offset %= PAGE_SIZE;
4567
4568 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4569
4570 if (PageHighMem(page)) {
4571 unsigned long flags;
4572
4573 /* FIXME: use a bounce buffer */
4574 local_irq_save(flags);
4575 buf = kmap_atomic(page, KM_IRQ0);
4576
4577 /* do the actual data transfer */
4578 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4579
4580 kunmap_atomic(buf, KM_IRQ0);
4581 local_irq_restore(flags);
4582 } else {
4583 buf = page_address(page);
4584 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4585 }
4586
4587 qc->curbytes += qc->sect_size;
4588 qc->cursg_ofs += qc->sect_size;
4589
4590 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
4591 qc->cursg++;
4592 qc->cursg_ofs = 0;
4593 }
4594 }
4595
4596 /**
4597 * ata_pio_sectors - Transfer one or many sectors.
4598 * @qc: Command on going
4599 *
4600 * Transfer one or many sectors of data from/to the
4601 * ATA device for the DRQ request.
4602 *
4603 * LOCKING:
4604 * Inherited from caller.
4605 */
4606
4607 static void ata_pio_sectors(struct ata_queued_cmd *qc)
4608 {
4609 if (is_multi_taskfile(&qc->tf)) {
4610 /* READ/WRITE MULTIPLE */
4611 unsigned int nsect;
4612
4613 WARN_ON(qc->dev->multi_count == 0);
4614
4615 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4616 qc->dev->multi_count);
4617 while (nsect--)
4618 ata_pio_sector(qc);
4619 } else
4620 ata_pio_sector(qc);
4621
4622 ata_altstatus(qc->ap); /* flush */
4623 }
4624
4625 /**
4626 * atapi_send_cdb - Write CDB bytes to hardware
4627 * @ap: Port to which ATAPI device is attached.
4628 * @qc: Taskfile currently active
4629 *
4630 * When device has indicated its readiness to accept
4631 * a CDB, this function is called. Send the CDB.
4632 *
4633 * LOCKING:
4634 * caller.
4635 */
4636
4637 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4638 {
4639 /* send SCSI cdb */
4640 DPRINTK("send cdb\n");
4641 WARN_ON(qc->dev->cdb_len < 12);
4642
4643 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4644 ata_altstatus(ap); /* flush */
4645
4646 switch (qc->tf.protocol) {
4647 case ATA_PROT_ATAPI:
4648 ap->hsm_task_state = HSM_ST;
4649 break;
4650 case ATA_PROT_ATAPI_NODATA:
4651 ap->hsm_task_state = HSM_ST_LAST;
4652 break;
4653 case ATA_PROT_ATAPI_DMA:
4654 ap->hsm_task_state = HSM_ST_LAST;
4655 /* initiate bmdma */
4656 ap->ops->bmdma_start(qc);
4657 break;
4658 }
4659 }
4660
4661 /**
4662 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4663 * @qc: Command on going
4664 * @bytes: number of bytes
4665 *
4666 * Transfer Transfer data from/to the ATAPI device.
4667 *
4668 * LOCKING:
4669 * Inherited from caller.
4670 *
4671 */
4672
4673 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4674 {
4675 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4676 struct scatterlist *sg = qc->__sg;
4677 struct ata_port *ap = qc->ap;
4678 struct page *page;
4679 unsigned char *buf;
4680 unsigned int offset, count;
4681
4682 if (qc->curbytes + bytes >= qc->nbytes)
4683 ap->hsm_task_state = HSM_ST_LAST;
4684
4685 next_sg:
4686 if (unlikely(qc->cursg >= qc->n_elem)) {
4687 /*
4688 * The end of qc->sg is reached and the device expects
4689 * more data to transfer. In order not to overrun qc->sg
4690 * and fulfill length specified in the byte count register,
4691 * - for read case, discard trailing data from the device
4692 * - for write case, padding zero data to the device
4693 */
4694 u16 pad_buf[1] = { 0 };
4695 unsigned int words = bytes >> 1;
4696 unsigned int i;
4697
4698 if (words) /* warning if bytes > 1 */
4699 ata_dev_printk(qc->dev, KERN_WARNING,
4700 "%u bytes trailing data\n", bytes);
4701
4702 for (i = 0; i < words; i++)
4703 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4704
4705 ap->hsm_task_state = HSM_ST_LAST;
4706 return;
4707 }
4708
4709 sg = &qc->__sg[qc->cursg];
4710
4711 page = sg->page;
4712 offset = sg->offset + qc->cursg_ofs;
4713
4714 /* get the current page and offset */
4715 page = nth_page(page, (offset >> PAGE_SHIFT));
4716 offset %= PAGE_SIZE;
4717
4718 /* don't overrun current sg */
4719 count = min(sg->length - qc->cursg_ofs, bytes);
4720
4721 /* don't cross page boundaries */
4722 count = min(count, (unsigned int)PAGE_SIZE - offset);
4723
4724 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4725
4726 if (PageHighMem(page)) {
4727 unsigned long flags;
4728
4729 /* FIXME: use bounce buffer */
4730 local_irq_save(flags);
4731 buf = kmap_atomic(page, KM_IRQ0);
4732
4733 /* do the actual data transfer */
4734 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4735
4736 kunmap_atomic(buf, KM_IRQ0);
4737 local_irq_restore(flags);
4738 } else {
4739 buf = page_address(page);
4740 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4741 }
4742
4743 bytes -= count;
4744 qc->curbytes += count;
4745 qc->cursg_ofs += count;
4746
4747 if (qc->cursg_ofs == sg->length) {
4748 qc->cursg++;
4749 qc->cursg_ofs = 0;
4750 }
4751
4752 if (bytes)
4753 goto next_sg;
4754 }
4755
4756 /**
4757 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4758 * @qc: Command on going
4759 *
4760 * Transfer Transfer data from/to the ATAPI device.
4761 *
4762 * LOCKING:
4763 * Inherited from caller.
4764 */
4765
4766 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4767 {
4768 struct ata_port *ap = qc->ap;
4769 struct ata_device *dev = qc->dev;
4770 unsigned int ireason, bc_lo, bc_hi, bytes;
4771 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4772
4773 /* Abuse qc->result_tf for temp storage of intermediate TF
4774 * here to save some kernel stack usage.
4775 * For normal completion, qc->result_tf is not relevant. For
4776 * error, qc->result_tf is later overwritten by ata_qc_complete().
4777 * So, the correctness of qc->result_tf is not affected.
4778 */
4779 ap->ops->tf_read(ap, &qc->result_tf);
4780 ireason = qc->result_tf.nsect;
4781 bc_lo = qc->result_tf.lbam;
4782 bc_hi = qc->result_tf.lbah;
4783 bytes = (bc_hi << 8) | bc_lo;
4784
4785 /* shall be cleared to zero, indicating xfer of data */
4786 if (ireason & (1 << 0))
4787 goto err_out;
4788
4789 /* make sure transfer direction matches expected */
4790 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4791 if (do_write != i_write)
4792 goto err_out;
4793
4794 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
4795
4796 __atapi_pio_bytes(qc, bytes);
4797 ata_altstatus(ap); /* flush */
4798
4799 return;
4800
4801 err_out:
4802 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4803 qc->err_mask |= AC_ERR_HSM;
4804 ap->hsm_task_state = HSM_ST_ERR;
4805 }
4806
4807 /**
4808 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4809 * @ap: the target ata_port
4810 * @qc: qc on going
4811 *
4812 * RETURNS:
4813 * 1 if ok in workqueue, 0 otherwise.
4814 */
4815
4816 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4817 {
4818 if (qc->tf.flags & ATA_TFLAG_POLLING)
4819 return 1;
4820
4821 if (ap->hsm_task_state == HSM_ST_FIRST) {
4822 if (qc->tf.protocol == ATA_PROT_PIO &&
4823 (qc->tf.flags & ATA_TFLAG_WRITE))
4824 return 1;
4825
4826 if (is_atapi_taskfile(&qc->tf) &&
4827 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4828 return 1;
4829 }
4830
4831 return 0;
4832 }
4833
4834 /**
4835 * ata_hsm_qc_complete - finish a qc running on standard HSM
4836 * @qc: Command to complete
4837 * @in_wq: 1 if called from workqueue, 0 otherwise
4838 *
4839 * Finish @qc which is running on standard HSM.
4840 *
4841 * LOCKING:
4842 * If @in_wq is zero, spin_lock_irqsave(host lock).
4843 * Otherwise, none on entry and grabs host lock.
4844 */
4845 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4846 {
4847 struct ata_port *ap = qc->ap;
4848 unsigned long flags;
4849
4850 if (ap->ops->error_handler) {
4851 if (in_wq) {
4852 spin_lock_irqsave(ap->lock, flags);
4853
4854 /* EH might have kicked in while host lock is
4855 * released.
4856 */
4857 qc = ata_qc_from_tag(ap, qc->tag);
4858 if (qc) {
4859 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4860 ap->ops->irq_on(ap);
4861 ata_qc_complete(qc);
4862 } else
4863 ata_port_freeze(ap);
4864 }
4865
4866 spin_unlock_irqrestore(ap->lock, flags);
4867 } else {
4868 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4869 ata_qc_complete(qc);
4870 else
4871 ata_port_freeze(ap);
4872 }
4873 } else {
4874 if (in_wq) {
4875 spin_lock_irqsave(ap->lock, flags);
4876 ap->ops->irq_on(ap);
4877 ata_qc_complete(qc);
4878 spin_unlock_irqrestore(ap->lock, flags);
4879 } else
4880 ata_qc_complete(qc);
4881 }
4882 }
4883
4884 /**
4885 * ata_hsm_move - move the HSM to the next state.
4886 * @ap: the target ata_port
4887 * @qc: qc on going
4888 * @status: current device status
4889 * @in_wq: 1 if called from workqueue, 0 otherwise
4890 *
4891 * RETURNS:
4892 * 1 when poll next status needed, 0 otherwise.
4893 */
4894 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4895 u8 status, int in_wq)
4896 {
4897 unsigned long flags = 0;
4898 int poll_next;
4899
4900 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4901
4902 /* Make sure ata_qc_issue_prot() does not throw things
4903 * like DMA polling into the workqueue. Notice that
4904 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4905 */
4906 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4907
4908 fsm_start:
4909 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4910 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
4911
4912 switch (ap->hsm_task_state) {
4913 case HSM_ST_FIRST:
4914 /* Send first data block or PACKET CDB */
4915
4916 /* If polling, we will stay in the work queue after
4917 * sending the data. Otherwise, interrupt handler
4918 * takes over after sending the data.
4919 */
4920 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4921
4922 /* check device status */
4923 if (unlikely((status & ATA_DRQ) == 0)) {
4924 /* handle BSY=0, DRQ=0 as error */
4925 if (likely(status & (ATA_ERR | ATA_DF)))
4926 /* device stops HSM for abort/error */
4927 qc->err_mask |= AC_ERR_DEV;
4928 else
4929 /* HSM violation. Let EH handle this */
4930 qc->err_mask |= AC_ERR_HSM;
4931
4932 ap->hsm_task_state = HSM_ST_ERR;
4933 goto fsm_start;
4934 }
4935
4936 /* Device should not ask for data transfer (DRQ=1)
4937 * when it finds something wrong.
4938 * We ignore DRQ here and stop the HSM by
4939 * changing hsm_task_state to HSM_ST_ERR and
4940 * let the EH abort the command or reset the device.
4941 */
4942 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4943 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4944 "error, dev_stat 0x%X\n", status);
4945 qc->err_mask |= AC_ERR_HSM;
4946 ap->hsm_task_state = HSM_ST_ERR;
4947 goto fsm_start;
4948 }
4949
4950 /* Send the CDB (atapi) or the first data block (ata pio out).
4951 * During the state transition, interrupt handler shouldn't
4952 * be invoked before the data transfer is complete and
4953 * hsm_task_state is changed. Hence, the following locking.
4954 */
4955 if (in_wq)
4956 spin_lock_irqsave(ap->lock, flags);
4957
4958 if (qc->tf.protocol == ATA_PROT_PIO) {
4959 /* PIO data out protocol.
4960 * send first data block.
4961 */
4962
4963 /* ata_pio_sectors() might change the state
4964 * to HSM_ST_LAST. so, the state is changed here
4965 * before ata_pio_sectors().
4966 */
4967 ap->hsm_task_state = HSM_ST;
4968 ata_pio_sectors(qc);
4969 } else
4970 /* send CDB */
4971 atapi_send_cdb(ap, qc);
4972
4973 if (in_wq)
4974 spin_unlock_irqrestore(ap->lock, flags);
4975
4976 /* if polling, ata_pio_task() handles the rest.
4977 * otherwise, interrupt handler takes over from here.
4978 */
4979 break;
4980
4981 case HSM_ST:
4982 /* complete command or read/write the data register */
4983 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4984 /* ATAPI PIO protocol */
4985 if ((status & ATA_DRQ) == 0) {
4986 /* No more data to transfer or device error.
4987 * Device error will be tagged in HSM_ST_LAST.
4988 */
4989 ap->hsm_task_state = HSM_ST_LAST;
4990 goto fsm_start;
4991 }
4992
4993 /* Device should not ask for data transfer (DRQ=1)
4994 * when it finds something wrong.
4995 * We ignore DRQ here and stop the HSM by
4996 * changing hsm_task_state to HSM_ST_ERR and
4997 * let the EH abort the command or reset the device.
4998 */
4999 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5000 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5001 "device error, dev_stat 0x%X\n",
5002 status);
5003 qc->err_mask |= AC_ERR_HSM;
5004 ap->hsm_task_state = HSM_ST_ERR;
5005 goto fsm_start;
5006 }
5007
5008 atapi_pio_bytes(qc);
5009
5010 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5011 /* bad ireason reported by device */
5012 goto fsm_start;
5013
5014 } else {
5015 /* ATA PIO protocol */
5016 if (unlikely((status & ATA_DRQ) == 0)) {
5017 /* handle BSY=0, DRQ=0 as error */
5018 if (likely(status & (ATA_ERR | ATA_DF)))
5019 /* device stops HSM for abort/error */
5020 qc->err_mask |= AC_ERR_DEV;
5021 else
5022 /* HSM violation. Let EH handle this.
5023 * Phantom devices also trigger this
5024 * condition. Mark hint.
5025 */
5026 qc->err_mask |= AC_ERR_HSM |
5027 AC_ERR_NODEV_HINT;
5028
5029 ap->hsm_task_state = HSM_ST_ERR;
5030 goto fsm_start;
5031 }
5032
5033 /* For PIO reads, some devices may ask for
5034 * data transfer (DRQ=1) alone with ERR=1.
5035 * We respect DRQ here and transfer one
5036 * block of junk data before changing the
5037 * hsm_task_state to HSM_ST_ERR.
5038 *
5039 * For PIO writes, ERR=1 DRQ=1 doesn't make
5040 * sense since the data block has been
5041 * transferred to the device.
5042 */
5043 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5044 /* data might be corrputed */
5045 qc->err_mask |= AC_ERR_DEV;
5046
5047 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5048 ata_pio_sectors(qc);
5049 status = ata_wait_idle(ap);
5050 }
5051
5052 if (status & (ATA_BUSY | ATA_DRQ))
5053 qc->err_mask |= AC_ERR_HSM;
5054
5055 /* ata_pio_sectors() might change the
5056 * state to HSM_ST_LAST. so, the state
5057 * is changed after ata_pio_sectors().
5058 */
5059 ap->hsm_task_state = HSM_ST_ERR;
5060 goto fsm_start;
5061 }
5062
5063 ata_pio_sectors(qc);
5064
5065 if (ap->hsm_task_state == HSM_ST_LAST &&
5066 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5067 /* all data read */
5068 status = ata_wait_idle(ap);
5069 goto fsm_start;
5070 }
5071 }
5072
5073 poll_next = 1;
5074 break;
5075
5076 case HSM_ST_LAST:
5077 if (unlikely(!ata_ok(status))) {
5078 qc->err_mask |= __ac_err_mask(status);
5079 ap->hsm_task_state = HSM_ST_ERR;
5080 goto fsm_start;
5081 }
5082
5083 /* no more data to transfer */
5084 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
5085 ap->print_id, qc->dev->devno, status);
5086
5087 WARN_ON(qc->err_mask);
5088
5089 ap->hsm_task_state = HSM_ST_IDLE;
5090
5091 /* complete taskfile transaction */
5092 ata_hsm_qc_complete(qc, in_wq);
5093
5094 poll_next = 0;
5095 break;
5096
5097 case HSM_ST_ERR:
5098 /* make sure qc->err_mask is available to
5099 * know what's wrong and recover
5100 */
5101 WARN_ON(qc->err_mask == 0);
5102
5103 ap->hsm_task_state = HSM_ST_IDLE;
5104
5105 /* complete taskfile transaction */
5106 ata_hsm_qc_complete(qc, in_wq);
5107
5108 poll_next = 0;
5109 break;
5110 default:
5111 poll_next = 0;
5112 BUG();
5113 }
5114
5115 return poll_next;
5116 }
5117
5118 static void ata_pio_task(struct work_struct *work)
5119 {
5120 struct ata_port *ap =
5121 container_of(work, struct ata_port, port_task.work);
5122 struct ata_queued_cmd *qc = ap->port_task_data;
5123 u8 status;
5124 int poll_next;
5125
5126 fsm_start:
5127 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5128
5129 /*
5130 * This is purely heuristic. This is a fast path.
5131 * Sometimes when we enter, BSY will be cleared in
5132 * a chk-status or two. If not, the drive is probably seeking
5133 * or something. Snooze for a couple msecs, then
5134 * chk-status again. If still busy, queue delayed work.
5135 */
5136 status = ata_busy_wait(ap, ATA_BUSY, 5);
5137 if (status & ATA_BUSY) {
5138 msleep(2);
5139 status = ata_busy_wait(ap, ATA_BUSY, 10);
5140 if (status & ATA_BUSY) {
5141 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5142 return;
5143 }
5144 }
5145
5146 /* move the HSM */
5147 poll_next = ata_hsm_move(ap, qc, status, 1);
5148
5149 /* another command or interrupt handler
5150 * may be running at this point.
5151 */
5152 if (poll_next)
5153 goto fsm_start;
5154 }
5155
5156 /**
5157 * ata_qc_new - Request an available ATA command, for queueing
5158 * @ap: Port associated with device @dev
5159 * @dev: Device from whom we request an available command structure
5160 *
5161 * LOCKING:
5162 * None.
5163 */
5164
5165 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5166 {
5167 struct ata_queued_cmd *qc = NULL;
5168 unsigned int i;
5169
5170 /* no command while frozen */
5171 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5172 return NULL;
5173
5174 /* the last tag is reserved for internal command. */
5175 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5176 if (!test_and_set_bit(i, &ap->qc_allocated)) {
5177 qc = __ata_qc_from_tag(ap, i);
5178 break;
5179 }
5180
5181 if (qc)
5182 qc->tag = i;
5183
5184 return qc;
5185 }
5186
5187 /**
5188 * ata_qc_new_init - Request an available ATA command, and initialize it
5189 * @dev: Device from whom we request an available command structure
5190 *
5191 * LOCKING:
5192 * None.
5193 */
5194
5195 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5196 {
5197 struct ata_port *ap = dev->link->ap;
5198 struct ata_queued_cmd *qc;
5199
5200 qc = ata_qc_new(ap);
5201 if (qc) {
5202 qc->scsicmd = NULL;
5203 qc->ap = ap;
5204 qc->dev = dev;
5205
5206 ata_qc_reinit(qc);
5207 }
5208
5209 return qc;
5210 }
5211
5212 /**
5213 * ata_qc_free - free unused ata_queued_cmd
5214 * @qc: Command to complete
5215 *
5216 * Designed to free unused ata_queued_cmd object
5217 * in case something prevents using it.
5218 *
5219 * LOCKING:
5220 * spin_lock_irqsave(host lock)
5221 */
5222 void ata_qc_free(struct ata_queued_cmd *qc)
5223 {
5224 struct ata_port *ap = qc->ap;
5225 unsigned int tag;
5226
5227 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5228
5229 qc->flags = 0;
5230 tag = qc->tag;
5231 if (likely(ata_tag_valid(tag))) {
5232 qc->tag = ATA_TAG_POISON;
5233 clear_bit(tag, &ap->qc_allocated);
5234 }
5235 }
5236
5237 void __ata_qc_complete(struct ata_queued_cmd *qc)
5238 {
5239 struct ata_port *ap = qc->ap;
5240 struct ata_link *link = qc->dev->link;
5241
5242 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5243 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5244
5245 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5246 ata_sg_clean(qc);
5247
5248 /* command should be marked inactive atomically with qc completion */
5249 if (qc->tf.protocol == ATA_PROT_NCQ)
5250 link->sactive &= ~(1 << qc->tag);
5251 else
5252 link->active_tag = ATA_TAG_POISON;
5253
5254 /* atapi: mark qc as inactive to prevent the interrupt handler
5255 * from completing the command twice later, before the error handler
5256 * is called. (when rc != 0 and atapi request sense is needed)
5257 */
5258 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5259 ap->qc_active &= ~(1 << qc->tag);
5260
5261 /* call completion callback */
5262 qc->complete_fn(qc);
5263 }
5264
5265 static void fill_result_tf(struct ata_queued_cmd *qc)
5266 {
5267 struct ata_port *ap = qc->ap;
5268
5269 qc->result_tf.flags = qc->tf.flags;
5270 ap->ops->tf_read(ap, &qc->result_tf);
5271 }
5272
5273 /**
5274 * ata_qc_complete - Complete an active ATA command
5275 * @qc: Command to complete
5276 * @err_mask: ATA Status register contents
5277 *
5278 * Indicate to the mid and upper layers that an ATA
5279 * command has completed, with either an ok or not-ok status.
5280 *
5281 * LOCKING:
5282 * spin_lock_irqsave(host lock)
5283 */
5284 void ata_qc_complete(struct ata_queued_cmd *qc)
5285 {
5286 struct ata_port *ap = qc->ap;
5287
5288 /* XXX: New EH and old EH use different mechanisms to
5289 * synchronize EH with regular execution path.
5290 *
5291 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5292 * Normal execution path is responsible for not accessing a
5293 * failed qc. libata core enforces the rule by returning NULL
5294 * from ata_qc_from_tag() for failed qcs.
5295 *
5296 * Old EH depends on ata_qc_complete() nullifying completion
5297 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5298 * not synchronize with interrupt handler. Only PIO task is
5299 * taken care of.
5300 */
5301 if (ap->ops->error_handler) {
5302 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5303
5304 if (unlikely(qc->err_mask))
5305 qc->flags |= ATA_QCFLAG_FAILED;
5306
5307 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5308 if (!ata_tag_internal(qc->tag)) {
5309 /* always fill result TF for failed qc */
5310 fill_result_tf(qc);
5311 ata_qc_schedule_eh(qc);
5312 return;
5313 }
5314 }
5315
5316 /* read result TF if requested */
5317 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5318 fill_result_tf(qc);
5319
5320 __ata_qc_complete(qc);
5321 } else {
5322 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5323 return;
5324
5325 /* read result TF if failed or requested */
5326 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5327 fill_result_tf(qc);
5328
5329 __ata_qc_complete(qc);
5330 }
5331 }
5332
5333 /**
5334 * ata_qc_complete_multiple - Complete multiple qcs successfully
5335 * @ap: port in question
5336 * @qc_active: new qc_active mask
5337 * @finish_qc: LLDD callback invoked before completing a qc
5338 *
5339 * Complete in-flight commands. This functions is meant to be
5340 * called from low-level driver's interrupt routine to complete
5341 * requests normally. ap->qc_active and @qc_active is compared
5342 * and commands are completed accordingly.
5343 *
5344 * LOCKING:
5345 * spin_lock_irqsave(host lock)
5346 *
5347 * RETURNS:
5348 * Number of completed commands on success, -errno otherwise.
5349 */
5350 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5351 void (*finish_qc)(struct ata_queued_cmd *))
5352 {
5353 int nr_done = 0;
5354 u32 done_mask;
5355 int i;
5356
5357 done_mask = ap->qc_active ^ qc_active;
5358
5359 if (unlikely(done_mask & qc_active)) {
5360 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5361 "(%08x->%08x)\n", ap->qc_active, qc_active);
5362 return -EINVAL;
5363 }
5364
5365 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5366 struct ata_queued_cmd *qc;
5367
5368 if (!(done_mask & (1 << i)))
5369 continue;
5370
5371 if ((qc = ata_qc_from_tag(ap, i))) {
5372 if (finish_qc)
5373 finish_qc(qc);
5374 ata_qc_complete(qc);
5375 nr_done++;
5376 }
5377 }
5378
5379 return nr_done;
5380 }
5381
5382 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5383 {
5384 struct ata_port *ap = qc->ap;
5385
5386 switch (qc->tf.protocol) {
5387 case ATA_PROT_NCQ:
5388 case ATA_PROT_DMA:
5389 case ATA_PROT_ATAPI_DMA:
5390 return 1;
5391
5392 case ATA_PROT_ATAPI:
5393 case ATA_PROT_PIO:
5394 if (ap->flags & ATA_FLAG_PIO_DMA)
5395 return 1;
5396
5397 /* fall through */
5398
5399 default:
5400 return 0;
5401 }
5402
5403 /* never reached */
5404 }
5405
5406 /**
5407 * ata_qc_issue - issue taskfile to device
5408 * @qc: command to issue to device
5409 *
5410 * Prepare an ATA command to submission to device.
5411 * This includes mapping the data into a DMA-able
5412 * area, filling in the S/G table, and finally
5413 * writing the taskfile to hardware, starting the command.
5414 *
5415 * LOCKING:
5416 * spin_lock_irqsave(host lock)
5417 */
5418 void ata_qc_issue(struct ata_queued_cmd *qc)
5419 {
5420 struct ata_port *ap = qc->ap;
5421 struct ata_link *link = qc->dev->link;
5422
5423 /* Make sure only one non-NCQ command is outstanding. The
5424 * check is skipped for old EH because it reuses active qc to
5425 * request ATAPI sense.
5426 */
5427 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5428
5429 if (qc->tf.protocol == ATA_PROT_NCQ) {
5430 WARN_ON(link->sactive & (1 << qc->tag));
5431 link->sactive |= 1 << qc->tag;
5432 } else {
5433 WARN_ON(link->sactive);
5434 link->active_tag = qc->tag;
5435 }
5436
5437 qc->flags |= ATA_QCFLAG_ACTIVE;
5438 ap->qc_active |= 1 << qc->tag;
5439
5440 if (ata_should_dma_map(qc)) {
5441 if (qc->flags & ATA_QCFLAG_SG) {
5442 if (ata_sg_setup(qc))
5443 goto sg_err;
5444 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5445 if (ata_sg_setup_one(qc))
5446 goto sg_err;
5447 }
5448 } else {
5449 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5450 }
5451
5452 ap->ops->qc_prep(qc);
5453
5454 qc->err_mask |= ap->ops->qc_issue(qc);
5455 if (unlikely(qc->err_mask))
5456 goto err;
5457 return;
5458
5459 sg_err:
5460 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5461 qc->err_mask |= AC_ERR_SYSTEM;
5462 err:
5463 ata_qc_complete(qc);
5464 }
5465
5466 /**
5467 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5468 * @qc: command to issue to device
5469 *
5470 * Using various libata functions and hooks, this function
5471 * starts an ATA command. ATA commands are grouped into
5472 * classes called "protocols", and issuing each type of protocol
5473 * is slightly different.
5474 *
5475 * May be used as the qc_issue() entry in ata_port_operations.
5476 *
5477 * LOCKING:
5478 * spin_lock_irqsave(host lock)
5479 *
5480 * RETURNS:
5481 * Zero on success, AC_ERR_* mask on failure
5482 */
5483
5484 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5485 {
5486 struct ata_port *ap = qc->ap;
5487
5488 /* Use polling pio if the LLD doesn't handle
5489 * interrupt driven pio and atapi CDB interrupt.
5490 */
5491 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5492 switch (qc->tf.protocol) {
5493 case ATA_PROT_PIO:
5494 case ATA_PROT_NODATA:
5495 case ATA_PROT_ATAPI:
5496 case ATA_PROT_ATAPI_NODATA:
5497 qc->tf.flags |= ATA_TFLAG_POLLING;
5498 break;
5499 case ATA_PROT_ATAPI_DMA:
5500 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5501 /* see ata_dma_blacklisted() */
5502 BUG();
5503 break;
5504 default:
5505 break;
5506 }
5507 }
5508
5509 /* select the device */
5510 ata_dev_select(ap, qc->dev->devno, 1, 0);
5511
5512 /* start the command */
5513 switch (qc->tf.protocol) {
5514 case ATA_PROT_NODATA:
5515 if (qc->tf.flags & ATA_TFLAG_POLLING)
5516 ata_qc_set_polling(qc);
5517
5518 ata_tf_to_host(ap, &qc->tf);
5519 ap->hsm_task_state = HSM_ST_LAST;
5520
5521 if (qc->tf.flags & ATA_TFLAG_POLLING)
5522 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5523
5524 break;
5525
5526 case ATA_PROT_DMA:
5527 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5528
5529 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5530 ap->ops->bmdma_setup(qc); /* set up bmdma */
5531 ap->ops->bmdma_start(qc); /* initiate bmdma */
5532 ap->hsm_task_state = HSM_ST_LAST;
5533 break;
5534
5535 case ATA_PROT_PIO:
5536 if (qc->tf.flags & ATA_TFLAG_POLLING)
5537 ata_qc_set_polling(qc);
5538
5539 ata_tf_to_host(ap, &qc->tf);
5540
5541 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5542 /* PIO data out protocol */
5543 ap->hsm_task_state = HSM_ST_FIRST;
5544 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5545
5546 /* always send first data block using
5547 * the ata_pio_task() codepath.
5548 */
5549 } else {
5550 /* PIO data in protocol */
5551 ap->hsm_task_state = HSM_ST;
5552
5553 if (qc->tf.flags & ATA_TFLAG_POLLING)
5554 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5555
5556 /* if polling, ata_pio_task() handles the rest.
5557 * otherwise, interrupt handler takes over from here.
5558 */
5559 }
5560
5561 break;
5562
5563 case ATA_PROT_ATAPI:
5564 case ATA_PROT_ATAPI_NODATA:
5565 if (qc->tf.flags & ATA_TFLAG_POLLING)
5566 ata_qc_set_polling(qc);
5567
5568 ata_tf_to_host(ap, &qc->tf);
5569
5570 ap->hsm_task_state = HSM_ST_FIRST;
5571
5572 /* send cdb by polling if no cdb interrupt */
5573 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5574 (qc->tf.flags & ATA_TFLAG_POLLING))
5575 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5576 break;
5577
5578 case ATA_PROT_ATAPI_DMA:
5579 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5580
5581 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5582 ap->ops->bmdma_setup(qc); /* set up bmdma */
5583 ap->hsm_task_state = HSM_ST_FIRST;
5584
5585 /* send cdb by polling if no cdb interrupt */
5586 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5587 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5588 break;
5589
5590 default:
5591 WARN_ON(1);
5592 return AC_ERR_SYSTEM;
5593 }
5594
5595 return 0;
5596 }
5597
5598 /**
5599 * ata_host_intr - Handle host interrupt for given (port, task)
5600 * @ap: Port on which interrupt arrived (possibly...)
5601 * @qc: Taskfile currently active in engine
5602 *
5603 * Handle host interrupt for given queued command. Currently,
5604 * only DMA interrupts are handled. All other commands are
5605 * handled via polling with interrupts disabled (nIEN bit).
5606 *
5607 * LOCKING:
5608 * spin_lock_irqsave(host lock)
5609 *
5610 * RETURNS:
5611 * One if interrupt was handled, zero if not (shared irq).
5612 */
5613
5614 inline unsigned int ata_host_intr (struct ata_port *ap,
5615 struct ata_queued_cmd *qc)
5616 {
5617 struct ata_eh_info *ehi = &ap->link.eh_info;
5618 u8 status, host_stat = 0;
5619
5620 VPRINTK("ata%u: protocol %d task_state %d\n",
5621 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5622
5623 /* Check whether we are expecting interrupt in this state */
5624 switch (ap->hsm_task_state) {
5625 case HSM_ST_FIRST:
5626 /* Some pre-ATAPI-4 devices assert INTRQ
5627 * at this state when ready to receive CDB.
5628 */
5629
5630 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
5631 * The flag was turned on only for atapi devices.
5632 * No need to check is_atapi_taskfile(&qc->tf) again.
5633 */
5634 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5635 goto idle_irq;
5636 break;
5637 case HSM_ST_LAST:
5638 if (qc->tf.protocol == ATA_PROT_DMA ||
5639 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5640 /* check status of DMA engine */
5641 host_stat = ap->ops->bmdma_status(ap);
5642 VPRINTK("ata%u: host_stat 0x%X\n",
5643 ap->print_id, host_stat);
5644
5645 /* if it's not our irq... */
5646 if (!(host_stat & ATA_DMA_INTR))
5647 goto idle_irq;
5648
5649 /* before we do anything else, clear DMA-Start bit */
5650 ap->ops->bmdma_stop(qc);
5651
5652 if (unlikely(host_stat & ATA_DMA_ERR)) {
5653 /* error when transfering data to/from memory */
5654 qc->err_mask |= AC_ERR_HOST_BUS;
5655 ap->hsm_task_state = HSM_ST_ERR;
5656 }
5657 }
5658 break;
5659 case HSM_ST:
5660 break;
5661 default:
5662 goto idle_irq;
5663 }
5664
5665 /* check altstatus */
5666 status = ata_altstatus(ap);
5667 if (status & ATA_BUSY)
5668 goto idle_irq;
5669
5670 /* check main status, clearing INTRQ */
5671 status = ata_chk_status(ap);
5672 if (unlikely(status & ATA_BUSY))
5673 goto idle_irq;
5674
5675 /* ack bmdma irq events */
5676 ap->ops->irq_clear(ap);
5677
5678 ata_hsm_move(ap, qc, status, 0);
5679
5680 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5681 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5682 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5683
5684 return 1; /* irq handled */
5685
5686 idle_irq:
5687 ap->stats.idle_irq++;
5688
5689 #ifdef ATA_IRQ_TRAP
5690 if ((ap->stats.idle_irq % 1000) == 0) {
5691 ap->ops->irq_ack(ap, 0); /* debug trap */
5692 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5693 return 1;
5694 }
5695 #endif
5696 return 0; /* irq not handled */
5697 }
5698
5699 /**
5700 * ata_interrupt - Default ATA host interrupt handler
5701 * @irq: irq line (unused)
5702 * @dev_instance: pointer to our ata_host information structure
5703 *
5704 * Default interrupt handler for PCI IDE devices. Calls
5705 * ata_host_intr() for each port that is not disabled.
5706 *
5707 * LOCKING:
5708 * Obtains host lock during operation.
5709 *
5710 * RETURNS:
5711 * IRQ_NONE or IRQ_HANDLED.
5712 */
5713
5714 irqreturn_t ata_interrupt (int irq, void *dev_instance)
5715 {
5716 struct ata_host *host = dev_instance;
5717 unsigned int i;
5718 unsigned int handled = 0;
5719 unsigned long flags;
5720
5721 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5722 spin_lock_irqsave(&host->lock, flags);
5723
5724 for (i = 0; i < host->n_ports; i++) {
5725 struct ata_port *ap;
5726
5727 ap = host->ports[i];
5728 if (ap &&
5729 !(ap->flags & ATA_FLAG_DISABLED)) {
5730 struct ata_queued_cmd *qc;
5731
5732 qc = ata_qc_from_tag(ap, ap->link.active_tag);
5733 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5734 (qc->flags & ATA_QCFLAG_ACTIVE))
5735 handled |= ata_host_intr(ap, qc);
5736 }
5737 }
5738
5739 spin_unlock_irqrestore(&host->lock, flags);
5740
5741 return IRQ_RETVAL(handled);
5742 }
5743
5744 /**
5745 * sata_scr_valid - test whether SCRs are accessible
5746 * @link: ATA link to test SCR accessibility for
5747 *
5748 * Test whether SCRs are accessible for @link.
5749 *
5750 * LOCKING:
5751 * None.
5752 *
5753 * RETURNS:
5754 * 1 if SCRs are accessible, 0 otherwise.
5755 */
5756 int sata_scr_valid(struct ata_link *link)
5757 {
5758 struct ata_port *ap = link->ap;
5759
5760 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5761 }
5762
5763 /**
5764 * sata_scr_read - read SCR register of the specified port
5765 * @link: ATA link to read SCR for
5766 * @reg: SCR to read
5767 * @val: Place to store read value
5768 *
5769 * Read SCR register @reg of @link into *@val. This function is
5770 * guaranteed to succeed if the cable type of the port is SATA
5771 * and the port implements ->scr_read.
5772 *
5773 * LOCKING:
5774 * None.
5775 *
5776 * RETURNS:
5777 * 0 on success, negative errno on failure.
5778 */
5779 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5780 {
5781 struct ata_port *ap = link->ap;
5782
5783 if (sata_scr_valid(link))
5784 return ap->ops->scr_read(ap, reg, val);
5785 return -EOPNOTSUPP;
5786 }
5787
5788 /**
5789 * sata_scr_write - write SCR register of the specified port
5790 * @link: ATA link to write SCR for
5791 * @reg: SCR to write
5792 * @val: value to write
5793 *
5794 * Write @val to SCR register @reg of @link. This function is
5795 * guaranteed to succeed if the cable type of the port is SATA
5796 * and the port implements ->scr_read.
5797 *
5798 * LOCKING:
5799 * None.
5800 *
5801 * RETURNS:
5802 * 0 on success, negative errno on failure.
5803 */
5804 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5805 {
5806 struct ata_port *ap = link->ap;
5807
5808 if (sata_scr_valid(link))
5809 return ap->ops->scr_write(ap, reg, val);
5810 return -EOPNOTSUPP;
5811 }
5812
5813 /**
5814 * sata_scr_write_flush - write SCR register of the specified port and flush
5815 * @link: ATA link to write SCR for
5816 * @reg: SCR to write
5817 * @val: value to write
5818 *
5819 * This function is identical to sata_scr_write() except that this
5820 * function performs flush after writing to the register.
5821 *
5822 * LOCKING:
5823 * None.
5824 *
5825 * RETURNS:
5826 * 0 on success, negative errno on failure.
5827 */
5828 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5829 {
5830 struct ata_port *ap = link->ap;
5831 int rc;
5832
5833 if (sata_scr_valid(link)) {
5834 rc = ap->ops->scr_write(ap, reg, val);
5835 if (rc == 0)
5836 rc = ap->ops->scr_read(ap, reg, &val);
5837 return rc;
5838 }
5839 return -EOPNOTSUPP;
5840 }
5841
5842 /**
5843 * ata_link_online - test whether the given link is online
5844 * @link: ATA link to test
5845 *
5846 * Test whether @link is online. Note that this function returns
5847 * 0 if online status of @link cannot be obtained, so
5848 * ata_link_online(link) != !ata_link_offline(link).
5849 *
5850 * LOCKING:
5851 * None.
5852 *
5853 * RETURNS:
5854 * 1 if the port online status is available and online.
5855 */
5856 int ata_link_online(struct ata_link *link)
5857 {
5858 u32 sstatus;
5859
5860 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5861 (sstatus & 0xf) == 0x3)
5862 return 1;
5863 return 0;
5864 }
5865
5866 /**
5867 * ata_link_offline - test whether the given link is offline
5868 * @link: ATA link to test
5869 *
5870 * Test whether @link is offline. Note that this function
5871 * returns 0 if offline status of @link cannot be obtained, so
5872 * ata_link_online(link) != !ata_link_offline(link).
5873 *
5874 * LOCKING:
5875 * None.
5876 *
5877 * RETURNS:
5878 * 1 if the port offline status is available and offline.
5879 */
5880 int ata_link_offline(struct ata_link *link)
5881 {
5882 u32 sstatus;
5883
5884 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5885 (sstatus & 0xf) != 0x3)
5886 return 1;
5887 return 0;
5888 }
5889
5890 int ata_flush_cache(struct ata_device *dev)
5891 {
5892 unsigned int err_mask;
5893 u8 cmd;
5894
5895 if (!ata_try_flush_cache(dev))
5896 return 0;
5897
5898 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5899 cmd = ATA_CMD_FLUSH_EXT;
5900 else
5901 cmd = ATA_CMD_FLUSH;
5902
5903 err_mask = ata_do_simple_cmd(dev, cmd);
5904 if (err_mask) {
5905 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5906 return -EIO;
5907 }
5908
5909 return 0;
5910 }
5911
5912 #ifdef CONFIG_PM
5913 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5914 unsigned int action, unsigned int ehi_flags,
5915 int wait)
5916 {
5917 unsigned long flags;
5918 int i, rc;
5919
5920 for (i = 0; i < host->n_ports; i++) {
5921 struct ata_port *ap = host->ports[i];
5922 struct ata_link *link;
5923
5924 /* Previous resume operation might still be in
5925 * progress. Wait for PM_PENDING to clear.
5926 */
5927 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5928 ata_port_wait_eh(ap);
5929 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5930 }
5931
5932 /* request PM ops to EH */
5933 spin_lock_irqsave(ap->lock, flags);
5934
5935 ap->pm_mesg = mesg;
5936 if (wait) {
5937 rc = 0;
5938 ap->pm_result = &rc;
5939 }
5940
5941 ap->pflags |= ATA_PFLAG_PM_PENDING;
5942 __ata_port_for_each_link(link, ap) {
5943 link->eh_info.action |= action;
5944 link->eh_info.flags |= ehi_flags;
5945 }
5946
5947 ata_port_schedule_eh(ap);
5948
5949 spin_unlock_irqrestore(ap->lock, flags);
5950
5951 /* wait and check result */
5952 if (wait) {
5953 ata_port_wait_eh(ap);
5954 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5955 if (rc)
5956 return rc;
5957 }
5958 }
5959
5960 return 0;
5961 }
5962
5963 /**
5964 * ata_host_suspend - suspend host
5965 * @host: host to suspend
5966 * @mesg: PM message
5967 *
5968 * Suspend @host. Actual operation is performed by EH. This
5969 * function requests EH to perform PM operations and waits for EH
5970 * to finish.
5971 *
5972 * LOCKING:
5973 * Kernel thread context (may sleep).
5974 *
5975 * RETURNS:
5976 * 0 on success, -errno on failure.
5977 */
5978 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5979 {
5980 int rc;
5981
5982 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5983 if (rc == 0)
5984 host->dev->power.power_state = mesg;
5985 return rc;
5986 }
5987
5988 /**
5989 * ata_host_resume - resume host
5990 * @host: host to resume
5991 *
5992 * Resume @host. Actual operation is performed by EH. This
5993 * function requests EH to perform PM operations and returns.
5994 * Note that all resume operations are performed parallely.
5995 *
5996 * LOCKING:
5997 * Kernel thread context (may sleep).
5998 */
5999 void ata_host_resume(struct ata_host *host)
6000 {
6001 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6002 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6003 host->dev->power.power_state = PMSG_ON;
6004 }
6005 #endif
6006
6007 /**
6008 * ata_port_start - Set port up for dma.
6009 * @ap: Port to initialize
6010 *
6011 * Called just after data structures for each port are
6012 * initialized. Allocates space for PRD table.
6013 *
6014 * May be used as the port_start() entry in ata_port_operations.
6015 *
6016 * LOCKING:
6017 * Inherited from caller.
6018 */
6019 int ata_port_start(struct ata_port *ap)
6020 {
6021 struct device *dev = ap->dev;
6022 int rc;
6023
6024 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6025 GFP_KERNEL);
6026 if (!ap->prd)
6027 return -ENOMEM;
6028
6029 rc = ata_pad_alloc(ap, dev);
6030 if (rc)
6031 return rc;
6032
6033 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6034 (unsigned long long)ap->prd_dma);
6035 return 0;
6036 }
6037
6038 /**
6039 * ata_dev_init - Initialize an ata_device structure
6040 * @dev: Device structure to initialize
6041 *
6042 * Initialize @dev in preparation for probing.
6043 *
6044 * LOCKING:
6045 * Inherited from caller.
6046 */
6047 void ata_dev_init(struct ata_device *dev)
6048 {
6049 struct ata_link *link = dev->link;
6050 struct ata_port *ap = link->ap;
6051 unsigned long flags;
6052
6053 /* SATA spd limit is bound to the first device */
6054 link->sata_spd_limit = link->hw_sata_spd_limit;
6055 link->sata_spd = 0;
6056
6057 /* High bits of dev->flags are used to record warm plug
6058 * requests which occur asynchronously. Synchronize using
6059 * host lock.
6060 */
6061 spin_lock_irqsave(ap->lock, flags);
6062 dev->flags &= ~ATA_DFLAG_INIT_MASK;
6063 dev->horkage = 0;
6064 spin_unlock_irqrestore(ap->lock, flags);
6065
6066 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6067 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6068 dev->pio_mask = UINT_MAX;
6069 dev->mwdma_mask = UINT_MAX;
6070 dev->udma_mask = UINT_MAX;
6071 }
6072
6073 /**
6074 * ata_link_init - Initialize an ata_link structure
6075 * @ap: ATA port link is attached to
6076 * @link: Link structure to initialize
6077 * @pmp: Port multiplier port number
6078 *
6079 * Initialize @link.
6080 *
6081 * LOCKING:
6082 * Kernel thread context (may sleep)
6083 */
6084 static void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
6085 {
6086 int i;
6087
6088 /* clear everything except for devices */
6089 memset(link, 0, offsetof(struct ata_link, device[0]));
6090
6091 link->ap = ap;
6092 link->pmp = pmp;
6093 link->active_tag = ATA_TAG_POISON;
6094 link->hw_sata_spd_limit = UINT_MAX;
6095
6096 /* can't use iterator, ap isn't initialized yet */
6097 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6098 struct ata_device *dev = &link->device[i];
6099
6100 dev->link = link;
6101 dev->devno = dev - link->device;
6102 ata_dev_init(dev);
6103 }
6104 }
6105
6106 /**
6107 * sata_link_init_spd - Initialize link->sata_spd_limit
6108 * @link: Link to configure sata_spd_limit for
6109 *
6110 * Initialize @link->[hw_]sata_spd_limit to the currently
6111 * configured value.
6112 *
6113 * LOCKING:
6114 * Kernel thread context (may sleep).
6115 *
6116 * RETURNS:
6117 * 0 on success, -errno on failure.
6118 */
6119 static int sata_link_init_spd(struct ata_link *link)
6120 {
6121 u32 scontrol, spd;
6122 int rc;
6123
6124 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6125 if (rc)
6126 return rc;
6127
6128 spd = (scontrol >> 4) & 0xf;
6129 if (spd)
6130 link->hw_sata_spd_limit &= (1 << spd) - 1;
6131
6132 link->sata_spd_limit = link->hw_sata_spd_limit;
6133
6134 return 0;
6135 }
6136
6137 /**
6138 * ata_port_alloc - allocate and initialize basic ATA port resources
6139 * @host: ATA host this allocated port belongs to
6140 *
6141 * Allocate and initialize basic ATA port resources.
6142 *
6143 * RETURNS:
6144 * Allocate ATA port on success, NULL on failure.
6145 *
6146 * LOCKING:
6147 * Inherited from calling layer (may sleep).
6148 */
6149 struct ata_port *ata_port_alloc(struct ata_host *host)
6150 {
6151 struct ata_port *ap;
6152
6153 DPRINTK("ENTER\n");
6154
6155 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6156 if (!ap)
6157 return NULL;
6158
6159 ap->pflags |= ATA_PFLAG_INITIALIZING;
6160 ap->lock = &host->lock;
6161 ap->flags = ATA_FLAG_DISABLED;
6162 ap->print_id = -1;
6163 ap->ctl = ATA_DEVCTL_OBS;
6164 ap->host = host;
6165 ap->dev = host->dev;
6166 ap->last_ctl = 0xFF;
6167
6168 #if defined(ATA_VERBOSE_DEBUG)
6169 /* turn on all debugging levels */
6170 ap->msg_enable = 0x00FF;
6171 #elif defined(ATA_DEBUG)
6172 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6173 #else
6174 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6175 #endif
6176
6177 INIT_DELAYED_WORK(&ap->port_task, NULL);
6178 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6179 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6180 INIT_LIST_HEAD(&ap->eh_done_q);
6181 init_waitqueue_head(&ap->eh_wait_q);
6182 init_timer_deferrable(&ap->fastdrain_timer);
6183 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6184 ap->fastdrain_timer.data = (unsigned long)ap;
6185
6186 ap->cbl = ATA_CBL_NONE;
6187
6188 ata_link_init(ap, &ap->link, 0);
6189
6190 #ifdef ATA_IRQ_TRAP
6191 ap->stats.unhandled_irq = 1;
6192 ap->stats.idle_irq = 1;
6193 #endif
6194 return ap;
6195 }
6196
6197 static void ata_host_release(struct device *gendev, void *res)
6198 {
6199 struct ata_host *host = dev_get_drvdata(gendev);
6200 int i;
6201
6202 for (i = 0; i < host->n_ports; i++) {
6203 struct ata_port *ap = host->ports[i];
6204
6205 if (!ap)
6206 continue;
6207
6208 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6209 ap->ops->port_stop(ap);
6210 }
6211
6212 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6213 host->ops->host_stop(host);
6214
6215 for (i = 0; i < host->n_ports; i++) {
6216 struct ata_port *ap = host->ports[i];
6217
6218 if (!ap)
6219 continue;
6220
6221 if (ap->scsi_host)
6222 scsi_host_put(ap->scsi_host);
6223
6224 kfree(ap);
6225 host->ports[i] = NULL;
6226 }
6227
6228 dev_set_drvdata(gendev, NULL);
6229 }
6230
6231 /**
6232 * ata_host_alloc - allocate and init basic ATA host resources
6233 * @dev: generic device this host is associated with
6234 * @max_ports: maximum number of ATA ports associated with this host
6235 *
6236 * Allocate and initialize basic ATA host resources. LLD calls
6237 * this function to allocate a host, initializes it fully and
6238 * attaches it using ata_host_register().
6239 *
6240 * @max_ports ports are allocated and host->n_ports is
6241 * initialized to @max_ports. The caller is allowed to decrease
6242 * host->n_ports before calling ata_host_register(). The unused
6243 * ports will be automatically freed on registration.
6244 *
6245 * RETURNS:
6246 * Allocate ATA host on success, NULL on failure.
6247 *
6248 * LOCKING:
6249 * Inherited from calling layer (may sleep).
6250 */
6251 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6252 {
6253 struct ata_host *host;
6254 size_t sz;
6255 int i;
6256
6257 DPRINTK("ENTER\n");
6258
6259 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6260 return NULL;
6261
6262 /* alloc a container for our list of ATA ports (buses) */
6263 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6264 /* alloc a container for our list of ATA ports (buses) */
6265 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6266 if (!host)
6267 goto err_out;
6268
6269 devres_add(dev, host);
6270 dev_set_drvdata(dev, host);
6271
6272 spin_lock_init(&host->lock);
6273 host->dev = dev;
6274 host->n_ports = max_ports;
6275
6276 /* allocate ports bound to this host */
6277 for (i = 0; i < max_ports; i++) {
6278 struct ata_port *ap;
6279
6280 ap = ata_port_alloc(host);
6281 if (!ap)
6282 goto err_out;
6283
6284 ap->port_no = i;
6285 host->ports[i] = ap;
6286 }
6287
6288 devres_remove_group(dev, NULL);
6289 return host;
6290
6291 err_out:
6292 devres_release_group(dev, NULL);
6293 return NULL;
6294 }
6295
6296 /**
6297 * ata_host_alloc_pinfo - alloc host and init with port_info array
6298 * @dev: generic device this host is associated with
6299 * @ppi: array of ATA port_info to initialize host with
6300 * @n_ports: number of ATA ports attached to this host
6301 *
6302 * Allocate ATA host and initialize with info from @ppi. If NULL
6303 * terminated, @ppi may contain fewer entries than @n_ports. The
6304 * last entry will be used for the remaining ports.
6305 *
6306 * RETURNS:
6307 * Allocate ATA host on success, NULL on failure.
6308 *
6309 * LOCKING:
6310 * Inherited from calling layer (may sleep).
6311 */
6312 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6313 const struct ata_port_info * const * ppi,
6314 int n_ports)
6315 {
6316 const struct ata_port_info *pi;
6317 struct ata_host *host;
6318 int i, j;
6319
6320 host = ata_host_alloc(dev, n_ports);
6321 if (!host)
6322 return NULL;
6323
6324 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6325 struct ata_port *ap = host->ports[i];
6326
6327 if (ppi[j])
6328 pi = ppi[j++];
6329
6330 ap->pio_mask = pi->pio_mask;
6331 ap->mwdma_mask = pi->mwdma_mask;
6332 ap->udma_mask = pi->udma_mask;
6333 ap->flags |= pi->flags;
6334 ap->link.flags |= pi->link_flags;
6335 ap->ops = pi->port_ops;
6336
6337 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6338 host->ops = pi->port_ops;
6339 if (!host->private_data && pi->private_data)
6340 host->private_data = pi->private_data;
6341 }
6342
6343 return host;
6344 }
6345
6346 /**
6347 * ata_host_start - start and freeze ports of an ATA host
6348 * @host: ATA host to start ports for
6349 *
6350 * Start and then freeze ports of @host. Started status is
6351 * recorded in host->flags, so this function can be called
6352 * multiple times. Ports are guaranteed to get started only
6353 * once. If host->ops isn't initialized yet, its set to the
6354 * first non-dummy port ops.
6355 *
6356 * LOCKING:
6357 * Inherited from calling layer (may sleep).
6358 *
6359 * RETURNS:
6360 * 0 if all ports are started successfully, -errno otherwise.
6361 */
6362 int ata_host_start(struct ata_host *host)
6363 {
6364 int i, rc;
6365
6366 if (host->flags & ATA_HOST_STARTED)
6367 return 0;
6368
6369 for (i = 0; i < host->n_ports; i++) {
6370 struct ata_port *ap = host->ports[i];
6371
6372 if (!host->ops && !ata_port_is_dummy(ap))
6373 host->ops = ap->ops;
6374
6375 if (ap->ops->port_start) {
6376 rc = ap->ops->port_start(ap);
6377 if (rc) {
6378 ata_port_printk(ap, KERN_ERR, "failed to "
6379 "start port (errno=%d)\n", rc);
6380 goto err_out;
6381 }
6382 }
6383
6384 ata_eh_freeze_port(ap);
6385 }
6386
6387 host->flags |= ATA_HOST_STARTED;
6388 return 0;
6389
6390 err_out:
6391 while (--i >= 0) {
6392 struct ata_port *ap = host->ports[i];
6393
6394 if (ap->ops->port_stop)
6395 ap->ops->port_stop(ap);
6396 }
6397 return rc;
6398 }
6399
6400 /**
6401 * ata_sas_host_init - Initialize a host struct
6402 * @host: host to initialize
6403 * @dev: device host is attached to
6404 * @flags: host flags
6405 * @ops: port_ops
6406 *
6407 * LOCKING:
6408 * PCI/etc. bus probe sem.
6409 *
6410 */
6411 /* KILLME - the only user left is ipr */
6412 void ata_host_init(struct ata_host *host, struct device *dev,
6413 unsigned long flags, const struct ata_port_operations *ops)
6414 {
6415 spin_lock_init(&host->lock);
6416 host->dev = dev;
6417 host->flags = flags;
6418 host->ops = ops;
6419 }
6420
6421 /**
6422 * ata_host_register - register initialized ATA host
6423 * @host: ATA host to register
6424 * @sht: template for SCSI host
6425 *
6426 * Register initialized ATA host. @host is allocated using
6427 * ata_host_alloc() and fully initialized by LLD. This function
6428 * starts ports, registers @host with ATA and SCSI layers and
6429 * probe registered devices.
6430 *
6431 * LOCKING:
6432 * Inherited from calling layer (may sleep).
6433 *
6434 * RETURNS:
6435 * 0 on success, -errno otherwise.
6436 */
6437 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6438 {
6439 int i, rc;
6440
6441 /* host must have been started */
6442 if (!(host->flags & ATA_HOST_STARTED)) {
6443 dev_printk(KERN_ERR, host->dev,
6444 "BUG: trying to register unstarted host\n");
6445 WARN_ON(1);
6446 return -EINVAL;
6447 }
6448
6449 /* Blow away unused ports. This happens when LLD can't
6450 * determine the exact number of ports to allocate at
6451 * allocation time.
6452 */
6453 for (i = host->n_ports; host->ports[i]; i++)
6454 kfree(host->ports[i]);
6455
6456 /* give ports names and add SCSI hosts */
6457 for (i = 0; i < host->n_ports; i++)
6458 host->ports[i]->print_id = ata_print_id++;
6459
6460 rc = ata_scsi_add_hosts(host, sht);
6461 if (rc)
6462 return rc;
6463
6464 /* associate with ACPI nodes */
6465 ata_acpi_associate(host);
6466
6467 /* set cable, sata_spd_limit and report */
6468 for (i = 0; i < host->n_ports; i++) {
6469 struct ata_port *ap = host->ports[i];
6470 int irq_line;
6471 unsigned long xfer_mask;
6472
6473 /* set SATA cable type if still unset */
6474 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6475 ap->cbl = ATA_CBL_SATA;
6476
6477 /* init sata_spd_limit to the current value */
6478 sata_link_init_spd(&ap->link);
6479
6480 /* report the secondary IRQ for second channel legacy */
6481 irq_line = host->irq;
6482 if (i == 1 && host->irq2)
6483 irq_line = host->irq2;
6484
6485 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6486 ap->udma_mask);
6487
6488 /* print per-port info to dmesg */
6489 if (!ata_port_is_dummy(ap))
6490 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
6491 "ctl 0x%p bmdma 0x%p irq %d\n",
6492 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6493 ata_mode_string(xfer_mask),
6494 ap->ioaddr.cmd_addr,
6495 ap->ioaddr.ctl_addr,
6496 ap->ioaddr.bmdma_addr,
6497 irq_line);
6498 else
6499 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6500 }
6501
6502 /* perform each probe synchronously */
6503 DPRINTK("probe begin\n");
6504 for (i = 0; i < host->n_ports; i++) {
6505 struct ata_port *ap = host->ports[i];
6506 int rc;
6507
6508 /* probe */
6509 if (ap->ops->error_handler) {
6510 struct ata_eh_info *ehi = &ap->link.eh_info;
6511 unsigned long flags;
6512
6513 ata_port_probe(ap);
6514
6515 /* kick EH for boot probing */
6516 spin_lock_irqsave(ap->lock, flags);
6517
6518 ehi->probe_mask =
6519 (1 << ata_link_max_devices(&ap->link)) - 1;
6520 ehi->action |= ATA_EH_SOFTRESET;
6521 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6522
6523 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6524 ap->pflags |= ATA_PFLAG_LOADING;
6525 ata_port_schedule_eh(ap);
6526
6527 spin_unlock_irqrestore(ap->lock, flags);
6528
6529 /* wait for EH to finish */
6530 ata_port_wait_eh(ap);
6531 } else {
6532 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6533 rc = ata_bus_probe(ap);
6534 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6535
6536 if (rc) {
6537 /* FIXME: do something useful here?
6538 * Current libata behavior will
6539 * tear down everything when
6540 * the module is removed
6541 * or the h/w is unplugged.
6542 */
6543 }
6544 }
6545 }
6546
6547 /* probes are done, now scan each port's disk(s) */
6548 DPRINTK("host probe begin\n");
6549 for (i = 0; i < host->n_ports; i++) {
6550 struct ata_port *ap = host->ports[i];
6551
6552 ata_scsi_scan_host(ap, 1);
6553 }
6554
6555 return 0;
6556 }
6557
6558 /**
6559 * ata_host_activate - start host, request IRQ and register it
6560 * @host: target ATA host
6561 * @irq: IRQ to request
6562 * @irq_handler: irq_handler used when requesting IRQ
6563 * @irq_flags: irq_flags used when requesting IRQ
6564 * @sht: scsi_host_template to use when registering the host
6565 *
6566 * After allocating an ATA host and initializing it, most libata
6567 * LLDs perform three steps to activate the host - start host,
6568 * request IRQ and register it. This helper takes necessasry
6569 * arguments and performs the three steps in one go.
6570 *
6571 * LOCKING:
6572 * Inherited from calling layer (may sleep).
6573 *
6574 * RETURNS:
6575 * 0 on success, -errno otherwise.
6576 */
6577 int ata_host_activate(struct ata_host *host, int irq,
6578 irq_handler_t irq_handler, unsigned long irq_flags,
6579 struct scsi_host_template *sht)
6580 {
6581 int rc;
6582
6583 rc = ata_host_start(host);
6584 if (rc)
6585 return rc;
6586
6587 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6588 dev_driver_string(host->dev), host);
6589 if (rc)
6590 return rc;
6591
6592 /* Used to print device info at probe */
6593 host->irq = irq;
6594
6595 rc = ata_host_register(host, sht);
6596 /* if failed, just free the IRQ and leave ports alone */
6597 if (rc)
6598 devm_free_irq(host->dev, irq, host);
6599
6600 return rc;
6601 }
6602
6603 /**
6604 * ata_port_detach - Detach ATA port in prepration of device removal
6605 * @ap: ATA port to be detached
6606 *
6607 * Detach all ATA devices and the associated SCSI devices of @ap;
6608 * then, remove the associated SCSI host. @ap is guaranteed to
6609 * be quiescent on return from this function.
6610 *
6611 * LOCKING:
6612 * Kernel thread context (may sleep).
6613 */
6614 void ata_port_detach(struct ata_port *ap)
6615 {
6616 unsigned long flags;
6617 struct ata_link *link;
6618 struct ata_device *dev;
6619
6620 if (!ap->ops->error_handler)
6621 goto skip_eh;
6622
6623 /* tell EH we're leaving & flush EH */
6624 spin_lock_irqsave(ap->lock, flags);
6625 ap->pflags |= ATA_PFLAG_UNLOADING;
6626 spin_unlock_irqrestore(ap->lock, flags);
6627
6628 ata_port_wait_eh(ap);
6629
6630 /* EH is now guaranteed to see UNLOADING, so no new device
6631 * will be attached. Disable all existing devices.
6632 */
6633 spin_lock_irqsave(ap->lock, flags);
6634
6635 ata_port_for_each_link(link, ap) {
6636 ata_link_for_each_dev(dev, link)
6637 ata_dev_disable(dev);
6638 }
6639
6640 spin_unlock_irqrestore(ap->lock, flags);
6641
6642 /* Final freeze & EH. All in-flight commands are aborted. EH
6643 * will be skipped and retrials will be terminated with bad
6644 * target.
6645 */
6646 spin_lock_irqsave(ap->lock, flags);
6647 ata_port_freeze(ap); /* won't be thawed */
6648 spin_unlock_irqrestore(ap->lock, flags);
6649
6650 ata_port_wait_eh(ap);
6651 cancel_rearming_delayed_work(&ap->hotplug_task);
6652
6653 skip_eh:
6654 /* remove the associated SCSI host */
6655 scsi_remove_host(ap->scsi_host);
6656 }
6657
6658 /**
6659 * ata_host_detach - Detach all ports of an ATA host
6660 * @host: Host to detach
6661 *
6662 * Detach all ports of @host.
6663 *
6664 * LOCKING:
6665 * Kernel thread context (may sleep).
6666 */
6667 void ata_host_detach(struct ata_host *host)
6668 {
6669 int i;
6670
6671 for (i = 0; i < host->n_ports; i++)
6672 ata_port_detach(host->ports[i]);
6673 }
6674
6675 /**
6676 * ata_std_ports - initialize ioaddr with standard port offsets.
6677 * @ioaddr: IO address structure to be initialized
6678 *
6679 * Utility function which initializes data_addr, error_addr,
6680 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6681 * device_addr, status_addr, and command_addr to standard offsets
6682 * relative to cmd_addr.
6683 *
6684 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6685 */
6686
6687 void ata_std_ports(struct ata_ioports *ioaddr)
6688 {
6689 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6690 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6691 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6692 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6693 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6694 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6695 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6696 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6697 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6698 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6699 }
6700
6701
6702 #ifdef CONFIG_PCI
6703
6704 /**
6705 * ata_pci_remove_one - PCI layer callback for device removal
6706 * @pdev: PCI device that was removed
6707 *
6708 * PCI layer indicates to libata via this hook that hot-unplug or
6709 * module unload event has occurred. Detach all ports. Resource
6710 * release is handled via devres.
6711 *
6712 * LOCKING:
6713 * Inherited from PCI layer (may sleep).
6714 */
6715 void ata_pci_remove_one(struct pci_dev *pdev)
6716 {
6717 struct device *dev = pci_dev_to_dev(pdev);
6718 struct ata_host *host = dev_get_drvdata(dev);
6719
6720 ata_host_detach(host);
6721 }
6722
6723 /* move to PCI subsystem */
6724 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6725 {
6726 unsigned long tmp = 0;
6727
6728 switch (bits->width) {
6729 case 1: {
6730 u8 tmp8 = 0;
6731 pci_read_config_byte(pdev, bits->reg, &tmp8);
6732 tmp = tmp8;
6733 break;
6734 }
6735 case 2: {
6736 u16 tmp16 = 0;
6737 pci_read_config_word(pdev, bits->reg, &tmp16);
6738 tmp = tmp16;
6739 break;
6740 }
6741 case 4: {
6742 u32 tmp32 = 0;
6743 pci_read_config_dword(pdev, bits->reg, &tmp32);
6744 tmp = tmp32;
6745 break;
6746 }
6747
6748 default:
6749 return -EINVAL;
6750 }
6751
6752 tmp &= bits->mask;
6753
6754 return (tmp == bits->val) ? 1 : 0;
6755 }
6756
6757 #ifdef CONFIG_PM
6758 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6759 {
6760 pci_save_state(pdev);
6761 pci_disable_device(pdev);
6762
6763 if (mesg.event == PM_EVENT_SUSPEND)
6764 pci_set_power_state(pdev, PCI_D3hot);
6765 }
6766
6767 int ata_pci_device_do_resume(struct pci_dev *pdev)
6768 {
6769 int rc;
6770
6771 pci_set_power_state(pdev, PCI_D0);
6772 pci_restore_state(pdev);
6773
6774 rc = pcim_enable_device(pdev);
6775 if (rc) {
6776 dev_printk(KERN_ERR, &pdev->dev,
6777 "failed to enable device after resume (%d)\n", rc);
6778 return rc;
6779 }
6780
6781 pci_set_master(pdev);
6782 return 0;
6783 }
6784
6785 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6786 {
6787 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6788 int rc = 0;
6789
6790 rc = ata_host_suspend(host, mesg);
6791 if (rc)
6792 return rc;
6793
6794 ata_pci_device_do_suspend(pdev, mesg);
6795
6796 return 0;
6797 }
6798
6799 int ata_pci_device_resume(struct pci_dev *pdev)
6800 {
6801 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6802 int rc;
6803
6804 rc = ata_pci_device_do_resume(pdev);
6805 if (rc == 0)
6806 ata_host_resume(host);
6807 return rc;
6808 }
6809 #endif /* CONFIG_PM */
6810
6811 #endif /* CONFIG_PCI */
6812
6813
6814 static int __init ata_init(void)
6815 {
6816 ata_probe_timeout *= HZ;
6817 ata_wq = create_workqueue("ata");
6818 if (!ata_wq)
6819 return -ENOMEM;
6820
6821 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6822 if (!ata_aux_wq) {
6823 destroy_workqueue(ata_wq);
6824 return -ENOMEM;
6825 }
6826
6827 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6828 return 0;
6829 }
6830
6831 static void __exit ata_exit(void)
6832 {
6833 destroy_workqueue(ata_wq);
6834 destroy_workqueue(ata_aux_wq);
6835 }
6836
6837 subsys_initcall(ata_init);
6838 module_exit(ata_exit);
6839
6840 static unsigned long ratelimit_time;
6841 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6842
6843 int ata_ratelimit(void)
6844 {
6845 int rc;
6846 unsigned long flags;
6847
6848 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6849
6850 if (time_after(jiffies, ratelimit_time)) {
6851 rc = 1;
6852 ratelimit_time = jiffies + (HZ/5);
6853 } else
6854 rc = 0;
6855
6856 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6857
6858 return rc;
6859 }
6860
6861 /**
6862 * ata_wait_register - wait until register value changes
6863 * @reg: IO-mapped register
6864 * @mask: Mask to apply to read register value
6865 * @val: Wait condition
6866 * @interval_msec: polling interval in milliseconds
6867 * @timeout_msec: timeout in milliseconds
6868 *
6869 * Waiting for some bits of register to change is a common
6870 * operation for ATA controllers. This function reads 32bit LE
6871 * IO-mapped register @reg and tests for the following condition.
6872 *
6873 * (*@reg & mask) != val
6874 *
6875 * If the condition is met, it returns; otherwise, the process is
6876 * repeated after @interval_msec until timeout.
6877 *
6878 * LOCKING:
6879 * Kernel thread context (may sleep)
6880 *
6881 * RETURNS:
6882 * The final register value.
6883 */
6884 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6885 unsigned long interval_msec,
6886 unsigned long timeout_msec)
6887 {
6888 unsigned long timeout;
6889 u32 tmp;
6890
6891 tmp = ioread32(reg);
6892
6893 /* Calculate timeout _after_ the first read to make sure
6894 * preceding writes reach the controller before starting to
6895 * eat away the timeout.
6896 */
6897 timeout = jiffies + (timeout_msec * HZ) / 1000;
6898
6899 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6900 msleep(interval_msec);
6901 tmp = ioread32(reg);
6902 }
6903
6904 return tmp;
6905 }
6906
6907 /*
6908 * Dummy port_ops
6909 */
6910 static void ata_dummy_noret(struct ata_port *ap) { }
6911 static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6912 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6913
6914 static u8 ata_dummy_check_status(struct ata_port *ap)
6915 {
6916 return ATA_DRDY;
6917 }
6918
6919 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6920 {
6921 return AC_ERR_SYSTEM;
6922 }
6923
6924 const struct ata_port_operations ata_dummy_port_ops = {
6925 .port_disable = ata_port_disable,
6926 .check_status = ata_dummy_check_status,
6927 .check_altstatus = ata_dummy_check_status,
6928 .dev_select = ata_noop_dev_select,
6929 .qc_prep = ata_noop_qc_prep,
6930 .qc_issue = ata_dummy_qc_issue,
6931 .freeze = ata_dummy_noret,
6932 .thaw = ata_dummy_noret,
6933 .error_handler = ata_dummy_noret,
6934 .post_internal_cmd = ata_dummy_qc_noret,
6935 .irq_clear = ata_dummy_noret,
6936 .port_start = ata_dummy_ret0,
6937 .port_stop = ata_dummy_noret,
6938 };
6939
6940 const struct ata_port_info ata_dummy_port_info = {
6941 .port_ops = &ata_dummy_port_ops,
6942 };
6943
6944 /*
6945 * libata is essentially a library of internal helper functions for
6946 * low-level ATA host controller drivers. As such, the API/ABI is
6947 * likely to change as new drivers are added and updated.
6948 * Do not depend on ABI/API stability.
6949 */
6950
6951 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6952 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6953 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6954 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6955 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6956 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6957 EXPORT_SYMBOL_GPL(ata_std_ports);
6958 EXPORT_SYMBOL_GPL(ata_host_init);
6959 EXPORT_SYMBOL_GPL(ata_host_alloc);
6960 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6961 EXPORT_SYMBOL_GPL(ata_host_start);
6962 EXPORT_SYMBOL_GPL(ata_host_register);
6963 EXPORT_SYMBOL_GPL(ata_host_activate);
6964 EXPORT_SYMBOL_GPL(ata_host_detach);
6965 EXPORT_SYMBOL_GPL(ata_sg_init);
6966 EXPORT_SYMBOL_GPL(ata_sg_init_one);
6967 EXPORT_SYMBOL_GPL(ata_hsm_move);
6968 EXPORT_SYMBOL_GPL(ata_qc_complete);
6969 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6970 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6971 EXPORT_SYMBOL_GPL(ata_tf_load);
6972 EXPORT_SYMBOL_GPL(ata_tf_read);
6973 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6974 EXPORT_SYMBOL_GPL(ata_std_dev_select);
6975 EXPORT_SYMBOL_GPL(sata_print_link_status);
6976 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6977 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6978 EXPORT_SYMBOL_GPL(ata_check_status);
6979 EXPORT_SYMBOL_GPL(ata_altstatus);
6980 EXPORT_SYMBOL_GPL(ata_exec_command);
6981 EXPORT_SYMBOL_GPL(ata_port_start);
6982 EXPORT_SYMBOL_GPL(ata_sff_port_start);
6983 EXPORT_SYMBOL_GPL(ata_interrupt);
6984 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6985 EXPORT_SYMBOL_GPL(ata_data_xfer);
6986 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
6987 EXPORT_SYMBOL_GPL(ata_qc_prep);
6988 EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
6989 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6990 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6991 EXPORT_SYMBOL_GPL(ata_bmdma_start);
6992 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6993 EXPORT_SYMBOL_GPL(ata_bmdma_status);
6994 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6995 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6996 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6997 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6998 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6999 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
7000 EXPORT_SYMBOL_GPL(ata_port_probe);
7001 EXPORT_SYMBOL_GPL(ata_dev_disable);
7002 EXPORT_SYMBOL_GPL(sata_set_spd);
7003 EXPORT_SYMBOL_GPL(sata_link_debounce);
7004 EXPORT_SYMBOL_GPL(sata_link_resume);
7005 EXPORT_SYMBOL_GPL(sata_phy_reset);
7006 EXPORT_SYMBOL_GPL(__sata_phy_reset);
7007 EXPORT_SYMBOL_GPL(ata_bus_reset);
7008 EXPORT_SYMBOL_GPL(ata_std_prereset);
7009 EXPORT_SYMBOL_GPL(ata_std_softreset);
7010 EXPORT_SYMBOL_GPL(sata_link_hardreset);
7011 EXPORT_SYMBOL_GPL(sata_std_hardreset);
7012 EXPORT_SYMBOL_GPL(ata_std_postreset);
7013 EXPORT_SYMBOL_GPL(ata_dev_classify);
7014 EXPORT_SYMBOL_GPL(ata_dev_pair);
7015 EXPORT_SYMBOL_GPL(ata_port_disable);
7016 EXPORT_SYMBOL_GPL(ata_ratelimit);
7017 EXPORT_SYMBOL_GPL(ata_wait_register);
7018 EXPORT_SYMBOL_GPL(ata_busy_sleep);
7019 EXPORT_SYMBOL_GPL(ata_wait_ready);
7020 EXPORT_SYMBOL_GPL(ata_port_queue_task);
7021 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7022 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7023 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7024 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7025 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7026 EXPORT_SYMBOL_GPL(ata_host_intr);
7027 EXPORT_SYMBOL_GPL(sata_scr_valid);
7028 EXPORT_SYMBOL_GPL(sata_scr_read);
7029 EXPORT_SYMBOL_GPL(sata_scr_write);
7030 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7031 EXPORT_SYMBOL_GPL(ata_link_online);
7032 EXPORT_SYMBOL_GPL(ata_link_offline);
7033 #ifdef CONFIG_PM
7034 EXPORT_SYMBOL_GPL(ata_host_suspend);
7035 EXPORT_SYMBOL_GPL(ata_host_resume);
7036 #endif /* CONFIG_PM */
7037 EXPORT_SYMBOL_GPL(ata_id_string);
7038 EXPORT_SYMBOL_GPL(ata_id_c_string);
7039 EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
7040 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7041
7042 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7043 EXPORT_SYMBOL_GPL(ata_timing_compute);
7044 EXPORT_SYMBOL_GPL(ata_timing_merge);
7045
7046 #ifdef CONFIG_PCI
7047 EXPORT_SYMBOL_GPL(pci_test_config_bits);
7048 EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
7049 EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7050 EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
7051 EXPORT_SYMBOL_GPL(ata_pci_init_one);
7052 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7053 #ifdef CONFIG_PM
7054 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7055 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7056 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7057 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7058 #endif /* CONFIG_PM */
7059 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7060 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
7061 #endif /* CONFIG_PCI */
7062
7063 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7064 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7065 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7066 EXPORT_SYMBOL_GPL(ata_eng_timeout);
7067 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7068 EXPORT_SYMBOL_GPL(ata_link_abort);
7069 EXPORT_SYMBOL_GPL(ata_port_abort);
7070 EXPORT_SYMBOL_GPL(ata_port_freeze);
7071 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7072 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7073 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7074 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7075 EXPORT_SYMBOL_GPL(ata_do_eh);
7076 EXPORT_SYMBOL_GPL(ata_irq_on);
7077 EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
7078 EXPORT_SYMBOL_GPL(ata_irq_ack);
7079 EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
7080 EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7081
7082 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7083 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7084 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7085 EXPORT_SYMBOL_GPL(ata_cable_sata);
This page took 0.18345 seconds and 5 git commands to generate.