[PATCH] libata: use xfer_mask helpers in ata_dev_set_mode()
[deliverable/linux.git] / drivers/scsi/libata-core.c
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
58 #include <asm/io.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
61
62 #include "libata.h"
63
64 static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev);
66 static void ata_set_mode(struct ata_port *ap);
67 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
68 static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift);
69 static int fgb(u32 bitmap);
70 static int ata_choose_xfer_mode(const struct ata_port *ap,
71 u8 *xfer_mode_out,
72 unsigned int *xfer_shift_out);
73
74 static unsigned int ata_unique_id = 1;
75 static struct workqueue_struct *ata_wq;
76
77 int atapi_enabled = 0;
78 module_param(atapi_enabled, int, 0444);
79 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
80
81 int libata_fua = 0;
82 module_param_named(fua, libata_fua, int, 0444);
83 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
84
85 MODULE_AUTHOR("Jeff Garzik");
86 MODULE_DESCRIPTION("Library module for ATA devices");
87 MODULE_LICENSE("GPL");
88 MODULE_VERSION(DRV_VERSION);
89
90
91 /**
92 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
93 * @tf: Taskfile to convert
94 * @fis: Buffer into which data will be output
95 * @pmp: Port multiplier port
96 *
97 * Converts a standard ATA taskfile to a Serial ATA
98 * FIS structure (Register - Host to Device).
99 *
100 * LOCKING:
101 * Inherited from caller.
102 */
103
104 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
105 {
106 fis[0] = 0x27; /* Register - Host to Device FIS */
107 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
108 bit 7 indicates Command FIS */
109 fis[2] = tf->command;
110 fis[3] = tf->feature;
111
112 fis[4] = tf->lbal;
113 fis[5] = tf->lbam;
114 fis[6] = tf->lbah;
115 fis[7] = tf->device;
116
117 fis[8] = tf->hob_lbal;
118 fis[9] = tf->hob_lbam;
119 fis[10] = tf->hob_lbah;
120 fis[11] = tf->hob_feature;
121
122 fis[12] = tf->nsect;
123 fis[13] = tf->hob_nsect;
124 fis[14] = 0;
125 fis[15] = tf->ctl;
126
127 fis[16] = 0;
128 fis[17] = 0;
129 fis[18] = 0;
130 fis[19] = 0;
131 }
132
133 /**
134 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
135 * @fis: Buffer from which data will be input
136 * @tf: Taskfile to output
137 *
138 * Converts a serial ATA FIS structure to a standard ATA taskfile.
139 *
140 * LOCKING:
141 * Inherited from caller.
142 */
143
144 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
145 {
146 tf->command = fis[2]; /* status */
147 tf->feature = fis[3]; /* error */
148
149 tf->lbal = fis[4];
150 tf->lbam = fis[5];
151 tf->lbah = fis[6];
152 tf->device = fis[7];
153
154 tf->hob_lbal = fis[8];
155 tf->hob_lbam = fis[9];
156 tf->hob_lbah = fis[10];
157
158 tf->nsect = fis[12];
159 tf->hob_nsect = fis[13];
160 }
161
162 static const u8 ata_rw_cmds[] = {
163 /* pio multi */
164 ATA_CMD_READ_MULTI,
165 ATA_CMD_WRITE_MULTI,
166 ATA_CMD_READ_MULTI_EXT,
167 ATA_CMD_WRITE_MULTI_EXT,
168 0,
169 0,
170 0,
171 ATA_CMD_WRITE_MULTI_FUA_EXT,
172 /* pio */
173 ATA_CMD_PIO_READ,
174 ATA_CMD_PIO_WRITE,
175 ATA_CMD_PIO_READ_EXT,
176 ATA_CMD_PIO_WRITE_EXT,
177 0,
178 0,
179 0,
180 0,
181 /* dma */
182 ATA_CMD_READ,
183 ATA_CMD_WRITE,
184 ATA_CMD_READ_EXT,
185 ATA_CMD_WRITE_EXT,
186 0,
187 0,
188 0,
189 ATA_CMD_WRITE_FUA_EXT
190 };
191
192 /**
193 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
194 * @qc: command to examine and configure
195 *
196 * Examine the device configuration and tf->flags to calculate
197 * the proper read/write commands and protocol to use.
198 *
199 * LOCKING:
200 * caller.
201 */
202 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
203 {
204 struct ata_taskfile *tf = &qc->tf;
205 struct ata_device *dev = qc->dev;
206 u8 cmd;
207
208 int index, fua, lba48, write;
209
210 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
211 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
212 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
213
214 if (dev->flags & ATA_DFLAG_PIO) {
215 tf->protocol = ATA_PROT_PIO;
216 index = dev->multi_count ? 0 : 8;
217 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
218 /* Unable to use DMA due to host limitation */
219 tf->protocol = ATA_PROT_PIO;
220 index = dev->multi_count ? 0 : 8;
221 } else {
222 tf->protocol = ATA_PROT_DMA;
223 index = 16;
224 }
225
226 cmd = ata_rw_cmds[index + fua + lba48 + write];
227 if (cmd) {
228 tf->command = cmd;
229 return 0;
230 }
231 return -1;
232 }
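/*
 * Worked example (added for illustration, not in the original file): for a
 * DMA-capable device an LBA48 FUA write has fua=4, lba48=2, write=1 and
 * index=16, so ata_rw_cmds[16 + 4 + 2 + 1] selects ATA_CMD_WRITE_FUA_EXT.
 * A FUA read (ata_rw_cmds[16 + 4 + 2]) lands on a zero slot, so the
 * function returns -1 and the command is rejected.
 */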
233
234 /**
235 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
236 * @pio_mask: pio_mask
237 * @mwdma_mask: mwdma_mask
238 * @udma_mask: udma_mask
239 *
240 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
241 * unsigned int xfer_mask.
242 *
243 * LOCKING:
244 * None.
245 *
246 * RETURNS:
247 * Packed xfer_mask.
248 */
249 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
250 unsigned int mwdma_mask,
251 unsigned int udma_mask)
252 {
253 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
254 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
255 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
256 }
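/*
 * Usage sketch (illustrative only, not part of the original file), assuming
 * the usual convention that bit N of each argument means PIO/MWDMA/UDMA
 * mode N:
 *
 *	unsigned int xfer_mask;
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *		(PIO0-4, MWDMA0-2 and UDMA0-5 folded into one bitmap)
 */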
257
258 static const struct ata_xfer_ent {
259 unsigned int shift, bits;
260 u8 base;
261 } ata_xfer_tbl[] = {
262 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
263 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
264 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
265 { -1, },
266 };
267
268 /**
269 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
270 * @xfer_mask: xfer_mask of interest
271 *
272 * Return matching XFER_* value for @xfer_mask. Only the highest
273 * bit of @xfer_mask is considered.
274 *
275 * LOCKING:
276 * None.
277 *
278 * RETURNS:
279 * Matching XFER_* value, 0 if no match found.
280 */
281 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
282 {
283 int highbit = fls(xfer_mask) - 1;
284 const struct ata_xfer_ent *ent;
285
286 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
287 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
288 return ent->base + highbit - ent->shift;
289 return 0;
290 }
291
292 /**
293 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
294 * @xfer_mode: XFER_* of interest
295 *
296 * Return matching xfer_mask for @xfer_mode.
297 *
298 * LOCKING:
299 * None.
300 *
301 * RETURNS:
302 * Matching xfer_mask, 0 if no match found.
303 */
304 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
305 {
306 const struct ata_xfer_ent *ent;
307
308 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
309 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
310 return 1 << (ent->shift + xfer_mode - ent->base);
311 return 0;
312 }
313
314 /**
315 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
316 * @xfer_mode: XFER_* of interest
317 *
318 * Return matching xfer_shift for @xfer_mode.
319 *
320 * LOCKING:
321 * None.
322 *
323 * RETURNS:
324 * Matching xfer_shift, -1 if no match found.
325 */
326 static int ata_xfer_mode2shift(unsigned int xfer_mode)
327 {
328 const struct ata_xfer_ent *ent;
329
330 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
331 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
332 return ent->shift;
333 return -1;
334 }
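/*
 * Illustrative sketch (not from the original file): the helpers above
 * round-trip between the packed mask and XFER_* values.  For an xfer_mask
 * whose highest set bit is the UDMA5 bit:
 *
 *	u8 mode = ata_xfer_mask2mode(xfer_mask);	(XFER_UDMA_0 + 5)
 *	unsigned int bit = ata_xfer_mode2mask(mode);	(just the UDMA5 bit)
 *	int shift = ata_xfer_mode2shift(mode);		(ATA_SHIFT_UDMA)
 */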
335
336 static const char * const xfer_mode_str[] = {
337 "PIO0",
338 "PIO1",
339 "PIO2",
340 "PIO3",
341 "PIO4",
342 "MWDMA0",
343 "MWDMA1",
344 "MWDMA2",
345 "UDMA/16",
346 "UDMA/25",
347 "UDMA/33",
348 "UDMA/44",
349 "UDMA/66",
350 "UDMA/100",
351 "UDMA/133",
352 "UDMA7",
353 };
354
355 /**
356 * ata_mode_string - convert xfer_mask to string
357 * @xfer_mask: mask of bits supported; only highest bit counts.
358 *
359 * Determine string which represents the highest speed
360 * (highest bit in @xfer_mask).
361 *
362 * LOCKING:
363 * None.
364 *
365 * RETURNS:
366 * Constant C string representing highest speed listed in
367 * @xfer_mask, or the constant C string "<n/a>".
368 */
369
370 static const char *ata_mode_string(unsigned int xfer_mask)
371 {
372 int highbit;
373
374 highbit = fls(xfer_mask) - 1;
375 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
376 return xfer_mode_str[highbit];
377 return "<n/a>";
378 }
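/*
 * Example (illustrative): xfer_mode_str[] is indexed directly by bit number,
 * so with the layout implied above (PIO0 at bit 0 through UDMA7 at bit 15)
 * a mask whose highest set bit is the UDMA5 bit prints as:
 *
 *	ata_mode_string(xfer_mask);	(returns "UDMA/100")
 */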
379
380 /**
381 * ata_pio_devchk - PATA device presence detection
382 * @ap: ATA channel to examine
383 * @device: Device to examine (starting at zero)
384 *
385 * This technique was originally described in
386 * Hale Landis's ATADRVR (www.ata-atapi.com), and
387 * later found its way into the ATA/ATAPI spec.
388 *
389 * Write a pattern to the ATA shadow registers,
390 * and if a device is present, it will respond by
391 * correctly storing and echoing back the
392 * ATA shadow register contents.
393 *
394 * LOCKING:
395 * caller.
396 */
397
398 static unsigned int ata_pio_devchk(struct ata_port *ap,
399 unsigned int device)
400 {
401 struct ata_ioports *ioaddr = &ap->ioaddr;
402 u8 nsect, lbal;
403
404 ap->ops->dev_select(ap, device);
405
406 outb(0x55, ioaddr->nsect_addr);
407 outb(0xaa, ioaddr->lbal_addr);
408
409 outb(0xaa, ioaddr->nsect_addr);
410 outb(0x55, ioaddr->lbal_addr);
411
412 outb(0x55, ioaddr->nsect_addr);
413 outb(0xaa, ioaddr->lbal_addr);
414
415 nsect = inb(ioaddr->nsect_addr);
416 lbal = inb(ioaddr->lbal_addr);
417
418 if ((nsect == 0x55) && (lbal == 0xaa))
419 return 1; /* we found a device */
420
421 return 0; /* nothing found */
422 }
423
424 /**
425 * ata_mmio_devchk - PATA device presence detection
426 * @ap: ATA channel to examine
427 * @device: Device to examine (starting at zero)
428 *
429 * This technique was originally described in
430 * Hale Landis's ATADRVR (www.ata-atapi.com), and
431 * later found its way into the ATA/ATAPI spec.
432 *
433 * Write a pattern to the ATA shadow registers,
434 * and if a device is present, it will respond by
435 * correctly storing and echoing back the
436 * ATA shadow register contents.
437 *
438 * LOCKING:
439 * caller.
440 */
441
442 static unsigned int ata_mmio_devchk(struct ata_port *ap,
443 unsigned int device)
444 {
445 struct ata_ioports *ioaddr = &ap->ioaddr;
446 u8 nsect, lbal;
447
448 ap->ops->dev_select(ap, device);
449
450 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
451 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
452
453 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
454 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
455
456 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
457 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
458
459 nsect = readb((void __iomem *) ioaddr->nsect_addr);
460 lbal = readb((void __iomem *) ioaddr->lbal_addr);
461
462 if ((nsect == 0x55) && (lbal == 0xaa))
463 return 1; /* we found a device */
464
465 return 0; /* nothing found */
466 }
467
468 /**
469 * ata_devchk - PATA device presence detection
470 * @ap: ATA channel to examine
471 * @device: Device to examine (starting at zero)
472 *
473 * Dispatch ATA device presence detection, depending
474 * on whether we are using PIO or MMIO to talk to the
475 * ATA shadow registers.
476 *
477 * LOCKING:
478 * caller.
479 */
480
481 static unsigned int ata_devchk(struct ata_port *ap,
482 unsigned int device)
483 {
484 if (ap->flags & ATA_FLAG_MMIO)
485 return ata_mmio_devchk(ap, device);
486 return ata_pio_devchk(ap, device);
487 }
488
489 /**
490 * ata_dev_classify - determine device type based on ATA-spec signature
491 * @tf: ATA taskfile register set for device to be identified
492 *
493 * Determine from taskfile register contents whether a device is
494 * ATA or ATAPI, as per "Signature and persistence" section
495 * of ATA/PI spec (volume 1, sect 5.14).
496 *
497 * LOCKING:
498 * None.
499 *
500 * RETURNS:
501 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
502 * in the event of failure.
503 */
504
505 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
506 {
507 /* Apple's open source Darwin code hints that some devices only
508 * put a proper signature into the LBA mid/high registers,
509 * so we only check those. It's sufficient for uniqueness.
510 */
511
512 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
513 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
514 DPRINTK("found ATA device by sig\n");
515 return ATA_DEV_ATA;
516 }
517
518 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
519 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
520 DPRINTK("found ATAPI device by sig\n");
521 return ATA_DEV_ATAPI;
522 }
523
524 DPRINTK("unknown device\n");
525 return ATA_DEV_UNKNOWN;
526 }
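/*
 * Example (illustrative, not in the original file): after a reset an ATAPI
 * device leaves lbam/lbah == 0x14/0xeb in the shadow registers, so
 * ata_dev_classify() on that taskfile returns ATA_DEV_ATAPI, while
 * 0x00/0x00 (or 0x3c/0xc3) yields ATA_DEV_ATA.
 */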
527
528 /**
529 * ata_dev_try_classify - Parse returned ATA device signature
530 * @ap: ATA channel to examine
531 * @device: Device to examine (starting at zero)
532 * @r_err: Value of error register on completion
533 *
534 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
535 * an ATA/ATAPI-defined set of values is placed in the ATA
536 * shadow registers, indicating the results of device detection
537 * and diagnostics.
538 *
539 * Select the ATA device, and read the values from the ATA shadow
540 * registers. Then parse according to the Error register value,
541 * and the spec-defined values examined by ata_dev_classify().
542 *
543 * LOCKING:
544 * caller.
545 *
546 * RETURNS:
547 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
548 */
549
550 static unsigned int
551 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
552 {
553 struct ata_taskfile tf;
554 unsigned int class;
555 u8 err;
556
557 ap->ops->dev_select(ap, device);
558
559 memset(&tf, 0, sizeof(tf));
560
561 ap->ops->tf_read(ap, &tf);
562 err = tf.feature;
563 if (r_err)
564 *r_err = err;
565
566 /* see if device passed diags */
567 if (err == 1)
568 /* do nothing */ ;
569 else if ((device == 0) && (err == 0x81))
570 /* do nothing */ ;
571 else
572 return ATA_DEV_NONE;
573
574 /* determine if device is ATA or ATAPI */
575 class = ata_dev_classify(&tf);
576
577 if (class == ATA_DEV_UNKNOWN)
578 return ATA_DEV_NONE;
579 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
580 return ATA_DEV_NONE;
581 return class;
582 }
583
584 /**
585 * ata_id_string - Convert IDENTIFY DEVICE page into string
586 * @id: IDENTIFY DEVICE results we will examine
587 * @s: string into which data is output
588 * @ofs: offset into identify device page
589 * @len: length of string to return. must be an even number.
590 *
591 * The strings in the IDENTIFY DEVICE page are broken up into
592 * 16-bit chunks. Run through the string, and output each
593 * 8-bit chunk linearly, regardless of platform.
594 *
595 * LOCKING:
596 * caller.
597 */
598
599 void ata_id_string(const u16 *id, unsigned char *s,
600 unsigned int ofs, unsigned int len)
601 {
602 unsigned int c;
603
604 while (len > 0) {
605 c = id[ofs] >> 8;
606 *s = c;
607 s++;
608
609 c = id[ofs] & 0xff;
610 *s = c;
611 s++;
612
613 ofs++;
614 len -= 2;
615 }
616 }
617
618 /**
619 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
620 * @id: IDENTIFY DEVICE results we will examine
621 * @s: string into which data is output
622 * @ofs: offset into identify device page
623 * @len: length of string to return. must be an odd number.
624 *
625 * This function is identical to ata_id_string except that it
626 * trims trailing spaces and terminates the resulting string with
627 * null. @len must be actual maximum length (even number) + 1.
628 *
629 * LOCKING:
630 * caller.
631 */
632 void ata_id_c_string(const u16 *id, unsigned char *s,
633 unsigned int ofs, unsigned int len)
634 {
635 unsigned char *p;
636
637 WARN_ON(!(len & 1));
638
639 ata_id_string(id, s, ofs, len - 1);
640
641 p = s + strnlen(s, len - 1);
642 while (p > s && p[-1] == ' ')
643 p--;
644 *p = '\0';
645 }
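/*
 * Usage sketch (not part of the original file): pulling the model string
 * out of an IDENTIFY page.  The ATA spec puts the model name in words
 * 27-46 (40 bytes), so the buffer is that plus one byte for the NUL:
 *
 *	unsigned char model[41];
 *
 *	ata_id_c_string(id, model, 27, sizeof(model));
 */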
646
647 static u64 ata_id_n_sectors(const u16 *id)
648 {
649 if (ata_id_has_lba(id)) {
650 if (ata_id_has_lba48(id))
651 return ata_id_u64(id, 100);
652 else
653 return ata_id_u32(id, 60);
654 } else {
655 if (ata_id_current_chs_valid(id))
656 return ata_id_u32(id, 57);
657 else
658 return id[1] * id[3] * id[6];
659 }
660 }
661
662 /**
663 * ata_noop_dev_select - Select device 0/1 on ATA bus
664 * @ap: ATA channel to manipulate
665 * @device: ATA device (numbered from zero) to select
666 *
667 * This function intentionally does nothing.
668 *
669 * May be used as the dev_select() entry in ata_port_operations.
670 *
671 * LOCKING:
672 * caller.
673 */
674 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
675 {
676 }
677
678
679 /**
680 * ata_std_dev_select - Select device 0/1 on ATA bus
681 * @ap: ATA channel to manipulate
682 * @device: ATA device (numbered from zero) to select
683 *
684 * Use the method defined in the ATA specification to
685 * make either device 0, or device 1, active on the
686 * ATA channel. Works with both PIO and MMIO.
687 *
688 * May be used as the dev_select() entry in ata_port_operations.
689 *
690 * LOCKING:
691 * caller.
692 */
693
694 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
695 {
696 u8 tmp;
697
698 if (device == 0)
699 tmp = ATA_DEVICE_OBS;
700 else
701 tmp = ATA_DEVICE_OBS | ATA_DEV1;
702
703 if (ap->flags & ATA_FLAG_MMIO) {
704 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
705 } else {
706 outb(tmp, ap->ioaddr.device_addr);
707 }
708 ata_pause(ap); /* needed; also flushes, for mmio */
709 }
710
711 /**
712 * ata_dev_select - Select device 0/1 on ATA bus
713 * @ap: ATA channel to manipulate
714 * @device: ATA device (numbered from zero) to select
715 * @wait: non-zero to wait for Status register BSY bit to clear
716 * @can_sleep: non-zero if context allows sleeping
717 *
718 * Use the method defined in the ATA specification to
719 * make either device 0, or device 1, active on the
720 * ATA channel.
721 *
722 * This is a high-level version of ata_std_dev_select(),
723 * which additionally provides the services of inserting
724 * the proper pauses and status polling, where needed.
725 *
726 * LOCKING:
727 * caller.
728 */
729
730 void ata_dev_select(struct ata_port *ap, unsigned int device,
731 unsigned int wait, unsigned int can_sleep)
732 {
733 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
734 ap->id, device, wait);
735
736 if (wait)
737 ata_wait_idle(ap);
738
739 ap->ops->dev_select(ap, device);
740
741 if (wait) {
742 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
743 msleep(150);
744 ata_wait_idle(ap);
745 }
746 }
747
748 /**
749 * ata_dump_id - IDENTIFY DEVICE info debugging output
750 * @id: IDENTIFY DEVICE page to dump
751 *
752 * Dump selected 16-bit words from the given IDENTIFY DEVICE
753 * page.
754 *
755 * LOCKING:
756 * caller.
757 */
758
759 static inline void ata_dump_id(const u16 *id)
760 {
761 DPRINTK("49==0x%04x "
762 "53==0x%04x "
763 "63==0x%04x "
764 "64==0x%04x "
765 "75==0x%04x \n",
766 id[49],
767 id[53],
768 id[63],
769 id[64],
770 id[75]);
771 DPRINTK("80==0x%04x "
772 "81==0x%04x "
773 "82==0x%04x "
774 "83==0x%04x "
775 "84==0x%04x \n",
776 id[80],
777 id[81],
778 id[82],
779 id[83],
780 id[84]);
781 DPRINTK("88==0x%04x "
782 "93==0x%04x\n",
783 id[88],
784 id[93]);
785 }
786
787 /**
788 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
789 * @id: IDENTIFY data to compute xfer mask from
790 *
791 * Compute the xfermask for this device. This is not as trivial
792 * as it seems if we must consider early devices correctly.
793 *
794 * FIXME: pre-IDE drive timing (do we care?).
795 *
796 * LOCKING:
797 * None.
798 *
799 * RETURNS:
800 * Computed xfermask
801 */
802 static unsigned int ata_id_xfermask(const u16 *id)
803 {
804 unsigned int pio_mask, mwdma_mask, udma_mask;
805
806 /* Usual case. Word 53 indicates word 64 is valid */
807 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
808 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
809 pio_mask <<= 3;
810 pio_mask |= 0x7;
811 } else {
812 /* If word 64 isn't valid then Word 51 high byte holds
813 * the PIO timing number for the maximum. Turn it into
814 * a mask.
815 */
816 pio_mask = (2 << ((id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1;
817
818 /* But wait.. there's more. Design your standards by
819 * committee and you too can get a free IORDY field to
820 * process. However, it's the speeds, not the modes, that
821 * are supported... Note that drivers using the timing API
822 * will get this right anyway.
823 */
824 }
825
826 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
827 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
828
829 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
830 }
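/*
 * Example (illustrative only): a typical modern drive sets word 53 bit 1,
 * reports PIO3/4 in id[ATA_ID_PIO_MODES] (0x03), MWDMA0-2 and UDMA0-5, so
 * this helper computes pio_mask = 0x1f, mwdma_mask = 0x07, udma_mask = 0x3f
 * and packs them into a single xfer_mask for ata_mode_string() and friends.
 */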
831
832 /*
833 * Compute the PIO modes available for this device. This is not as
834 * trivial as it seems if we must consider early devices correctly.
835 *
836 * FIXME: pre-IDE drive timing (do we care?).
837 */
838
839 static unsigned int ata_pio_modes(const struct ata_device *adev)
840 {
841 u16 modes;
842
843 /* Usual case. Word 53 indicates word 64 is valid */
844 if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) {
845 modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
846 modes <<= 3;
847 modes |= 0x7;
848 return modes;
849 }
850
851 /* If word 64 isn't valid then Word 51 high byte holds the PIO timing
852 number for the maximum. Turn it into a mask and return it */
853 modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1;
854 return modes;
855 /* But wait.. there's more. Design your standards by committee and
856 you too can get a free IORDY field to process. However, it's the
857 speeds, not the modes, that are supported... Note that drivers
858 using the timing API will get this right anyway */
859 }
860
861 /**
862 * ata_port_queue_task - Queue port_task
863 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data value passed to @fn
 * @delay: delay in jiffies before executing @fn
864 *
865 * Schedule @fn(@data) for execution after @delay jiffies using
866 * port_task. There is one port_task per port and it's the
867 * user's (low level driver's) responsibility to make sure that only
868 * one task is active at any given time.
869 *
870 * libata core layer takes care of synchronization between
871 * port_task and EH. ata_port_queue_task() may be ignored for EH
872 * synchronization.
873 *
874 * LOCKING:
875 * Inherited from caller.
876 */
877 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
878 unsigned long delay)
879 {
880 int rc;
881
882 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
883 return;
884
885 PREPARE_WORK(&ap->port_task, fn, data);
886
887 if (!delay)
888 rc = queue_work(ata_wq, &ap->port_task);
889 else
890 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
891
892 /* rc == 0 means that another user is using port task */
893 WARN_ON(rc == 0);
894 }
895
896 /**
897 * ata_port_flush_task - Flush port_task
898 * @ap: The ata_port to flush port_task for
899 *
900 * After this function completes, port_task is guaranteed not to
901 * be running or scheduled.
902 *
903 * LOCKING:
904 * Kernel thread context (may sleep)
905 */
906 void ata_port_flush_task(struct ata_port *ap)
907 {
908 unsigned long flags;
909
910 DPRINTK("ENTER\n");
911
912 spin_lock_irqsave(&ap->host_set->lock, flags);
913 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
914 spin_unlock_irqrestore(&ap->host_set->lock, flags);
915
916 DPRINTK("flush #1\n");
917 flush_workqueue(ata_wq);
918
919 /*
920 * At this point, if a task is running, it's guaranteed to see
921 * the FLUSH flag; thus, it will never queue pio tasks again.
922 * Cancel and flush.
923 */
924 if (!cancel_delayed_work(&ap->port_task)) {
925 DPRINTK("flush #2\n");
926 flush_workqueue(ata_wq);
927 }
928
929 spin_lock_irqsave(&ap->host_set->lock, flags);
930 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
931 spin_unlock_irqrestore(&ap->host_set->lock, flags);
932
933 DPRINTK("EXIT\n");
934 }
935
936 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
937 {
938 struct completion *waiting = qc->private_data;
939
940 qc->ap->ops->tf_read(qc->ap, &qc->tf);
941 complete(waiting);
942 }
943
944 /**
945 * ata_exec_internal - execute libata internal command
946 * @ap: Port to which the command is sent
947 * @dev: Device to which the command is sent
948 * @tf: Taskfile registers for the command and the result
949 * @dma_dir: Data transfer direction of the command
950 * @buf: Data buffer of the command
951 * @buflen: Length of data buffer
952 *
953 * Executes libata internal command with timeout. @tf contains
954 * command on entry and result on return. Timeout and error
955 * conditions are reported via return value. No recovery action
956 * is taken after a command times out. It's caller's duty to
957 * clean up after timeout.
958 *
959 * LOCKING:
960 * None. Should be called with kernel context, might sleep.
961 */
962
963 static unsigned
964 ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
965 struct ata_taskfile *tf,
966 int dma_dir, void *buf, unsigned int buflen)
967 {
968 u8 command = tf->command;
969 struct ata_queued_cmd *qc;
970 DECLARE_COMPLETION(wait);
971 unsigned long flags;
972 unsigned int err_mask;
973
974 spin_lock_irqsave(&ap->host_set->lock, flags);
975
976 qc = ata_qc_new_init(ap, dev);
977 BUG_ON(qc == NULL);
978
979 qc->tf = *tf;
980 qc->dma_dir = dma_dir;
981 if (dma_dir != DMA_NONE) {
982 ata_sg_init_one(qc, buf, buflen);
983 qc->nsect = buflen / ATA_SECT_SIZE;
984 }
985
986 qc->private_data = &wait;
987 qc->complete_fn = ata_qc_complete_internal;
988
989 qc->err_mask = ata_qc_issue(qc);
990 if (qc->err_mask)
991 ata_qc_complete(qc);
992
993 spin_unlock_irqrestore(&ap->host_set->lock, flags);
994
995 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
996 spin_lock_irqsave(&ap->host_set->lock, flags);
997
998 /* We're racing with irq here. If we lose, the
999 * following test prevents us from completing the qc
1000 * again. If completion irq occurs after here but
1001 * before the caller cleans up, it will result in a
1002 * spurious interrupt. We can live with that.
1003 */
1004 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1005 qc->err_mask = AC_ERR_TIMEOUT;
1006 ata_qc_complete(qc);
1007 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1008 ap->id, command);
1009 }
1010
1011 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1012 }
1013
1014 *tf = qc->tf;
1015 err_mask = qc->err_mask;
1016
1017 ata_qc_free(qc);
1018
1019 return err_mask;
1020 }
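/*
 * Usage sketch (illustrative; this mirrors ata_dev_read_id() below): issuing
 * a PIO data-in IDENTIFY DEVICE through this helper:
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(ap, &tf, dev->devno);
 *	tf.command = ATA_CMD_ID_ATA;
 *	tf.protocol = ATA_PROT_PIO;
 *	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
 *				     id, sizeof(id[0]) * ATA_ID_WORDS);
 */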
1021
1022 /**
1023 * ata_pio_need_iordy - check if iordy needed
1024 * @adev: ATA device
1025 *
1026 * Check if the current speed of the device requires IORDY. Used
1027 * by various controllers for chip configuration.
1028 */
1029
1030 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1031 {
1032 int pio;
1033 int speed = adev->pio_mode - XFER_PIO_0;
1034
1035 if (speed < 2)
1036 return 0;
1037 if (speed > 2)
1038 return 1;
1039
1040 /* If we have no drive-specific rule, then PIO 2 is non-IORDY */
1041
1042 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1043 pio = adev->id[ATA_ID_EIDE_PIO];
1044 /* Is the speed faster than the drive allows without IORDY? */
1045 if (pio) {
1046 /* These are cycle times, not frequencies - watch the logic! */
1047 if (pio > 240) /* PIO2 is 240 ns per cycle */
1048 return 1;
1049 return 0;
1050 }
1051 }
1052 return 0;
1053 }
1054
1055 /**
1056 * ata_dev_read_id - Read ID data from the specified device
1057 * @ap: port on which target device resides
1058 * @dev: target device
1059 * @p_class: pointer to class of the target device (may be changed)
1060 * @post_reset: is this read ID post-reset?
1061 * @p_id: read IDENTIFY page (newly allocated)
1062 *
1063 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1064 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1065 * devices. This function also takes care of EDD signature
1066 * misreporting (to be removed once EDD support is gone) and
1067 * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
1068 *
1069 * LOCKING:
1070 * Kernel thread context (may sleep)
1071 *
1072 * RETURNS:
1073 * 0 on success, -errno otherwise.
1074 */
1075 static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1076 unsigned int *p_class, int post_reset, u16 **p_id)
1077 {
1078 unsigned int class = *p_class;
1079 unsigned int using_edd;
1080 struct ata_taskfile tf;
1081 unsigned int err_mask = 0;
1082 u16 *id;
1083 const char *reason;
1084 int rc;
1085
1086 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1087
1088 if (ap->ops->probe_reset ||
1089 ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1090 using_edd = 0;
1091 else
1092 using_edd = 1;
1093
1094 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1095
1096 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1097 if (id == NULL) {
1098 rc = -ENOMEM;
1099 reason = "out of memory";
1100 goto err_out;
1101 }
1102
1103 retry:
1104 ata_tf_init(ap, &tf, dev->devno);
1105
1106 switch (class) {
1107 case ATA_DEV_ATA:
1108 tf.command = ATA_CMD_ID_ATA;
1109 break;
1110 case ATA_DEV_ATAPI:
1111 tf.command = ATA_CMD_ID_ATAPI;
1112 break;
1113 default:
1114 rc = -ENODEV;
1115 reason = "unsupported class";
1116 goto err_out;
1117 }
1118
1119 tf.protocol = ATA_PROT_PIO;
1120
1121 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1122 id, sizeof(id[0]) * ATA_ID_WORDS);
1123
1124 if (err_mask) {
1125 rc = -EIO;
1126 reason = "I/O error";
1127
1128 if (err_mask & ~AC_ERR_DEV)
1129 goto err_out;
1130
1131 /*
1132 * arg! EDD works for all test cases, but seems to return
1133 * the ATA signature for some ATAPI devices. Until the
1134 * reason for this is found and fixed, we fix up the mess
1135 * here. If IDENTIFY DEVICE returns command aborted
1136 * (as ATAPI devices do), then we issue an
1137 * IDENTIFY PACKET DEVICE.
1138 *
1139 * ATA software reset (SRST, the default) does not appear
1140 * to have this problem.
1141 */
1142 if ((using_edd) && (class == ATA_DEV_ATA)) {
1143 u8 err = tf.feature;
1144 if (err & ATA_ABORTED) {
1145 class = ATA_DEV_ATAPI;
1146 goto retry;
1147 }
1148 }
1149 goto err_out;
1150 }
1151
1152 swap_buf_le16(id, ATA_ID_WORDS);
1153
1154 /* print device capabilities */
1155 printk(KERN_DEBUG "ata%u: dev %u cfg "
1156 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1157 ap->id, dev->devno,
1158 id[49], id[82], id[83], id[84], id[85], id[86], id[87], id[88]);
1159
1160 /* sanity check */
1161 if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
1162 rc = -EINVAL;
1163 reason = "device reports illegal type";
1164 goto err_out;
1165 }
1166
1167 if (post_reset && class == ATA_DEV_ATA) {
1168 /*
1169 * The exact sequence expected by certain pre-ATA4 drives is:
1170 * SRST RESET
1171 * IDENTIFY
1172 * INITIALIZE DEVICE PARAMETERS
1173 * anything else..
1174 * Some drives were very specific about that exact sequence.
1175 */
1176 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1177 err_mask = ata_dev_init_params(ap, dev);
1178 if (err_mask) {
1179 rc = -EIO;
1180 reason = "INIT_DEV_PARAMS failed";
1181 goto err_out;
1182 }
1183
1184 /* current CHS translation info (id[53-58]) might be
1185 * changed. reread the identify device info.
1186 */
1187 post_reset = 0;
1188 goto retry;
1189 }
1190 }
1191
1192 *p_class = class;
1193 *p_id = id;
1194 return 0;
1195
1196 err_out:
1197 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1198 ap->id, dev->devno, reason);
1199 kfree(id);
1200 return rc;
1201 }
1202
1203 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1204 struct ata_device *dev)
1205 {
1206 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1207 }
1208
1209 /**
1210 * ata_dev_configure - Configure the specified ATA/ATAPI device
1211 * @ap: Port on which target device resides
1212 * @dev: Target device to configure
1213 * @print_info: Enable device info printout
1214 *
1215 * Configure @dev according to @dev->id. Generic and low-level
1216 * driver specific fixups are also applied.
1217 *
1218 * LOCKING:
1219 * Kernel thread context (may sleep)
1220 *
1221 * RETURNS:
1222 * 0 on success, -errno otherwise
1223 */
1224 static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1225 int print_info)
1226 {
1227 unsigned int xfer_mask;
1228 int i, rc;
1229
1230 if (!ata_dev_present(dev)) {
1231 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1232 ap->id, dev->devno);
1233 return 0;
1234 }
1235
1236 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1237
1238 /* initialize to-be-configured parameters */
1239 dev->flags = 0;
1240 dev->max_sectors = 0;
1241 dev->cdb_len = 0;
1242 dev->n_sectors = 0;
1243 dev->cylinders = 0;
1244 dev->heads = 0;
1245 dev->sectors = 0;
1246
1247 /*
1248 * common ATA, ATAPI feature tests
1249 */
1250
1251 /* we require DMA support (bit 8 of word 49) */
1252 if (!ata_id_has_dma(dev->id)) {
1253 printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
1254 rc = -EINVAL;
1255 goto err_out_nosup;
1256 }
1257
1258 /* find max transfer mode; for printk only */
1259 xfer_mask = ata_id_xfermask(dev->id);
1260
1261 ata_dump_id(dev->id);
1262
1263 /* ATA-specific feature tests */
1264 if (dev->class == ATA_DEV_ATA) {
1265 dev->n_sectors = ata_id_n_sectors(dev->id);
1266
1267 if (ata_id_has_lba(dev->id)) {
1268 const char *lba_desc;
1269
1270 lba_desc = "LBA";
1271 dev->flags |= ATA_DFLAG_LBA;
1272 if (ata_id_has_lba48(dev->id)) {
1273 dev->flags |= ATA_DFLAG_LBA48;
1274 lba_desc = "LBA48";
1275 }
1276
1277 /* print device info to dmesg */
1278 if (print_info)
1279 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1280 "max %s, %Lu sectors: %s\n",
1281 ap->id, dev->devno,
1282 ata_id_major_version(dev->id),
1283 ata_mode_string(xfer_mask),
1284 (unsigned long long)dev->n_sectors,
1285 lba_desc);
1286 } else {
1287 /* CHS */
1288
1289 /* Default translation */
1290 dev->cylinders = dev->id[1];
1291 dev->heads = dev->id[3];
1292 dev->sectors = dev->id[6];
1293
1294 if (ata_id_current_chs_valid(dev->id)) {
1295 /* Current CHS translation is valid. */
1296 dev->cylinders = dev->id[54];
1297 dev->heads = dev->id[55];
1298 dev->sectors = dev->id[56];
1299 }
1300
1301 /* print device info to dmesg */
1302 if (print_info)
1303 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1304 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1305 ap->id, dev->devno,
1306 ata_id_major_version(dev->id),
1307 ata_mode_string(xfer_mask),
1308 (unsigned long long)dev->n_sectors,
1309 dev->cylinders, dev->heads, dev->sectors);
1310 }
1311
1312 dev->cdb_len = 16;
1313 }
1314
1315 /* ATAPI-specific feature tests */
1316 else if (dev->class == ATA_DEV_ATAPI) {
1317 rc = atapi_cdb_len(dev->id);
1318 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1319 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1320 rc = -EINVAL;
1321 goto err_out_nosup;
1322 }
1323 dev->cdb_len = (unsigned int) rc;
1324
1325 /* print device info to dmesg */
1326 if (print_info)
1327 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1328 ap->id, dev->devno, ata_mode_string(xfer_mask));
1329 }
1330
1331 ap->host->max_cmd_len = 0;
1332 for (i = 0; i < ATA_MAX_DEVICES; i++)
1333 ap->host->max_cmd_len = max_t(unsigned int,
1334 ap->host->max_cmd_len,
1335 ap->device[i].cdb_len);
1336
1337 /* limit bridge transfers to udma5, 200 sectors */
1338 if (ata_dev_knobble(ap, dev)) {
1339 if (print_info)
1340 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1341 ap->id, dev->devno);
1342 ap->udma_mask &= ATA_UDMA5;
1343 dev->max_sectors = ATA_MAX_SECTORS;
1344 }
1345
1346 if (ap->ops->dev_config)
1347 ap->ops->dev_config(ap, dev);
1348
1349 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1350 return 0;
1351
1352 err_out_nosup:
1353 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1354 ap->id, dev->devno);
1355 DPRINTK("EXIT, err\n");
1356 return rc;
1357 }
1358
1359 /**
1360 * ata_bus_probe - Reset and probe ATA bus
1361 * @ap: Bus to probe
1362 *
1363 * Master ATA bus probing function. Initiates a hardware-dependent
1364 * bus reset, then attempts to identify any devices found on
1365 * the bus.
1366 *
1367 * LOCKING:
1368 * PCI/etc. bus probe sem.
1369 *
1370 * RETURNS:
1371 * Zero on success, non-zero on error.
1372 */
1373
1374 static int ata_bus_probe(struct ata_port *ap)
1375 {
1376 unsigned int classes[ATA_MAX_DEVICES];
1377 unsigned int i, rc, found = 0;
1378
1379 ata_port_probe(ap);
1380
1381 /* reset */
1382 if (ap->ops->probe_reset) {
1383 rc = ap->ops->probe_reset(ap, classes);
1384 if (rc) {
1385 printk(KERN_ERR "ata%u: reset failed (errno=%d)\n", ap->id, rc);
1386 return rc;
1387 }
1388
1389 for (i = 0; i < ATA_MAX_DEVICES; i++)
1390 if (classes[i] == ATA_DEV_UNKNOWN)
1391 classes[i] = ATA_DEV_NONE;
1392 } else {
1393 ap->ops->phy_reset(ap);
1394
1395 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1396 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1397 classes[i] = ap->device[i].class;
1398 else
1399 ap->device[i].class = ATA_DEV_UNKNOWN;
1400 }
1401 ata_port_probe(ap);
1402 }
1403
1404 /* read IDENTIFY page and configure devices */
1405 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1406 struct ata_device *dev = &ap->device[i];
1407
1408 dev->class = classes[i];
1409
1410 if (!ata_dev_present(dev))
1411 continue;
1412
1413 WARN_ON(dev->id != NULL);
1414 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1415 dev->class = ATA_DEV_NONE;
1416 continue;
1417 }
1418
1419 if (ata_dev_configure(ap, dev, 1)) {
1420 dev->class++; /* disable device */
1421 continue;
1422 }
1423
1424 found = 1;
1425 }
1426
1427 if (!found)
1428 goto err_out_disable;
1429
1430 ata_set_mode(ap);
1431 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1432 goto err_out_disable;
1433
1434 return 0;
1435
1436 err_out_disable:
1437 ap->ops->port_disable(ap);
1438 return -1;
1439 }
1440
1441 /**
1442 * ata_port_probe - Mark port as enabled
1443 * @ap: Port for which we indicate enablement
1444 *
1445 * Modify @ap data structure such that the system
1446 * thinks that the entire port is enabled.
1447 *
1448 * LOCKING: host_set lock, or some other form of
1449 * serialization.
1450 */
1451
1452 void ata_port_probe(struct ata_port *ap)
1453 {
1454 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1455 }
1456
1457 /**
1458 * sata_print_link_status - Print SATA link status
1459 * @ap: SATA port to printk link status about
1460 *
1461 * This function prints link speed and status of a SATA link.
1462 *
1463 * LOCKING:
1464 * None.
1465 */
1466 static void sata_print_link_status(struct ata_port *ap)
1467 {
1468 u32 sstatus, tmp;
1469 const char *speed;
1470
1471 if (!ap->ops->scr_read)
1472 return;
1473
1474 sstatus = scr_read(ap, SCR_STATUS);
1475
1476 if (sata_dev_present(ap)) {
1477 tmp = (sstatus >> 4) & 0xf;
1478 if (tmp & (1 << 0))
1479 speed = "1.5";
1480 else if (tmp & (1 << 1))
1481 speed = "3.0";
1482 else
1483 speed = "<unknown>";
1484 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1485 ap->id, speed, sstatus);
1486 } else {
1487 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1488 ap->id, sstatus);
1489 }
1490 }
1491
1492 /**
1493 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1494 * @ap: SATA port associated with target SATA PHY.
1495 *
1496 * This function issues commands to standard SATA Sxxx
1497 * PHY registers, to wake up the phy (and device), and
1498 * clear any reset condition.
1499 *
1500 * LOCKING:
1501 * PCI/etc. bus probe sem.
1502 *
1503 */
1504 void __sata_phy_reset(struct ata_port *ap)
1505 {
1506 u32 sstatus;
1507 unsigned long timeout = jiffies + (HZ * 5);
1508
1509 if (ap->flags & ATA_FLAG_SATA_RESET) {
1510 /* issue phy wake/reset */
1511 scr_write_flush(ap, SCR_CONTROL, 0x301);
1512 /* Couldn't find anything in SATA I/II specs, but
1513 * AHCI-1.1 10.4.2 says at least 1 ms. */
1514 mdelay(1);
1515 }
1516 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1517
1518 /* wait for phy to become ready, if necessary */
1519 do {
1520 msleep(200);
1521 sstatus = scr_read(ap, SCR_STATUS);
1522 if ((sstatus & 0xf) != 1)
1523 break;
1524 } while (time_before(jiffies, timeout));
1525
1526 /* print link status */
1527 sata_print_link_status(ap);
1528
1529 /* TODO: phy layer with polling, timeouts, etc. */
1530 if (sata_dev_present(ap))
1531 ata_port_probe(ap);
1532 else
1533 ata_port_disable(ap);
1534
1535 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1536 return;
1537
1538 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1539 ata_port_disable(ap);
1540 return;
1541 }
1542
1543 ap->cbl = ATA_CBL_SATA;
1544 }
1545
1546 /**
1547 * sata_phy_reset - Reset SATA bus.
1548 * @ap: SATA port associated with target SATA PHY.
1549 *
1550 * This function resets the SATA bus, and then probes
1551 * the bus for devices.
1552 *
1553 * LOCKING:
1554 * PCI/etc. bus probe sem.
1555 *
1556 */
1557 void sata_phy_reset(struct ata_port *ap)
1558 {
1559 __sata_phy_reset(ap);
1560 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1561 return;
1562 ata_bus_reset(ap);
1563 }
1564
1565 /**
1566 * ata_port_disable - Disable port.
1567 * @ap: Port to be disabled.
1568 *
1569 * Modify @ap data structure such that the system
1570 * thinks that the entire port is disabled, and should
1571 * never attempt to probe or communicate with devices
1572 * on this port.
1573 *
1574 * LOCKING: host_set lock, or some other form of
1575 * serialization.
1576 */
1577
1578 void ata_port_disable(struct ata_port *ap)
1579 {
1580 ap->device[0].class = ATA_DEV_NONE;
1581 ap->device[1].class = ATA_DEV_NONE;
1582 ap->flags |= ATA_FLAG_PORT_DISABLED;
1583 }
1584
1585 /*
1586 * This mode timing computation functionality is ported over from
1587 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1588 */
1589 /*
1590 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1591 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1592 * for PIO 5, which is a nonstandard extension, and UDMA6, which
1593 * is currently supported only by Maxtor drives.
1594 */
1595
1596 static const struct ata_timing ata_timing[] = {
1597
1598 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1599 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1600 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1601 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1602
1603 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1604 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1605 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1606
1607 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1608
1609 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1610 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1611 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1612
1613 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1614 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1615 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1616
1617 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1618 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1619 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1620
1621 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1622 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1623 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1624
1625 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1626
1627 { 0xFF }
1628 };
1629
1630 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1631 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1632
1633 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1634 {
1635 q->setup = EZ(t->setup * 1000, T);
1636 q->act8b = EZ(t->act8b * 1000, T);
1637 q->rec8b = EZ(t->rec8b * 1000, T);
1638 q->cyc8b = EZ(t->cyc8b * 1000, T);
1639 q->active = EZ(t->active * 1000, T);
1640 q->recover = EZ(t->recover * 1000, T);
1641 q->cycle = EZ(t->cycle * 1000, T);
1642 q->udma = EZ(t->udma * 1000, UT);
1643 }
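/*
 * Note (added for illustration): ENOUGH() is a ceiling division, so each
 * field becomes the number of bus clocks of period T (UT for the UDMA field)
 * needed to cover the interval, and EZ() keeps unused zero fields at zero.
 * The nanosecond values are scaled by 1000 first, which suggests T and UT
 * are expected in picoseconds; e.g. a 70 ns setup with T == 30000 quantizes
 * to EZ(70 * 1000, 30000) == 3 clocks.
 */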
1644
1645 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1646 struct ata_timing *m, unsigned int what)
1647 {
1648 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1649 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1650 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1651 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1652 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1653 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1654 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1655 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1656 }
1657
1658 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1659 {
1660 const struct ata_timing *t;
1661
1662 for (t = ata_timing; t->mode != speed; t++)
1663 if (t->mode == 0xFF)
1664 return NULL;
1665 return t;
1666 }
1667
1668 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1669 struct ata_timing *t, int T, int UT)
1670 {
1671 const struct ata_timing *s;
1672 struct ata_timing p;
1673
1674 /*
1675 * Find the mode.
1676 */
1677
1678 if (!(s = ata_timing_find_mode(speed)))
1679 return -EINVAL;
1680
1681 memcpy(t, s, sizeof(*s));
1682
1683 /*
1684 * If the drive is an EIDE drive, it can tell us it needs extended
1685 * PIO/MW_DMA cycle timing.
1686 */
1687
1688 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1689 memset(&p, 0, sizeof(p));
1690 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1691 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1692 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1693 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1694 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1695 }
1696 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1697 }
1698
1699 /*
1700 * Convert the timing to bus clock counts.
1701 */
1702
1703 ata_timing_quantize(t, t, T, UT);
1704
1705 /*
1706 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1707 * S.M.A.R.T. and some other commands. We have to ensure that the
1708 * DMA cycle timing is slower than or equal to the fastest PIO timing.
1709 */
1710
1711 if (speed > XFER_PIO_4) {
1712 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1713 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1714 }
1715
1716 /*
1717 * Lengthen active & recovery time so that cycle time is correct.
1718 */
1719
1720 if (t->act8b + t->rec8b < t->cyc8b) {
1721 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1722 t->rec8b = t->cyc8b - t->act8b;
1723 }
1724
1725 if (t->active + t->recover < t->cycle) {
1726 t->active += (t->cycle - (t->active + t->recover)) / 2;
1727 t->recover = t->cycle - t->active;
1728 }
1729
1730 return 0;
1731 }
1732
1733 static const struct {
1734 unsigned int shift;
1735 u8 base;
1736 } xfer_mode_classes[] = {
1737 { ATA_SHIFT_UDMA, XFER_UDMA_0 },
1738 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
1739 { ATA_SHIFT_PIO, XFER_PIO_0 },
1740 };
1741
1742 static u8 base_from_shift(unsigned int shift)
1743 {
1744 int i;
1745
1746 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1747 if (xfer_mode_classes[i].shift == shift)
1748 return xfer_mode_classes[i].base;
1749
1750 return 0xff;
1751 }
1752
1753 static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1754 {
1755 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1756 return;
1757
1758 if (dev->xfer_shift == ATA_SHIFT_PIO)
1759 dev->flags |= ATA_DFLAG_PIO;
1760
1761 ata_dev_set_xfermode(ap, dev);
1762
1763 if (ata_dev_revalidate(ap, dev, 0)) {
1764 printk(KERN_ERR "ata%u: failed to revalidate after set "
1765 "xfermode, disabled\n", ap->id);
1766 ata_port_disable(ap);
1767 }
1768
1769 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1770 dev->xfer_shift, (int)dev->xfer_mode);
1771
1772 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1773 ap->id, dev->devno,
1774 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1775 }
1776
1777 static int ata_host_set_pio(struct ata_port *ap)
1778 {
1779 unsigned int mask;
1780 int x, i;
1781 u8 base, xfer_mode;
1782
1783 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
1784 x = fgb(mask);
1785 if (x < 0) {
1786 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1787 return -1;
1788 }
1789
1790 base = base_from_shift(ATA_SHIFT_PIO);
1791 xfer_mode = base + x;
1792
1793 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1794 (int)base, (int)xfer_mode, mask, x);
1795
1796 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1797 struct ata_device *dev = &ap->device[i];
1798 if (ata_dev_present(dev)) {
1799 dev->pio_mode = xfer_mode;
1800 dev->xfer_mode = xfer_mode;
1801 dev->xfer_shift = ATA_SHIFT_PIO;
1802 if (ap->ops->set_piomode)
1803 ap->ops->set_piomode(ap, dev);
1804 }
1805 }
1806
1807 return 0;
1808 }
1809
1810 static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1811 unsigned int xfer_shift)
1812 {
1813 int i;
1814
1815 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1816 struct ata_device *dev = &ap->device[i];
1817 if (ata_dev_present(dev)) {
1818 dev->dma_mode = xfer_mode;
1819 dev->xfer_mode = xfer_mode;
1820 dev->xfer_shift = xfer_shift;
1821 if (ap->ops->set_dmamode)
1822 ap->ops->set_dmamode(ap, dev);
1823 }
1824 }
1825 }
1826
1827 /**
1828 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1829 * @ap: port on which timings will be programmed
1830 *
1831 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1832 *
1833 * LOCKING:
1834 * PCI/etc. bus probe sem.
1835 */
1836 static void ata_set_mode(struct ata_port *ap)
1837 {
1838 unsigned int xfer_shift;
1839 u8 xfer_mode;
1840 int rc;
1841
1842 /* step 1: always set host PIO timings */
1843 rc = ata_host_set_pio(ap);
1844 if (rc)
1845 goto err_out;
1846
1847 /* step 2: choose the best data xfer mode */
1848 xfer_mode = xfer_shift = 0;
1849 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
1850 if (rc)
1851 goto err_out;
1852
1853 /* step 3: if that xfer mode isn't PIO, set host DMA timings */
1854 if (xfer_shift != ATA_SHIFT_PIO)
1855 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1856
1857 /* step 4: update devices' xfer mode */
1858 ata_dev_set_mode(ap, &ap->device[0]);
1859 ata_dev_set_mode(ap, &ap->device[1]);
1860
1861 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1862 return;
1863
1864 if (ap->ops->post_set_mode)
1865 ap->ops->post_set_mode(ap);
1866
1867 return;
1868
1869 err_out:
1870 ata_port_disable(ap);
1871 }
1872
1873 /**
1874 * ata_tf_to_host - issue ATA taskfile to host controller
1875 * @ap: port to which command is being issued
1876 * @tf: ATA taskfile register set
1877 *
1878 * Issues ATA taskfile register set to ATA host controller,
1879 * with proper synchronization with interrupt handler and
1880 * other threads.
1881 *
1882 * LOCKING:
1883 * spin_lock_irqsave(host_set lock)
1884 */
1885
1886 static inline void ata_tf_to_host(struct ata_port *ap,
1887 const struct ata_taskfile *tf)
1888 {
1889 ap->ops->tf_load(ap, tf);
1890 ap->ops->exec_command(ap, tf);
1891 }
1892
1893 /**
1894 * ata_busy_sleep - sleep until BSY clears, or timeout
1895 * @ap: port containing status register to be polled
1896 * @tmout_pat: impatience timeout
1897 * @tmout: overall timeout
1898 *
1899 * Sleep until ATA Status register bit BSY clears,
1900 * or a timeout occurs.
1901 *
1902 * LOCKING: None.
1903 */
1904
1905 unsigned int ata_busy_sleep (struct ata_port *ap,
1906 unsigned long tmout_pat, unsigned long tmout)
1907 {
1908 unsigned long timer_start, timeout;
1909 u8 status;
1910
1911 status = ata_busy_wait(ap, ATA_BUSY, 300);
1912 timer_start = jiffies;
1913 timeout = timer_start + tmout_pat;
1914 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1915 msleep(50);
1916 status = ata_busy_wait(ap, ATA_BUSY, 3);
1917 }
1918
1919 if (status & ATA_BUSY)
1920 printk(KERN_WARNING "ata%u is slow to respond, "
1921 "please be patient\n", ap->id);
1922
1923 timeout = timer_start + tmout;
1924 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1925 msleep(50);
1926 status = ata_chk_status(ap);
1927 }
1928
1929 if (status & ATA_BUSY) {
1930 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1931 ap->id, tmout / HZ);
1932 return 1;
1933 }
1934
1935 return 0;
1936 }
1937
1938 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1939 {
1940 struct ata_ioports *ioaddr = &ap->ioaddr;
1941 unsigned int dev0 = devmask & (1 << 0);
1942 unsigned int dev1 = devmask & (1 << 1);
1943 unsigned long timeout;
1944
1945 /* if device 0 was found in ata_devchk, wait for its
1946 * BSY bit to clear
1947 */
1948 if (dev0)
1949 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1950
1951 /* if device 1 was found in ata_devchk, wait for
1952 * register access, then wait for BSY to clear
1953 */
1954 timeout = jiffies + ATA_TMOUT_BOOT;
1955 while (dev1) {
1956 u8 nsect, lbal;
1957
1958 ap->ops->dev_select(ap, 1);
1959 if (ap->flags & ATA_FLAG_MMIO) {
1960 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1961 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1962 } else {
1963 nsect = inb(ioaddr->nsect_addr);
1964 lbal = inb(ioaddr->lbal_addr);
1965 }
1966 if ((nsect == 1) && (lbal == 1))
1967 break;
1968 if (time_after(jiffies, timeout)) {
1969 dev1 = 0;
1970 break;
1971 }
1972 msleep(50); /* give drive a breather */
1973 }
1974 if (dev1)
1975 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1976
1977 /* is all this really necessary? */
1978 ap->ops->dev_select(ap, 0);
1979 if (dev1)
1980 ap->ops->dev_select(ap, 1);
1981 if (dev0)
1982 ap->ops->dev_select(ap, 0);
1983 }
1984
1985 /**
1986 * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
1987 * @ap: Port to reset and probe
1988 *
1989 * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
1990 * probe the bus. Not often used these days.
1991 *
1992 * LOCKING:
1993 * PCI/etc. bus probe sem.
1994 * Obtains host_set lock.
1995 *
1996 */
1997
1998 static unsigned int ata_bus_edd(struct ata_port *ap)
1999 {
2000 struct ata_taskfile tf;
2001 unsigned long flags;
2002
2003 /* set up execute-device-diag (bus reset) taskfile */
2004 /* also, take interrupts to a known state (disabled) */
2005 DPRINTK("execute-device-diag\n");
2006 ata_tf_init(ap, &tf, 0);
2007 tf.ctl |= ATA_NIEN;
2008 tf.command = ATA_CMD_EDD;
2009 tf.protocol = ATA_PROT_NODATA;
2010
2011 /* do bus reset */
2012 spin_lock_irqsave(&ap->host_set->lock, flags);
2013 ata_tf_to_host(ap, &tf);
2014 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2015
2016 /* spec says at least 2 ms, but who knows with those
2017 * crazy ATAPI devices...
2018 */
2019 msleep(150);
2020
2021 return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2022 }
2023
2024 static unsigned int ata_bus_softreset(struct ata_port *ap,
2025 unsigned int devmask)
2026 {
2027 struct ata_ioports *ioaddr = &ap->ioaddr;
2028
2029 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2030
2031 /* software reset. causes dev0 to be selected */
2032 if (ap->flags & ATA_FLAG_MMIO) {
2033 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2034 udelay(20); /* FIXME: flush */
2035 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2036 udelay(20); /* FIXME: flush */
2037 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2038 } else {
2039 outb(ap->ctl, ioaddr->ctl_addr);
2040 udelay(10);
2041 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2042 udelay(10);
2043 outb(ap->ctl, ioaddr->ctl_addr);
2044 }
2045
2046 /* spec mandates ">= 2ms" before checking status.
2047 * We wait 150ms, because that was the magic delay used for
2048 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2049 * between when the ATA command register is written, and then
2050 * status is checked. Because waiting for "a while" before
2051 * checking status is fine, post SRST, we perform this magic
2052 * delay here as well.
2053 */
2054 msleep(150);
2055
2056 ata_bus_post_reset(ap, devmask);
2057
2058 return 0;
2059 }
2060
2061 /**
2062 * ata_bus_reset - reset host port and associated ATA channel
2063 * @ap: port to reset
2064 *
2065 * This is typically the first time we actually start issuing
2066 * commands to the ATA channel. We wait for BSY to clear, then
2067 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2068 * result. Determine what devices, if any, are on the channel
2069 * by looking at the device 0/1 error register. Look at the signature
2070 * stored in each device's taskfile registers, to determine if
2071 * the device is ATA or ATAPI.
2072 *
2073 * LOCKING:
2074 * PCI/etc. bus probe sem.
2075 * Obtains host_set lock.
2076 *
2077 * SIDE EFFECTS:
2078 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
2079 */
2080
2081 void ata_bus_reset(struct ata_port *ap)
2082 {
2083 struct ata_ioports *ioaddr = &ap->ioaddr;
2084 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2085 u8 err;
2086 unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
2087
2088 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2089
2090 /* determine if device 0/1 are present */
2091 if (ap->flags & ATA_FLAG_SATA_RESET)
2092 dev0 = 1;
2093 else {
2094 dev0 = ata_devchk(ap, 0);
2095 if (slave_possible)
2096 dev1 = ata_devchk(ap, 1);
2097 }
2098
2099 if (dev0)
2100 devmask |= (1 << 0);
2101 if (dev1)
2102 devmask |= (1 << 1);
2103
2104 /* select device 0 again */
2105 ap->ops->dev_select(ap, 0);
2106
2107 /* issue bus reset */
2108 if (ap->flags & ATA_FLAG_SRST)
2109 rc = ata_bus_softreset(ap, devmask);
2110 else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
2111 /* set up device control */
2112 if (ap->flags & ATA_FLAG_MMIO)
2113 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2114 else
2115 outb(ap->ctl, ioaddr->ctl_addr);
2116 rc = ata_bus_edd(ap);
2117 }
2118
2119 if (rc)
2120 goto err_out;
2121
2122 /*
2123 * determine by signature whether we have ATA or ATAPI devices
2124 */
2125 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2126 if ((slave_possible) && (err != 0x81))
2127 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2128
2129 /* re-enable interrupts */
2130 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2131 ata_irq_on(ap);
2132
2133 /* is double-select really necessary? */
2134 if (ap->device[1].class != ATA_DEV_NONE)
2135 ap->ops->dev_select(ap, 1);
2136 if (ap->device[0].class != ATA_DEV_NONE)
2137 ap->ops->dev_select(ap, 0);
2138
2139 /* if no devices were detected, disable this port */
2140 if ((ap->device[0].class == ATA_DEV_NONE) &&
2141 (ap->device[1].class == ATA_DEV_NONE))
2142 goto err_out;
2143
2144 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2145 /* set up device control for ATA_FLAG_SATA_RESET */
2146 if (ap->flags & ATA_FLAG_MMIO)
2147 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2148 else
2149 outb(ap->ctl, ioaddr->ctl_addr);
2150 }
2151
2152 DPRINTK("EXIT\n");
2153 return;
2154
2155 err_out:
2156 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
2157 ap->ops->port_disable(ap);
2158
2159 DPRINTK("EXIT\n");
2160 }
2161
2162 static int sata_phy_resume(struct ata_port *ap)
2163 {
2164 unsigned long timeout = jiffies + (HZ * 5);
2165 u32 sstatus;
2166
2167 scr_write_flush(ap, SCR_CONTROL, 0x300);
2168
2169 /* Wait for phy to become ready, if necessary. */
2170 do {
2171 msleep(200);
2172 sstatus = scr_read(ap, SCR_STATUS);
2173 if ((sstatus & 0xf) != 1)
2174 return 0;
2175 } while (time_before(jiffies, timeout));
2176
2177 return -1;
2178 }
2179
2180 /**
2181 * ata_std_probeinit - initialize probing
2182 * @ap: port to be probed
2183 *
2184 * @ap is about to be probed. Initialize it. This function is
2185 * to be used as standard callback for ata_drive_probe_reset().
2186 *
2187 * NOTE!!! Do not use this function as probeinit if a low level
2188 * driver implements only hardreset. Just pass NULL as probeinit
2189 * in that case. Using this function is probably okay but doing
2190 * so makes reset sequence different from the original
2191 * ->phy_reset implementation and Jeff nervous. :-P
2192 */
2193 void ata_std_probeinit(struct ata_port *ap)
2194 {
2195 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2196 sata_phy_resume(ap);
2197 if (sata_dev_present(ap))
2198 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2199 }
2200 }
2201
2202 /**
2203 * ata_std_softreset - reset host port via ATA SRST
2204 * @ap: port to reset
2205 * @verbose: fail verbosely
2206 * @classes: resulting classes of attached devices
2207 *
2208 * Reset host port using ATA SRST. This function is to be used
2209 * as standard callback for ata_drive_*_reset() functions.
2210 *
2211 * LOCKING:
2212 * Kernel thread context (may sleep)
2213 *
2214 * RETURNS:
2215 * 0 on success, -errno otherwise.
2216 */
2217 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2218 {
2219 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2220 unsigned int devmask = 0, err_mask;
2221 u8 err;
2222
2223 DPRINTK("ENTER\n");
2224
2225 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2226 classes[0] = ATA_DEV_NONE;
2227 goto out;
2228 }
2229
2230 /* determine if device 0/1 are present */
2231 if (ata_devchk(ap, 0))
2232 devmask |= (1 << 0);
2233 if (slave_possible && ata_devchk(ap, 1))
2234 devmask |= (1 << 1);
2235
2236 /* select device 0 again */
2237 ap->ops->dev_select(ap, 0);
2238
2239 /* issue bus reset */
2240 DPRINTK("about to softreset, devmask=%x\n", devmask);
2241 err_mask = ata_bus_softreset(ap, devmask);
2242 if (err_mask) {
2243 if (verbose)
2244 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2245 ap->id, err_mask);
2246 else
2247 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2248 err_mask);
2249 return -EIO;
2250 }
2251
2252 /* determine by signature whether we have ATA or ATAPI devices */
2253 classes[0] = ata_dev_try_classify(ap, 0, &err);
2254 if (slave_possible && err != 0x81)
2255 classes[1] = ata_dev_try_classify(ap, 1, &err);
2256
2257 out:
2258 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2259 return 0;
2260 }
2261
2262 /**
2263 * sata_std_hardreset - reset host port via SATA phy reset
2264 * @ap: port to reset
2265 * @verbose: fail verbosely
2266 * @class: resulting class of attached device
2267 *
2268 * SATA phy-reset host port using DET bits of SControl register.
2269 * This function is to be used as standard callback for
2270 * ata_drive_*_reset().
2271 *
2272 * LOCKING:
2273 * Kernel thread context (may sleep)
2274 *
2275 * RETURNS:
2276 * 0 on success, -errno otherwise.
2277 */
2278 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2279 {
2280 DPRINTK("ENTER\n");
2281
2282 /* Issue phy wake/reset */
2283 scr_write_flush(ap, SCR_CONTROL, 0x301);
2284
2285 /*
2286 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2287 * 10.4.2 says at least 1 ms.
2288 */
2289 msleep(1);
2290
2291 /* Bring phy back */
2292 sata_phy_resume(ap);
2293
2294 /* TODO: phy layer with polling, timeouts, etc. */
2295 if (!sata_dev_present(ap)) {
2296 *class = ATA_DEV_NONE;
2297 DPRINTK("EXIT, link offline\n");
2298 return 0;
2299 }
2300
2301 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2302 if (verbose)
2303 printk(KERN_ERR "ata%u: COMRESET failed "
2304 "(device not ready)\n", ap->id);
2305 else
2306 DPRINTK("EXIT, device not ready\n");
2307 return -EIO;
2308 }
2309
2310 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2311
2312 *class = ata_dev_try_classify(ap, 0, NULL);
2313
2314 DPRINTK("EXIT, class=%u\n", *class);
2315 return 0;
2316 }
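
/*
 * A minimal sketch of how the magic SControl values used above decompose.
 * The helper below is hypothetical (not part of libata); the field layout
 * is from the SATA SControl/SStatus definition: bits 3:0 DET, 7:4 SPD,
 * 11:8 IPM.
 *
 *	static inline u32 scontrol_compose(u32 det, u32 spd, u32 ipm)
 *	{
 *		return (det & 0xf) | ((spd & 0xf) << 4) | ((ipm & 0xf) << 8);
 *	}
 *
 *	scontrol_compose(1, 0, 3);	// 0x301: issue COMRESET, no speed
 *					// limit, partial/slumber disabled
 *	scontrol_compose(0, 0, 3);	// 0x300: end reset, keep power
 *					// management transitions disabled
 *
 * sata_dev_present() then simply checks that the DET field of SStatus
 * reads 3 (device present, phy communication established).
 */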
2317
2318 /**
2319 * ata_std_postreset - standard postreset callback
2320 * @ap: the target ata_port
2321 * @classes: classes of attached devices
2322 *
2323 * This function is invoked after a successful reset. Note that
2324 * the device might have been reset more than once using
2325 * different reset methods before postreset is invoked.
2326 *
2327 * This function is to be used as standard callback for
2328 * ata_drive_*_reset().
2329 *
2330 * LOCKING:
2331 * Kernel thread context (may sleep)
2332 */
2333 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2334 {
2335 DPRINTK("ENTER\n");
2336
2337 /* set cable type if it isn't already set */
2338 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2339 ap->cbl = ATA_CBL_SATA;
2340
2341 /* print link status */
2342 if (ap->cbl == ATA_CBL_SATA)
2343 sata_print_link_status(ap);
2344
2345 /* re-enable interrupts */
2346 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2347 ata_irq_on(ap);
2348
2349 /* is double-select really necessary? */
2350 if (classes[0] != ATA_DEV_NONE)
2351 ap->ops->dev_select(ap, 1);
2352 if (classes[1] != ATA_DEV_NONE)
2353 ap->ops->dev_select(ap, 0);
2354
2355 /* bail out if no device is present */
2356 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2357 DPRINTK("EXIT, no device\n");
2358 return;
2359 }
2360
2361 /* set up device control */
2362 if (ap->ioaddr.ctl_addr) {
2363 if (ap->flags & ATA_FLAG_MMIO)
2364 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2365 else
2366 outb(ap->ctl, ap->ioaddr.ctl_addr);
2367 }
2368
2369 DPRINTK("EXIT\n");
2370 }
2371
2372 /**
2373 * ata_std_probe_reset - standard probe reset method
2374 * @ap: port on which to perform probe-reset
2375 * @classes: resulting classes of attached devices
2376 *
2377 * The stock off-the-shelf ->probe_reset method.
2378 *
2379 * LOCKING:
2380 * Kernel thread context (may sleep)
2381 *
2382 * RETURNS:
2383 * 0 on success, -errno otherwise.
2384 */
2385 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2386 {
2387 ata_reset_fn_t hardreset;
2388
2389 hardreset = NULL;
2390 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2391 hardreset = sata_std_hardreset;
2392
2393 return ata_drive_probe_reset(ap, ata_std_probeinit,
2394 ata_std_softreset, hardreset,
2395 ata_std_postreset, classes);
2396 }
2397
2398 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2399 ata_postreset_fn_t postreset,
2400 unsigned int *classes)
2401 {
2402 int i, rc;
2403
2404 for (i = 0; i < ATA_MAX_DEVICES; i++)
2405 classes[i] = ATA_DEV_UNKNOWN;
2406
2407 rc = reset(ap, 0, classes);
2408 if (rc)
2409 return rc;
2410
2411 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2412 * complete and convert all remaining ATA_DEV_UNKNOWN entries to
2413 * ATA_DEV_NONE.
2414 */
2415 for (i = 0; i < ATA_MAX_DEVICES; i++)
2416 if (classes[i] != ATA_DEV_UNKNOWN)
2417 break;
2418
2419 if (i < ATA_MAX_DEVICES)
2420 for (i = 0; i < ATA_MAX_DEVICES; i++)
2421 if (classes[i] == ATA_DEV_UNKNOWN)
2422 classes[i] = ATA_DEV_NONE;
2423
2424 if (postreset)
2425 postreset(ap, classes);
2426
2427 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2428 }
2429
2430 /**
2431 * ata_drive_probe_reset - Perform probe reset with given methods
2432 * @ap: port to reset
2433 * @probeinit: probeinit method (can be NULL)
2434 * @softreset: softreset method (can be NULL)
2435 * @hardreset: hardreset method (can be NULL)
2436 * @postreset: postreset method (can be NULL)
2437 * @classes: resulting classes of attached devices
2438 *
2439 * Reset the specified port and classify attached devices using
2440 * given methods. This function prefers softreset but tries all
2441 * possible reset sequences to reset and classify devices. This
2442 * function is intended to be used for constructing ->probe_reset
2443 * callback by low level drivers.
2444 *
2445 * Reset methods should obey the following rules.
2446 *
2447 * - Return 0 on success, -errno on failure.
2448 * - If classification is supported, fill classes[] with
2449 * recognized class codes.
2450 * - If classification is not supported, leave classes[] alone.
2451 * - If verbose is non-zero, print error message on failure;
2452 * otherwise, shut up.
2453 *
2454 * LOCKING:
2455 * Kernel thread context (may sleep)
2456 *
2457 * RETURNS:
2458 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2459 * if classification fails, and any error code from reset
2460 * methods.
2461 */
2462 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2463 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2464 ata_postreset_fn_t postreset, unsigned int *classes)
2465 {
2466 int rc = -EINVAL;
2467
2468 if (probeinit)
2469 probeinit(ap);
2470
2471 if (softreset) {
2472 rc = do_probe_reset(ap, softreset, postreset, classes);
2473 if (rc == 0)
2474 return 0;
2475 }
2476
2477 if (!hardreset)
2478 return rc;
2479
2480 rc = do_probe_reset(ap, hardreset, postreset, classes);
2481 if (rc == 0 || rc != -ENODEV)
2482 return rc;
2483
2484 if (softreset)
2485 rc = do_probe_reset(ap, softreset, postreset, classes);
2486
2487 return rc;
2488 }
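
/*
 * Rough sketch of how a low level driver might build its ->probe_reset
 * from the helpers above.  The "mydrv_" name is hypothetical and not part
 * of libata; a SATA driver with working SCR access would normally just
 * point ->probe_reset at ata_std_probe_reset() instead.
 *
 *	static int mydrv_probe_reset(struct ata_port *ap, unsigned int *classes)
 *	{
 *		// softreset only: probeinit and hardreset omitted (NULL)
 *		return ata_drive_probe_reset(ap, NULL,
 *					     ata_std_softreset, NULL,
 *					     ata_std_postreset, classes);
 *	}
 */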
2489
2490 /**
2491 * ata_dev_same_device - Determine whether new ID matches configured device
2492 * @ap: port on which the device to compare against resides
2493 * @dev: device to compare against
2494 * @new_class: class of the new device
2495 * @new_id: IDENTIFY page of the new device
2496 *
2497 * Compare @new_class and @new_id against @dev and determine
2498 * whether @dev is the device indicated by @new_class and
2499 * @new_id.
2500 *
2501 * LOCKING:
2502 * None.
2503 *
2504 * RETURNS:
2505 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2506 */
2507 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2508 unsigned int new_class, const u16 *new_id)
2509 {
2510 const u16 *old_id = dev->id;
2511 unsigned char model[2][41], serial[2][21];
2512 u64 new_n_sectors;
2513
2514 if (dev->class != new_class) {
2515 printk(KERN_INFO
2516 "ata%u: dev %u class mismatch %d != %d\n",
2517 ap->id, dev->devno, dev->class, new_class);
2518 return 0;
2519 }
2520
2521 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2522 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2523 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2524 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2525 new_n_sectors = ata_id_n_sectors(new_id);
2526
2527 if (strcmp(model[0], model[1])) {
2528 printk(KERN_INFO
2529 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2530 ap->id, dev->devno, model[0], model[1]);
2531 return 0;
2532 }
2533
2534 if (strcmp(serial[0], serial[1])) {
2535 printk(KERN_INFO
2536 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2537 ap->id, dev->devno, serial[0], serial[1]);
2538 return 0;
2539 }
2540
2541 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2542 printk(KERN_INFO
2543 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2544 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2545 (unsigned long long)new_n_sectors);
2546 return 0;
2547 }
2548
2549 return 1;
2550 }
2551
2552 /**
2553 * ata_dev_revalidate - Revalidate ATA device
2554 * @ap: port on which the device to revalidate resides
2555 * @dev: device to revalidate
2556 * @post_reset: is this revalidation after reset?
2557 *
2558 * Re-read IDENTIFY page and make sure @dev is still attached to
2559 * the port.
2560 *
2561 * LOCKING:
2562 * Kernel thread context (may sleep)
2563 *
2564 * RETURNS:
2565 * 0 on success, negative errno otherwise
2566 */
2567 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2568 int post_reset)
2569 {
2570 unsigned int class;
2571 u16 *id;
2572 int rc;
2573
2574 if (!ata_dev_present(dev))
2575 return -ENODEV;
2576
2577 class = dev->class;
2578 id = NULL;
2579
2580 /* allocate & read ID data */
2581 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2582 if (rc)
2583 goto fail;
2584
2585 /* is the device still there? */
2586 if (!ata_dev_same_device(ap, dev, class, id)) {
2587 rc = -ENODEV;
2588 goto fail;
2589 }
2590
2591 kfree(dev->id);
2592 dev->id = id;
2593
2594 /* configure device according to the new ID */
2595 return ata_dev_configure(ap, dev, 0);
2596
2597 fail:
2598 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2599 ap->id, dev->devno, rc);
2600 kfree(id);
2601 return rc;
2602 }
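
/*
 * One possible caller, sketched under the assumption that error handling
 * has just reset the port (hypothetical code, not libata's actual EH path):
 *
 *	struct ata_device *dev = &ap->device[0];
 *
 *	if (ata_dev_revalidate(ap, dev, 1))	// 1: revalidation after reset
 *		// a negative errno means the attached device changed or the
 *		// IDENTIFY re-read failed; stop trusting it
 *		ata_port_disable(ap);
 */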
2603
2604 static void ata_pr_blacklisted(const struct ata_port *ap,
2605 const struct ata_device *dev)
2606 {
2607 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
2608 ap->id, dev->devno);
2609 }
2610
2611 static const char * const ata_dma_blacklist [] = {
2612 "WDC AC11000H",
2613 "WDC AC22100H",
2614 "WDC AC32500H",
2615 "WDC AC33100H",
2616 "WDC AC31600H",
2617 "WDC AC32100H",
2618 "WDC AC23200L",
2619 "Compaq CRD-8241B",
2620 "CRD-8400B",
2621 "CRD-8480B",
2622 "CRD-8482B",
2623 "CRD-84",
2624 "SanDisk SDP3B",
2625 "SanDisk SDP3B-64",
2626 "SANYO CD-ROM CRD",
2627 "HITACHI CDR-8",
2628 "HITACHI CDR-8335",
2629 "HITACHI CDR-8435",
2630 "Toshiba CD-ROM XM-6202B",
2631 "TOSHIBA CD-ROM XM-1702BC",
2632 "CD-532E-A",
2633 "E-IDE CD-ROM CR-840",
2634 "CD-ROM Drive/F5A",
2635 "WPI CDD-820",
2636 "SAMSUNG CD-ROM SC-148C",
2637 "SAMSUNG CD-ROM SC",
2638 "SanDisk SDP3B-64",
2639 "ATAPI CD-ROM DRIVE 40X MAXIMUM",
2640 "_NEC DV5800A",
2641 };
2642
2643 static int ata_dma_blacklisted(const struct ata_device *dev)
2644 {
2645 unsigned char model_num[41];
2646 int i;
2647
2648 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
2649
2650 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
2651 if (!strcmp(ata_dma_blacklist[i], model_num))
2652 return 1;
2653
2654 return 0;
2655 }
2656
2657 static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
2658 {
2659 const struct ata_device *master, *slave;
2660 unsigned int mask;
2661
2662 master = &ap->device[0];
2663 slave = &ap->device[1];
2664
2665 WARN_ON(!ata_dev_present(master) && !ata_dev_present(slave));
2666
2667 if (shift == ATA_SHIFT_UDMA) {
2668 mask = ap->udma_mask;
2669 if (ata_dev_present(master)) {
2670 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
2671 if (ata_dma_blacklisted(master)) {
2672 mask = 0;
2673 ata_pr_blacklisted(ap, master);
2674 }
2675 }
2676 if (ata_dev_present(slave)) {
2677 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
2678 if (ata_dma_blacklisted(slave)) {
2679 mask = 0;
2680 ata_pr_blacklisted(ap, slave);
2681 }
2682 }
2683 }
2684 else if (shift == ATA_SHIFT_MWDMA) {
2685 mask = ap->mwdma_mask;
2686 if (ata_dev_present(master)) {
2687 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
2688 if (ata_dma_blacklisted(master)) {
2689 mask = 0;
2690 ata_pr_blacklisted(ap, master);
2691 }
2692 }
2693 if (ata_dev_present(slave)) {
2694 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
2695 if (ata_dma_blacklisted(slave)) {
2696 mask = 0;
2697 ata_pr_blacklisted(ap, slave);
2698 }
2699 }
2700 }
2701 else if (shift == ATA_SHIFT_PIO) {
2702 mask = ap->pio_mask;
2703 if (ata_dev_present(master)) {
2704 /* spec doesn't return explicit support for
2705 * PIO0-2, so we fake it
2706 */
2707 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
2708 tmp_mode <<= 3;
2709 tmp_mode |= 0x7;
2710 mask &= tmp_mode;
2711 }
2712 if (ata_dev_present(slave)) {
2713 /* spec doesn't return explicit support for
2714 * PIO0-2, so we fake it
2715 */
2716 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
2717 tmp_mode <<= 3;
2718 tmp_mode |= 0x7;
2719 mask &= tmp_mode;
2720 }
2721 }
2722 else {
2723 mask = 0xffffffff; /* shut up compiler warning */
2724 BUG();
2725 }
2726
2727 return mask;
2728 }
2729
2730 /* find greatest bit */
2731 static int fgb(u32 bitmap)
2732 {
2733 unsigned int i;
2734 int x = -1;
2735
2736 for (i = 0; i < 32; i++)
2737 if (bitmap & (1 << i))
2738 x = i;
2739
2740 return x;
2741 }
2742
2743 /**
2744 * ata_choose_xfer_mode - attempt to find best transfer mode
2745 * @ap: Port for which an xfer mode will be selected
2746 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
2747 * @xfer_shift_out: (output) bit shift that selects this mode
2748 *
2749 * Based on host and device capabilities, determine the
2750 * maximum transfer mode that is amenable to all.
2751 *
2752 * LOCKING:
2753 * PCI/etc. bus probe sem.
2754 *
2755 * RETURNS:
2756 * Zero on success, negative on error.
2757 */
2758
2759 static int ata_choose_xfer_mode(const struct ata_port *ap,
2760 u8 *xfer_mode_out,
2761 unsigned int *xfer_shift_out)
2762 {
2763 unsigned int mask, shift;
2764 int x, i;
2765
2766 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
2767 shift = xfer_mode_classes[i].shift;
2768 mask = ata_get_mode_mask(ap, shift);
2769
2770 x = fgb(mask);
2771 if (x >= 0) {
2772 *xfer_mode_out = xfer_mode_classes[i].base + x;
2773 *xfer_shift_out = shift;
2774 return 0;
2775 }
2776 }
2777
2778 return -1;
2779 }
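
/*
 * Worked example (mask value assumed, from a typical UDMA/100 capable
 * setup): if ata_get_mode_mask() yields 0x3f for ATA_SHIFT_UDMA, i.e.
 * UDMA modes 0-5 are supported by the host and every attached device,
 * the first class already matches and we get
 *
 *	fgb(0x3f);				// == 5, highest common UDMA mode
 *	*xfer_mode_out  = XFER_UDMA_0 + 5;	// == 0x45, UDMA/100
 *	*xfer_shift_out = ATA_SHIFT_UDMA;
 *
 * A PIO-only setup would fall through to the ATA_SHIFT_PIO class and end
 * up with XFER_PIO_0 + fgb(pio_mask) instead.
 */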
2780
2781 /**
2782 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2783 * @ap: Port associated with device @dev
2784 * @dev: Device to which command will be sent
2785 *
2786 * Issue SET FEATURES - XFER MODE command to device @dev
2787 * on port @ap.
2788 *
2789 * LOCKING:
2790 * PCI/etc. bus probe sem.
2791 */
2792
2793 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2794 {
2795 struct ata_taskfile tf;
2796
2797 /* set up set-features taskfile */
2798 DPRINTK("set features - xfer mode\n");
2799
2800 ata_tf_init(ap, &tf, dev->devno);
2801 tf.command = ATA_CMD_SET_FEATURES;
2802 tf.feature = SETFEATURES_XFER;
2803 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2804 tf.protocol = ATA_PROT_NODATA;
2805 tf.nsect = dev->xfer_mode;
2806
2807 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
2808 printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n",
2809 ap->id);
2810 ata_port_disable(ap);
2811 }
2812
2813 DPRINTK("EXIT\n");
2814 }
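
/*
 * For reference, a sketch of what the taskfile built above looks like on
 * the wire (constants from the ATA SET FEATURES definition, shown purely
 * as an illustration):
 *
 *	tf.command = ATA_CMD_SET_FEATURES;	// 0xef
 *	tf.feature = SETFEATURES_XFER;		// 0x03, "set transfer mode"
 *	tf.nsect   = XFER_UDMA_5;		// e.g. 0x45 selects UDMA/100
 *
 * dev->xfer_mode is already one of the XFER_* codes picked by
 * ata_choose_xfer_mode(), so it is copied into the sector count register
 * unchanged.
 */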
2815
2816 /**
2817 * ata_dev_init_params - Issue INIT DEV PARAMS command
2818 * @ap: Port associated with device @dev
2819 * @dev: Device to which command will be sent
2820 *
2821 * LOCKING:
2822 * Kernel thread context (may sleep)
2823 *
2824 * RETURNS:
2825 * 0 on success, AC_ERR_* mask otherwise.
2826 */
2827
2828 static unsigned int ata_dev_init_params(struct ata_port *ap,
2829 struct ata_device *dev)
2830 {
2831 struct ata_taskfile tf;
2832 unsigned int err_mask;
2833 u16 sectors = dev->id[6];
2834 u16 heads = dev->id[3];
2835
2836 /* Number of sectors per track 1-255. Number of heads 1-16 */
2837 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2838 return 0;
2839
2840 /* set up init dev params taskfile */
2841 DPRINTK("init dev params \n");
2842
2843 ata_tf_init(ap, &tf, dev->devno);
2844 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2845 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2846 tf.protocol = ATA_PROT_NODATA;
2847 tf.nsect = sectors;
2848 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2849
2850 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2851
2852 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2853 return err_mask;
2854 }
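
/*
 * Worked example (geometry values are illustrative): a legacy drive
 * reporting 16 heads in id[3] and 63 sectors per track in id[6] gets
 *
 *	tf.nsect   = 63;		// sectors per track
 *	tf.device |= (16 - 1) & 0x0f;	// max head number == 0x0f
 *
 * while a drive reporting values outside 1-255 sectors / 1-16 heads is
 * skipped, since INIT DEVICE PARAMETERS is meaningless for it.
 */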
2855
2856 /**
2857 * ata_sg_clean - Unmap DMA memory associated with command
2858 * @qc: Command containing DMA memory to be released
2859 *
2860 * Unmap all mapped DMA memory associated with this command.
2861 *
2862 * LOCKING:
2863 * spin_lock_irqsave(host_set lock)
2864 */
2865
2866 static void ata_sg_clean(struct ata_queued_cmd *qc)
2867 {
2868 struct ata_port *ap = qc->ap;
2869 struct scatterlist *sg = qc->__sg;
2870 int dir = qc->dma_dir;
2871 void *pad_buf = NULL;
2872
2873 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2874 WARN_ON(sg == NULL);
2875
2876 if (qc->flags & ATA_QCFLAG_SINGLE)
2877 WARN_ON(qc->n_elem > 1);
2878
2879 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2880
2881 /* if we padded the buffer out to a 32-bit boundary, and data
2882 * xfer direction is from-device, we must copy from the
2883 * pad buffer back into the supplied buffer
2884 */
2885 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2886 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2887
2888 if (qc->flags & ATA_QCFLAG_SG) {
2889 if (qc->n_elem)
2890 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2891 /* restore last sg */
2892 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2893 if (pad_buf) {
2894 struct scatterlist *psg = &qc->pad_sgent;
2895 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2896 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2897 kunmap_atomic(addr, KM_IRQ0);
2898 }
2899 } else {
2900 if (qc->n_elem)
2901 dma_unmap_single(ap->host_set->dev,
2902 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2903 dir);
2904 /* restore sg */
2905 sg->length += qc->pad_len;
2906 if (pad_buf)
2907 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2908 pad_buf, qc->pad_len);
2909 }
2910
2911 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2912 qc->__sg = NULL;
2913 }
2914
2915 /**
2916 * ata_fill_sg - Fill PCI IDE PRD table
2917 * @qc: Metadata associated with taskfile to be transferred
2918 *
2919 * Fill PCI IDE PRD (scatter-gather) table with segments
2920 * associated with the current disk command.
2921 *
2922 * LOCKING:
2923 * spin_lock_irqsave(host_set lock)
2924 *
2925 */
2926 static void ata_fill_sg(struct ata_queued_cmd *qc)
2927 {
2928 struct ata_port *ap = qc->ap;
2929 struct scatterlist *sg;
2930 unsigned int idx;
2931
2932 WARN_ON(qc->__sg == NULL);
2933 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2934
2935 idx = 0;
2936 ata_for_each_sg(sg, qc) {
2937 u32 addr, offset;
2938 u32 sg_len, len;
2939
2940 /* determine if physical DMA addr spans 64K boundary.
2941 * Note h/w doesn't support 64-bit, so we unconditionally
2942 * truncate dma_addr_t to u32.
2943 */
2944 addr = (u32) sg_dma_address(sg);
2945 sg_len = sg_dma_len(sg);
2946
2947 while (sg_len) {
2948 offset = addr & 0xffff;
2949 len = sg_len;
2950 if ((offset + sg_len) > 0x10000)
2951 len = 0x10000 - offset;
2952
2953 ap->prd[idx].addr = cpu_to_le32(addr);
2954 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2955 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2956
2957 idx++;
2958 sg_len -= len;
2959 addr += len;
2960 }
2961 }
2962
2963 if (idx)
2964 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2965 }
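
/*
 * Worked example of the 64K split above (addresses made up for
 * illustration): a single 0x8000 byte segment at bus address 0x1fff0
 * straddles a 64K boundary and therefore produces two PRD entries:
 *
 *	PRD[0]: addr = 0x1fff0, len = 0x0010;	// up to the 0x20000 boundary
 *	PRD[1]: addr = 0x20000, len = 0x7ff0;	// the remainder
 *
 * ATA_PRD_EOT is then OR'd into the last entry's flags_len to terminate
 * the table.
 */
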
2966 /**
2967 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2968 * @qc: Metadata associated with taskfile to check
2969 *
2970 * Allow low-level driver to filter ATA PACKET commands, returning
2971 * a status indicating whether or not it is OK to use DMA for the
2972 * supplied PACKET command.
2973 *
2974 * LOCKING:
2975 * spin_lock_irqsave(host_set lock)
2976 *
2977 * RETURNS: 0 when ATAPI DMA can be used
2978 * nonzero otherwise
2979 */
2980 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2981 {
2982 struct ata_port *ap = qc->ap;
2983 int rc = 0; /* Assume ATAPI DMA is OK by default */
2984
2985 if (ap->ops->check_atapi_dma)
2986 rc = ap->ops->check_atapi_dma(qc);
2987
2988 return rc;
2989 }
2990 /**
2991 * ata_qc_prep - Prepare taskfile for submission
2992 * @qc: Metadata associated with taskfile to be prepared
2993 *
2994 * Prepare ATA taskfile for submission.
2995 *
2996 * LOCKING:
2997 * spin_lock_irqsave(host_set lock)
2998 */
2999 void ata_qc_prep(struct ata_queued_cmd *qc)
3000 {
3001 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3002 return;
3003
3004 ata_fill_sg(qc);
3005 }
3006
3007 /**
3008 * ata_sg_init_one - Associate command with memory buffer
3009 * @qc: Command to be associated
3010 * @buf: Memory buffer
3011 * @buflen: Length of memory buffer, in bytes.
3012 *
3013 * Initialize the data-related elements of queued_cmd @qc
3014 * to point to a single memory buffer, @buf of byte length @buflen.
3015 *
3016 * LOCKING:
3017 * spin_lock_irqsave(host_set lock)
3018 */
3019
3020 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3021 {
3022 struct scatterlist *sg;
3023
3024 qc->flags |= ATA_QCFLAG_SINGLE;
3025
3026 memset(&qc->sgent, 0, sizeof(qc->sgent));
3027 qc->__sg = &qc->sgent;
3028 qc->n_elem = 1;
3029 qc->orig_n_elem = 1;
3030 qc->buf_virt = buf;
3031
3032 sg = qc->__sg;
3033 sg_init_one(sg, buf, buflen);
3034 }
3035
3036 /**
3037 * ata_sg_init - Associate command with scatter-gather table.
3038 * @qc: Command to be associated
3039 * @sg: Scatter-gather table.
3040 * @n_elem: Number of elements in s/g table.
3041 *
3042 * Initialize the data-related elements of queued_cmd @qc
3043 * to point to a scatter-gather table @sg, containing @n_elem
3044 * elements.
3045 *
3046 * LOCKING:
3047 * spin_lock_irqsave(host_set lock)
3048 */
3049
3050 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3051 unsigned int n_elem)
3052 {
3053 qc->flags |= ATA_QCFLAG_SG;
3054 qc->__sg = sg;
3055 qc->n_elem = n_elem;
3056 qc->orig_n_elem = n_elem;
3057 }
3058
3059 /**
3060 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3061 * @qc: Command with memory buffer to be mapped.
3062 *
3063 * DMA-map the memory buffer associated with queued_cmd @qc.
3064 *
3065 * LOCKING:
3066 * spin_lock_irqsave(host_set lock)
3067 *
3068 * RETURNS:
3069 * Zero on success, negative on error.
3070 */
3071
3072 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3073 {
3074 struct ata_port *ap = qc->ap;
3075 int dir = qc->dma_dir;
3076 struct scatterlist *sg = qc->__sg;
3077 dma_addr_t dma_address;
3078 int trim_sg = 0;
3079
3080 /* we must lengthen transfers to end on a 32-bit boundary */
3081 qc->pad_len = sg->length & 3;
3082 if (qc->pad_len) {
3083 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3084 struct scatterlist *psg = &qc->pad_sgent;
3085
3086 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3087
3088 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3089
3090 if (qc->tf.flags & ATA_TFLAG_WRITE)
3091 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3092 qc->pad_len);
3093
3094 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3095 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3096 /* trim sg */
3097 sg->length -= qc->pad_len;
3098 if (sg->length == 0)
3099 trim_sg = 1;
3100
3101 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3102 sg->length, qc->pad_len);
3103 }
3104
3105 if (trim_sg) {
3106 qc->n_elem--;
3107 goto skip_map;
3108 }
3109
3110 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
3111 sg->length, dir);
3112 if (dma_mapping_error(dma_address)) {
3113 /* restore sg */
3114 sg->length += qc->pad_len;
3115 return -1;
3116 }
3117
3118 sg_dma_address(sg) = dma_address;
3119 sg_dma_len(sg) = sg->length;
3120
3121 skip_map:
3122 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3123 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3124
3125 return 0;
3126 }
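
/*
 * Padding arithmetic by example (buffer length chosen arbitrarily): a
 * 510 byte ATAPI transfer ends 2 bytes short of a 32-bit boundary, so
 *
 *	qc->pad_len     = 510 & 3;		// 2
 *	sg->length      = 510 - 2;		// 508, DMA-mapped as usual
 *	sg_dma_len(psg) = ATA_DMA_PAD_SZ;	// 4 byte bounce buffer
 *
 * For a write the trailing 2 bytes are copied into the pad buffer before
 * the command is issued; ata_sg_clean() copies them back for a read.
 */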
3127
3128 /**
3129 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3130 * @qc: Command with scatter-gather table to be mapped.
3131 *
3132 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3133 *
3134 * LOCKING:
3135 * spin_lock_irqsave(host_set lock)
3136 *
3137 * RETURNS:
3138 * Zero on success, negative on error.
3139 *
3140 */
3141
3142 static int ata_sg_setup(struct ata_queued_cmd *qc)
3143 {
3144 struct ata_port *ap = qc->ap;
3145 struct scatterlist *sg = qc->__sg;
3146 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3147 int n_elem, pre_n_elem, dir, trim_sg = 0;
3148
3149 VPRINTK("ENTER, ata%u\n", ap->id);
3150 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3151
3152 /* we must lengthen transfers to end on a 32-bit boundary */
3153 qc->pad_len = lsg->length & 3;
3154 if (qc->pad_len) {
3155 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3156 struct scatterlist *psg = &qc->pad_sgent;
3157 unsigned int offset;
3158
3159 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3160
3161 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3162
3163 /*
3164 * psg->page/offset are used to copy to-be-written
3165 * data in this function or read data in ata_sg_clean.
3166 */
3167 offset = lsg->offset + lsg->length - qc->pad_len;
3168 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3169 psg->offset = offset_in_page(offset);
3170
3171 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3172 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3173 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3174 kunmap_atomic(addr, KM_IRQ0);
3175 }
3176
3177 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3178 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3179 /* trim last sg */
3180 lsg->length -= qc->pad_len;
3181 if (lsg->length == 0)
3182 trim_sg = 1;
3183
3184 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3185 qc->n_elem - 1, lsg->length, qc->pad_len);
3186 }
3187
3188 pre_n_elem = qc->n_elem;
3189 if (trim_sg && pre_n_elem)
3190 pre_n_elem--;
3191
3192 if (!pre_n_elem) {
3193 n_elem = 0;
3194 goto skip_map;
3195 }
3196
3197 dir = qc->dma_dir;
3198 n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
3199 if (n_elem < 1) {
3200 /* restore last sg */
3201 lsg->length += qc->pad_len;
3202 return -1;
3203 }
3204
3205 DPRINTK("%d sg elements mapped\n", n_elem);
3206
3207 skip_map:
3208 qc->n_elem = n_elem;
3209
3210 return 0;
3211 }
3212
3213 /**
3214 * ata_poll_qc_complete - turn irq back on and finish qc
3215 * @qc: Command to complete
3217 *
3218 * LOCKING:
3219 * None. (grabs host lock)
3220 */
3221
3222 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3223 {
3224 struct ata_port *ap = qc->ap;
3225 unsigned long flags;
3226
3227 spin_lock_irqsave(&ap->host_set->lock, flags);
3228 ap->flags &= ~ATA_FLAG_NOINTR;
3229 ata_irq_on(ap);
3230 ata_qc_complete(qc);
3231 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3232 }
3233
3234 /**
3235 * ata_pio_poll - poll using PIO, depending on current state
3236 * @ap: the target ata_port
3237 *
3238 * LOCKING:
3239 * None. (executing in kernel thread context)
3240 *
3241 * RETURNS:
3242 * timeout value to use
3243 */
3244
3245 static unsigned long ata_pio_poll(struct ata_port *ap)
3246 {
3247 struct ata_queued_cmd *qc;
3248 u8 status;
3249 unsigned int poll_state = HSM_ST_UNKNOWN;
3250 unsigned int reg_state = HSM_ST_UNKNOWN;
3251
3252 qc = ata_qc_from_tag(ap, ap->active_tag);
3253 WARN_ON(qc == NULL);
3254
3255 switch (ap->hsm_task_state) {
3256 case HSM_ST:
3257 case HSM_ST_POLL:
3258 poll_state = HSM_ST_POLL;
3259 reg_state = HSM_ST;
3260 break;
3261 case HSM_ST_LAST:
3262 case HSM_ST_LAST_POLL:
3263 poll_state = HSM_ST_LAST_POLL;
3264 reg_state = HSM_ST_LAST;
3265 break;
3266 default:
3267 BUG();
3268 break;
3269 }
3270
3271 status = ata_chk_status(ap);
3272 if (status & ATA_BUSY) {
3273 if (time_after(jiffies, ap->pio_task_timeout)) {
3274 qc->err_mask |= AC_ERR_TIMEOUT;
3275 ap->hsm_task_state = HSM_ST_TMOUT;
3276 return 0;
3277 }
3278 ap->hsm_task_state = poll_state;
3279 return ATA_SHORT_PAUSE;
3280 }
3281
3282 ap->hsm_task_state = reg_state;
3283 return 0;
3284 }
3285
3286 /**
3287 * ata_pio_complete - check if drive is busy or idle
3288 * @ap: the target ata_port
3289 *
3290 * LOCKING:
3291 * None. (executing in kernel thread context)
3292 *
3293 * RETURNS:
3294 * Non-zero if qc completed, zero otherwise.
3295 */
3296
3297 static int ata_pio_complete (struct ata_port *ap)
3298 {
3299 struct ata_queued_cmd *qc;
3300 u8 drv_stat;
3301
3302 /*
3303 * This is purely heuristic. This is a fast path. Sometimes when
3304 * we enter, BSY will be cleared in a chk-status or two. If not,
3305 * the drive is probably seeking or something. Snooze for a couple
3306 * msecs, then chk-status again. If still busy, fall back to
3307 * HSM_ST_LAST_POLL state.
3308 */
3309 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3310 if (drv_stat & ATA_BUSY) {
3311 msleep(2);
3312 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3313 if (drv_stat & ATA_BUSY) {
3314 ap->hsm_task_state = HSM_ST_LAST_POLL;
3315 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3316 return 0;
3317 }
3318 }
3319
3320 qc = ata_qc_from_tag(ap, ap->active_tag);
3321 WARN_ON(qc == NULL);
3322
3323 drv_stat = ata_wait_idle(ap);
3324 if (!ata_ok(drv_stat)) {
3325 qc->err_mask |= __ac_err_mask(drv_stat);
3326 ap->hsm_task_state = HSM_ST_ERR;
3327 return 0;
3328 }
3329
3330 ap->hsm_task_state = HSM_ST_IDLE;
3331
3332 WARN_ON(qc->err_mask);
3333 ata_poll_qc_complete(qc);
3334
3335 /* another command may start at this point */
3336
3337 return 1;
3338 }
3339
3340
3341 /**
3342 * swap_buf_le16 - swap halves of 16-bit words in place
3343 * @buf: Buffer to swap
3344 * @buf_words: Number of 16-bit words in buffer.
3345 *
3346 * Swap halves of 16-bit words if needed to convert from
3347 * little-endian byte order to native cpu byte order, or
3348 * vice-versa.
3349 *
3350 * LOCKING:
3351 * Inherited from caller.
3352 */
3353 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3354 {
3355 #ifdef __BIG_ENDIAN
3356 unsigned int i;
3357
3358 for (i = 0; i < buf_words; i++)
3359 buf[i] = le16_to_cpu(buf[i]);
3360 #endif /* __BIG_ENDIAN */
3361 }
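
/*
 * Typical use, sketched: IDENTIFY DEVICE data is defined as 256 little
 * endian 16-bit words, so after transferring it into a u16 buffer the
 * driver does
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);	// no-op on little endian hosts
 *
 * so that id[ATA_ID_UDMA_MODES] and friends can be read as native u16s.
 */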
3362
3363 /**
3364 * ata_mmio_data_xfer - Transfer data by MMIO
3365 * @ap: port to read/write
3366 * @buf: data buffer
3367 * @buflen: buffer length
3368 * @write_data: read/write
3369 *
3370 * Transfer data from/to the device data register by MMIO.
3371 *
3372 * LOCKING:
3373 * Inherited from caller.
3374 */
3375
3376 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3377 unsigned int buflen, int write_data)
3378 {
3379 unsigned int i;
3380 unsigned int words = buflen >> 1;
3381 u16 *buf16 = (u16 *) buf;
3382 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3383
3384 /* Transfer multiple of 2 bytes */
3385 if (write_data) {
3386 for (i = 0; i < words; i++)
3387 writew(le16_to_cpu(buf16[i]), mmio);
3388 } else {
3389 for (i = 0; i < words; i++)
3390 buf16[i] = cpu_to_le16(readw(mmio));
3391 }
3392
3393 /* Transfer trailing 1 byte, if any. */
3394 if (unlikely(buflen & 0x01)) {
3395 u16 align_buf[1] = { 0 };
3396 unsigned char *trailing_buf = buf + buflen - 1;
3397
3398 if (write_data) {
3399 memcpy(align_buf, trailing_buf, 1);
3400 writew(le16_to_cpu(align_buf[0]), mmio);
3401 } else {
3402 align_buf[0] = cpu_to_le16(readw(mmio));
3403 memcpy(trailing_buf, align_buf, 1);
3404 }
3405 }
3406 }
3407
3408 /**
3409 * ata_pio_data_xfer - Transfer data by PIO
3410 * @ap: port to read/write
3411 * @buf: data buffer
3412 * @buflen: buffer length
3413 * @write_data: read/write
3414 *
3415 * Transfer data from/to the device data register by PIO.
3416 *
3417 * LOCKING:
3418 * Inherited from caller.
3419 */
3420
3421 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3422 unsigned int buflen, int write_data)
3423 {
3424 unsigned int words = buflen >> 1;
3425
3426 /* Transfer multiple of 2 bytes */
3427 if (write_data)
3428 outsw(ap->ioaddr.data_addr, buf, words);
3429 else
3430 insw(ap->ioaddr.data_addr, buf, words);
3431
3432 /* Transfer trailing 1 byte, if any. */
3433 if (unlikely(buflen & 0x01)) {
3434 u16 align_buf[1] = { 0 };
3435 unsigned char *trailing_buf = buf + buflen - 1;
3436
3437 if (write_data) {
3438 memcpy(align_buf, trailing_buf, 1);
3439 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3440 } else {
3441 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3442 memcpy(trailing_buf, align_buf, 1);
3443 }
3444 }
3445 }
3446
3447 /**
3448 * ata_data_xfer - Transfer data from/to the data register.
3449 * @ap: port to read/write
3450 * @buf: data buffer
3451 * @buflen: buffer length
3452 * @do_write: read/write
3453 *
3454 * Transfer data from/to the device data register.
3455 *
3456 * LOCKING:
3457 * Inherited from caller.
3458 */
3459
3460 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3461 unsigned int buflen, int do_write)
3462 {
3463 /* Make the crap hardware pay the costs not the good stuff */
3464 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3465 unsigned long flags;
3466 local_irq_save(flags);
3467 if (ap->flags & ATA_FLAG_MMIO)
3468 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3469 else
3470 ata_pio_data_xfer(ap, buf, buflen, do_write);
3471 local_irq_restore(flags);
3472 } else {
3473 if (ap->flags & ATA_FLAG_MMIO)
3474 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3475 else
3476 ata_pio_data_xfer(ap, buf, buflen, do_write);
3477 }
3478 }
3479
3480 /**
3481 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3482 * @qc: Command on going
3483 *
3484 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3485 *
3486 * LOCKING:
3487 * Inherited from caller.
3488 */
3489
3490 static void ata_pio_sector(struct ata_queued_cmd *qc)
3491 {
3492 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3493 struct scatterlist *sg = qc->__sg;
3494 struct ata_port *ap = qc->ap;
3495 struct page *page;
3496 unsigned int offset;
3497 unsigned char *buf;
3498
3499 if (qc->cursect == (qc->nsect - 1))
3500 ap->hsm_task_state = HSM_ST_LAST;
3501
3502 page = sg[qc->cursg].page;
3503 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3504
3505 /* get the current page and offset */
3506 page = nth_page(page, (offset >> PAGE_SHIFT));
3507 offset %= PAGE_SIZE;
3508
3509 buf = kmap(page) + offset;
3510
3511 qc->cursect++;
3512 qc->cursg_ofs++;
3513
3514 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3515 qc->cursg++;
3516 qc->cursg_ofs = 0;
3517 }
3518
3519 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3520
3521 /* do the actual data transfer */
3522 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3523 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3524
3525 kunmap(page);
3526 }
3527
3528 /**
3529 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3530 * @qc: Command on going
3531 * @bytes: number of bytes
3532 *
3533 * Transfer data from/to the ATAPI device.
3534 *
3535 * LOCKING:
3536 * Inherited from caller.
3537 *
3538 */
3539
3540 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3541 {
3542 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3543 struct scatterlist *sg = qc->__sg;
3544 struct ata_port *ap = qc->ap;
3545 struct page *page;
3546 unsigned char *buf;
3547 unsigned int offset, count;
3548
3549 if (qc->curbytes + bytes >= qc->nbytes)
3550 ap->hsm_task_state = HSM_ST_LAST;
3551
3552 next_sg:
3553 if (unlikely(qc->cursg >= qc->n_elem)) {
3554 /*
3555 * The end of qc->sg is reached and the device expects
3556 * more data to transfer. In order not to overrun qc->sg
3557 * while still honouring the length in the byte count register,
3558 * - for the read case, discard the trailing data from the device
3559 * - for the write case, pad the transfer with zero data
3560 */
3561 u16 pad_buf[1] = { 0 };
3562 unsigned int words = bytes >> 1;
3563 unsigned int i;
3564
3565 if (words) /* warning if bytes > 1 */
3566 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3567 ap->id, bytes);
3568
3569 for (i = 0; i < words; i++)
3570 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3571
3572 ap->hsm_task_state = HSM_ST_LAST;
3573 return;
3574 }
3575
3576 sg = &qc->__sg[qc->cursg];
3577
3578 page = sg->page;
3579 offset = sg->offset + qc->cursg_ofs;
3580
3581 /* get the current page and offset */
3582 page = nth_page(page, (offset >> PAGE_SHIFT));
3583 offset %= PAGE_SIZE;
3584
3585 /* don't overrun current sg */
3586 count = min(sg->length - qc->cursg_ofs, bytes);
3587
3588 /* don't cross page boundaries */
3589 count = min(count, (unsigned int)PAGE_SIZE - offset);
3590
3591 buf = kmap(page) + offset;
3592
3593 bytes -= count;
3594 qc->curbytes += count;
3595 qc->cursg_ofs += count;
3596
3597 if (qc->cursg_ofs == sg->length) {
3598 qc->cursg++;
3599 qc->cursg_ofs = 0;
3600 }
3601
3602 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3603
3604 /* do the actual data transfer */
3605 ata_data_xfer(ap, buf, count, do_write);
3606
3607 kunmap(page);
3608
3609 if (bytes)
3610 goto next_sg;
3611 }
3612
3613 /**
3614 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3615 * @qc: Command on going
3616 *
3617 * Transfer data from/to the ATAPI device.
3618 *
3619 * LOCKING:
3620 * Inherited from caller.
3621 */
3622
3623 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3624 {
3625 struct ata_port *ap = qc->ap;
3626 struct ata_device *dev = qc->dev;
3627 unsigned int ireason, bc_lo, bc_hi, bytes;
3628 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3629
3630 ap->ops->tf_read(ap, &qc->tf);
3631 ireason = qc->tf.nsect;
3632 bc_lo = qc->tf.lbam;
3633 bc_hi = qc->tf.lbah;
3634 bytes = (bc_hi << 8) | bc_lo;
3635
3636 /* shall be cleared to zero, indicating xfer of data */
3637 if (ireason & (1 << 0))
3638 goto err_out;
3639
3640 /* make sure transfer direction matches expected */
3641 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3642 if (do_write != i_write)
3643 goto err_out;
3644
3645 __atapi_pio_bytes(qc, bytes);
3646
3647 return;
3648
3649 err_out:
3650 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3651 ap->id, dev->devno);
3652 qc->err_mask |= AC_ERR_HSM;
3653 ap->hsm_task_state = HSM_ST_ERR;
3654 }
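
/*
 * The checks above decode the ATAPI "interrupt reason" register, roughly
 * (sketch only; bit names per the packet command protocol):
 *
 *	cod   = ireason & (1 << 0);	// 1 = CDB expected, 0 = data phase
 *	io    = ireason & (1 << 1);	// 1 = to host (read), 0 = to device
 *	bytes = (tf.lbah << 8) | tf.lbam;	// byte count for this DRQ block
 *
 * A read command should therefore see cod == 0 and io != 0 here; anything
 * else means device and driver have fallen out of step, hence AC_ERR_HSM.
 */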
3655
3656 /**
3657 * ata_pio_block - start PIO on a block
3658 * @ap: the target ata_port
3659 *
3660 * LOCKING:
3661 * None. (executing in kernel thread context)
3662 */
3663
3664 static void ata_pio_block(struct ata_port *ap)
3665 {
3666 struct ata_queued_cmd *qc;
3667 u8 status;
3668
3669 /*
3670 * This is purely heuristic. This is a fast path.
3671 * Sometimes when we enter, BSY will be cleared in
3672 * a chk-status or two. If not, the drive is probably seeking
3673 * or something. Snooze for a couple msecs, then
3674 * chk-status again. If still busy, fall back to
3675 * HSM_ST_POLL state.
3676 */
3677 status = ata_busy_wait(ap, ATA_BUSY, 5);
3678 if (status & ATA_BUSY) {
3679 msleep(2);
3680 status = ata_busy_wait(ap, ATA_BUSY, 10);
3681 if (status & ATA_BUSY) {
3682 ap->hsm_task_state = HSM_ST_POLL;
3683 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3684 return;
3685 }
3686 }
3687
3688 qc = ata_qc_from_tag(ap, ap->active_tag);
3689 WARN_ON(qc == NULL);
3690
3691 /* check error */
3692 if (status & (ATA_ERR | ATA_DF)) {
3693 qc->err_mask |= AC_ERR_DEV;
3694 ap->hsm_task_state = HSM_ST_ERR;
3695 return;
3696 }
3697
3698 /* transfer data if any */
3699 if (is_atapi_taskfile(&qc->tf)) {
3700 /* DRQ=0 means no more data to transfer */
3701 if ((status & ATA_DRQ) == 0) {
3702 ap->hsm_task_state = HSM_ST_LAST;
3703 return;
3704 }
3705
3706 atapi_pio_bytes(qc);
3707 } else {
3708 /* handle BSY=0, DRQ=0 as error */
3709 if ((status & ATA_DRQ) == 0) {
3710 qc->err_mask |= AC_ERR_HSM;
3711 ap->hsm_task_state = HSM_ST_ERR;
3712 return;
3713 }
3714
3715 ata_pio_sector(qc);
3716 }
3717 }
3718
3719 static void ata_pio_error(struct ata_port *ap)
3720 {
3721 struct ata_queued_cmd *qc;
3722
3723 qc = ata_qc_from_tag(ap, ap->active_tag);
3724 WARN_ON(qc == NULL);
3725
3726 if (qc->tf.command != ATA_CMD_PACKET)
3727 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3728
3729 /* make sure qc->err_mask is available to
3730 * know what's wrong and recover
3731 */
3732 WARN_ON(qc->err_mask == 0);
3733
3734 ap->hsm_task_state = HSM_ST_IDLE;
3735
3736 ata_poll_qc_complete(qc);
3737 }
3738
3739 static void ata_pio_task(void *_data)
3740 {
3741 struct ata_port *ap = _data;
3742 unsigned long timeout;
3743 int qc_completed;
3744
3745 fsm_start:
3746 timeout = 0;
3747 qc_completed = 0;
3748
3749 switch (ap->hsm_task_state) {
3750 case HSM_ST_IDLE:
3751 return;
3752
3753 case HSM_ST:
3754 ata_pio_block(ap);
3755 break;
3756
3757 case HSM_ST_LAST:
3758 qc_completed = ata_pio_complete(ap);
3759 break;
3760
3761 case HSM_ST_POLL:
3762 case HSM_ST_LAST_POLL:
3763 timeout = ata_pio_poll(ap);
3764 break;
3765
3766 case HSM_ST_TMOUT:
3767 case HSM_ST_ERR:
3768 ata_pio_error(ap);
3769 return;
3770 }
3771
3772 if (timeout)
3773 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3774 else if (!qc_completed)
3775 goto fsm_start;
3776 }
3777
3778 /**
3779 * atapi_packet_task - Write CDB bytes to hardware
3780 * @_data: Port to which ATAPI device is attached.
3781 *
3782 * When the device has indicated that it is ready to accept
3783 * a CDB, this function is called to send the CDB.
3784 * If DMA is to be performed, exit immediately.
3785 * Otherwise, we are in polling mode, so poll
3786 * status until the operation succeeds or fails.
3787 *
3788 * LOCKING:
3789 * Kernel thread context (may sleep)
3790 */
3791
3792 static void atapi_packet_task(void *_data)
3793 {
3794 struct ata_port *ap = _data;
3795 struct ata_queued_cmd *qc;
3796 u8 status;
3797
3798 qc = ata_qc_from_tag(ap, ap->active_tag);
3799 WARN_ON(qc == NULL);
3800 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3801
3802 /* sleep-wait for BSY to clear */
3803 DPRINTK("busy wait\n");
3804 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3805 qc->err_mask |= AC_ERR_TIMEOUT;
3806 goto err_out;
3807 }
3808
3809 /* make sure DRQ is set */
3810 status = ata_chk_status(ap);
3811 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3812 qc->err_mask |= AC_ERR_HSM;
3813 goto err_out;
3814 }
3815
3816 /* send SCSI cdb */
3817 DPRINTK("send cdb\n");
3818 WARN_ON(qc->dev->cdb_len < 12);
3819
3820 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3821 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3822 unsigned long flags;
3823
3824 /* Once we're done issuing command and kicking bmdma,
3825 * irq handler takes over. To not lose irq, we need
3826 * to clear NOINTR flag before sending cdb, but
3827 * interrupt handler shouldn't be invoked before we're
3828 * finished. Hence, the following locking.
3829 */
3830 spin_lock_irqsave(&ap->host_set->lock, flags);
3831 ap->flags &= ~ATA_FLAG_NOINTR;
3832 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3833 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3834 ap->ops->bmdma_start(qc); /* initiate bmdma */
3835 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3836 } else {
3837 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3838
3839 /* PIO commands are handled by polling */
3840 ap->hsm_task_state = HSM_ST;
3841 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3842 }
3843
3844 return;
3845
3846 err_out:
3847 ata_poll_qc_complete(qc);
3848 }
3849
3850 /**
3851 * ata_qc_timeout - Handle timeout of queued command
3852 * @qc: Command that timed out
3853 *
3854 * Some part of the kernel (currently, only the SCSI layer)
3855 * has noticed that the active command on port @ap has not
3856 * completed after a specified length of time. Handle this
3857 * condition by disabling DMA (if necessary) and completing
3858 * transactions, with error if necessary.
3859 *
3860 * This also handles the case of the "lost interrupt", where
3861 * for some reason (possibly hardware bug, possibly driver bug)
3862 * an interrupt was not delivered to the driver, even though the
3863 * transaction completed successfully.
3864 *
3865 * LOCKING:
3866 * Inherited from SCSI layer (none, can sleep)
3867 */
3868
3869 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3870 {
3871 struct ata_port *ap = qc->ap;
3872 struct ata_host_set *host_set = ap->host_set;
3873 u8 host_stat = 0, drv_stat;
3874 unsigned long flags;
3875
3876 DPRINTK("ENTER\n");
3877
3878 ap->hsm_task_state = HSM_ST_IDLE;
3879
3880 spin_lock_irqsave(&host_set->lock, flags);
3881
3882 switch (qc->tf.protocol) {
3883
3884 case ATA_PROT_DMA:
3885 case ATA_PROT_ATAPI_DMA:
3886 host_stat = ap->ops->bmdma_status(ap);
3887
3888 /* before we do anything else, clear DMA-Start bit */
3889 ap->ops->bmdma_stop(qc);
3890
3891 /* fall through */
3892
3893 default:
3894 ata_altstatus(ap);
3895 drv_stat = ata_chk_status(ap);
3896
3897 /* ack bmdma irq events */
3898 ap->ops->irq_clear(ap);
3899
3900 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3901 ap->id, qc->tf.command, drv_stat, host_stat);
3902
3903 /* complete taskfile transaction */
3904 qc->err_mask |= ac_err_mask(drv_stat);
3905 break;
3906 }
3907
3908 spin_unlock_irqrestore(&host_set->lock, flags);
3909
3910 ata_eh_qc_complete(qc);
3911
3912 DPRINTK("EXIT\n");
3913 }
3914
3915 /**
3916 * ata_eng_timeout - Handle timeout of queued command
3917 * @ap: Port on which timed-out command is active
3918 *
3919 * Some part of the kernel (currently, only the SCSI layer)
3920 * has noticed that the active command on port @ap has not
3921 * completed after a specified length of time. Handle this
3922 * condition by disabling DMA (if necessary) and completing
3923 * transactions, with error if necessary.
3924 *
3925 * This also handles the case of the "lost interrupt", where
3926 * for some reason (possibly hardware bug, possibly driver bug)
3927 * an interrupt was not delivered to the driver, even though the
3928 * transaction completed successfully.
3929 *
3930 * LOCKING:
3931 * Inherited from SCSI layer (none, can sleep)
3932 */
3933
3934 void ata_eng_timeout(struct ata_port *ap)
3935 {
3936 DPRINTK("ENTER\n");
3937
3938 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3939
3940 DPRINTK("EXIT\n");
3941 }
3942
3943 /**
3944 * ata_qc_new - Request an available ATA command, for queueing
3945 * @ap: Port from which an available command structure is requested
3947 *
3948 * LOCKING:
3949 * None.
3950 */
3951
3952 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3953 {
3954 struct ata_queued_cmd *qc = NULL;
3955 unsigned int i;
3956
3957 for (i = 0; i < ATA_MAX_QUEUE; i++)
3958 if (!test_and_set_bit(i, &ap->qactive)) {
3959 qc = ata_qc_from_tag(ap, i);
3960 break;
3961 }
3962
3963 if (qc)
3964 qc->tag = i;
3965
3966 return qc;
3967 }
3968
3969 /**
3970 * ata_qc_new_init - Request an available ATA command, and initialize it
3971 * @ap: Port associated with device @dev
3972 * @dev: Device for which we request an available command structure
3973 *
3974 * LOCKING:
3975 * None.
3976 */
3977
3978 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3979 struct ata_device *dev)
3980 {
3981 struct ata_queued_cmd *qc;
3982
3983 qc = ata_qc_new(ap);
3984 if (qc) {
3985 qc->scsicmd = NULL;
3986 qc->ap = ap;
3987 qc->dev = dev;
3988
3989 ata_qc_reinit(qc);
3990 }
3991
3992 return qc;
3993 }
3994
3995 /**
3996 * ata_qc_free - free unused ata_queued_cmd
3997 * @qc: Command to complete
3998 *
3999 * Designed to free unused ata_queued_cmd object
4000 * in case something prevents using it.
4001 *
4002 * LOCKING:
4003 * spin_lock_irqsave(host_set lock)
4004 */
4005 void ata_qc_free(struct ata_queued_cmd *qc)
4006 {
4007 struct ata_port *ap = qc->ap;
4008 unsigned int tag;
4009
4010 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4011
4012 qc->flags = 0;
4013 tag = qc->tag;
4014 if (likely(ata_tag_valid(tag))) {
4015 if (tag == ap->active_tag)
4016 ap->active_tag = ATA_TAG_POISON;
4017 qc->tag = ATA_TAG_POISON;
4018 clear_bit(tag, &ap->qactive);
4019 }
4020 }
4021
4022 void __ata_qc_complete(struct ata_queued_cmd *qc)
4023 {
4024 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4025 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4026
4027 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4028 ata_sg_clean(qc);
4029
4030 /* atapi: mark qc as inactive to prevent the interrupt handler
4031 * from completing the command twice later, before the error handler
4032 * is called. (when rc != 0 and atapi request sense is needed)
4033 */
4034 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4035
4036 /* call completion callback */
4037 qc->complete_fn(qc);
4038 }
4039
4040 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4041 {
4042 struct ata_port *ap = qc->ap;
4043
4044 switch (qc->tf.protocol) {
4045 case ATA_PROT_DMA:
4046 case ATA_PROT_ATAPI_DMA:
4047 return 1;
4048
4049 case ATA_PROT_ATAPI:
4050 case ATA_PROT_PIO:
4051 case ATA_PROT_PIO_MULT:
4052 if (ap->flags & ATA_FLAG_PIO_DMA)
4053 return 1;
4054
4055 /* fall through */
4056
4057 default:
4058 return 0;
4059 }
4060
4061 /* never reached */
4062 }
4063
4064 /**
4065 * ata_qc_issue - issue taskfile to device
4066 * @qc: command to issue to device
4067 *
4068 * Prepare an ATA command for submission to the device.
4069 * This includes mapping the data into a DMA-able
4070 * area, filling in the S/G table, and finally
4071 * writing the taskfile to hardware, starting the command.
4072 *
4073 * LOCKING:
4074 * spin_lock_irqsave(host_set lock)
4075 *
4076 * RETURNS:
4077 * Zero on success, AC_ERR_* mask on failure
4078 */
4079
4080 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
4081 {
4082 struct ata_port *ap = qc->ap;
4083
4084 if (ata_should_dma_map(qc)) {
4085 if (qc->flags & ATA_QCFLAG_SG) {
4086 if (ata_sg_setup(qc))
4087 goto sg_err;
4088 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4089 if (ata_sg_setup_one(qc))
4090 goto sg_err;
4091 }
4092 } else {
4093 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4094 }
4095
4096 ap->ops->qc_prep(qc);
4097
4098 qc->ap->active_tag = qc->tag;
4099 qc->flags |= ATA_QCFLAG_ACTIVE;
4100
4101 return ap->ops->qc_issue(qc);
4102
4103 sg_err:
4104 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4105 return AC_ERR_SYSTEM;
4106 }
4107
4108
4109 /**
4110 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4111 * @qc: command to issue to device
4112 *
4113 * Using various libata functions and hooks, this function
4114 * starts an ATA command. ATA commands are grouped into
4115 * classes called "protocols", and issuing each type of protocol
4116 * is slightly different.
4117 *
4118 * May be used as the qc_issue() entry in ata_port_operations.
4119 *
4120 * LOCKING:
4121 * spin_lock_irqsave(host_set lock)
4122 *
4123 * RETURNS:
4124 * Zero on success, AC_ERR_* mask on failure
4125 */
4126
4127 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4128 {
4129 struct ata_port *ap = qc->ap;
4130
4131 ata_dev_select(ap, qc->dev->devno, 1, 0);
4132
4133 switch (qc->tf.protocol) {
4134 case ATA_PROT_NODATA:
4135 ata_tf_to_host(ap, &qc->tf);
4136 break;
4137
4138 case ATA_PROT_DMA:
4139 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4140 ap->ops->bmdma_setup(qc); /* set up bmdma */
4141 ap->ops->bmdma_start(qc); /* initiate bmdma */
4142 break;
4143
4144 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
4145 ata_qc_set_polling(qc);
4146 ata_tf_to_host(ap, &qc->tf);
4147 ap->hsm_task_state = HSM_ST;
4148 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4149 break;
4150
4151 case ATA_PROT_ATAPI:
4152 ata_qc_set_polling(qc);
4153 ata_tf_to_host(ap, &qc->tf);
4154 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4155 break;
4156
4157 case ATA_PROT_ATAPI_NODATA:
4158 ap->flags |= ATA_FLAG_NOINTR;
4159 ata_tf_to_host(ap, &qc->tf);
4160 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4161 break;
4162
4163 case ATA_PROT_ATAPI_DMA:
4164 ap->flags |= ATA_FLAG_NOINTR;
4165 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4166 ap->ops->bmdma_setup(qc); /* set up bmdma */
4167 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4168 break;
4169
4170 default:
4171 WARN_ON(1);
4172 return AC_ERR_SYSTEM;
4173 }
4174
4175 return 0;
4176 }
4177
4178 /**
4179 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
4180 * @qc: Info associated with this ATA transaction.
4181 *
4182 * LOCKING:
4183 * spin_lock_irqsave(host_set lock)
4184 */
4185
4186 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
4187 {
4188 struct ata_port *ap = qc->ap;
4189 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
4190 u8 dmactl;
4191 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4192
4193 /* load PRD table addr. */
4194 mb(); /* make sure PRD table writes are visible to controller */
4195 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
4196
4197 /* specify data direction, triple-check start bit is clear */
4198 dmactl = readb(mmio + ATA_DMA_CMD);
4199 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
4200 if (!rw)
4201 dmactl |= ATA_DMA_WR;
4202 writeb(dmactl, mmio + ATA_DMA_CMD);
4203
4204 /* issue r/w command */
4205 ap->ops->exec_command(ap, &qc->tf);
4206 }
4207
4208 /**
4209 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
4210 * @qc: Info associated with this ATA transaction.
4211 *
4212 * LOCKING:
4213 * spin_lock_irqsave(host_set lock)
4214 */
4215
4216 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
4217 {
4218 struct ata_port *ap = qc->ap;
4219 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4220 u8 dmactl;
4221
4222 /* start host DMA transaction */
4223 dmactl = readb(mmio + ATA_DMA_CMD);
4224 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
4225
4226 /* Strictly, one may wish to issue a readb() here, to
4227 * flush the mmio write. However, control also passes
4228 * to the hardware at this point, and it will interrupt
4229 * us when we are to resume control. So, in effect,
4230 * we don't care when the mmio write flushes.
4231 * Further, a read of the DMA status register _immediately_
4232 * following the write may not be what certain flaky hardware
4233 * expects, so it seems best not to add a readb() without first
4234 * auditing all the MMIO ATA cards/mobos.
4235 * Or maybe I'm just being paranoid.
4236 */
4237 }
4238
4239 /**
4240 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
4241 * @qc: Info associated with this ATA transaction.
4242 *
4243 * LOCKING:
4244 * spin_lock_irqsave(host_set lock)
4245 */
4246
4247 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
4248 {
4249 struct ata_port *ap = qc->ap;
4250 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
4251 u8 dmactl;
4252
4253 /* load PRD table addr. */
4254 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
4255
4256 /* specify data direction, triple-check start bit is clear */
4257 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4258 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
4259 if (!rw)
4260 dmactl |= ATA_DMA_WR;
4261 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4262
4263 /* issue r/w command */
4264 ap->ops->exec_command(ap, &qc->tf);
4265 }
4266
4267 /**
4268 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
4269 * @qc: Info associated with this ATA transaction.
4270 *
4271 * LOCKING:
4272 * spin_lock_irqsave(host_set lock)
4273 */
4274
4275 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
4276 {
4277 struct ata_port *ap = qc->ap;
4278 u8 dmactl;
4279
4280 /* start host DMA transaction */
4281 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4282 outb(dmactl | ATA_DMA_START,
4283 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4284 }
4285
4286
4287 /**
4288 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
4289 * @qc: Info associated with this ATA transaction.
4290 *
4291 * Writes the ATA_DMA_START flag to the DMA command register.
4292 *
4293 * May be used as the bmdma_start() entry in ata_port_operations.
4294 *
4295 * LOCKING:
4296 * spin_lock_irqsave(host_set lock)
4297 */
4298 void ata_bmdma_start(struct ata_queued_cmd *qc)
4299 {
4300 if (qc->ap->flags & ATA_FLAG_MMIO)
4301 ata_bmdma_start_mmio(qc);
4302 else
4303 ata_bmdma_start_pio(qc);
4304 }
4305
4306
4307 /**
4308 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
4309 * @qc: Info associated with this ATA transaction.
4310 *
4311 * Writes address of PRD table to device's PRD Table Address
4312 * register, sets the DMA control register, and calls
4313 * ops->exec_command() to start the transfer.
4314 *
4315 * May be used as the bmdma_setup() entry in ata_port_operations.
4316 *
4317 * LOCKING:
4318 * spin_lock_irqsave(host_set lock)
4319 */
4320 void ata_bmdma_setup(struct ata_queued_cmd *qc)
4321 {
4322 if (qc->ap->flags & ATA_FLAG_MMIO)
4323 ata_bmdma_setup_mmio(qc);
4324 else
4325 ata_bmdma_setup_pio(qc);
4326 }
4327
4328
4329 /**
4330 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
4331 * @ap: Port associated with this ATA transaction.
4332 *
4333 * Clear interrupt and error flags in DMA status register.
4334 *
4335 * May be used as the irq_clear() entry in ata_port_operations.
4336 *
4337 * LOCKING:
4338 * spin_lock_irqsave(host_set lock)
4339 */
4340
4341 void ata_bmdma_irq_clear(struct ata_port *ap)
4342 {
4343 if (ap->flags & ATA_FLAG_MMIO) {
4344 void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
4345 writeb(readb(mmio), mmio);
4346 } else {
4347 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
4348 outb(inb(addr), addr);
4349 }
4350
4351 }
4352
4353
4354 /**
4355 * ata_bmdma_status - Read PCI IDE BMDMA status
4356 * @ap: Port associated with this ATA transaction.
4357 *
4358 * Read and return BMDMA status register.
4359 *
4360 * May be used as the bmdma_status() entry in ata_port_operations.
4361 *
4362 * LOCKING:
4363 * spin_lock_irqsave(host_set lock)
4364 */
4365
4366 u8 ata_bmdma_status(struct ata_port *ap)
4367 {
4368 u8 host_stat;
4369 if (ap->flags & ATA_FLAG_MMIO) {
4370 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4371 host_stat = readb(mmio + ATA_DMA_STATUS);
4372 } else
4373 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
4374 return host_stat;
4375 }
4376
4377
4378 /**
4379 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
4380 * @qc: Command we are ending DMA for
4381 *
4382 * Clears the ATA_DMA_START flag in the dma control register
4383 *
4384 * May be used as the bmdma_stop() entry in ata_port_operations.
4385 *
4386 * LOCKING:
4387 * spin_lock_irqsave(host_set lock)
4388 */
4389
4390 void ata_bmdma_stop(struct ata_queued_cmd *qc)
4391 {
4392 struct ata_port *ap = qc->ap;
4393 if (ap->flags & ATA_FLAG_MMIO) {
4394 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4395
4396 /* clear start/stop bit */
4397 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
4398 mmio + ATA_DMA_CMD);
4399 } else {
4400 /* clear start/stop bit */
4401 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
4402 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4403 }
4404
4405 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
4406 ata_altstatus(ap); /* dummy read */
4407 }
4408
4409 /**
4410 * ata_host_intr - Handle host interrupt for given (port, task)
4411 * @ap: Port on which interrupt arrived (possibly...)
4412 * @qc: Taskfile currently active in engine
4413 *
4414 * Handle host interrupt for given queued command. Currently,
4415 * only DMA interrupts are handled. All other commands are
4416 * handled via polling with interrupts disabled (nIEN bit).
4417 *
4418 * LOCKING:
4419 * spin_lock_irqsave(host_set lock)
4420 *
4421 * RETURNS:
4422 * One if interrupt was handled, zero if not (shared irq).
4423 */
4424
4425 inline unsigned int ata_host_intr (struct ata_port *ap,
4426 struct ata_queued_cmd *qc)
4427 {
4428 u8 status, host_stat;
4429
4430 switch (qc->tf.protocol) {
4431
4432 case ATA_PROT_DMA:
4433 case ATA_PROT_ATAPI_DMA:
4434 case ATA_PROT_ATAPI:
4435 /* check status of DMA engine */
4436 host_stat = ap->ops->bmdma_status(ap);
4437 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4438
4439 /* if it's not our irq... */
4440 if (!(host_stat & ATA_DMA_INTR))
4441 goto idle_irq;
4442
4443 /* before we do anything else, clear DMA-Start bit */
4444 ap->ops->bmdma_stop(qc);
4445
4446 /* fall through */
4447
4448 case ATA_PROT_ATAPI_NODATA:
4449 case ATA_PROT_NODATA:
4450 /* check altstatus */
4451 status = ata_altstatus(ap);
4452 if (status & ATA_BUSY)
4453 goto idle_irq;
4454
4455 /* check main status, clearing INTRQ */
4456 status = ata_chk_status(ap);
4457 if (unlikely(status & ATA_BUSY))
4458 goto idle_irq;
4459 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4460 ap->id, qc->tf.protocol, status);
4461
4462 /* ack bmdma irq events */
4463 ap->ops->irq_clear(ap);
4464
4465 /* complete taskfile transaction */
4466 qc->err_mask |= ac_err_mask(status);
4467 ata_qc_complete(qc);
4468 break;
4469
4470 default:
4471 goto idle_irq;
4472 }
4473
4474 return 1; /* irq handled */
4475
4476 idle_irq:
4477 ap->stats.idle_irq++;
4478
4479 #ifdef ATA_IRQ_TRAP
4480 if ((ap->stats.idle_irq % 1000) == 0) {
4481 handled = 1;
4482 ata_irq_ack(ap, 0); /* debug trap */
4483 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4484 }
4485 #endif
4486 return 0; /* irq not handled */
4487 }
4488
4489 /**
4490 * ata_interrupt - Default ATA host interrupt handler
4491 * @irq: irq line (unused)
4492 * @dev_instance: pointer to our ata_host_set information structure
4493 * @regs: unused
4494 *
4495 * Default interrupt handler for PCI IDE devices. Calls
4496 * ata_host_intr() for each port that is not disabled.
4497 *
4498 * LOCKING:
4499 * Obtains host_set lock during operation.
4500 *
4501 * RETURNS:
4502 * IRQ_NONE or IRQ_HANDLED.
4503 */
4504
4505 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4506 {
4507 struct ata_host_set *host_set = dev_instance;
4508 unsigned int i;
4509 unsigned int handled = 0;
4510 unsigned long flags;
4511
4512 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4513 spin_lock_irqsave(&host_set->lock, flags);
4514
4515 for (i = 0; i < host_set->n_ports; i++) {
4516 struct ata_port *ap;
4517
4518 ap = host_set->ports[i];
4519 if (ap &&
4520 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
4521 struct ata_queued_cmd *qc;
4522
4523 qc = ata_qc_from_tag(ap, ap->active_tag);
4524 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
4525 (qc->flags & ATA_QCFLAG_ACTIVE))
4526 handled |= ata_host_intr(ap, qc);
4527 }
4528 }
4529
4530 spin_unlock_irqrestore(&host_set->lock, flags);
4531
4532 return IRQ_RETVAL(handled);
4533 }
4534
4535
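/*
 * Illustrative sketch (editor's note): roughly how a conventional PCI BMDMA
 * driver is expected to wire the helpers above into its ata_port_operations.
 * The exact field set varies by kernel version, and the my_port_ops name is
 * a placeholder; every right-hand symbol below is exported by this file.
 *
 *	static const struct ata_port_operations my_port_ops = {
 *		.port_disable	= ata_port_disable,
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.check_status	= ata_check_status,
 *		.exec_command	= ata_exec_command,
 *		.dev_select	= ata_std_dev_select,
 *		.bmdma_setup	= ata_bmdma_setup,
 *		.bmdma_start	= ata_bmdma_start,
 *		.bmdma_stop	= ata_bmdma_stop,
 *		.bmdma_status	= ata_bmdma_status,
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *		.eng_timeout	= ata_eng_timeout,
 *		.irq_handler	= ata_interrupt,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *		.port_start	= ata_port_start,
 *		.port_stop	= ata_port_stop,
 *		.host_stop	= ata_host_stop,
 *	};
 */
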
4536 /*
4537 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4538 * without filling in any other registers.
4539 */
4540 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4541 u8 cmd)
4542 {
4543 struct ata_taskfile tf;
4544 int err;
4545
4546 ata_tf_init(ap, &tf, dev->devno);
4547
4548 tf.command = cmd;
4549 tf.flags |= ATA_TFLAG_DEVICE;
4550 tf.protocol = ATA_PROT_NODATA;
4551
4552 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4553 if (err)
4554 printk(KERN_ERR "%s: ata command failed: %d\n",
4555 __FUNCTION__, err);
4556
4557 return err;
4558 }
4559
4560 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4561 {
4562 u8 cmd;
4563
4564 if (!ata_try_flush_cache(dev))
4565 return 0;
4566
4567 if (ata_id_has_flush_ext(dev->id))
4568 cmd = ATA_CMD_FLUSH_EXT;
4569 else
4570 cmd = ATA_CMD_FLUSH;
4571
4572 return ata_do_simple_cmd(ap, dev, cmd);
4573 }
4574
4575 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4576 {
4577 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4578 }
4579
4580 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4581 {
4582 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4583 }
4584
4585 /**
4586 * ata_device_resume - wake up a previously suspended device
4587 * @ap: port the device is connected to
4588 * @dev: the device to resume
4589 *
4590 * Kick the drive back into action by sending it an idle-immediate
4591 * command and making sure its transfer mode is configured consistently
4592 * on both drive and host.
4593 *
4594 */
4595 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4596 {
4597 if (ap->flags & ATA_FLAG_SUSPENDED) {
4598 ap->flags &= ~ATA_FLAG_SUSPENDED;
4599 ata_set_mode(ap);
4600 }
4601 if (!ata_dev_present(dev))
4602 return 0;
4603 if (dev->class == ATA_DEV_ATA)
4604 ata_start_drive(ap, dev);
4605
4606 return 0;
4607 }
4608
4609 /**
4610 * ata_device_suspend - prepare a device for suspend
4611 * @ap: port the device is connected to
4612 * @dev: the device to suspend
4613 *
4614 * Flush the cache on the drive, if appropriate, then issue a
4615 * standbynow command.
4616 */
4617 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4618 {
4619 if (!ata_dev_present(dev))
4620 return 0;
4621 if (dev->class == ATA_DEV_ATA)
4622 ata_flush_cache(ap, dev);
4623
4624 ata_standby_drive(ap, dev);
4625 ap->flags |= ATA_FLAG_SUSPENDED;
4626 return 0;
4627 }
4628
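/*
 * Illustrative sketch (editor's note): ata_device_suspend() and
 * ata_device_resume() are intended to be driven from the SCSI layer's
 * per-device power-management hooks.  Roughly, a SCSI-side wrapper is
 * expected to look like the following, assuming the usual
 * ata_port-in-hostdata embedding used elsewhere in this file:
 *
 *	int ata_scsi_device_resume(struct scsi_device *sdev)
 *	{
 *		struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
 *		struct ata_device *dev = &ap->device[sdev->id];
 *
 *		return ata_device_resume(ap, dev);
 *	}
 */
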
4629 /**
4630 * ata_port_start - Set port up for DMA.
4631 * @ap: Port to initialize
4632 *
4633 * Called just after data structures for each port are
4634 * initialized. Allocates space for PRD table.
4635 *
4636 * May be used as the port_start() entry in ata_port_operations.
4637 *
4638 * LOCKING:
4639 * Inherited from caller.
4640 */
4641
4642 int ata_port_start (struct ata_port *ap)
4643 {
4644 struct device *dev = ap->host_set->dev;
4645 int rc;
4646
4647 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4648 if (!ap->prd)
4649 return -ENOMEM;
4650
4651 rc = ata_pad_alloc(ap, dev);
4652 if (rc) {
4653 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4654 return rc;
4655 }
4656
4657 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4658
4659 return 0;
4660 }
4661
4662
4663 /**
4664 * ata_port_stop - Undo ata_port_start()
4665 * @ap: Port to shut down
4666 *
4667 * Frees the PRD table.
4668 *
4669 * May be used as the port_stop() entry in ata_port_operations.
4670 *
4671 * LOCKING:
4672 * Inherited from caller.
4673 */
4674
4675 void ata_port_stop (struct ata_port *ap)
4676 {
4677 struct device *dev = ap->host_set->dev;
4678
4679 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4680 ata_pad_free(ap, dev);
4681 }
4682
4683 void ata_host_stop (struct ata_host_set *host_set)
4684 {
4685 if (host_set->mmio_base)
4686 iounmap(host_set->mmio_base);
4687 }
4688
4689
4690 /**
4691 * ata_host_remove - Unregister SCSI host structure with upper layers
4692 * @ap: Port to unregister
4693 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4694 *
4695 * LOCKING:
4696 * Inherited from caller.
4697 */
4698
4699 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4700 {
4701 struct Scsi_Host *sh = ap->host;
4702
4703 DPRINTK("ENTER\n");
4704
4705 if (do_unregister)
4706 scsi_remove_host(sh);
4707
4708 ap->ops->port_stop(ap);
4709 }
4710
4711 /**
4712 * ata_host_init - Initialize an ata_port structure
4713 * @ap: Structure to initialize
4714 * @host: associated SCSI mid-layer structure
4715 * @host_set: Collection of hosts to which @ap belongs
4716 * @ent: Probe information provided by low-level driver
4717 * @port_no: Port number associated with this ata_port
4718 *
4719 * Initialize a new ata_port structure, and its associated
4720 * scsi_host.
4721 *
4722 * LOCKING:
4723 * Inherited from caller.
4724 */
4725
4726 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4727 struct ata_host_set *host_set,
4728 const struct ata_probe_ent *ent, unsigned int port_no)
4729 {
4730 unsigned int i;
4731
4732 host->max_id = 16;
4733 host->max_lun = 1;
4734 host->max_channel = 1;
4735 host->unique_id = ata_unique_id++;
4736 host->max_cmd_len = 12;
4737
4738 ap->flags = ATA_FLAG_PORT_DISABLED;
4739 ap->id = host->unique_id;
4740 ap->host = host;
4741 ap->ctl = ATA_DEVCTL_OBS;
4742 ap->host_set = host_set;
4743 ap->port_no = port_no;
4744 ap->hard_port_no =
4745 ent->legacy_mode ? ent->hard_port_no : port_no;
4746 ap->pio_mask = ent->pio_mask;
4747 ap->mwdma_mask = ent->mwdma_mask;
4748 ap->udma_mask = ent->udma_mask;
4749 ap->flags |= ent->host_flags;
4750 ap->ops = ent->port_ops;
4751 ap->cbl = ATA_CBL_NONE;
4752 ap->active_tag = ATA_TAG_POISON;
4753 ap->last_ctl = 0xFF;
4754
4755 INIT_WORK(&ap->port_task, NULL, NULL);
4756 INIT_LIST_HEAD(&ap->eh_done_q);
4757
4758 for (i = 0; i < ATA_MAX_DEVICES; i++)
4759 ap->device[i].devno = i;
4760
4761 #ifdef ATA_IRQ_TRAP
4762 ap->stats.unhandled_irq = 1;
4763 ap->stats.idle_irq = 1;
4764 #endif
4765
4766 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4767 }
4768
4769 /**
4770 * ata_host_add - Attach low-level ATA driver to system
4771 * @ent: Information provided by low-level driver
4772 * @host_set: Collections of ports to which we add
4773 * @port_no: Port number associated with this host
4774 *
4775 * Attach low-level ATA driver to system.
4776 *
4777 * LOCKING:
4778 * PCI/etc. bus probe sem.
4779 *
4780 * RETURNS:
4781 * New ata_port on success, NULL on error.
4782 */
4783
4784 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4785 struct ata_host_set *host_set,
4786 unsigned int port_no)
4787 {
4788 struct Scsi_Host *host;
4789 struct ata_port *ap;
4790 int rc;
4791
4792 DPRINTK("ENTER\n");
4793 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4794 if (!host)
4795 return NULL;
4796
4797 ap = (struct ata_port *) &host->hostdata[0];
4798
4799 ata_host_init(ap, host, host_set, ent, port_no);
4800
4801 rc = ap->ops->port_start(ap);
4802 if (rc)
4803 goto err_out;
4804
4805 return ap;
4806
4807 err_out:
4808 scsi_host_put(host);
4809 return NULL;
4810 }
4811
4812 /**
4813 * ata_device_add - Register hardware device with ATA and SCSI layers
4814 * @ent: Probe information describing hardware device to be registered
4815 *
4816 * This function processes the information provided in the probe
4817 * information struct @ent, allocates the necessary ATA and SCSI
4818 * host information structures, initializes them, and registers
4819 * everything with requisite kernel subsystems.
4820 *
4821 * This function requests irqs, probes the ATA bus, and probes
4822 * the SCSI bus.
4823 *
4824 * LOCKING:
4825 * PCI/etc. bus probe sem.
4826 *
4827 * RETURNS:
4828 * Number of ports registered. Zero on error (no ports registered).
4829 */
4830
4831 int ata_device_add(const struct ata_probe_ent *ent)
4832 {
4833 unsigned int count = 0, i;
4834 struct device *dev = ent->dev;
4835 struct ata_host_set *host_set;
4836
4837 DPRINTK("ENTER\n");
4838 /* alloc a container for our list of ATA ports (buses) */
4839 host_set = kzalloc(sizeof(struct ata_host_set) +
4840 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4841 if (!host_set)
4842 return 0;
4843 spin_lock_init(&host_set->lock);
4844
4845 host_set->dev = dev;
4846 host_set->n_ports = ent->n_ports;
4847 host_set->irq = ent->irq;
4848 host_set->mmio_base = ent->mmio_base;
4849 host_set->private_data = ent->private_data;
4850 host_set->ops = ent->port_ops;
4851
4852 /* register each port bound to this device */
4853 for (i = 0; i < ent->n_ports; i++) {
4854 struct ata_port *ap;
4855 unsigned long xfer_mode_mask;
4856
4857 ap = ata_host_add(ent, host_set, i);
4858 if (!ap)
4859 goto err_out;
4860
4861 host_set->ports[i] = ap;
4862 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
4863 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4864 (ap->pio_mask << ATA_SHIFT_PIO);
4865
4866 /* print per-port info to dmesg */
4867 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4868 "bmdma 0x%lX irq %lu\n",
4869 ap->id,
4870 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4871 ata_mode_string(xfer_mode_mask),
4872 ap->ioaddr.cmd_addr,
4873 ap->ioaddr.ctl_addr,
4874 ap->ioaddr.bmdma_addr,
4875 ent->irq);
4876
4877 ata_chk_status(ap);
4878 host_set->ops->irq_clear(ap);
4879 count++;
4880 }
4881
4882 if (!count)
4883 goto err_free_ret;
4884
4885 /* obtain irq, that is shared between channels */
4886 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4887 DRV_NAME, host_set))
4888 goto err_out;
4889
4890 /* perform each probe synchronously */
4891 DPRINTK("probe begin\n");
4892 for (i = 0; i < count; i++) {
4893 struct ata_port *ap;
4894 int rc;
4895
4896 ap = host_set->ports[i];
4897
4898 DPRINTK("ata%u: bus probe begin\n", ap->id);
4899 rc = ata_bus_probe(ap);
4900 DPRINTK("ata%u: bus probe end\n", ap->id);
4901
4902 if (rc) {
4903 /* FIXME: do something useful here?
4904 * Current libata behavior will
4905 * tear down everything when
4906 * the module is removed
4907 * or the h/w is unplugged.
4908 */
4909 }
4910
4911 rc = scsi_add_host(ap->host, dev);
4912 if (rc) {
4913 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4914 ap->id);
4915 /* FIXME: do something useful here */
4916 /* FIXME: handle unconditional calls to
4917 * scsi_scan_host and ata_host_remove, below,
4918 * at the very least
4919 */
4920 }
4921 }
4922
4923 /* probes are done, now scan each port's disk(s) */
4924 DPRINTK("host probe begin\n");
4925 for (i = 0; i < count; i++) {
4926 struct ata_port *ap = host_set->ports[i];
4927
4928 ata_scsi_scan_host(ap);
4929 }
4930
4931 dev_set_drvdata(dev, host_set);
4932
4933 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4934 return ent->n_ports; /* success */
4935
4936 err_out:
4937 for (i = 0; i < count; i++) {
4938 ata_host_remove(host_set->ports[i], 1);
4939 scsi_host_put(host_set->ports[i]->host);
4940 }
4941 err_free_ret:
4942 kfree(host_set);
4943 VPRINTK("EXIT, returning 0\n");
4944 return 0;
4945 }
4946
4947 /**
4948 * ata_host_set_remove - unregister and free an ATA host set
4949 * @host_set: ATA host set to remove
4950 *
4951 * Unregister all objects associated with this host set. Free those
4952 * objects.
4953 *
4954 * LOCKING:
4955 * Inherited from calling layer (may sleep).
4956 */
4957
4958 void ata_host_set_remove(struct ata_host_set *host_set)
4959 {
4960 struct ata_port *ap;
4961 unsigned int i;
4962
4963 for (i = 0; i < host_set->n_ports; i++) {
4964 ap = host_set->ports[i];
4965 scsi_remove_host(ap->host);
4966 }
4967
4968 free_irq(host_set->irq, host_set);
4969
4970 for (i = 0; i < host_set->n_ports; i++) {
4971 ap = host_set->ports[i];
4972
4973 ata_scsi_release(ap->host);
4974
4975 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4976 struct ata_ioports *ioaddr = &ap->ioaddr;
4977
4978 if (ioaddr->cmd_addr == 0x1f0)
4979 release_region(0x1f0, 8);
4980 else if (ioaddr->cmd_addr == 0x170)
4981 release_region(0x170, 8);
4982 }
4983
4984 scsi_host_put(ap->host);
4985 }
4986
4987 if (host_set->ops->host_stop)
4988 host_set->ops->host_stop(host_set);
4989
4990 kfree(host_set);
4991 }
4992
4993 /**
4994 * ata_scsi_release - SCSI layer callback hook for host unload
4995 * @host: libata host to be unloaded
4996 *
4997 * Performs all duties necessary to shut down a libata port...
4998 * Kill port kthread, disable port, and release resources.
4999 *
5000 * LOCKING:
5001 * Inherited from SCSI layer.
5002 *
5003 * RETURNS:
5004 * One.
5005 */
5006
5007 int ata_scsi_release(struct Scsi_Host *host)
5008 {
5009 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
5010 int i;
5011
5012 DPRINTK("ENTER\n");
5013
5014 ap->ops->port_disable(ap);
5015 ata_host_remove(ap, 0);
5016 for (i = 0; i < ATA_MAX_DEVICES; i++)
5017 kfree(ap->device[i].id);
5018
5019 DPRINTK("EXIT\n");
5020 return 1;
5021 }
5022
5023 /**
5024 * ata_std_ports - initialize ioaddr with standard port offsets.
5025 * @ioaddr: IO address structure to be initialized
5026 *
5027 * Utility function which initializes data_addr, error_addr,
5028 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5029 * device_addr, status_addr, and command_addr to standard offsets
5030 * relative to cmd_addr.
5031 *
5032 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5033 */
5034
5035 void ata_std_ports(struct ata_ioports *ioaddr)
5036 {
5037 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5038 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5039 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5040 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5041 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5042 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5043 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5044 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5045 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5046 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5047 }
5048
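/*
 * Illustrative sketch (editor's note): a low-level driver is expected to
 * fill in cmd_addr (and, where present, ctl_addr/altstatus_addr and
 * bmdma_addr) itself and then let ata_std_ports() derive the remaining
 * taskfile register offsets.  The legacy primary-channel addresses and the
 * bmdma_base variable below are examples only.
 *
 *	probe_ent->port[0].cmd_addr = 0x1f0;
 *	probe_ent->port[0].altstatus_addr = 0x3f6;
 *	probe_ent->port[0].ctl_addr = 0x3f6;
 *	probe_ent->port[0].bmdma_addr = bmdma_base;
 *	ata_std_ports(&probe_ent->port[0]);
 */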
5049
5050 #ifdef CONFIG_PCI
5051
5052 void ata_pci_host_stop (struct ata_host_set *host_set)
5053 {
5054 struct pci_dev *pdev = to_pci_dev(host_set->dev);
5055
5056 pci_iounmap(pdev, host_set->mmio_base);
5057 }
5058
5059 /**
5060 * ata_pci_remove_one - PCI layer callback for device removal
5061 * @pdev: PCI device that was removed
5062 *
5063 * PCI layer indicates to libata via this hook that
5064 * hot-unplug or module unload event has occurred.
5065 * Handle this by unregistering all objects associated
5066 * with this PCI device. Free those objects. Then finally
5067 * release PCI resources and disable device.
5068 *
5069 * LOCKING:
5070 * Inherited from PCI layer (may sleep).
5071 */
5072
5073 void ata_pci_remove_one (struct pci_dev *pdev)
5074 {
5075 struct device *dev = pci_dev_to_dev(pdev);
5076 struct ata_host_set *host_set = dev_get_drvdata(dev);
5077
5078 ata_host_set_remove(host_set);
5079 pci_release_regions(pdev);
5080 pci_disable_device(pdev);
5081 dev_set_drvdata(dev, NULL);
5082 }
5083
5084 /* move to PCI subsystem */
5085 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5086 {
5087 unsigned long tmp = 0;
5088
5089 switch (bits->width) {
5090 case 1: {
5091 u8 tmp8 = 0;
5092 pci_read_config_byte(pdev, bits->reg, &tmp8);
5093 tmp = tmp8;
5094 break;
5095 }
5096 case 2: {
5097 u16 tmp16 = 0;
5098 pci_read_config_word(pdev, bits->reg, &tmp16);
5099 tmp = tmp16;
5100 break;
5101 }
5102 case 4: {
5103 u32 tmp32 = 0;
5104 pci_read_config_dword(pdev, bits->reg, &tmp32);
5105 tmp = tmp32;
5106 break;
5107 }
5108
5109 default:
5110 return -EINVAL;
5111 }
5112
5113 tmp &= bits->mask;
5114
5115 return (tmp == bits->val) ? 1 : 0;
5116 }
5117
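/*
 * Illustrative sketch (editor's note): pci_test_config_bits() is meant for
 * "is this channel enabled?" style checks against chipset-specific PCI
 * config registers.  The register offsets and masks below are placeholders;
 * consult the chipset datasheet for real values.
 *
 *	static const struct pci_bits my_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	(port 0 enable bit)
 *		{ 0x43, 1, 0x80, 0x80 },	(port 1 enable bit)
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits[ap->hard_port_no]))
 *		ata_port_disable(ap);
 */
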
5118 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5119 {
5120 pci_save_state(pdev);
5121 pci_disable_device(pdev);
5122 pci_set_power_state(pdev, PCI_D3hot);
5123 return 0;
5124 }
5125
5126 int ata_pci_device_resume(struct pci_dev *pdev)
5127 {
5128 pci_set_power_state(pdev, PCI_D0);
5129 pci_restore_state(pdev);
5130 pci_enable_device(pdev);
5131 pci_set_master(pdev);
5132 return 0;
5133 }
5134 #endif /* CONFIG_PCI */
5135
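/*
 * Illustrative sketch (editor's note): a typical PCI LLDD hooks the helpers
 * above into its pci_driver.  my_pci_tbl and my_init_one are placeholders
 * for the driver's own ID table and probe routine.
 *
 *	static struct pci_driver my_pci_driver = {
 *		.name		= DRV_NAME,
 *		.id_table	= my_pci_tbl,
 *		.probe		= my_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */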
5136
5137 static int __init ata_init(void)
5138 {
5139 ata_wq = create_workqueue("ata");
5140 if (!ata_wq)
5141 return -ENOMEM;
5142
5143 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5144 return 0;
5145 }
5146
5147 static void __exit ata_exit(void)
5148 {
5149 destroy_workqueue(ata_wq);
5150 }
5151
5152 module_init(ata_init);
5153 module_exit(ata_exit);
5154
5155 static unsigned long ratelimit_time;
5156 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5157
5158 int ata_ratelimit(void)
5159 {
5160 int rc;
5161 unsigned long flags;
5162
5163 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5164
5165 if (time_after(jiffies, ratelimit_time)) {
5166 rc = 1;
5167 ratelimit_time = jiffies + (HZ/5);
5168 } else
5169 rc = 0;
5170
5171 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5172
5173 return rc;
5174 }
5175
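/*
 * Illustrative sketch (editor's note): ata_ratelimit() throttles noisy
 * messages from hot paths (e.g. per-interrupt error reporting) to roughly
 * five per second (one per HZ/5 jiffies).
 *
 *	if (ata_ratelimit())
 *		printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
 */
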
5176 /*
5177 * libata is essentially a library of internal helper functions for
5178 * low-level ATA host controller drivers. As such, the API/ABI is
5179 * likely to change as new drivers are added and updated.
5180 * Do not depend on ABI/API stability.
5181 */
5182
5183 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5184 EXPORT_SYMBOL_GPL(ata_std_ports);
5185 EXPORT_SYMBOL_GPL(ata_device_add);
5186 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5187 EXPORT_SYMBOL_GPL(ata_sg_init);
5188 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5189 EXPORT_SYMBOL_GPL(__ata_qc_complete);
5190 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5191 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5192 EXPORT_SYMBOL_GPL(ata_tf_load);
5193 EXPORT_SYMBOL_GPL(ata_tf_read);
5194 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5195 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5196 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5197 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5198 EXPORT_SYMBOL_GPL(ata_check_status);
5199 EXPORT_SYMBOL_GPL(ata_altstatus);
5200 EXPORT_SYMBOL_GPL(ata_exec_command);
5201 EXPORT_SYMBOL_GPL(ata_port_start);
5202 EXPORT_SYMBOL_GPL(ata_port_stop);
5203 EXPORT_SYMBOL_GPL(ata_host_stop);
5204 EXPORT_SYMBOL_GPL(ata_interrupt);
5205 EXPORT_SYMBOL_GPL(ata_qc_prep);
5206 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5207 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5208 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5209 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5210 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5211 EXPORT_SYMBOL_GPL(ata_port_probe);
5212 EXPORT_SYMBOL_GPL(sata_phy_reset);
5213 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5214 EXPORT_SYMBOL_GPL(ata_bus_reset);
5215 EXPORT_SYMBOL_GPL(ata_std_probeinit);
5216 EXPORT_SYMBOL_GPL(ata_std_softreset);
5217 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5218 EXPORT_SYMBOL_GPL(ata_std_postreset);
5219 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5220 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5221 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5222 EXPORT_SYMBOL_GPL(ata_port_disable);
5223 EXPORT_SYMBOL_GPL(ata_ratelimit);
5224 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5225 EXPORT_SYMBOL_GPL(ata_port_queue_task);
5226 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5227 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5228 EXPORT_SYMBOL_GPL(ata_scsi_timed_out);
5229 EXPORT_SYMBOL_GPL(ata_scsi_error);
5230 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5231 EXPORT_SYMBOL_GPL(ata_scsi_release);
5232 EXPORT_SYMBOL_GPL(ata_host_intr);
5233 EXPORT_SYMBOL_GPL(ata_dev_classify);
5234 EXPORT_SYMBOL_GPL(ata_id_string);
5235 EXPORT_SYMBOL_GPL(ata_id_c_string);
5236 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5237 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5238 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5239
5240 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5241 EXPORT_SYMBOL_GPL(ata_timing_compute);
5242 EXPORT_SYMBOL_GPL(ata_timing_merge);
5243
5244 #ifdef CONFIG_PCI
5245 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5246 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5247 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5248 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5249 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5250 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5251 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5252 #endif /* CONFIG_PCI */
5253
5254 EXPORT_SYMBOL_GPL(ata_device_suspend);
5255 EXPORT_SYMBOL_GPL(ata_device_resume);
5256 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5257 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);