libata: clear eh_info on reset completion
[deliverable/linux.git] / drivers / ata / libata-eh.c
CommitLineData
ece1d636
TH
1/*
2 * libata-eh.c - libata error handling
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2006 Tejun Heo <htejun@gmail.com>
9 *
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24 * USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
ece1d636 35#include <linux/kernel.h>
242f9dcb 36#include <linux/blkdev.h>
2855568b 37#include <linux/pci.h>
ece1d636
TH
38#include <scsi/scsi.h>
39#include <scsi/scsi_host.h>
40#include <scsi/scsi_eh.h>
41#include <scsi/scsi_device.h>
42#include <scsi/scsi_cmnd.h>
c6fd2807 43#include "../scsi/scsi_transport_api.h"
ece1d636
TH
44
45#include <linux/libata.h>
46
47#include "libata.h"
48
enum {
	/* speed down verdicts, returned by the speed-down decision logic */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags recorded alongside each error-ring entry */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	/* default timeout (ms) for internal commands that have no entry
	 * in ata_eh_cmd_timeout_table */
	ATA_EH_CMD_DFL_TIMEOUT		= 5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		= 5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	= 3000,

	/* retry budget for UNIT ATTENTION-returning commands */
	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};
90
/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and retarded devices.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for retarded devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};

/* per-retry timeouts (ms) for IDENTIFY-class internal commands */
static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,  /* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	ULONG_MAX,
};

/* per-retry timeouts (ms) for the remaining tabled internal commands */
static const unsigned long ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};

/* one command class: a zero-terminated command list plus its
 * ULONG_MAX-terminated timeout progression */
struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
};
#undef CMDS
151
static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
/* without power management the suspend/resume hooks are no-ops */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */
ad9e2762 163
b64bbc39
TH
/* append a formatted string to @ehi->desc; vscnprintf() bounds the
 * write to ATA_EH_DESC_LEN so the buffer can never overflow, and
 * desc_len tracks the running length for subsequent appends */
static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}
171
/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
190
/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	/* separate from any previously pushed description */
	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
213
214/**
215 * ata_ehi_clear_desc - clean error description
216 * @ehi: target EHI
217 *
218 * Clear @ehi->desc.
219 *
220 * LOCKING:
221 * spin_lock_irqsave(host lock)
222 */
223void ata_ehi_clear_desc(struct ata_eh_info *ehi)
224{
225 ehi->desc[0] = '\0';
226 ehi->desc_len = 0;
227}
228
cbcdd875
TH
/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	/* only legal during host initialization */
	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}
255
256#ifdef CONFIG_PCI
257
258/**
259 * ata_port_pbar_desc - append PCI BAR description
260 * @ap: target ATA port
261 * @bar: target PCI BAR
262 * @offset: offset into PCI BAR
263 * @name: name of the area
264 *
265 * If @offset is negative, this function formats a string which
266 * contains the name, address, size and type of the BAR and
267 * appends it to the port description. If @offset is zero or
268 * positive, only name and offsetted address is appended.
269 *
270 * LOCKING:
271 * None.
272 */
273void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
274 const char *name)
275{
276 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
277 char *type = "";
278 unsigned long long start, len;
279
280 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
281 type = "m";
282 else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
283 type = "i";
284
285 start = (unsigned long long)pci_resource_start(pdev, bar);
286 len = (unsigned long long)pci_resource_len(pdev, bar);
287
288 if (offset < 0)
289 ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
290 else
e6a73ab1
AM
291 ata_port_desc(ap, "%s 0x%llx", name,
292 start + (unsigned long long)offset);
cbcdd875
TH
293}
294
295#endif /* CONFIG_PCI */
296
87fbc5a0
TH
297static int ata_lookup_timeout_table(u8 cmd)
298{
299 int i;
300
301 for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
302 const u8 *cur;
303
304 for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
305 if (*cur == cmd)
306 return i;
307 }
308
309 return -1;
310}
311
/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev.  The
 *	per-device, per-class retry index recorded in the EH context
 *	selects which entry of the class's timeout progression to use.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	/* command not in the table, use the flat default */
	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}
337
/**
 *	ata_internal_cmd_timed_out - notification for internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function should be called only for commands whose timeouts are
 *	determined using ata_internal_cmd_timeout().
 *
 *	LOCKING:
 *	EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	/* untabled commands have no progression to advance */
	if (ent < 0)
		return;

	/* advance to the next timeout unless already at the
	 * ULONG_MAX terminator (i.e. the last real entry) */
	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}
363
/* Record an error event in the circular error ring: advance the
 * cursor (wrapping at ATA_ERING_SIZE) and overwrite the oldest slot
 * with the new flags, error mask and a jiffies timestamp. */
static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	/* a zero err_mask would make the slot look unused, see
	 * ata_ering_top()/ata_ering_map() */
	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}
379
76326ac1
TH
380static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
381{
382 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
383
384 if (ent->err_mask)
385 return ent;
386 return NULL;
387}
388
/* Wipe the whole error ring; zeroed err_mask marks every slot unused */
static void ata_ering_clear(struct ata_ering *ering)
{
	memset(ering, 0, sizeof(*ering));
}
393
/* Walk the error ring from the most recent entry backwards, invoking
 * @map_fn on each used entry.  Stops early when an unused slot is hit
 * or when @map_fn returns non-zero; that value is propagated back. */
static int ata_ering_map(struct ata_ering *ering,
			 int (*map_fn)(struct ata_ering_entry *, void *),
			 void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		/* err_mask == 0 means the rest of the ring is empty */
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		/* step backwards with wrap-around */
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}
414
64f65ca6
TH
/* Combined EH action mask for @dev: link-wide actions plus the
 * device's own per-device actions from the EH context. */
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}
421
/* Clear @action bits from @ehi.  With @dev == NULL the bits are
 * cleared link-wide and for every device; otherwise any matching
 * link-wide bits are first distributed to all devices, then cleared
 * only for @dev, so the other devices keep their pending action. */
static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}
447
ece1d636
TH
/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	BLK_EH_HANDLED or BLK_EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	/* new EH drivers: let ata_scsi_error() handle everything */
	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		/* keep ata_qc_complete() from finishing this qc */
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}
497
ece180d1
TH
/* Tear the port down for driver unload: restore SControl for the next
 * driver, disable all attached devices and leave the port frozen with
 * ATA_PFLAG_UNLOADED set so EH never runs on it again. */
static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}
522
ece1d636
TH
/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* synchronize with port task */
	ata_port_flush_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		/* classify every scmd the midlayer handed us: timed out
		 * (its qc is still active) or already completed */
		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs.  They belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

	/* If we timed raced normal completion and there is nothing to
	   recover nr_timedout == 0 why exactly are we doing error recovery ? */

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			/* move pending eh_info into a fresh eh_context */
			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			/* snapshot transfer modes / NCQ state so EH can
			 * detect and restore them after resets */
			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		/* old EH: hand off to the driver's eng_timeout hook */
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}
735
c6cf9e99
TH
/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	/* sleep until both the pending and in-progress flags clear;
	 * ata_scsi_error() wakes eh_wait_q on completion */
	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		msleep(10);
		goto retry;
	}
}
769
5ddf24c5
TH
770static int ata_eh_nr_in_flight(struct ata_port *ap)
771{
772 unsigned int tag;
773 int nr = 0;
774
775 /* count only non-internal commands */
776 for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
777 if (ata_qc_from_tag(ap, tag))
778 nr++;
779
780 return nr;
781}
782
/* Fast-drain timer callback.  If no qc completed during the last
 * interval, mark all in-flight qcs as timed out and freeze the port
 * so EH can take over; otherwise re-arm the timer for another round. */
void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}
821
/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}
858
f686bcb8
TH
/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	blk_abort_request(qc->scsicmd->request);
}
885
7b70fc03
TH
/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	/* during initialization EH is driven explicitly, don't schedule */
	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}
908
/* Abort all active qcs on @ap, or only those belonging to @link when
 * @link is non-NULL.  Returns the number of aborted qcs; if none were
 * in flight, schedules port EH instead so the event isn't lost. */
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}
933
dbd82616
TH
/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's active on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}
950
/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	/* NULL link means "all links on the port" */
	return ata_do_link_abort(ap, NULL);
}
967
e3180499
TH
/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	/* optional hardware-level freeze hook */
	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}
997
/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.  The freeze operation must be called
 *	first, because some hardware requires special operations
 *	before the taskfile registers are accessible.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	/* freeze first, then abort -- see comment above */
	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}
1023
7d77b247
TH
/**
 *	sata_async_notification - SATA async notification handler
 *	@ap: ATA port where async notification is received
 *
 *	Handler to be called when async notification via SDB FIS is
 *	received.  This function schedules EH if necessary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	/* read and clear SNotification; rc != 0 means SNTF unreadable */
	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}
1096
e3180499
TH
/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.  Unlike __ata_port_freeze(), this takes the host
 *	lock itself, so it may be called from EH context without the
 *	lock held.  No-op for old-style (non-EH) drivers.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}
1117
/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.  Clears ATA_PFLAG_FROZEN and gives the
 *	LLDD a chance to re-enable the hardware via ->thaw().
 *	No-op for old-style (non-EH) drivers.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	/* let the LLDD unmask interrupts etc. */
	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}
1145
ece1d636
TH
/* Dummy scsi_done callback used while completing a qc from EH;
 * actual completion to the SCSI midlayer goes through
 * scsi_eh_finish_cmd() / ap->eh_done_q instead.
 */
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}
1150
/* Complete @qc from EH context: finish the libata side under the host
 * lock with a no-op scsi_done, then hand the scsi command to the EH
 * done queue for the midlayer.
 */
static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	/* prevent normal completion path from calling into the midlayer */
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	/* qc's tag must have been released by __ata_qc_complete() */
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
1165
/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	/* exhaust retries so the midlayer completes rather than retries */
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}
1179
/**
 *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *	@qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.  To be used from EH.
 *
 *	SCSI midlayer limits the number of retries to scmd->allowed.
 *	scmd->retries is decremented for commands which get retried
 *	due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	/* don't charge a retry against a command that didn't itself fail */
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}
022bdb07 1198
678afac6
TH
/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev: notify ACPI, force the transfer mode down to
 *	PIO0 and mark the device class unsupported.  No-op if the
 *	device is already disabled.
 *
 *	Locking:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	/* bump to the matching "unsupported" class -- relies on the
	 * ATA_DEV_* enum layout (each class followed by its _UNSUP
	 * counterpart); confirm against the enum if in doubt.
	 */
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}
1224
0ea035a3
TH
/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev: disable it, take it offline from the SCSI side
 *	and clear any per-device EH state so a later re-attach starts
 *	from a clean slate.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	/* if SCSI offlining succeeded, ask the hotplug task to clean up */
	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}
1260
022bdb07
TH
/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @link->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}
1294
47005f25
TH
/**
 *	ata_eh_done - EH action complete
 *	@link: ATA link for which EH actions are complete
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @link->eh_context.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	ata_eh_clear_action(link, dev, &ehc->i, action);
}
1314
022bdb07
TH
1315/**
1316 * ata_err_string - convert err_mask to descriptive string
1317 * @err_mask: error mask to convert to string
1318 *
1319 * Convert @err_mask to descriptive string. Errors are
1320 * prioritized according to severity and only the most severe
1321 * error is reported.
1322 *
1323 * LOCKING:
1324 * None.
1325 *
1326 * RETURNS:
1327 * Descriptive string for @err_mask
1328 */
2dcb407e 1329static const char *ata_err_string(unsigned int err_mask)
022bdb07
TH
1330{
1331 if (err_mask & AC_ERR_HOST_BUS)
1332 return "host bus error";
1333 if (err_mask & AC_ERR_ATA_BUS)
1334 return "ATA bus error";
1335 if (err_mask & AC_ERR_TIMEOUT)
1336 return "timeout";
1337 if (err_mask & AC_ERR_HSM)
1338 return "HSM violation";
1339 if (err_mask & AC_ERR_SYSTEM)
1340 return "internal error";
1341 if (err_mask & AC_ERR_MEDIA)
1342 return "media error";
1343 if (err_mask & AC_ERR_INVALID)
1344 return "invalid argument";
1345 if (err_mask & AC_ERR_DEV)
1346 return "device error";
1347 return "unknown error";
1348}
1349
e8ee8451
TH
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	/* build a READ LOG EXT taskfile: page in LBA low, count in nsect */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
1387
/**
 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *	@dev: Device to read log page 10h from
 *	@tag: Resulting tag of the failed command
 *	@tf: Resulting taskfile registers of the failed command
 *
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	/* the 512-byte page should sum to zero; warn but keep going */
	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			       "invalid checksum 0x%x on log page 10h\n", csum);

	/* NQ bit set: the reported error is not for an NCQ command */
	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	/* reconstruct the failed command's taskfile from the page layout */
	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}
1441
11fc33da
TH
/**
 *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 *	@dev: target ATAPI device
 *	@r_sense_key: out parameter for sense_key
 *
 *	Perform ATAPI TEST_UNIT_READY.  On device error the sense key
 *	is extracted from the feature register into @r_sense_key.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure.
 */
static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	/* on device error, the sense key lives in feature[7:4] */
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.feature >> 4;
	return err_mask;
}
1472
022bdb07
TH
/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *	@dfl_sense_key: default sense key to use
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
					   u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed? */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;		/* fixed-format, current error */
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		/* byte-count limit for PIO transfers goes in lbam/lbah */
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}
1526
1527/**
1528 * ata_eh_analyze_serror - analyze SError for a failed port
0260731f 1529 * @link: ATA link to analyze SError for
022bdb07
TH
1530 *
1531 * Analyze SError if available and further determine cause of
1532 * failure.
1533 *
1534 * LOCKING:
1535 * None.
1536 */
0260731f 1537static void ata_eh_analyze_serror(struct ata_link *link)
022bdb07 1538{
0260731f 1539 struct ata_eh_context *ehc = &link->eh_context;
022bdb07
TH
1540 u32 serror = ehc->i.serror;
1541 unsigned int err_mask = 0, action = 0;
f9df58cb 1542 u32 hotplug_mask;
022bdb07 1543
e0614db2 1544 if (serror & (SERR_PERSISTENT | SERR_DATA)) {
022bdb07 1545 err_mask |= AC_ERR_ATA_BUS;
cf480626 1546 action |= ATA_EH_RESET;
022bdb07
TH
1547 }
1548 if (serror & SERR_PROTOCOL) {
1549 err_mask |= AC_ERR_HSM;
cf480626 1550 action |= ATA_EH_RESET;
022bdb07
TH
1551 }
1552 if (serror & SERR_INTERNAL) {
1553 err_mask |= AC_ERR_SYSTEM;
cf480626 1554 action |= ATA_EH_RESET;
022bdb07 1555 }
f9df58cb
TH
1556
1557 /* Determine whether a hotplug event has occurred. Both
1558 * SError.N/X are considered hotplug events for enabled or
1559 * host links. For disabled PMP links, only N bit is
1560 * considered as X bit is left at 1 for link plugging.
1561 */
1562 hotplug_mask = 0;
1563
1564 if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1565 hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1566 else
1567 hotplug_mask = SERR_PHYRDY_CHG;
1568
1569 if (serror & hotplug_mask)
084fe639 1570 ata_ehi_hotplugged(&ehc->i);
022bdb07
TH
1571
1572 ehc->i.err_mask |= err_mask;
1573 ehc->i.action |= action;
1574}
1575
e8ee8451
TH
/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@link: ATA link to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all LLDDs have to do
 *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
 *	care of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		/* a failed qc already carries an err_mask: nothing to do */
		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	/* sanity check: the reported tag must belong to an active command */
	if (!(link->sactive & (1 << tag))) {
		ata_link_printk(link, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	/* error has been pinned on one qc; clear the link-wide DEV error */
	ehc->i.err_mask &= ~AC_ERR_DEV;
}
1637
022bdb07
TH
/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	/* anything other than DRDY alone means the state machine is off */
	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		/* refine the DEV error using the error register bits */
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		/* fetch sense data unless the port is frozen (can't issue) */
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.feature >> 4);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}
1703
76326ac1
TH
/* Categorize an error into an ATA_ECAT_* bucket for the speed-down
 * verdict logic.  Until the first error without ATA_EFLAG_DUBIOUS_XFER
 * is seen (*xfer_ok stays 0), categories are offset into their
 * DUBIOUS_* variants.  Returns 0 for errors that don't count.
 */
static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		/* pure DEV error (no media/invalid bits) on an IO command */
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return 0;
}
1731
/* accumulator passed through ata_ering_map() by the verdict function */
struct speed_down_verdict_arg {
	u64 since;			/* ignore entries older than this */
	int xfer_ok;			/* running state for categorization */
	int nr_errors[ATA_ECAT_NR];	/* per-category error counts */
};

/* ering iteration callback: count this entry's error category.
 * Returns -1 to stop iterating once entries get older than @since.
 */
static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	if (ent->timestamp < arg->since)
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}
1752
/**
 *	ata_eh_speed_down_verdict - Determine speed down verdict
 *	@dev: Device of interest
 *
 *	This function examines error ring of @dev and determines
 *	whether NCQ needs to be turned off, transfer speed should be
 *	stepped down, or falling back to PIO is necessary.
 *
 *	ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
 *			  IO commands
 *
 *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
 *			  data transfer hasn't been verified.
 *
 *	Verdicts are
 *
 *	NCQ_OFF		: Turn off NCQ.
 *
 *	SPEED_DOWN	: Speed down transfer speed but don't fall back
 *			  to PIO.
 *
 *	FALLBACK_TO_PIO	: Fall back to PIO.
 *
 *	Even if multiple verdicts are returned, only one action is
 *	taken per error.  An action triggered by non-DUBIOUS errors
 *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
 *	This is to expedite speed down decisions right after device is
 *	initially configured.
 *
 *	The followings are speed down rules.  #1 and #2 deal with
 *	DUBIOUS errors.
 *
 *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *	   occurred during last 5 mins, NCQ_OFF.
 *
 *	3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *	   occurred during last 5 mins, FALLBACK_TO_PIO
 *
 *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *	   during last 10 mins, NCQ_OFF.
 *
 *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	/* rule #1 */
	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	/* rule #2 */
	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	/* rule #3 */
	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	/* rule #4 */
	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	/* rule #5 */
	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}
1852
/**
 *	ata_eh_speed_down - record error and speed down if necessary
 *	@dev: Failed device
 *	@eflags: mask of ATA_EFLAG_* flags
 *	@err_mask: err_mask of the error
 *
 *	Record error and examine error history to determine whether
 *	adjusting transmission speed is necessary.  It also sets
 *	transmission limits appropriately if such adjustment is
 *	necessary.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				      unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ?  Only if it is currently on and not forced off. */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_printk(dev, KERN_WARNING,
			       "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode: two steps before giving up */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}
1946
/**
 *	ata_eh_link_autopsy - analyze error and determine recovery action
 *	@link: host link to perform autopsy on
 *
 *	Analyze why @link failed and determine which recovery actions
 *	are needed.  This function also sets more detailed AC_ERR_*
 *	values and fills sense data for ATAPI CHECK SENSE.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed, force reset and probing */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	/* walk every failed qc belonging to this link */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/* SENSE_VALID trumps dev/unknown error and revalidation */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);

		/* determine whether the command is worth retrying */
		if (!(qc->err_mask & AC_ERR_INVALID) &&
		    ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
	}

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_RESET;
	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* If we have offending qcs and the associated failed device,
	 * perform per-dev EH action only on the offending device.
	 */
	if (ehc->i.dev) {
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	/* propagate timeout to host link */
	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

	/* record error and consider speeding down */
	dev = ehc->i.dev;
	if (!dev && ((ata_link_max_devices(link) == 1 &&
		      ata_dev_enabled(link->device))))
		dev = link->device;

	if (dev) {
		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
			eflags |= ATA_EFLAG_DUBIOUS_XFER;
		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
	}

	DPRINTK("EXIT\n");
}
2067
2068/**
9b1e2658
TH
2069 * ata_eh_autopsy - analyze error and determine recovery action
2070 * @ap: host port to perform autopsy on
2071 *
2072 * Analyze all links of @ap and determine why they failed and
2073 * which recovery actions are needed.
2074 *
2075 * LOCKING:
2076 * Kernel thread context (may sleep).
2077 */
fb7fd614 2078void ata_eh_autopsy(struct ata_port *ap)
9b1e2658
TH
2079{
2080 struct ata_link *link;
2081
1eca4365 2082 ata_for_each_link(link, ap, EDGE)
9b1e2658 2083 ata_eh_link_autopsy(link);
2695e366 2084
b1c72916
TH
2085 /* Handle the frigging slave link. Autopsy is done similarly
2086 * but actions and flags are transferred over to the master
2087 * link and handled from there.
2088 */
2089 if (ap->slave_link) {
2090 struct ata_eh_context *mehc = &ap->link.eh_context;
2091 struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2092
848e4c68
TH
2093 /* transfer control flags from master to slave */
2094 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2095
2096 /* perform autopsy on the slave link */
b1c72916
TH
2097 ata_eh_link_autopsy(ap->slave_link);
2098
848e4c68 2099 /* transfer actions from slave to master and clear slave */
b1c72916
TH
2100 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2101 mehc->i.action |= sehc->i.action;
2102 mehc->i.dev_action[1] |= sehc->i.dev_action[1];
2103 mehc->i.flags |= sehc->i.flags;
2104 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2105 }
2106
2695e366
TH
2107 /* Autopsy of fanout ports can affect host link autopsy.
2108 * Perform host link autopsy last.
2109 */
071f44b1 2110 if (sata_pmp_attached(ap))
2695e366 2111 ata_eh_link_autopsy(&ap->link);
9b1e2658
TH
2112}
2113
2114/**
2115 * ata_eh_link_report - report error handling to user
0260731f 2116 * @link: ATA link EH is going on
022bdb07
TH
2117 *
2118 * Report EH to user.
2119 *
2120 * LOCKING:
2121 * None.
2122 */
9b1e2658 2123static void ata_eh_link_report(struct ata_link *link)
022bdb07 2124{
0260731f
TH
2125 struct ata_port *ap = link->ap;
2126 struct ata_eh_context *ehc = &link->eh_context;
022bdb07 2127 const char *frozen, *desc;
a1e10f7e 2128 char tries_buf[6];
022bdb07
TH
2129 int tag, nr_failed = 0;
2130
94ff3d54
TH
2131 if (ehc->i.flags & ATA_EHI_QUIET)
2132 return;
2133
022bdb07
TH
2134 desc = NULL;
2135 if (ehc->i.desc[0] != '\0')
2136 desc = ehc->i.desc;
2137
2138 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2139 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2140
b1c72916
TH
2141 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2142 ata_dev_phys_link(qc->dev) != link ||
e027bd36
TH
2143 ((qc->flags & ATA_QCFLAG_QUIET) &&
2144 qc->err_mask == AC_ERR_DEV))
022bdb07
TH
2145 continue;
2146 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2147 continue;
2148
2149 nr_failed++;
2150 }
2151
2152 if (!nr_failed && !ehc->i.err_mask)
2153 return;
2154
2155 frozen = "";
b51e9e5d 2156 if (ap->pflags & ATA_PFLAG_FROZEN)
022bdb07
TH
2157 frozen = " frozen";
2158
a1e10f7e
TH
2159 memset(tries_buf, 0, sizeof(tries_buf));
2160 if (ap->eh_tries < ATA_EH_MAX_TRIES)
2161 snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
2162 ap->eh_tries);
2163
022bdb07 2164 if (ehc->i.dev) {
e8ee8451 2165 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
a1e10f7e
TH
2166 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2167 ehc->i.err_mask, link->sactive, ehc->i.serror,
2168 ehc->i.action, frozen, tries_buf);
022bdb07 2169 if (desc)
b64bbc39 2170 ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
022bdb07 2171 } else {
0260731f 2172 ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
a1e10f7e
TH
2173 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2174 ehc->i.err_mask, link->sactive, ehc->i.serror,
2175 ehc->i.action, frozen, tries_buf);
022bdb07 2176 if (desc)
0260731f 2177 ata_link_printk(link, KERN_ERR, "%s\n", desc);
022bdb07
TH
2178 }
2179
1333e194 2180 if (ehc->i.serror)
da0e21d3 2181 ata_link_printk(link, KERN_ERR,
1333e194
RH
2182 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2183 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2184 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2185 ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2186 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2187 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2188 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2189 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2190 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2191 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2192 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2193 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2194 ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2195 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2196 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2197 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2198 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2dcb407e 2199 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
1333e194 2200
022bdb07
TH
2201 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2202 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
8a937581 2203 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
abb6a889
TH
2204 const u8 *cdb = qc->cdb;
2205 char data_buf[20] = "";
2206 char cdb_buf[70] = "";
022bdb07 2207
0260731f 2208 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
b1c72916 2209 ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
022bdb07
TH
2210 continue;
2211
abb6a889
TH
2212 if (qc->dma_dir != DMA_NONE) {
2213 static const char *dma_str[] = {
2214 [DMA_BIDIRECTIONAL] = "bidi",
2215 [DMA_TO_DEVICE] = "out",
2216 [DMA_FROM_DEVICE] = "in",
2217 };
2218 static const char *prot_str[] = {
2219 [ATA_PROT_PIO] = "pio",
2220 [ATA_PROT_DMA] = "dma",
2221 [ATA_PROT_NCQ] = "ncq",
0dc36888
TH
2222 [ATAPI_PROT_PIO] = "pio",
2223 [ATAPI_PROT_DMA] = "dma",
abb6a889
TH
2224 };
2225
2226 snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2227 prot_str[qc->tf.protocol], qc->nbytes,
2228 dma_str[qc->dma_dir]);
2229 }
2230
e39eec13 2231 if (ata_is_atapi(qc->tf.protocol))
abb6a889
TH
2232 snprintf(cdb_buf, sizeof(cdb_buf),
2233 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
2234 "%02x %02x %02x %02x %02x %02x %02x %02x\n ",
2235 cdb[0], cdb[1], cdb[2], cdb[3],
2236 cdb[4], cdb[5], cdb[6], cdb[7],
2237 cdb[8], cdb[9], cdb[10], cdb[11],
2238 cdb[12], cdb[13], cdb[14], cdb[15]);
2239
8a937581
TH
2240 ata_dev_printk(qc->dev, KERN_ERR,
2241 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
abb6a889 2242 "tag %d%s\n %s"
8a937581 2243 "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
5335b729 2244 "Emask 0x%x (%s)%s\n",
8a937581
TH
2245 cmd->command, cmd->feature, cmd->nsect,
2246 cmd->lbal, cmd->lbam, cmd->lbah,
2247 cmd->hob_feature, cmd->hob_nsect,
2248 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
abb6a889 2249 cmd->device, qc->tag, data_buf, cdb_buf,
8a937581
TH
2250 res->command, res->feature, res->nsect,
2251 res->lbal, res->lbam, res->lbah,
2252 res->hob_feature, res->hob_nsect,
2253 res->hob_lbal, res->hob_lbam, res->hob_lbah,
5335b729
TH
2254 res->device, qc->err_mask, ata_err_string(qc->err_mask),
2255 qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
1333e194
RH
2256
2257 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2dcb407e 2258 ATA_ERR)) {
1333e194
RH
2259 if (res->command & ATA_BUSY)
2260 ata_dev_printk(qc->dev, KERN_ERR,
2dcb407e 2261 "status: { Busy }\n");
1333e194
RH
2262 else
2263 ata_dev_printk(qc->dev, KERN_ERR,
2264 "status: { %s%s%s%s}\n",
2265 res->command & ATA_DRDY ? "DRDY " : "",
2266 res->command & ATA_DF ? "DF " : "",
2267 res->command & ATA_DRQ ? "DRQ " : "",
2dcb407e 2268 res->command & ATA_ERR ? "ERR " : "");
1333e194
RH
2269 }
2270
2271 if (cmd->command != ATA_CMD_PACKET &&
2272 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
2273 ATA_ABORTED)))
2274 ata_dev_printk(qc->dev, KERN_ERR,
2275 "error: { %s%s%s%s}\n",
2276 res->feature & ATA_ICRC ? "ICRC " : "",
2277 res->feature & ATA_UNC ? "UNC " : "",
2278 res->feature & ATA_IDNF ? "IDNF " : "",
2dcb407e 2279 res->feature & ATA_ABORTED ? "ABRT " : "");
022bdb07
TH
2280 }
2281}
2282
9b1e2658
TH
2283/**
2284 * ata_eh_report - report error handling to user
2285 * @ap: ATA port to report EH about
2286 *
2287 * Report EH to user.
2288 *
2289 * LOCKING:
2290 * None.
2291 */
fb7fd614 2292void ata_eh_report(struct ata_port *ap)
9b1e2658
TH
2293{
2294 struct ata_link *link;
2295
1eca4365 2296 ata_for_each_link(link, ap, HOST_FIRST)
9b1e2658
TH
2297 ata_eh_link_report(link);
2298}
2299
cc0680a5 2300static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
b1c72916
TH
2301 unsigned int *classes, unsigned long deadline,
2302 bool clear_classes)
d87fa38e 2303{
f58229f8 2304 struct ata_device *dev;
d87fa38e 2305
b1c72916 2306 if (clear_classes)
1eca4365 2307 ata_for_each_dev(dev, link, ALL)
b1c72916 2308 classes[dev->devno] = ATA_DEV_UNKNOWN;
d87fa38e 2309
f046519f 2310 return reset(link, classes, deadline);
d87fa38e
TH
2311}
2312
ae791c05 2313static int ata_eh_followup_srst_needed(struct ata_link *link,
5dbfc9cb 2314 int rc, const unsigned int *classes)
664faf09 2315{
45db2f6c 2316 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
ae791c05 2317 return 0;
5dbfc9cb
TH
2318 if (rc == -EAGAIN)
2319 return 1;
071f44b1 2320 if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
3495de73 2321 return 1;
664faf09
TH
2322 return 0;
2323}
2324
fb7fd614
TH
2325int ata_eh_reset(struct ata_link *link, int classify,
2326 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2327 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
022bdb07 2328{
afaa5c37 2329 struct ata_port *ap = link->ap;
b1c72916 2330 struct ata_link *slave = ap->slave_link;
936fd732 2331 struct ata_eh_context *ehc = &link->eh_context;
705d2014 2332 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
664faf09 2333 unsigned int *classes = ehc->classes;
416dc9ed 2334 unsigned int lflags = link->flags;
1cdaf534 2335 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
d8af0eb6 2336 int max_tries = 0, try = 0;
b1c72916 2337 struct ata_link *failed_link;
f58229f8 2338 struct ata_device *dev;
416dc9ed 2339 unsigned long deadline, now;
022bdb07 2340 ata_reset_fn_t reset;
afaa5c37 2341 unsigned long flags;
416dc9ed 2342 u32 sstatus;
b1c72916 2343 int nr_unknown, rc;
022bdb07 2344
932648b0
TH
2345 /*
2346 * Prepare to reset
2347 */
d8af0eb6
TH
2348 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2349 max_tries++;
05944bdf
TH
2350 if (link->flags & ATA_LFLAG_NO_HRST)
2351 hardreset = NULL;
2352 if (link->flags & ATA_LFLAG_NO_SRST)
2353 softreset = NULL;
d8af0eb6 2354
19b72321
TH
2355 /* make sure each reset attemp is at least COOL_DOWN apart */
2356 if (ehc->i.flags & ATA_EHI_DID_RESET) {
2357 now = jiffies;
2358 WARN_ON(time_after(ehc->last_reset, now));
2359 deadline = ata_deadline(ehc->last_reset,
2360 ATA_EH_RESET_COOL_DOWN);
2361 if (time_before(now, deadline))
2362 schedule_timeout_uninterruptible(deadline - now);
2363 }
0a2c0f56 2364
afaa5c37
TH
2365 spin_lock_irqsave(ap->lock, flags);
2366 ap->pflags |= ATA_PFLAG_RESETTING;
2367 spin_unlock_irqrestore(ap->lock, flags);
2368
cf480626 2369 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
13abf50d 2370
1eca4365 2371 ata_for_each_dev(dev, link, ALL) {
cdeab114
TH
2372 /* If we issue an SRST then an ATA drive (not ATAPI)
2373 * may change configuration and be in PIO0 timing. If
2374 * we do a hard reset (or are coming from power on)
2375 * this is true for ATA or ATAPI. Until we've set a
2376 * suitable controller mode we should not touch the
2377 * bus as we may be talking too fast.
2378 */
2379 dev->pio_mode = XFER_PIO_0;
2380
2381 /* If the controller has a pio mode setup function
2382 * then use it to set the chipset to rights. Don't
2383 * touch the DMA setup as that will be dealt with when
2384 * configuring devices.
2385 */
2386 if (ap->ops->set_piomode)
2387 ap->ops->set_piomode(ap, dev);
2388 }
2389
cf480626 2390 /* prefer hardreset */
932648b0 2391 reset = NULL;
cf480626
TH
2392 ehc->i.action &= ~ATA_EH_RESET;
2393 if (hardreset) {
2394 reset = hardreset;
a674050e 2395 ehc->i.action |= ATA_EH_HARDRESET;
4f7faa3f 2396 } else if (softreset) {
cf480626 2397 reset = softreset;
a674050e 2398 ehc->i.action |= ATA_EH_SOFTRESET;
cf480626 2399 }
f5914a46
TH
2400
2401 if (prereset) {
b1c72916
TH
2402 unsigned long deadline = ata_deadline(jiffies,
2403 ATA_EH_PRERESET_TIMEOUT);
2404
2405 if (slave) {
2406 sehc->i.action &= ~ATA_EH_RESET;
2407 sehc->i.action |= ehc->i.action;
2408 }
2409
2410 rc = prereset(link, deadline);
2411
2412 /* If present, do prereset on slave link too. Reset
2413 * is skipped iff both master and slave links report
2414 * -ENOENT or clear ATA_EH_RESET.
2415 */
2416 if (slave && (rc == 0 || rc == -ENOENT)) {
2417 int tmp;
2418
2419 tmp = prereset(slave, deadline);
2420 if (tmp != -ENOENT)
2421 rc = tmp;
2422
2423 ehc->i.action |= sehc->i.action;
2424 }
2425
f5914a46 2426 if (rc) {
c961922b 2427 if (rc == -ENOENT) {
cc0680a5 2428 ata_link_printk(link, KERN_DEBUG,
4aa9ab67 2429 "port disabled. ignoring.\n");
cf480626 2430 ehc->i.action &= ~ATA_EH_RESET;
4aa9ab67 2431
1eca4365 2432 ata_for_each_dev(dev, link, ALL)
f58229f8 2433 classes[dev->devno] = ATA_DEV_NONE;
4aa9ab67
TH
2434
2435 rc = 0;
c961922b 2436 } else
cc0680a5 2437 ata_link_printk(link, KERN_ERR,
f5914a46 2438 "prereset failed (errno=%d)\n", rc);
fccb6ea5 2439 goto out;
f5914a46 2440 }
f5914a46 2441
932648b0 2442 /* prereset() might have cleared ATA_EH_RESET. If so,
d6515e6f 2443 * bang classes, thaw and return.
932648b0
TH
2444 */
2445 if (reset && !(ehc->i.action & ATA_EH_RESET)) {
1eca4365 2446 ata_for_each_dev(dev, link, ALL)
932648b0 2447 classes[dev->devno] = ATA_DEV_NONE;
d6515e6f
TH
2448 if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2449 ata_is_host_link(link))
2450 ata_eh_thaw_port(ap);
932648b0
TH
2451 rc = 0;
2452 goto out;
2453 }
f5914a46
TH
2454 }
2455
022bdb07 2456 retry:
932648b0
TH
2457 /*
2458 * Perform reset
2459 */
dc98c32c
TH
2460 if (ata_is_host_link(link))
2461 ata_eh_freeze_port(ap);
2462
341c2c95 2463 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
31daabda 2464
932648b0
TH
2465 if (reset) {
2466 if (verbose)
2467 ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2468 reset == softreset ? "soft" : "hard");
2469
2470 /* mark that this EH session started with reset */
19b72321 2471 ehc->last_reset = jiffies;
932648b0
TH
2472 if (reset == hardreset)
2473 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2474 else
2475 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
022bdb07 2476
b1c72916
TH
2477 rc = ata_do_reset(link, reset, classes, deadline, true);
2478 if (rc && rc != -EAGAIN) {
2479 failed_link = link;
5dbfc9cb 2480 goto fail;
b1c72916
TH
2481 }
2482
2483 /* hardreset slave link if existent */
2484 if (slave && reset == hardreset) {
2485 int tmp;
2486
2487 if (verbose)
2488 ata_link_printk(slave, KERN_INFO,
2489 "hard resetting link\n");
2490
2491 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2492 tmp = ata_do_reset(slave, reset, classes, deadline,
2493 false);
2494 switch (tmp) {
2495 case -EAGAIN:
2496 rc = -EAGAIN;
2497 case 0:
2498 break;
2499 default:
2500 failed_link = slave;
2501 rc = tmp;
2502 goto fail;
2503 }
2504 }
022bdb07 2505
b1c72916 2506 /* perform follow-up SRST if necessary */
932648b0 2507 if (reset == hardreset &&
5dbfc9cb 2508 ata_eh_followup_srst_needed(link, rc, classes)) {
932648b0 2509 reset = softreset;
022bdb07 2510
932648b0
TH
2511 if (!reset) {
2512 ata_link_printk(link, KERN_ERR,
2513 "follow-up softreset required "
2514 "but no softreset avaliable\n");
b1c72916 2515 failed_link = link;
932648b0
TH
2516 rc = -EINVAL;
2517 goto fail;
2518 }
664faf09 2519
932648b0 2520 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
b1c72916 2521 rc = ata_do_reset(link, reset, classes, deadline, true);
fe2c4d01
TH
2522 if (rc) {
2523 failed_link = link;
2524 goto fail;
2525 }
664faf09 2526 }
932648b0
TH
2527 } else {
2528 if (verbose)
2529 ata_link_printk(link, KERN_INFO, "no reset method "
2530 "available, skipping reset\n");
2531 if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2532 lflags |= ATA_LFLAG_ASSUME_ATA;
664faf09
TH
2533 }
2534
932648b0
TH
2535 /*
2536 * Post-reset processing
2537 */
1eca4365 2538 ata_for_each_dev(dev, link, ALL) {
416dc9ed
TH
2539 /* After the reset, the device state is PIO 0 and the
2540 * controller state is undefined. Reset also wakes up
2541 * drives from sleeping mode.
2542 */
2543 dev->pio_mode = XFER_PIO_0;
2544 dev->flags &= ~ATA_DFLAG_SLEEPING;
31daabda 2545
816ab897
TH
2546 if (!ata_phys_link_offline(ata_dev_phys_link(dev))) {
2547 /* apply class override */
2548 if (lflags & ATA_LFLAG_ASSUME_ATA)
2549 classes[dev->devno] = ATA_DEV_ATA;
2550 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2551 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2552 } else
2553 classes[dev->devno] = ATA_DEV_NONE;
022bdb07
TH
2554 }
2555
416dc9ed
TH
2556 /* record current link speed */
2557 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2558 link->sata_spd = (sstatus >> 4) & 0xf;
b1c72916
TH
2559 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2560 slave->sata_spd = (sstatus >> 4) & 0xf;
008a7896 2561
dc98c32c
TH
2562 /* thaw the port */
2563 if (ata_is_host_link(link))
2564 ata_eh_thaw_port(ap);
2565
f046519f
TH
2566 /* postreset() should clear hardware SError. Although SError
2567 * is cleared during link resume, clearing SError here is
2568 * necessary as some PHYs raise hotplug events after SRST.
2569 * This introduces race condition where hotplug occurs between
2570 * reset and here. This race is mediated by cross checking
2571 * link onlineness and classification result later.
2572 */
b1c72916 2573 if (postreset) {
416dc9ed 2574 postreset(link, classes);
b1c72916
TH
2575 if (slave)
2576 postreset(slave, classes);
2577 }
20952b69 2578
1e641060
TH
2579 /*
2580 * Some controllers can't be frozen very well and may set
2581 * spuruious error conditions during reset. Clear accumulated
2582 * error information. As reset is the final recovery action,
2583 * nothing is lost by doing this.
2584 */
f046519f 2585 spin_lock_irqsave(link->ap->lock, flags);
1e641060 2586 memset(&link->eh_info, 0, sizeof(link->eh_info));
b1c72916 2587 if (slave)
1e641060
TH
2588 memset(&slave->eh_info, 0, sizeof(link->eh_info));
2589 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
f046519f
TH
2590 spin_unlock_irqrestore(link->ap->lock, flags);
2591
2592 /* Make sure onlineness and classification result correspond.
2593 * Hotplug could have happened during reset and some
2594 * controllers fail to wait while a drive is spinning up after
2595 * being hotplugged causing misdetection. By cross checking
2596 * link onlineness and classification result, those conditions
2597 * can be reliably detected and retried.
2598 */
b1c72916 2599 nr_unknown = 0;
1eca4365 2600 ata_for_each_dev(dev, link, ALL) {
f046519f 2601 /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
b1c72916 2602 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
f046519f 2603 classes[dev->devno] = ATA_DEV_NONE;
b1c72916
TH
2604 if (ata_phys_link_online(ata_dev_phys_link(dev)))
2605 nr_unknown++;
2606 }
f046519f
TH
2607 }
2608
b1c72916 2609 if (classify && nr_unknown) {
f046519f
TH
2610 if (try < max_tries) {
2611 ata_link_printk(link, KERN_WARNING, "link online but "
2612 "device misclassified, retrying\n");
b1c72916 2613 failed_link = link;
f046519f
TH
2614 rc = -EAGAIN;
2615 goto fail;
2616 }
2617 ata_link_printk(link, KERN_WARNING,
2618 "link online but device misclassified, "
2619 "device detection might fail\n");
2620 }
2621
416dc9ed 2622 /* reset successful, schedule revalidation */
cf480626 2623 ata_eh_done(link, NULL, ATA_EH_RESET);
b1c72916
TH
2624 if (slave)
2625 ata_eh_done(slave, NULL, ATA_EH_RESET);
19b72321 2626 ehc->last_reset = jiffies; /* update to completion time */
416dc9ed 2627 ehc->i.action |= ATA_EH_REVALIDATE;
ae791c05 2628
416dc9ed 2629 rc = 0;
fccb6ea5
TH
2630 out:
2631 /* clear hotplug flag */
2632 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
b1c72916
TH
2633 if (slave)
2634 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
afaa5c37
TH
2635
2636 spin_lock_irqsave(ap->lock, flags);
2637 ap->pflags &= ~ATA_PFLAG_RESETTING;
2638 spin_unlock_irqrestore(ap->lock, flags);
2639
022bdb07 2640 return rc;
416dc9ed
TH
2641
2642 fail:
5958e302
TH
2643 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2644 if (!ata_is_host_link(link) &&
2645 sata_scr_read(link, SCR_STATUS, &sstatus))
2646 rc = -ERESTART;
2647
416dc9ed
TH
2648 if (rc == -ERESTART || try >= max_tries)
2649 goto out;
2650
2651 now = jiffies;
2652 if (time_before(now, deadline)) {
2653 unsigned long delta = deadline - now;
2654
b1c72916 2655 ata_link_printk(failed_link, KERN_WARNING,
0a2c0f56
TH
2656 "reset failed (errno=%d), retrying in %u secs\n",
2657 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
416dc9ed
TH
2658
2659 while (delta)
2660 delta = schedule_timeout_uninterruptible(delta);
2661 }
2662
b1c72916 2663 if (try == max_tries - 1) {
a07d499b 2664 sata_down_spd_limit(link, 0);
b1c72916 2665 if (slave)
a07d499b 2666 sata_down_spd_limit(slave, 0);
b1c72916 2667 } else if (rc == -EPIPE)
a07d499b 2668 sata_down_spd_limit(failed_link, 0);
b1c72916 2669
416dc9ed
TH
2670 if (hardreset)
2671 reset = hardreset;
2672 goto retry;
022bdb07
TH
2673}
2674
45fabbb7
EO
2675static inline void ata_eh_pull_park_action(struct ata_port *ap)
2676{
2677 struct ata_link *link;
2678 struct ata_device *dev;
2679 unsigned long flags;
2680
2681 /*
2682 * This function can be thought of as an extended version of
2683 * ata_eh_about_to_do() specially crafted to accommodate the
2684 * requirements of ATA_EH_PARK handling. Since the EH thread
2685 * does not leave the do {} while () loop in ata_eh_recover as
2686 * long as the timeout for a park request to *one* device on
2687 * the port has not expired, and since we still want to pick
2688 * up park requests to other devices on the same port or
2689 * timeout updates for the same device, we have to pull
2690 * ATA_EH_PARK actions from eh_info into eh_context.i
2691 * ourselves at the beginning of each pass over the loop.
2692 *
2693 * Additionally, all write accesses to &ap->park_req_pending
2694 * through INIT_COMPLETION() (see below) or complete_all()
2695 * (see ata_scsi_park_store()) are protected by the host lock.
2696 * As a result we have that park_req_pending.done is zero on
2697 * exit from this function, i.e. when ATA_EH_PARK actions for
2698 * *all* devices on port ap have been pulled into the
2699 * respective eh_context structs. If, and only if,
2700 * park_req_pending.done is non-zero by the time we reach
2701 * wait_for_completion_timeout(), another ATA_EH_PARK action
2702 * has been scheduled for at least one of the devices on port
2703 * ap and we have to cycle over the do {} while () loop in
2704 * ata_eh_recover() again.
2705 */
2706
2707 spin_lock_irqsave(ap->lock, flags);
2708 INIT_COMPLETION(ap->park_req_pending);
1eca4365
TH
2709 ata_for_each_link(link, ap, EDGE) {
2710 ata_for_each_dev(dev, link, ALL) {
45fabbb7
EO
2711 struct ata_eh_info *ehi = &link->eh_info;
2712
2713 link->eh_context.i.dev_action[dev->devno] |=
2714 ehi->dev_action[dev->devno] & ATA_EH_PARK;
2715 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
2716 }
2717 }
2718 spin_unlock_irqrestore(ap->lock, flags);
2719}
2720
2721static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
2722{
2723 struct ata_eh_context *ehc = &dev->link->eh_context;
2724 struct ata_taskfile tf;
2725 unsigned int err_mask;
2726
2727 ata_tf_init(dev, &tf);
2728 if (park) {
2729 ehc->unloaded_mask |= 1 << dev->devno;
2730 tf.command = ATA_CMD_IDLEIMMEDIATE;
2731 tf.feature = 0x44;
2732 tf.lbal = 0x4c;
2733 tf.lbam = 0x4e;
2734 tf.lbah = 0x55;
2735 } else {
2736 ehc->unloaded_mask &= ~(1 << dev->devno);
2737 tf.command = ATA_CMD_CHK_POWER;
2738 }
2739
2740 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2741 tf.protocol |= ATA_PROT_NODATA;
2742 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2743 if (park && (err_mask || tf.lbal != 0xc4)) {
2744 ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
2745 ehc->unloaded_mask &= ~(1 << dev->devno);
2746 }
2747}
2748
0260731f 2749static int ata_eh_revalidate_and_attach(struct ata_link *link,
084fe639 2750 struct ata_device **r_failed_dev)
022bdb07 2751{
0260731f
TH
2752 struct ata_port *ap = link->ap;
2753 struct ata_eh_context *ehc = &link->eh_context;
022bdb07 2754 struct ata_device *dev;
8c3c52a8 2755 unsigned int new_mask = 0;
084fe639 2756 unsigned long flags;
f58229f8 2757 int rc = 0;
022bdb07
TH
2758
2759 DPRINTK("ENTER\n");
2760
8c3c52a8
TH
2761 /* For PATA drive side cable detection to work, IDENTIFY must
2762 * be done backwards such that PDIAG- is released by the slave
2763 * device before the master device is identified.
2764 */
1eca4365 2765 ata_for_each_dev(dev, link, ALL_REVERSE) {
f58229f8
TH
2766 unsigned int action = ata_eh_dev_action(dev);
2767 unsigned int readid_flags = 0;
022bdb07 2768
bff04647
TH
2769 if (ehc->i.flags & ATA_EHI_DID_RESET)
2770 readid_flags |= ATA_READID_POSTRESET;
2771
9666f400 2772 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
633273a3
TH
2773 WARN_ON(dev->class == ATA_DEV_PMP);
2774
b1c72916 2775 if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
022bdb07 2776 rc = -EIO;
8c3c52a8 2777 goto err;
022bdb07
TH
2778 }
2779
0260731f 2780 ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
422c9daa
TH
2781 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
2782 readid_flags);
022bdb07 2783 if (rc)
8c3c52a8 2784 goto err;
022bdb07 2785
0260731f 2786 ata_eh_done(link, dev, ATA_EH_REVALIDATE);
47005f25 2787
baa1e78a
TH
2788 /* Configuration may have changed, reconfigure
2789 * transfer mode.
2790 */
2791 ehc->i.flags |= ATA_EHI_SETMODE;
2792
3057ac3c 2793 /* schedule the scsi_rescan_device() here */
2794 queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
084fe639
TH
2795 } else if (dev->class == ATA_DEV_UNKNOWN &&
2796 ehc->tries[dev->devno] &&
2797 ata_class_enabled(ehc->classes[dev->devno])) {
842faa6c
TH
2798 /* Temporarily set dev->class, it will be
2799 * permanently set once all configurations are
2800 * complete. This is necessary because new
2801 * device configuration is done in two
2802 * separate loops.
2803 */
084fe639
TH
2804 dev->class = ehc->classes[dev->devno];
2805
633273a3
TH
2806 if (dev->class == ATA_DEV_PMP)
2807 rc = sata_pmp_attach(dev);
2808 else
2809 rc = ata_dev_read_id(dev, &dev->class,
2810 readid_flags, dev->id);
842faa6c
TH
2811
2812 /* read_id might have changed class, store and reset */
2813 ehc->classes[dev->devno] = dev->class;
2814 dev->class = ATA_DEV_UNKNOWN;
2815
8c3c52a8
TH
2816 switch (rc) {
2817 case 0:
99cf610a
TH
2818 /* clear error info accumulated during probe */
2819 ata_ering_clear(&dev->ering);
f58229f8 2820 new_mask |= 1 << dev->devno;
8c3c52a8
TH
2821 break;
2822 case -ENOENT:
55a8e2c8
TH
2823 /* IDENTIFY was issued to non-existent
2824 * device. No need to reset. Just
842faa6c 2825 * thaw and ignore the device.
55a8e2c8
TH
2826 */
2827 ata_eh_thaw_port(ap);
084fe639 2828 break;
8c3c52a8 2829 default:
8c3c52a8 2830 goto err;
084fe639 2831 }
8c3c52a8
TH
2832 }
2833 }
084fe639 2834
c1c4e8d5 2835 /* PDIAG- should have been released, ask cable type if post-reset */
33267325
TH
2836 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
2837 if (ap->ops->cable_detect)
2838 ap->cbl = ap->ops->cable_detect(ap);
2839 ata_force_cbl(ap);
2840 }
c1c4e8d5 2841
8c3c52a8
TH
2842 /* Configure new devices forward such that user doesn't see
2843 * device detection messages backwards.
2844 */
1eca4365 2845 ata_for_each_dev(dev, link, ALL) {
633273a3
TH
2846 if (!(new_mask & (1 << dev->devno)) ||
2847 dev->class == ATA_DEV_PMP)
8c3c52a8
TH
2848 continue;
2849
842faa6c
TH
2850 dev->class = ehc->classes[dev->devno];
2851
8c3c52a8
TH
2852 ehc->i.flags |= ATA_EHI_PRINTINFO;
2853 rc = ata_dev_configure(dev);
2854 ehc->i.flags &= ~ATA_EHI_PRINTINFO;
842faa6c
TH
2855 if (rc) {
2856 dev->class = ATA_DEV_UNKNOWN;
8c3c52a8 2857 goto err;
842faa6c 2858 }
8c3c52a8
TH
2859
2860 spin_lock_irqsave(ap->lock, flags);
2861 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
2862 spin_unlock_irqrestore(ap->lock, flags);
2863
2864 /* new device discovered, configure xfermode */
2865 ehc->i.flags |= ATA_EHI_SETMODE;
022bdb07
TH
2866 }
2867
8c3c52a8 2868 return 0;
022bdb07 2869
8c3c52a8
TH
2870 err:
2871 *r_failed_dev = dev;
2872 DPRINTK("EXIT rc=%d\n", rc);
022bdb07
TH
2873 return rc;
2874}
2875
6f1d1e3a
TH
2876/**
2877 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2878 * @link: link on which timings will be programmed
98a1708d 2879 * @r_failed_dev: out parameter for failed device
6f1d1e3a
TH
2880 *
2881 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2882 * ata_set_mode() fails, pointer to the failing device is
2883 * returned in @r_failed_dev.
2884 *
2885 * LOCKING:
2886 * PCI/etc. bus probe sem.
2887 *
2888 * RETURNS:
2889 * 0 on success, negative errno otherwise
2890 */
2891int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2892{
2893 struct ata_port *ap = link->ap;
00115e0f
TH
2894 struct ata_device *dev;
2895 int rc;
6f1d1e3a 2896
76326ac1 2897 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
1eca4365 2898 ata_for_each_dev(dev, link, ENABLED) {
76326ac1
TH
2899 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
2900 struct ata_ering_entry *ent;
2901
2902 ent = ata_ering_top(&dev->ering);
2903 if (ent)
2904 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
2905 }
2906 }
2907
6f1d1e3a
TH
2908 /* has private set_mode? */
2909 if (ap->ops->set_mode)
00115e0f
TH
2910 rc = ap->ops->set_mode(link, r_failed_dev);
2911 else
2912 rc = ata_do_set_mode(link, r_failed_dev);
2913
2914 /* if transfer mode has changed, set DUBIOUS_XFER on device */
1eca4365 2915 ata_for_each_dev(dev, link, ENABLED) {
00115e0f
TH
2916 struct ata_eh_context *ehc = &link->eh_context;
2917 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
2918 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
2919
2920 if (dev->xfer_mode != saved_xfer_mode ||
2921 ata_ncq_enabled(dev) != saved_ncq)
2922 dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
2923 }
2924
2925 return rc;
6f1d1e3a
TH
2926}
2927
11fc33da
TH
/**
 *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
 *	@dev: ATAPI device to clear UA for
 *
 *	Resets and other operations can make an ATAPI device raise
 *	UNIT ATTENTION which causes the next operation to fail.  This
 *	function clears UA.
 *
 *	Issues TEST UNIT READY followed by REQUEST SENSE, up to
 *	ATA_EH_UA_TRIES times.  Note that if UA still persists after
 *	all tries, the condition is only logged and 0 is returned --
 *	persistence is treated as non-fatal.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int atapi_eh_clear_ua(struct ata_device *dev)
{
	int i;

	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
		/* shared per-port scratch buffer; safe in EH context */
		u8 *sense_buffer = dev->link->ap->sector_buf;
		u8 sense_key = 0;
		unsigned int err_mask;

		err_mask = atapi_eh_tur(dev, &sense_key);
		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
			ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
				"failed (err_mask=0x%x)\n", err_mask);
			return -EIO;
		}

		/* no error, or an error other than UNIT ATTENTION: done */
		if (!err_mask || sense_key != UNIT_ATTENTION)
			return 0;

		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
		if (err_mask) {
			ata_dev_printk(dev, KERN_WARNING, "failed to clear "
				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
			return -EIO;
		}
	}

	ata_dev_printk(dev, KERN_WARNING,
		"UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);

	return 0;
}
2974
0260731f 2975static int ata_link_nr_enabled(struct ata_link *link)
022bdb07 2976{
f58229f8
TH
2977 struct ata_device *dev;
2978 int cnt = 0;
022bdb07 2979
1eca4365
TH
2980 ata_for_each_dev(dev, link, ENABLED)
2981 cnt++;
022bdb07
TH
2982 return cnt;
2983}
2984
0260731f 2985static int ata_link_nr_vacant(struct ata_link *link)
084fe639 2986{
f58229f8
TH
2987 struct ata_device *dev;
2988 int cnt = 0;
084fe639 2989
1eca4365 2990 ata_for_each_dev(dev, link, ALL)
f58229f8 2991 if (dev->class == ATA_DEV_UNKNOWN)
084fe639
TH
2992 cnt++;
2993 return cnt;
2994}
2995
/*
 * Decide whether EH recovery can be skipped for @link.
 *
 * Returns 1 (skip) when the link is disabled, or when the port isn't
 * frozen, no device needs recovery, no pending reset remains to be
 * performed, and all vacant slots were classified ATA_DEV_NONE.
 * Returns 0 when recovery must run.
 */
static int ata_eh_skip_recovery(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;

	/* skip disabled links */
	if (link->flags & ATA_LFLAG_DISABLED)
		return 1;

	/* thaw frozen port and recover failed devices */
	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
		return 0;

	/* reset at least once if reset is requested */
	if ((ehc->i.action & ATA_EH_RESET) &&
	    !(ehc->i.flags & ATA_EHI_DID_RESET))
		return 0;

	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
	ata_for_each_dev(dev, link, ALL) {
		if (dev->class == ATA_DEV_UNKNOWN &&
		    ehc->classes[dev->devno] != ATA_DEV_NONE)
			return 0;
	}

	return 1;
}
3024
c2c7a89c
TH
3025static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3026{
3027 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3028 u64 now = get_jiffies_64();
3029 int *trials = void_arg;
3030
3031 if (ent->timestamp < now - min(now, interval))
3032 return -1;
3033
3034 (*trials)++;
3035 return 0;
3036}
3037
02c05a27
TH
/*
 * Schedule probing for @dev if it is requested in probe_mask and has
 * not already been probed in this EH round.  Detaches and
 * re-initializes the device, requests a reset and clears the saved
 * transfer-mode/NCQ state so the fresh device starts clean.
 *
 * Returns 1 if a probe was scheduled, 0 otherwise.
 */
static int ata_eh_schedule_probe(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_link *link = ata_dev_phys_link(dev);
	int trials = 0;

	/* not requested, or already probed this round */
	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
	    (ehc->did_probe_mask & (1 << dev->devno)))
		return 0;

	ata_eh_detach_dev(dev);
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_RESET;
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	/* Record and count probe trials on the ering.  The specific
	 * error mask used is irrelevant.  Because a successful device
	 * detection clears the ering, this count accumulates only if
	 * there are consecutive failed probes.
	 *
	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
	 * forced to 1.5Gbps.
	 *
	 * This is to work around cases where failed link speed
	 * negotiation results in device misdetection leading to
	 * infinite DEVXCHG or PHRDY CHG events.
	 */
	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);

	if (trials > ATA_EH_PROBE_TRIALS)
		sata_down_spd_limit(link, 1);

	return 1;
}
3076
/*
 * Handle recovery failure @err for @dev: charge a retry (unless
 * -EAGAIN), escalate via speed-down / xfermask limiting on the last
 * try, and disable the device once its tries are exhausted.
 *
 * Returns 1 if the device was given up on (disabled), 0 if recovery
 * should be retried with another reset.
 */
static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	/* -EAGAIN from EH routine indicates retry without prejudice.
	 * The requester is responsible for ensuring forward progress.
	 */
	if (err != -EAGAIN)
		ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
		/* fallthrough */
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
		/* fallthrough */
	case -EIO:
		if (ehc->tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
			if (dev->pio_mode > XFER_PIO_0)
				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);

		/* schedule probe if necessary */
		if (ata_eh_schedule_probe(dev)) {
			/* fresh probe gets a full set of tries and a
			 * reset of the command timeout escalation table
			 */
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			memset(ehc->cmd_timeout_idx[dev->devno], 0,
			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
		}

		return 1;
	} else {
		/* retry with another reset */
		ehc->i.action |= ATA_EH_RESET;
		return 0;
	}
}
3126
022bdb07
TH
/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@r_failed_link: out parameter for failed link
 *
 *	This is the alpha and omega, eum and yang, heart and soul of
 *	libata exception handling.  On entry, actions required to
 *	recover each link and hotplug requests are recorded in the
 *	link's eh_context.  This function executes all the operations
 *	with appropriate retrials and fallbacks to resurrect failed
 *	devices, detach goners and greet newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		   ata_postreset_fn_t postreset,
		   struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int nr_failed_devs;
	int rc;
	unsigned long flags, deadline;

	DPRINTK("ENTER\n");

	/* prep for recovery */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* re-enable link? */
		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
			spin_lock_irqsave(ap->lock, flags);
			link->flags &= ~ATA_LFLAG_DISABLED;
			spin_unlock_irqrestore(ap->lock, flags);
			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
		}

		ata_for_each_dev(dev, link, ALL) {
			if (link->flags & ATA_LFLAG_NO_RETRY)
				ehc->tries[dev->devno] = 1;
			else
				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			/* schedule probe if necessary */
			if (!ata_dev_enabled(dev))
				ata_eh_schedule_probe(dev);
		}
	}

 retry:
	rc = 0;
	nr_failed_devs = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible. */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		ata_for_each_dev(dev, link, ALL)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (!(ehc->i.action & ATA_EH_RESET))
			continue;

		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
				  prereset, softreset, hardreset, postreset);
		if (rc) {
			/* reset failure is fatal for the whole port */
			ata_link_printk(link, KERN_ERR,
					"reset failed, giving up\n");
			goto out;
		}
	}

	/* park/unpark handling: wait until all requested head-unload
	 * deadlines expire, re-issuing park commands as needed
	 */
	do {
		unsigned long now;

		/*
		 * clears ATA_EH_PARK in eh_info and resets
		 * ap->park_req_pending
		 */
		ata_eh_pull_park_action(ap);

		deadline = jiffies;
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL) {
				struct ata_eh_context *ehc = &link->eh_context;
				unsigned long tmp;

				if (dev->class != ATA_DEV_ATA)
					continue;
				if (!(ehc->i.dev_action[dev->devno] &
				      ATA_EH_PARK))
					continue;
				tmp = dev->unpark_deadline;
				if (time_before(deadline, tmp))
					deadline = tmp;
				else if (time_before_eq(tmp, jiffies))
					continue;
				if (ehc->unloaded_mask & (1 << dev->devno))
					continue;

				ata_eh_park_issue_cmd(dev, 1);
			}
		}

		now = jiffies;
		if (time_before_eq(deadline, now))
			break;

		deadline = wait_for_completion_timeout(&ap->park_req_pending,
						       deadline - now);
	} while (deadline);
	/* unpark everything that was unloaded above */
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			if (!(link->eh_context.unloaded_mask &
			      (1 << dev->devno)))
				continue;

			ata_eh_park_issue_cmd(dev, 0);
			ata_eh_done(link, dev, ATA_EH_PARK);
		}
	}

	/* the rest */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto dev_fail;

		/* if PMP got attached, return, pmp EH will take care of it */
		if (link->device->class == ATA_DEV_PMP) {
			ehc->i.action = 0;
			return 0;
		}

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc = ata_set_mode(link, &dev);
			if (rc)
				goto dev_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		/* If reset has been issued, clear UA to avoid
		 * disrupting the current users of the device.
		 */
		if (ehc->i.flags & ATA_EHI_DID_RESET) {
			ata_for_each_dev(dev, link, ALL) {
				if (dev->class != ATA_DEV_ATAPI)
					continue;
				rc = atapi_eh_clear_ua(dev);
				if (rc)
					goto dev_fail;
			}
		}

		/* configure link power saving */
		if (ehc->i.action & ATA_EH_LPM)
			ata_for_each_dev(dev, link, ALL)
				ata_dev_enable_pm(dev, ap->pm_policy);

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

dev_fail:
		/* @dev holds the device that failed the step above */
		nr_failed_devs++;
		ata_eh_handle_dev_fail(dev, rc);

		if (ap->pflags & ATA_PFLAG_FROZEN) {
			/* PMP reset requires working host port.
			 * Can't retry if it's frozen.
			 */
			if (sata_pmp_attached(ap))
				goto out;
			break;
		}
	}

	/* any failed device re-arms a full recovery pass */
	if (nr_failed_devs)
		goto retry;

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3351
/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete.  Clean up EH states and retry or finish
 *	failed qcs.  Commands flagged ATA_QCFLAG_RETRY are retried;
 *	others are completed, feeding a zeroed result TF to sense
 *	generation when no valid sense data is present.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	int tag;

	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		/* only commands that failed are EH's to dispose of */
		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}
3397
/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence: autopsy, report,
 *	recover, finish.  If recovery fails, every device on the
 *	port's host link is disabled before finishing.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	struct ata_device *dev;
	int rc;

	ata_eh_autopsy(ap);
	ata_eh_report(ap);

	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
			    NULL);
	if (rc) {
		/* recovery gave up; take all devices offline */
		ata_for_each_dev(dev, &ap->link, ALL)
			ata_dev_disable(dev);
	}

	ata_eh_finish(ap);
}
500530f6 3431
a1efdaba
TH
3432/**
3433 * ata_std_error_handler - standard error handler
3434 * @ap: host port to handle error for
3435 *
3436 * Standard error handler
3437 *
3438 * LOCKING:
3439 * Kernel thread context (may sleep).
3440 */
3441void ata_std_error_handler(struct ata_port *ap)
3442{
3443 struct ata_port_operations *ops = ap->ops;
3444 ata_reset_fn_t hardreset = ops->hardreset;
3445
57c9efdf
TH
3446 /* ignore built-in hardreset if SCR access is not available */
3447 if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
a1efdaba
TH
3448 hardreset = NULL;
3449
3450 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3451}
3452
6ffa01d8 3453#ifdef CONFIG_PM
500530f6
TH
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.  Notifies ACPI, freezes the port, invokes the
 *	driver's ->port_suspend() if provided, then reports the result
 *	through @ap->pm_result and updates the PM pflags.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event == PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	/* must not already be suspended */
	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, PMSG_SUSPEND);
 out:
	/* report result */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		/* suspend failed while frozen; let EH thaw the port */
		ata_port_schedule_eh(ap);

	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return;
}
3510
/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.  Clears stale per-device error history, restores
 *	ACPI state, invokes the driver's ->port_resume() if provided,
 *	and reports the result through @ap->pm_result.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;
	int rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event != PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	/* must have been suspended before resuming */
	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies which doesn't run while
	 * suspended and PHY events during resume isn't too uncommon.
	 * When the two are combined, it can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear error history.
	 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	ata_acpi_set_state(ap, PMSG_ON);

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* report result */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
6ffa01d8 3566#endif /* CONFIG_PM */
This page took 0.715507 seconds and 5 git commands to generate.