isci: Termination handling cleanup, added termination timeouts.
drivers/scsi/isci/task.c
1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56 #include <linux/completion.h>
57 #include "scic_task_request.h"
58 #include "scic_remote_device.h"
59 #include "scic_io_request.h"
60 #include "scic_sds_remote_device.h"
61 #include "scic_sds_remote_node_context.h"
62 #include "isci.h"
63 #include "request.h"
64 #include "sata.h"
65 #include "task.h"
66
67
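/*
 * This file implements the libsas SAS Domain Template entry points for the
 * isci driver: task execution (isci_task_execute_task) and the task
 * management functions (abort task, LUN reset, etc.), along with the
 * termination helpers used to clean up outstanding requests.
 */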
68 /**
69 * isci_task_execute_task() - This function is one of the SAS Domain Template
70 * functions. This function is called by libsas to send a task down to
71 * hardware.
72 * @task: This parameter specifies the SAS task to send.
73 * @num: This parameter specifies the number of tasks to queue.
74 * @gfp_flags: This parameter specifies the context of this call.
75 *
76 * status, zero indicates success.
77 */
78 int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
79 {
80 struct isci_host *isci_host;
81 struct isci_request *request = NULL;
82 struct isci_remote_device *device;
83 unsigned long flags;
84 int ret;
85 enum sci_status status;
86 enum isci_status device_status;
87
88 dev_dbg(task->dev->port->ha->dev, "%s: num=%d\n", __func__, num);
89
90 if ((task->dev == NULL) || (task->dev->port == NULL)) {
91
92 /* Indicate SAS_TASK_UNDELIVERED, so that the scsi midlayer
93 * removes the target.
94 */
95 isci_task_complete_for_upper_layer(
96 task,
97 SAS_TASK_UNDELIVERED,
98 SAS_DEVICE_UNKNOWN,
99 isci_perform_normal_io_completion
100 );
101 return 0; /* The I/O was accepted (and failed). */
102 }
103 isci_host = isci_host_from_sas_ha(task->dev->port->ha);
104
105 /* Check if we have room for more tasks */
106 ret = isci_host_can_queue(isci_host, num);
107
108 if (ret) {
109 dev_warn(task->dev->port->ha->dev, "%s: queue full\n", __func__);
110 return ret;
111 }
112
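/*
 * libsas may hand us a short chain of 'num' tasks linked through
 * task->list; walk that chain, building and starting a request for
 * each task in turn.
 */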
113 do {
114 dev_dbg(task->dev->port->ha->dev,
115 "task = %p, num = %d; dev = %p; cmd = %p\n",
116 task, num, task->dev, task->uldd_task);
117
118 if ((task->dev == NULL) || (task->dev->port == NULL)) {
119 dev_warn(&isci_host->pdev->dev,
120 "%s: task %p's port or dev == NULL!\n",
121 __func__, task);
122
123 /* Indicate SAS_TASK_UNDELIVERED, so that the scsi
124 * midlayer removes the target.
125 */
126 isci_task_complete_for_upper_layer(
127 task,
128 SAS_TASK_UNDELIVERED,
129 SAS_DEVICE_UNKNOWN,
130 isci_perform_normal_io_completion
131 );
132 /* We don't have a valid host reference, so we
133 * can't control the host queueing condition.
134 */
135 goto next_task;
136 }
137
138 device = isci_dev_from_domain_dev(task->dev);
139
140 isci_host = isci_host_from_sas_ha(task->dev->port->ha);
141
142 if (device)
143 device_status = device->status;
144 else
145 device_status = isci_freed;
146
147 /* From this point onward, any process that needs to guarantee
148 * that there is no kernel I/O being started will have to wait
149 * for the quiesce spinlock.
150 */
151
152 if (device_status != isci_ready_for_io) {
153
154 /* Forces a retry from scsi mid layer. */
155 dev_warn(task->dev->port->ha->dev,
156 "%s: task %p: isci_host->status = %d, "
157 "device = %p; device_status = 0x%x\n\n",
158 __func__,
159 task,
160 isci_host_get_state(isci_host),
161 device, device_status);
162
163 if (device_status == isci_ready) {
164 /* Indicate QUEUE_FULL so that the scsi midlayer
165 * retries.
166 */
167 isci_task_complete_for_upper_layer(
168 task,
169 SAS_TASK_COMPLETE,
170 SAS_QUEUE_FULL,
171 isci_perform_normal_io_completion
172 );
173 } else {
174 /* Else, the device is going down. */
175 isci_task_complete_for_upper_layer(
176 task,
177 SAS_TASK_UNDELIVERED,
178 SAS_DEVICE_UNKNOWN,
179 isci_perform_normal_io_completion
180 );
181 }
182 isci_host_can_dequeue(isci_host, 1);
183 } else {
184 /* There is a device and it's ready for I/O. */
185 spin_lock_irqsave(&task->task_state_lock, flags);
186
187 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
188
189 spin_unlock_irqrestore(&task->task_state_lock,
190 flags);
191
192 isci_task_complete_for_upper_layer(
193 task,
194 SAS_TASK_UNDELIVERED,
195 SAM_STAT_TASK_ABORTED,
196 isci_perform_normal_io_completion
197 );
198
199 /* The I/O was aborted. */
200
201 } else {
202 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
203 spin_unlock_irqrestore(&task->task_state_lock, flags);
204
205 /* build and send the request. */
206 status = isci_request_execute(isci_host, task, &request,
207 gfp_flags);
208
209 if (status != SCI_SUCCESS) {
210
211 spin_lock_irqsave(&task->task_state_lock, flags);
212 /* Did not really start this command. */
213 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
214 spin_unlock_irqrestore(&task->task_state_lock, flags);
215
216 /* Indicate QUEUE_FULL so that the scsi
217 * midlayer retries. If the request
218 * failed for remote device reasons,
219 * it gets returned as
220 * SAS_TASK_UNDELIVERED next time
221 * through.
222 */
223 isci_task_complete_for_upper_layer(
224 task,
225 SAS_TASK_COMPLETE,
226 SAS_QUEUE_FULL,
227 isci_perform_normal_io_completion
228 );
229 isci_host_can_dequeue(isci_host, 1);
230 }
231 }
232 }
233 next_task:
234 task = list_entry(task->list.next, struct sas_task, list);
235 } while (--num > 0);
236 return 0;
237 }
238
239
240
241 /**
242 * isci_task_request_build() - This function builds the task request object.
243 * @isci_host: This parameter specifies the ISCI host object
244 * @isci_request: This parameter points to the isci_request object allocated in the
245 * request construct function.
246 * @isci_tmf: This parameter is the task management struct to be built
247 *
248 * SCI_SUCCESS on successful completion, or specific failure code.
249 */
250 static enum sci_status isci_task_request_build(
251 struct isci_host *isci_host,
252 struct isci_request **isci_request,
253 struct isci_tmf *isci_tmf)
254 {
255 struct scic_sds_remote_device *sci_device;
256 enum sci_status status = SCI_FAILURE;
257 struct isci_request *request;
258 struct isci_remote_device *isci_device;
259 /* struct sci_sas_identify_address_frame_protocols dev_protocols; */
260 struct smp_discover_response_protocols dev_protocols;
261
262
263 dev_dbg(&isci_host->pdev->dev,
264 "%s: isci_tmf = %p\n", __func__, isci_tmf);
265
266 isci_device = isci_tmf->device;
267 sci_device = to_sci_dev(isci_device);
268
269 /* do common allocation and init of request object. */
270 status = isci_request_alloc_tmf(
271 isci_host,
272 isci_tmf,
273 &request,
274 isci_device,
275 GFP_ATOMIC
276 );
277
278 if (status != SCI_SUCCESS)
279 goto out;
280
281 /* let the core do its construction. */
282 status = scic_task_request_construct(
283 isci_host->core_controller,
284 sci_device,
285 SCI_CONTROLLER_INVALID_IO_TAG,
286 request,
287 request->sci_request_mem_ptr,
288 &request->sci_request_handle
289 );
290
291 if (status != SCI_SUCCESS) {
292 dev_warn(&isci_host->pdev->dev,
293 "%s: scic_task_request_construct failed - "
294 "status = 0x%x\n",
295 __func__,
296 status);
297 goto errout;
298 }
299
300 sci_object_set_association(
301 request->sci_request_handle,
302 request
303 );
304
305 scic_remote_device_get_protocols(
306 sci_device,
307 &dev_protocols
308 );
309
310 /* let the core do its protocol-
311 * specific construction.
312 */
313 if (dev_protocols.u.bits.attached_ssp_target) {
314
315 isci_tmf->proto = SAS_PROTOCOL_SSP;
316 status = scic_task_request_construct_ssp(
317 request->sci_request_handle
318 );
319 if (status != SCI_SUCCESS)
320 goto errout;
321 }
322
323 if (dev_protocols.u.bits.attached_stp_target) {
324
325 isci_tmf->proto = SAS_PROTOCOL_SATA;
326 status = isci_sata_management_task_request_build(request);
327
328 if (status != SCI_SUCCESS)
329 goto errout;
330 }
331
332 goto out;
333
334 errout:
335
336 /* release the dma memory if we fail. */
337 isci_request_free(isci_host, request);
338 request = NULL;
339
340 out:
341 *isci_request = request;
342 return status;
343 }
344
345 /**
346 * isci_tmf_timeout_cb() - This function is called as a kernel callback when
347 * the timeout period for the TMF has expired.
348 * @tmf_request_arg: This parameter is the isci_request for the TMF whose
349 * timeout period has expired.
350 */
351 static void isci_tmf_timeout_cb(void *tmf_request_arg)
352 {
353 struct isci_request *request = (struct isci_request *)tmf_request_arg;
354 struct isci_tmf *tmf = isci_request_access_tmf(request);
355 enum sci_status status;
356
357 BUG_ON(request->ttype != tmf_task);
358
359 /* This task management request has timed-out. Terminate the request
360 * so that the request eventually completes to the requestor in the
361 * request completion callback path.
362 */
363 /* Note - the timer callback function itself has provided spinlock
364 * exclusion from the start and completion paths. No need to take
365 * the request->isci_host->scic_lock here.
366 */
367
368 if (tmf->timeout_timer != NULL) {
369 /* Call the users callback, if any. */
370 if (tmf->cb_state_func != NULL)
371 tmf->cb_state_func(isci_tmf_timed_out, tmf,
372 tmf->cb_data);
373
374 /* Terminate the TMF transmit request. */
375 status = scic_controller_terminate_request(
376 request->isci_host->core_controller,
377 to_sci_dev(request->isci_device),
378 request->sci_request_handle
379 );
380
381 dev_dbg(&request->isci_host->pdev->dev,
382 "%s: tmf_request = %p; tmf = %p; status = %d\n",
383 __func__, request, tmf, status);
384 } else
385 dev_dbg(&request->isci_host->pdev->dev,
386 "%s: timer already canceled! "
387 "tmf_request = %p; tmf = %p\n",
388 __func__, request, tmf);
389
390 /* No need to unlock since the caller to this callback is doing it for
391 * us.
392 * request->isci_host->scic_lock
393 */
394 }
395
396 /**
397 * isci_task_execute_tmf() - This function builds and sends a task request,
398 * then waits for the completion.
399 * @isci_host: This parameter specifies the ISCI host object
400 * @tmf: This parameter is the pointer to the task management structure for
401 * this request.
402 * @timeout_ms: This parameter specifies the timeout period for the task
403 * management request.
404 *
405 * TMF_RESP_FUNC_COMPLETE on successful completion of the TMF (this includes
406 * error conditions reported in the IU status), or TMF_RESP_FUNC_FAILED.
407 */
408 int isci_task_execute_tmf(
409 struct isci_host *isci_host,
410 struct isci_tmf *tmf,
411 unsigned long timeout_ms)
412 {
413 DECLARE_COMPLETION_ONSTACK(completion);
414 enum sci_status status = SCI_FAILURE;
415 struct scic_sds_remote_device *sci_device;
416 struct isci_remote_device *isci_device = tmf->device;
417 struct isci_request *request;
418 int ret = TMF_RESP_FUNC_FAILED;
419 unsigned long flags;
420
421 /* sanity check, return TMF_RESP_FUNC_FAILED
422 * if the device is not there and ready.
423 */
424 if (!isci_device || isci_device->status != isci_ready_for_io) {
425 dev_dbg(&isci_host->pdev->dev,
426 "%s: isci_device = %p not ready (%d)\n",
427 __func__,
428 isci_device, isci_device ? isci_device->status : isci_freed);
429 return TMF_RESP_FUNC_FAILED;
430 } else
431 dev_dbg(&isci_host->pdev->dev,
432 "%s: isci_device = %p\n",
433 __func__, isci_device);
434
435 sci_device = to_sci_dev(isci_device);
436
437 /* Assign the pointer to the TMF's completion kernel wait structure. */
438 tmf->complete = &completion;
439
440 isci_task_request_build(
441 isci_host,
442 &request,
443 tmf
444 );
445
446 if (!request) {
447 dev_warn(&isci_host->pdev->dev,
448 "%s: isci_task_request_build failed\n",
449 __func__);
450 return TMF_RESP_FUNC_FAILED;
451 }
452
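/*
 * Create and start the timeout timer while holding scic_lock; the isci
 * timer callback runs under the same exclusion (see the note in
 * isci_tmf_timeout_cb), so the timeout cannot race with starting the
 * TMF below or with its completion.
 */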
453 /* Allocate the TMF timeout timer. */
454 spin_lock_irqsave(&isci_host->scic_lock, flags);
455 tmf->timeout_timer = isci_timer_create(isci_host, request, isci_tmf_timeout_cb);
456
457 /* Start the timer. */
458 if (tmf->timeout_timer)
459 isci_timer_start(tmf->timeout_timer, timeout_ms);
460 else
461 dev_warn(&isci_host->pdev->dev,
462 "%s: isci_timer_create failed!!!!\n",
463 __func__);
464
465 /* start the TMF io. */
466 status = scic_controller_start_task(
467 isci_host->core_controller,
468 sci_device,
469 request->sci_request_handle,
470 SCI_CONTROLLER_INVALID_IO_TAG
471 );
472
473 if (status != SCI_SUCCESS) {
474 dev_warn(&isci_host->pdev->dev,
475 "%s: start_io failed - status = 0x%x, request = %p\n",
476 __func__,
477 status,
478 request);
479 goto cleanup_request;
480 }
481
482 /* Call the users callback, if any. */
483 if (tmf->cb_state_func != NULL)
484 tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
485
486 /* Change the state of the TMF-bearing request to "started". */
487 isci_request_change_state(request, started);
488
489 /* add the request to the remote device request list. */
490 list_add(&request->dev_node, &isci_device->reqs_in_process);
491
492 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
493
494 /* Wait for the TMF to complete, or a timeout. */
495 wait_for_completion(&completion);
496
497 isci_print_tmf(tmf);
498
499 if (tmf->status == SCI_SUCCESS)
500 ret = TMF_RESP_FUNC_COMPLETE;
501 else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
502 dev_dbg(&isci_host->pdev->dev,
503 "%s: tmf.status == "
504 "SCI_FAILURE_IO_RESPONSE_VALID\n",
505 __func__);
506 ret = TMF_RESP_FUNC_COMPLETE;
507 }
508 /* Else - leave the default "failed" status alone. */
509
510 dev_dbg(&isci_host->pdev->dev,
511 "%s: completed request = %p\n",
512 __func__,
513 request);
514
515 if (request->io_request_completion != NULL) {
516
517 /* The fact that this is non-NULL for a TMF request
518 * means there is a thread waiting for this TMF to
519 * finish.
520 */
521 complete(request->io_request_completion);
522 }
523
524 spin_lock_irqsave(&isci_host->scic_lock, flags);
525
526 cleanup_request:
527
528 /* Clean up the timer if needed. */
529 if (tmf->timeout_timer) {
530 isci_del_timer(isci_host, tmf->timeout_timer);
531 tmf->timeout_timer = NULL;
532 }
533
534 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
535
536 isci_request_free(isci_host, request);
537
538 return ret;
539 }
540
541 void isci_task_build_tmf(
542 struct isci_tmf *tmf,
543 struct isci_remote_device *isci_device,
544 enum isci_tmf_function_codes code,
545 void (*tmf_sent_cb)(enum isci_tmf_cb_state,
546 struct isci_tmf *,
547 void *),
548 struct isci_request *old_request)
549 {
550 dev_dbg(&isci_device->isci_port->isci_host->pdev->dev,
551 "%s: isci_device = %p\n", __func__, isci_device);
552
553 memset(tmf, 0, sizeof(*tmf));
554
555 tmf->device = isci_device;
556 tmf->tmf_code = code;
557 tmf->timeout_timer = NULL;
558 tmf->cb_state_func = tmf_sent_cb;
559 tmf->cb_data = old_request;
560 tmf->io_tag = old_request->io_tag;
561
562 }
563
564 static struct isci_request *isci_task_get_request_from_task(
565 struct sas_task *task,
566 struct isci_host **isci_host,
567 struct isci_remote_device **isci_device)
568 {
569
570 struct isci_request *request = NULL;
571 unsigned long flags;
572
573 spin_lock_irqsave(&task->task_state_lock, flags);
574
575 request = task->lldd_task;
576
577 /* If task is already done, the request isn't valid */
578 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
579 (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
580 (request != NULL)) {
581
582 if (isci_host != NULL)
583 *isci_host = request->isci_host;
584
585 if (isci_device != NULL)
586 *isci_device = request->isci_device;
587 }
588
589 spin_unlock_irqrestore(&task->task_state_lock, flags);
590
591 return request;
592 }
593
594 /**
595 * isci_task_validate_request_to_abort() - This function checks the given I/O
596 * against the "started" state. If the request is still "started", its
597 * state is changed to "aborted". NOTE: isci_host->scic_lock MUST BE HELD
598 * BEFORE CALLING THIS FUNCTION.
599 * @isci_request: This parameter specifies the request object to control.
600 * @isci_host: This parameter specifies the ISCI host object
601 * @isci_device: This is the device to which the request is pending.
602 * @aborted_io_completion: This is a completion structure that will be added to
603 * the request in case it is changed to aborting; this completion is
604 * triggered when the request is fully completed.
605 *
606 * Either "started" on successful change of the task status to "aborted", or
607 * "unallocated" if the task cannot be controlled.
608 */
609 static enum isci_request_status isci_task_validate_request_to_abort(
610 struct isci_request *isci_request,
611 struct isci_host *isci_host,
612 struct isci_remote_device *isci_device,
613 struct completion *aborted_io_completion)
614 {
615 enum isci_request_status old_state = unallocated;
616
617 /* Only abort the task if it's in the
618 * device's request_in_process list
619 */
620 if (isci_request && !list_empty(&isci_request->dev_node)) {
621 old_state = isci_request_change_started_to_aborted(
622 isci_request, aborted_io_completion);
623
624 }
625
626 return old_state;
627 }
628
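/*
 * A "completed loiterer" is a request that has already completed in the
 * hardware but is still linked on the device's request list: it must be
 * unlinked, reported to libsas via task_done() (unless it is in the
 * abort path), and then freed.
 */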
629 static void isci_request_cleanup_completed_loiterer(
630 struct isci_host *isci_host,
631 struct isci_remote_device *isci_device,
632 struct isci_request *isci_request)
633 {
634 struct sas_task *task;
635 unsigned long flags;
636
637 task = (isci_request->ttype == io_task)
638 ? isci_request_access_task(isci_request)
639 : NULL;
640
641 dev_dbg(&isci_host->pdev->dev,
642 "%s: isci_device=%p, request=%p, task=%p\n",
643 __func__, isci_device, isci_request, task);
644
645 spin_lock_irqsave(&isci_host->scic_lock, flags);
646 list_del_init(&isci_request->dev_node);
647 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
648
649 if (task != NULL) {
650
651 spin_lock_irqsave(&task->task_state_lock, flags);
652 task->lldd_task = NULL;
653
654 isci_set_task_doneflags(task);
655
656 /* If this task is not in the abort path, call task_done. */
657 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
658
659 spin_unlock_irqrestore(&task->task_state_lock, flags);
660 task->task_done(task);
661 } else
662 spin_unlock_irqrestore(&task->task_state_lock, flags);
663 }
664 isci_request_free(isci_host, isci_request);
665 }
666
667 /**
668 * isci_termination_timed_out() - This function handles a request for
669 * which the wait for termination has timed-out.
670 *
671 * @host: This SCU.
672 * @request: The I/O request being terminated.
673 */
674 static void
675 isci_termination_timed_out(
676 struct isci_host * host,
677 struct isci_request * request
678 )
679 {
680 unsigned long state_flags;
681
682 dev_warn(&host->pdev->dev,
683 "%s: host = %p; request = %p\n",
684 __func__, host, request);
685
686 /* At this point, the request to terminate
687 * has timed out. The best we can do is to
688 * have the request die a silent death
689 * if it ever completes.
690 */
691 spin_lock_irqsave(&request->state_lock, state_flags);
692
693 if (request->status == started) {
694
695 /* Set the request state to "dead",
696 * and clear the task pointer so that an actual
697 * completion event callback doesn't do
698 * anything.
699 */
700 request->status = dead;
701
702 /* Clear the timeout completion event pointer.*/
703 request->io_request_completion = NULL;
704
705 if (request->ttype == io_task) {
706
707 /* Break links with the sas_task. */
708 if (request->ttype_ptr.io_task_ptr != NULL) {
709
710 request->ttype_ptr.io_task_ptr->lldd_task = NULL;
711 request->ttype_ptr.io_task_ptr = NULL;
712 }
713 }
714 }
715 spin_unlock_irqrestore(&request->state_lock, state_flags);
716 }
717
718
719 /**
720 * isci_terminate_request_core() - This function will terminate the given
721 * request, and wait for it to complete. This function must only be called
722 * from a thread that can wait. Note that the request is terminated and
723 * completed (back to the host, if started there).
724 * @isci_host: This SCU.
725 * @isci_device: The target.
726 * @isci_request: The I/O request to be terminated.
727 *
728 *
729 */
730 static void isci_terminate_request_core(
731 struct isci_host *isci_host,
732 struct isci_remote_device *isci_device,
733 struct isci_request *isci_request)
734 {
735 enum sci_status status = SCI_SUCCESS;
736 bool was_terminated = false;
737 bool needs_cleanup_handling = false;
738 enum isci_request_status request_status;
739 unsigned long flags;
740 unsigned long timeout_remaining;
741
742
743 dev_dbg(&isci_host->pdev->dev,
744 "%s: device = %p; request = %p\n",
745 __func__, isci_device, isci_request);
746
747 spin_lock_irqsave(&isci_host->scic_lock, flags);
748
749 /* Note that we are not going to control
750 * the target to abort the request.
751 */
752 isci_request->complete_in_target = true;
753
754 /* Make sure the request wasn't just sitting around signalling
755 * device condition (if the request handle is NULL, then the
756 * request completed but needed additional handling here).
757 */
758 if (isci_request->sci_request_handle != NULL) {
759 was_terminated = true;
760 needs_cleanup_handling = true;
761 status = scic_controller_terminate_request(
762 isci_host->core_controller,
763 to_sci_dev(isci_device),
764 isci_request->sci_request_handle
765 );
766 }
767 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
768
769 /*
770 * The only time the request to terminate will
771 * fail is when the io request is completed and
772 * being aborted.
773 */
774 if (status != SCI_SUCCESS) {
775 dev_err(&isci_host->pdev->dev,
776 "%s: scic_controller_terminate_request"
777 " returned = 0x%x\n",
778 __func__,
779 status);
780 /* Clear the completion pointer from the request. */
781 isci_request->io_request_completion = NULL;
782
783 } else {
784 if (was_terminated) {
785 dev_dbg(&isci_host->pdev->dev,
786 "%s: before completion wait (%p)\n",
787 __func__,
788 isci_request->io_request_completion);
789
790 /* Wait here for the request to complete. */
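/* If this wait times out, isci_termination_timed_out() below marks
 * the request "dead" and detaches it from the sas_task, so a late
 * completion event is silently dropped.
 */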
791 #define TERMINATION_TIMEOUT_MSEC 50
792 timeout_remaining
793 = wait_for_completion_timeout(
794 isci_request->io_request_completion,
795 msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
796
797 if (!timeout_remaining) {
798
799 isci_termination_timed_out(isci_host,
800 isci_request);
801
802 dev_err(&isci_host->pdev->dev,
803 "%s: *** Timeout waiting for "
804 "termination(%p/%p)\n",
805 __func__,
806 isci_request->io_request_completion,
807 isci_request);
808
809 } else
810 dev_dbg(&isci_host->pdev->dev,
811 "%s: after completion wait (%p)\n",
812 __func__,
813 isci_request->io_request_completion);
814 }
815 /* Clear the completion pointer from the request. */
816 isci_request->io_request_completion = NULL;
817
818 /* Peek at the status of the request. This will tell
819 * us if there was special handling on the request such that it
820 * needs to be detached and freed here.
821 */
822 spin_lock_irqsave(&isci_request->state_lock, flags);
823 request_status = isci_request_get_state(isci_request);
824
825 if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
826 && ((request_status == aborted)
827 || (request_status == aborting)
828 || (request_status == terminating)
829 || (request_status == completed)
830 || (request_status == dead)
831 )
832 ) {
833
834 /* The completion routine won't free a request in
835 * the aborted/aborting/etc. states, so we do
836 * it here.
837 */
838 needs_cleanup_handling = true;
839 }
840 spin_unlock_irqrestore(&isci_request->state_lock, flags);
841
842 if (needs_cleanup_handling)
843 isci_request_cleanup_completed_loiterer(
844 isci_host, isci_device, isci_request
845 );
846 }
847 }
848
849 static void isci_terminate_request(
850 struct isci_host *isci_host,
851 struct isci_remote_device *isci_device,
852 struct isci_request *isci_request,
853 enum isci_request_status new_request_state)
854 {
855 enum isci_request_status old_state;
856 DECLARE_COMPLETION_ONSTACK(request_completion);
857
858 /* Change state to "new_request_state" if it is currently "started" */
859 old_state = isci_request_change_started_to_newstate(
860 isci_request,
861 &request_completion,
862 new_request_state
863 );
864
865 if ((old_state == started) || (old_state == completed)) {
866
867 /* If the old_state is started:
868 * This request was not already being aborted. If it had been,
869 * then the aborting I/O (i.e. the TMF request) would not be in
870 * the aborting state, and thus would be terminated here. Note
871 * that since the TMF completion's call to the kernel function
872 * "complete()" does not happen until the pending I/O request
873 * terminate fully completes, we do not have to implement a
874 * special wait here for already aborting requests - the
875 * termination of the TMF request will force the request
876 * to finish its already-started terminate.
877 *
878 * If old_state == completed:
879 * This request completed from the SCU hardware perspective
880 * and now just needs cleaning up in terms of freeing the
881 * request and potentially calling up to libsas.
882 */
883 isci_terminate_request_core(isci_host, isci_device,
884 isci_request);
885 }
886 }
887
888 /**
889 * isci_terminate_pending_requests() - This function will change the state of all
890 * requests on the given device to "aborting", will terminate the
891 * requests, and wait for them to complete. This function must only be
892 * called from a thread that can wait. Note that the requests are all
893 * terminated and completed (back to the host, if started there).
894 * @isci_host: This parameter specifies SCU.
895 * @isci_device: This parameter specifies the target.
896 *
897 *
898 */
899 void isci_terminate_pending_requests(
900 struct isci_host *isci_host,
901 struct isci_remote_device *isci_device,
902 enum isci_request_status new_request_state)
903 {
904 struct isci_request *request;
905 struct isci_request *next_request;
906 unsigned long flags;
907 struct list_head aborted_request_list;
908
909 INIT_LIST_HEAD(&aborted_request_list);
910
911 dev_dbg(&isci_host->pdev->dev,
912 "%s: isci_device = %p (new request state = %d)\n",
913 __func__, isci_device, new_request_state);
914
915 spin_lock_irqsave(&isci_host->scic_lock, flags);
916
917 /* Move all of the pending requests off of the device list. */
918 list_splice_init(&isci_device->reqs_in_process,
919 &aborted_request_list);
920
921 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
922
923 /* Iterate through the now-local list. */
924 list_for_each_entry_safe(request, next_request,
925 &aborted_request_list, dev_node) {
926
927 dev_warn(&isci_host->pdev->dev,
928 "%s: isci_device=%p request=%p; task=%p\n",
929 __func__,
930 isci_device, request,
931 ((request->ttype == io_task)
932 ? isci_request_access_task(request)
933 : NULL));
934
935 /* Mark all still pending I/O with the selected next
936 * state, terminate and free it.
937 */
938 isci_terminate_request(isci_host, isci_device,
939 request, new_request_state
940 );
941 }
942 }
943
944 /**
945 * isci_task_send_lu_reset_sas() - This function is called by one of the SAS Domain
946 * Template functions.
947 * @lun: This parameter specifies the lun to be reset.
948 *
949 * status, zero indicates success.
950 */
951 static int isci_task_send_lu_reset_sas(
952 struct isci_host *isci_host,
953 struct isci_remote_device *isci_device,
954 u8 *lun)
955 {
956 struct isci_tmf tmf;
957 int ret = TMF_RESP_FUNC_FAILED;
958
959 dev_dbg(&isci_host->pdev->dev,
960 "%s: isci_host = %p, isci_device = %p\n",
961 __func__, isci_host, isci_device);
962 /* Send the LUN reset to the target. By the time the call returns,
963 * the TMF has either fully executed in the target (in which case the
964 * return value is "TMF_RESP_FUNC_COMPLETE"), or the request timed-out
965 * or was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
966 */
967 isci_task_build_tmf(&tmf, isci_device, isci_tmf_ssp_lun_reset, NULL,
968 NULL);
969
970 #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
971 ret = isci_task_execute_tmf(isci_host, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
972
973 if (ret == TMF_RESP_FUNC_COMPLETE)
974 dev_dbg(&isci_host->pdev->dev,
975 "%s: %p: TMF_LU_RESET passed\n",
976 __func__, isci_device);
977 else
978 dev_dbg(&isci_host->pdev->dev,
979 "%s: %p: TMF_LU_RESET failed (%x)\n",
980 __func__, isci_device, ret);
981
982 return ret;
983 }
984
985 /**
986 * isci_task_lu_reset() - This function is one of the SAS Domain Template
987 * functions. This is one of the Task Management functions called by libsas,
988 * to reset the given lun. Note the assumption that while this call is
989 * executing, no I/O will be sent by the host to the device.
990 * @lun: This parameter specifies the lun to be reset.
991 *
992 * status, zero indicates success.
993 */
994 int isci_task_lu_reset(
995 struct domain_device *domain_device,
996 u8 *lun)
997 {
998 struct isci_host *isci_host = NULL;
999 struct isci_remote_device *isci_device = NULL;
1000 int ret;
1001 bool device_stopping = false;
1002
1003 if (domain_device == NULL) {
1004 pr_warn("%s: domain_device == NULL\n", __func__);
1005 return TMF_RESP_FUNC_FAILED;
1006 }
1007
1008 isci_device = isci_dev_from_domain_dev(domain_device);
1009
1010 if (domain_device->port != NULL)
1011 isci_host = isci_host_from_sas_ha(domain_device->port->ha);
1012
1013 pr_debug("%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
1014 __func__, domain_device, isci_host, isci_device);
1015
1016 if (isci_device != NULL)
1017 device_stopping = (isci_device->status == isci_stopping)
1018 || (isci_device->status == isci_stopped);
1019
1020 /* If there is a device reset pending on any request in the
1021 * device's list, fail this LUN reset request in order to
1022 * escalate to the device reset.
1023 */
1024 if ((isci_device == NULL) ||
1025 (isci_host == NULL) ||
1026 ((isci_host != NULL) &&
1027 (isci_device != NULL) &&
1028 (device_stopping ||
1029 (isci_device_is_reset_pending(isci_host, isci_device))))) {
1030 /* isci_host may be NULL here, so do not dereference it. */
1031 pr_warn("%s: No dev (%p), no host (%p), or "
1032 "RESET PENDING: domain_device=%p\n",
1033 __func__, isci_device, isci_host, domain_device);
1034 return TMF_RESP_FUNC_FAILED;
1035 }
1036
1037 /* Send the task management part of the reset. */
1038 if (sas_protocol_ata(domain_device->tproto)) {
1039 ret = isci_task_send_lu_reset_sata(
1040 isci_host, isci_device, lun
1041 );
1042 } else
1043 ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
1044
1045 /* If the LUN reset worked, all the I/O can now be terminated. */
1046 if (ret == TMF_RESP_FUNC_COMPLETE)
1047 /* Terminate all I/O now. */
1048 isci_terminate_pending_requests(isci_host,
1049 isci_device,
1050 terminating);
1051
1052 return ret;
1053 }
1054
1055
1056 /* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
1057 int isci_task_clear_nexus_port(struct asd_sas_port *port)
1058 {
1059 return TMF_RESP_FUNC_FAILED;
1060 }
1061
1062
1063
1064 int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
1065 {
1066 return TMF_RESP_FUNC_FAILED;
1067 }
1068
1069 int isci_task_I_T_nexus_reset(struct domain_device *dev)
1070 {
1071 return TMF_RESP_FUNC_FAILED;
1072 }
1073
1074
1075 /* Task Management Functions. Must be called from process context. */
1076
1077 /**
1078 * isci_abort_task_process_cb() - This is a helper function for the abort task
1079 * TMF command. It manages the request state with respect to the successful
1080 * transmission / completion of the abort task request.
1081 * @cb_state: This parameter specifies when this function was called - after
1082 * the TMF request has been started or after it has timed-out.
1083 * @tmf: This parameter specifies the TMF in progress.
1084 * @cb_data: This parameter is the original I/O request being aborted.
1085 *
1086 */
1087 static void isci_abort_task_process_cb(
1088 enum isci_tmf_cb_state cb_state,
1089 struct isci_tmf *tmf,
1090 void *cb_data)
1091 {
1092 struct isci_request *old_request;
1093
1094 old_request = (struct isci_request *)cb_data;
1095
1096 dev_dbg(&old_request->isci_host->pdev->dev,
1097 "%s: tmf=%p, old_request=%p\n",
1098 __func__, tmf, old_request);
1099
1100 switch (cb_state) {
1101
1102 case isci_tmf_started:
1103 /* The TMF has been started. Nothing to do here, since the
1104 * request state was already set to "aborted" by the abort
1105 * task function.
1106 */
1107 BUG_ON(old_request->status != aborted);
1108 break;
1109
1110 case isci_tmf_timed_out:
1111
1112 /* Set the task's state to "aborting", since the abort task
1113 * function thread set it to "aborted" (above) in anticipation
1114 * of the task management request working correctly. Since the
1115 * timeout has now fired, the TMF request failed. We set the
1116 * state such that the request completion will indicate the
1117 * device is no longer present.
1118 */
1119 isci_request_change_state(old_request, aborting);
1120 break;
1121
1122 default:
1123 dev_err(&old_request->isci_host->pdev->dev,
1124 "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
1125 __func__, cb_state, tmf, old_request);
1126 break;
1127 }
1128 }
1129
1130 /**
1131 * isci_task_abort_task() - This function is one of the SAS Domain Template
1132 * functions. This function is called by libsas to abort a specified task.
1133 * @task: This parameter specifies the SAS task to abort.
1134 *
1135 * status, zero indicates success.
1136 */
1137 int isci_task_abort_task(struct sas_task *task)
1138 {
1139 DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
1140 struct isci_request *old_request = NULL;
1141 enum isci_request_status old_state;
1142 struct isci_remote_device *isci_device = NULL;
1143 struct isci_host *isci_host = NULL;
1144 struct isci_tmf tmf;
1145 int ret = TMF_RESP_FUNC_FAILED;
1146 unsigned long flags;
1147 bool any_dev_reset = false;
1148 bool device_stopping;
1149
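/*
 * Abort sequence: look up the request backing this sas_task, fail the
 * abort for STP/SATA devices and for pending device resets (so the
 * error handler escalates), skip the TMF entirely for SMP, stopping
 * devices and requests already completed in the target, and otherwise
 * send an ABORT TASK TMF and then terminate the local request.
 */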
1150 /* Get the isci_request reference from the task. Note that
1151 * this check does not depend on the pending request list
1152 * in the device, because tasks driving resets may land here
1153 * after completion in the core.
1154 */
1155 old_request = isci_task_get_request_from_task(task, &isci_host,
1156 &isci_device);
1157
1158 dev_dbg(&isci_host->pdev->dev,
1159 "%s: task = %p\n", __func__, task);
1160
1161 /* Check if the device has been / is currently being removed.
1162 * If so, no task management will be done, and the I/O will
1163 * be terminated.
1164 */
1165 device_stopping = (isci_device->status == isci_stopping)
1166 || (isci_device->status == isci_stopped);
1167
1168 /* This version of the driver will fail abort requests for
1169 * SATA/STP. Failing the abort request this way will cause the
1170 * SCSI error handler thread to escalate to LUN reset
1171 */
1172 if (sas_protocol_ata(task->task_proto) && !device_stopping) {
1173 dev_warn(&isci_host->pdev->dev,
1174 " task %p is for a STP/SATA device;"
1175 " returning TMF_RESP_FUNC_FAILED\n"
1176 " to cause a LUN reset...\n", task);
1177 return TMF_RESP_FUNC_FAILED;
1178 }
1179
1180 dev_dbg(&isci_host->pdev->dev,
1181 "%s: old_request == %p\n", __func__, old_request);
1182
1183 if (!device_stopping)
1184 any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
1185
1186 spin_lock_irqsave(&task->task_state_lock, flags);
1187
1188 /* Don't do resets to stopping devices. */
1189 if (device_stopping) {
1190
1191 task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
1192 any_dev_reset = false;
1193
1194 } else /* See if there is a pending device reset for this device. */
1195 any_dev_reset = any_dev_reset
1196 || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
1197
1198 /* If the extraction of the request reference from the task
1199 * failed, then the request has been completed (or if there is a
1200 * pending reset then this abort request function must be failed
1201 * in order to escalate to the target reset).
1202 */
1203 if ((old_request == NULL) || any_dev_reset) {
1204
1205 /* If the device reset task flag is set, fail the task
1206 * management request. Otherwise, the original request
1207 * has completed.
1208 */
1209 if (any_dev_reset) {
1210
1211 /* Turn off the task's DONE to make sure this
1212 * task is escalated to a target reset.
1213 */
1214 task->task_state_flags &= ~SAS_TASK_STATE_DONE;
1215
1216 /* Make the reset happen as soon as possible. */
1217 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
1218
1219 spin_unlock_irqrestore(&task->task_state_lock, flags);
1220
1221 /* Fail the task management request in order to
1222 * escalate to the target reset.
1223 */
1224 ret = TMF_RESP_FUNC_FAILED;
1225
1226 dev_dbg(&isci_host->pdev->dev,
1227 "%s: Failing task abort in order to "
1228 "escalate to target reset because\n"
1229 "SAS_TASK_NEED_DEV_RESET is set for "
1230 "task %p on dev %p\n",
1231 __func__, task, isci_device);
1232
1233
1234 } else {
1235 /* The request has already completed and there
1236 * is nothing to do here other than to set the task
1237 * done bit, and indicate that the task abort function
1238 * was successful.
1239 */
1240 isci_set_task_doneflags(task);
1241
1242 spin_unlock_irqrestore(&task->task_state_lock, flags);
1243
1244 ret = TMF_RESP_FUNC_COMPLETE;
1245
1246 dev_dbg(&isci_host->pdev->dev,
1247 "%s: abort task not needed for %p\n",
1248 __func__, task);
1249 }
1250
1251 return ret;
1252 }
1253 else
1254 spin_unlock_irqrestore(&task->task_state_lock, flags);
1255
1256 spin_lock_irqsave(&isci_host->scic_lock, flags);
1257
1258 /* Check the request status and change it to "aborted" if it is
1259 * currently "started"; if so, set the I/O kernel completion
1260 * struct that will be triggered when the request completes.
1261 */
1262 old_state = isci_task_validate_request_to_abort(
1263 old_request, isci_host, isci_device,
1264 &aborted_io_completion);
1265 if ((old_state != started) && (old_state != completed)) {
1266
1267 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1268
1269 /* The request was already being handled by someone else (because
1270 * they got to set the state away from started).
1271 */
1272 dev_dbg(&isci_host->pdev->dev,
1273 "%s: device = %p; old_request %p already being aborted\n",
1274 __func__,
1275 isci_device, old_request);
1276
1277 return TMF_RESP_FUNC_COMPLETE;
1278 }
1279 if ((task->task_proto == SAS_PROTOCOL_SMP)
1280 || device_stopping
1281 || old_request->complete_in_target
1282 ) {
1283
1284 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1285
1286 dev_dbg(&isci_host->pdev->dev,
1287 "%s: SMP request (%d)"
1288 " or device is stopping (%d)"
1289 " or complete_in_target (%d), thus no TMF\n",
1290 __func__, (task->task_proto == SAS_PROTOCOL_SMP),
1291 device_stopping, old_request->complete_in_target);
1292
1293 /* Set the state on the task. */
1294 isci_task_all_done(task);
1295
1296 ret = TMF_RESP_FUNC_COMPLETE;
1297
1298 /* Stopping and SMP devices are not sent a TMF, and are not
1299 * reset, but the outstanding I/O request is terminated below.
1300 */
1301 } else {
1302 /* Fill in the tmf structure */
1303 isci_task_build_tmf(&tmf, isci_device, isci_tmf_ssp_task_abort,
1304 isci_abort_task_process_cb, old_request);
1305
1306 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1307
1308 #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
1309 ret = isci_task_execute_tmf(isci_host, &tmf,
1310 ISCI_ABORT_TASK_TIMEOUT_MS);
1311
1312 if (ret != TMF_RESP_FUNC_COMPLETE)
1313 dev_err(&isci_host->pdev->dev,
1314 "%s: isci_task_send_tmf failed\n",
1315 __func__);
1316 }
1317 if (ret == TMF_RESP_FUNC_COMPLETE) {
1318 old_request->complete_in_target = true;
1319
1320 /* Clean up the request on our side, and wait for the aborted I/O to
1321 * complete.
1322 */
1323 isci_terminate_request_core(isci_host, isci_device, old_request);
1324 }
1325
1326 /* Make sure we do not leave a reference to aborted_io_completion */
1327 old_request->io_request_completion = NULL;
1328 return ret;
1329 }
1330
1331 /**
1332 * isci_task_abort_task_set() - This function is one of the SAS Domain Template
1333 * functions. This is one of the Task Management functions called by libsas,
1334 * to abort all task for the given lun.
1335 * @d_device: This parameter specifies the domain device associated with this
1336 * request.
1337 * @lun: This parameter specifies the lun associated with this request.
1338 *
1339 * status, zero indicates success.
1340 */
1341 int isci_task_abort_task_set(
1342 struct domain_device *d_device,
1343 u8 *lun)
1344 {
1345 return TMF_RESP_FUNC_FAILED;
1346 }
1347
1348
1349 /**
1350 * isci_task_clear_aca() - This function is one of the SAS Domain Template
1351 * functions. This is one of the Task Management functions called by libsas.
1352 * @d_device: This parameter specifies the domain device associated with this
1353 * request.
1354 * @lun: This parameter specifies the lun associated with this request.
1355 *
1356 * status, zero indicates success.
1357 */
1358 int isci_task_clear_aca(
1359 struct domain_device *d_device,
1360 u8 *lun)
1361 {
1362 return TMF_RESP_FUNC_FAILED;
1363 }
1364
1365
1366
1367 /**
1368 * isci_task_clear_task_set() - This function is one of the SAS Domain Template
1369 * functions. This is one of the Task Management functions called by libsas.
1370 * @d_device: This parameter specifies the domain device associated with this
1371 * request.
1372 * @lun: This parameter specifies the lun associated with this request.
1373 *
1374 * status, zero indicates success.
1375 */
1376 int isci_task_clear_task_set(
1377 struct domain_device *d_device,
1378 u8 *lun)
1379 {
1380 return TMF_RESP_FUNC_FAILED;
1381 }
1382
1383
1384 /**
1385 * isci_task_query_task() - This function is implemented to cause libsas to
1386 * correctly escalate the failed abort to a LUN or target reset (this is
1387 * because the sas_scsi_find_task libsas function does not correctly interpret
1388 * all return codes from the abort task call). When TMF_RESP_FUNC_SUCC is
1389 * returned, libsas turns this into a LUN reset; when FUNC_FAILED is
1390 * returned, libsas will turn this into a target reset
1391 * @task: This parameter specifies the sas task being queried.
1392 * @lun: This parameter specifies the lun associated with this request.
1393 *
1394 * status, zero indicates success.
1395 */
1396 int isci_task_query_task(
1397 struct sas_task *task)
1398 {
1399 /* See if there is a pending device reset for this device. */
1400 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
1401 return TMF_RESP_FUNC_FAILED;
1402 else
1403 return TMF_RESP_FUNC_SUCC;
1404 }
1405
1406 /**
1407 * isci_task_request_complete() - This function is called by the sci core when
1408 * a task request completes.
1409 * @isci_host: This parameter specifies the ISCI host object
1410 * @request: This parameter is the completed isci_request object.
1411 * @completion_status: This parameter specifies the completion status from the
1412 * sci core.
1413 *
1414 * none.
1415 */
1416 void isci_task_request_complete(
1417 struct isci_host *isci_host,
1418 struct isci_request *request,
1419 enum sci_task_status completion_status)
1420 {
1421 struct isci_remote_device *isci_device = request->isci_device;
1422 enum isci_request_status old_state;
1423 struct isci_tmf *tmf = isci_request_access_tmf(request);
1424 struct completion *tmf_complete;
1425
1426 dev_dbg(&isci_host->pdev->dev,
1427 "%s: request = %p, status=%d\n",
1428 __func__, request, completion_status);
1429
1430 old_state = isci_request_change_state(request, completed);
1431
1432 tmf->status = completion_status;
1433 request->complete_in_target = true;
1434
1435 if (SAS_PROTOCOL_SSP == tmf->proto) {
1436
1437 memcpy(&tmf->resp.resp_iu,
1438 scic_io_request_get_response_iu_address(
1439 request->sci_request_handle
1440 ),
1441 sizeof(struct sci_ssp_response_iu));
1442
1443 } else if (SAS_PROTOCOL_SATA == tmf->proto) {
1444
1445 memcpy(&tmf->resp.d2h_fis,
1446 scic_stp_io_request_get_d2h_reg_address(
1447 request->sci_request_handle
1448 ),
1449 sizeof(struct sata_fis_reg_d2h)
1450 );
1451 }
1452
1453 /* Manage the timer if it is still running. */
1454 if (tmf->timeout_timer) {
1455 isci_del_timer(isci_host, tmf->timeout_timer);
1456 tmf->timeout_timer = NULL;
1457 }
1458
1459 /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
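/* Cache the completion pointer: the request is returned to the
 * "unallocated" state and unlinked from the device list below before
 * the waiter in isci_task_execute_tmf() is finally signalled.
 */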
1460 tmf_complete = tmf->complete;
1461
1462 scic_controller_complete_task(
1463 isci_host->core_controller,
1464 to_sci_dev(isci_device),
1465 request->sci_request_handle
1466 );
1467 /* NULL the request handle to make sure it cannot be terminated
1468 * or completed again.
1469 */
1470 request->sci_request_handle = NULL;
1471
1472 isci_request_change_state(request, unallocated);
1473 list_del_init(&request->dev_node);
1474
1475 /* The task management part completes last. */
1476 complete(tmf_complete);
1477 }
1478
1479
1480 /**
1481 * isci_task_ssp_request_get_lun() - This function is called by the sci core to
1482 * retrieve the lun for a given task request.
1483 * @request: This parameter is the isci_request object.
1484 *
1485 * lun for specified task request.
1486 */
1487 u32 isci_task_ssp_request_get_lun(struct isci_request *request)
1488 {
1489 struct isci_tmf *isci_tmf = isci_request_access_tmf(request);
1490
1491 dev_dbg(&request->isci_host->pdev->dev,
1492 "%s: lun = %d\n", __func__, isci_tmf->lun[0]);
1493 /* @todo: build lun from array of bytes to 32 bit */
1494 return isci_tmf->lun[0];
1495 }
1496
1497 /**
1498 * isci_task_ssp_request_get_function() - This function is called by the sci
1499 * core to retrieve the function for a given task request.
1500 * @request: This parameter is the isci_request object.
1501 *
1502 * function code for specified task request.
1503 */
1504 u8 isci_task_ssp_request_get_function(struct isci_request *request)
1505 {
1506 struct isci_tmf *isci_tmf = isci_request_access_tmf(request);
1507
1508 dev_dbg(&request->isci_host->pdev->dev,
1509 "%s: func = %d\n", __func__, isci_tmf->tmf_code);
1510
1511 return isci_tmf->tmf_code;
1512 }
1513
1514 /**
1515 * isci_task_ssp_request_get_io_tag_to_manage() - This function is called by
1516 * the sci core to retrieve the io tag for a given task request.
1517 * @request: This parameter is the isci_request object.
1518 *
1519 * io tag for specified task request.
1520 */
1521 u16 isci_task_ssp_request_get_io_tag_to_manage(struct isci_request *request)
1522 {
1523 u16 io_tag = SCI_CONTROLLER_INVALID_IO_TAG;
1524
1525 if (tmf_task == request->ttype) {
1526 struct isci_tmf *tmf = isci_request_access_tmf(request);
1527 io_tag = tmf->io_tag;
1528 }
1529
1530 dev_dbg(&request->isci_host->pdev->dev,
1531 "%s: request = %p, io_tag = %d\n",
1532 __func__, request, io_tag);
1533
1534 return io_tag;
1535 }
1536
1537 /**
1538 * isci_task_ssp_request_get_response_data_address() - This function is called
1539 * by the sci core to retrieve the response data address for a given task
1540 * request.
1541 * @request: This parameter is the isci_request object.
1542 *
1543 * response data address for specified task request.
1544 */
1545 void *isci_task_ssp_request_get_response_data_address(
1546 struct isci_request *request)
1547 {
1548 struct isci_tmf *isci_tmf = isci_request_access_tmf(request);
1549
1550 return &isci_tmf->resp.resp_iu;
1551 }
1552
1553 /**
1554 * isci_task_ssp_request_get_response_data_length() - This function is called
1555 * by the sci core to retrieve the response data length for a given task
1556 * request.
1557 * @request: This parameter is the isci_request object.
1558 *
1559 * response data length for specified task request.
1560 */
1561 u32 isci_task_ssp_request_get_response_data_length(
1562 struct isci_request *request)
1563 {
1564 struct isci_tmf *isci_tmf = isci_request_access_tmf(request);
1565
1566 return sizeof(isci_tmf->resp.resp_iu);
1567 }
1568
1569 /**
1570 * isci_bus_reset_handler() - This function performs a target reset of the
1571 * device referenced by "cmd". This function is exported through the
1572 * "struct scsi_host_template" structure such that it is called when an I/O
1573 * recovery process has escalated to a target reset. Note that this function
1574 * is called from the scsi error handler event thread, so may block on calls.
1575 * @cmd: This parameter specifies the target to be reset.
1576 *
1577 * SUCCESS if the reset process was successful, else FAILED.
1578 */
1579 int isci_bus_reset_handler(struct scsi_cmnd *cmd)
1580 {
1581 unsigned long flags = 0;
1582 struct isci_host *isci_host = NULL;
1583 enum sci_status status;
1584 int base_status;
1585 struct isci_remote_device *isci_dev
1586 = isci_dev_from_domain_dev(
1587 sdev_to_domain_dev(cmd->device));
1588
1589 dev_dbg(&cmd->device->sdev_gendev,
1590 "%s: cmd %p, isci_dev %p\n",
1591 __func__, cmd, isci_dev);
1592
1593 if (!isci_dev) {
1594 dev_warn(&cmd->device->sdev_gendev,
1595 "%s: isci_dev is GONE!\n",
1596 __func__);
1597
1598 return TMF_RESP_FUNC_COMPLETE; /* Nothing to reset. */
1599 }
1600
1601 if (isci_dev->isci_port != NULL)
1602 isci_host = isci_dev->isci_port->isci_host;
1603
1604 if (isci_host != NULL)
1605 spin_lock_irqsave(&isci_host->scic_lock, flags);
1606
1607 status = scic_remote_device_reset(to_sci_dev(isci_dev));
1608 if (status != SCI_SUCCESS) {
1609
1610 if (isci_host != NULL)
1611 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1612
1613 scmd_printk(KERN_WARNING, cmd,
1614 "%s: scic_remote_device_reset(%p) returned %d!\n",
1615 __func__, isci_dev, status);
1616
1617 return TMF_RESP_FUNC_FAILED;
1618 }
1619 if (isci_host != NULL)
1620 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1621
1622 /* Make sure all pending requests are able to be fully terminated. */
1623 isci_device_clear_reset_pending(isci_dev);
1624
1625 /* Terminate in-progress I/O now. */
1626 isci_remote_device_nuke_requests(isci_dev);
1627
1628 /* Call into the libsas default handler (which calls sas_phy_reset). */
1629 base_status = sas_eh_bus_reset_handler(cmd);
1630
1631 if (base_status != SUCCESS) {
1632
1633 /* There can be cases where the resets to individual devices
1634 * behind an expander will fail because of an unplug of the
1635 * expander itself.
1636 */
1637 scmd_printk(KERN_WARNING, cmd,
1638 "%s: sas_eh_bus_reset_handler(%p) returned %d!\n",
1639 __func__, cmd, base_status);
1640 }
1641
1642 /* WHAT TO DO HERE IF sas_phy_reset FAILS? */
1643
1644 if (isci_host != NULL)
1645 spin_lock_irqsave(&isci_host->scic_lock, flags);
1646 status = scic_remote_device_reset_complete(to_sci_dev(isci_dev));
1647
1648 if (isci_host != NULL)
1649 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1650
1651 if (status != SCI_SUCCESS) {
1652 scmd_printk(KERN_WARNING, cmd,
1653 "%s: scic_remote_device_reset_complete(%p) "
1654 "returned %d!\n",
1655 __func__, isci_dev, status);
1656 }
1657 /* WHAT TO DO HERE IF scic_remote_device_reset_complete FAILS? */
1658
1659 dev_dbg(&cmd->device->sdev_gendev,
1660 "%s: cmd %p, isci_dev %p complete.\n",
1661 __func__, cmd, isci_dev);
1662
1663 return TMF_RESP_FUNC_COMPLETE;
1664 }