2677393db6f400a61e8b5f95daef4387e1504a9b
[deliverable/linux.git] / drivers / scsi / isci / core / scic_sds_stp_request.c
1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56 #include <scsi/sas.h>
57 #include "sas.h"
58 #include "sci_base_state.h"
59 #include "sci_base_state_machine.h"
60 #include "scic_io_request.h"
61 #include "scic_sds_controller.h"
62 #include "remote_device.h"
63 #include "scic_sds_request.h"
64 #include "scic_sds_stp_pio_request.h"
65 #include "scic_sds_stp_request.h"
66 #include "scic_sds_unsolicited_frame_control.h"
67 #include "sci_environment.h"
68 #include "sci_util.h"
69 #include "scu_completion_codes.h"
70 #include "scu_event_codes.h"
71 #include "scu_task_context.h"
72
73 void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req)
74 {
75 if (sci_req->was_tag_assigned_by_user == false)
76 sci_req->task_context_buffer = &sci_req->tc;
77 }
78
/**
 * scu_sata_reqeust_construct_task_context() - fill in the SCU task context
 * for any type of SATA request.
 * @sci_req: the general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: the buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * Called from the various SATA constructors once the general io request
 * construction and the command buffer assignment are complete.
 * TODO: revisit task context construction to determine what is common for
 * SSP/SMP/STP task context structures.
 */
static void scu_sata_reqeust_construct_task_context(
	struct scic_sds_request *sci_req,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *controller;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	controller = scic_sds_request_get_controller(sci_req);
	target_device = scic_sds_request_get_device(sci_req);
	target_port = scic_sds_request_get_port(sci_req);

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sci_req->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	/* Command IU length in dwords; excludes the first FIS dword, which is
	 * carried inside the TC itself (see type.words[0] below). */
	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	/* NOTE(review): type-punning the FIS through a u32 cast — assumes
	 * stp.cmd is suitably aligned; confirm against the struct layout. */
	task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;

	if (sci_req->was_tag_assigned_by_user) {
		/*
		 * Build the post context now since we have already read
		 * the data; a user-assigned tag means the TCi is known.
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sci_req->io_tag));
	} else {
		/*
		 * Build the post context now since we have already read
		 * the data.
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi.
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context. We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transfered in the body of the TC.
	 */
	dma_addr = scic_io_request_get_dma_addr(sci_req,
						((char *) &sci_req->stp.cmd) +
						sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}
182
/**
 * scic_sds_stp_non_ncq_request_construct() - general construction common to
 * all non-NCQ SATA requests.
 * @sci_req: the request being constructed.
 *
 * Currently this only flags the request as using a started-substate machine;
 * the substate machine itself is constructed by the specific request
 * constructors.
 */
static void scic_sds_stp_non_ncq_request_construct(
	struct scic_sds_request *sci_req)
{
	sci_req->has_started_substate_machine = true;
}
195
196 /**
197 *
198 * @sci_req: This parameter specifies the request to be constructed as an
199 * optimized request.
200 * @optimized_task_type: This parameter specifies whether the request is to be
201 * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
202 * value of 1 indicates NCQ.
203 *
204 * This method will perform request construction common to all types of STP
205 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
206 * returns an indication as to whether the construction was successful.
207 */
208 static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
209 u8 optimized_task_type,
210 u32 len,
211 enum dma_data_direction dir)
212 {
213 struct scu_task_context *task_context = sci_req->task_context_buffer;
214
215 /* Build the STP task context structure */
216 scu_sata_reqeust_construct_task_context(sci_req, task_context);
217
218 /* Copy over the SGL elements */
219 scic_sds_request_build_sgl(sci_req);
220
221 /* Copy over the number of bytes to be transfered */
222 task_context->transfer_length_bytes = len;
223
224 if (dir == DMA_TO_DEVICE) {
225 /*
226 * The difference between the DMA IN and DMA OUT request task type
227 * values are consistent with the difference between FPDMA READ
228 * and FPDMA WRITE values. Add the supplied task type parameter
229 * to this difference to set the task type properly for this
230 * DATA OUT (WRITE) case. */
231 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
232 - SCU_TASK_TYPE_DMA_IN);
233 } else {
234 /*
235 * For the DATA IN (READ) case, simply save the supplied
236 * optimized task type. */
237 task_context->task_type = optimized_task_type;
238 }
239 }
240
/**
 * scic_sds_stp_ncq_request_construct() - construct an STP NCQ request and its
 * associated task context data.
 * @sci_req: the request to be constructed.
 * @len: the number of bytes to be transferred.
 * @dir: the DMA direction of the transfer.
 *
 * Return: SCI_SUCCESS — currently this method always returns this value.
 */
enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req,
						   u32 len,
						   enum dma_data_direction dir)
{
	/* FPDMAQ_READ is the base optimized task type; the helper adjusts it
	 * for the write direction as needed. */
	scic_sds_stp_optimized_request_construct(sci_req,
						 SCU_TASK_TYPE_FPDMAQ_READ,
						 len, dir);
	return SCI_SUCCESS;
}
258
259 /**
260 * scu_stp_raw_request_construct_task_context -
261 * @sci_req: This parameter specifies the STP request object for which to
262 * construct a RAW command frame task context.
263 * @task_context: This parameter specifies the SCU specific task context buffer
264 * to construct.
265 *
266 * This method performs the operations common to all SATA/STP requests
267 * utilizing the raw frame method. none
268 */
269 static void scu_stp_raw_request_construct_task_context(
270 struct scic_sds_stp_request *stp_req,
271 struct scu_task_context *task_context)
272 {
273 struct scic_sds_request *sci_req = to_sci_req(stp_req);
274
275 scu_sata_reqeust_construct_task_context(sci_req, task_context);
276
277 task_context->control_frame = 0;
278 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
279 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
280 task_context->type.stp.fis_type = FIS_REGH2D;
281 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
282 }
283
/**
 * scic_stp_io_request_set_ncq_tag() - program the NCQ tag into the request's
 * task context so the hardware issues the queued command with that tag.
 * @req: the request whose task context buffer is updated.
 * @ncq_tag: the NCQ tag value to set.
 */
void scic_stp_io_request_set_ncq_tag(
	struct scic_sds_request *req,
	u16 ncq_tag)
{
	/**
	 * @note This could be made to return an error to the user if the user
	 * attempts to set the NCQ tag in the wrong state.
	 */
	req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
}
294
295 /**
296 *
297 * @sci_req:
298 *
299 * Get the next SGL element from the request. - Check on which SGL element pair
300 * we are working - if working on SLG pair element A - advance to element B -
301 * else - check to see if there are more SGL element pairs for this IO request
302 * - if there are more SGL element pairs - advance to the next pair and return
303 * element A struct scu_sgl_element*
304 */
305 static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
306 {
307 struct scu_sgl_element *current_sgl;
308 struct scic_sds_request *sci_req = to_sci_req(stp_req);
309 struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
310
311 if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
312 if (pio_sgl->sgl_pair->B.address_lower == 0 &&
313 pio_sgl->sgl_pair->B.address_upper == 0) {
314 current_sgl = NULL;
315 } else {
316 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
317 current_sgl = &pio_sgl->sgl_pair->B;
318 }
319 } else {
320 if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
321 pio_sgl->sgl_pair->next_pair_upper == 0) {
322 current_sgl = NULL;
323 } else {
324 u64 phys_addr;
325
326 phys_addr = pio_sgl->sgl_pair->next_pair_upper;
327 phys_addr <<= 32;
328 phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
329
330 pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
331 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
332 current_sgl = &pio_sgl->sgl_pair->A;
333 }
334 }
335
336 return current_sgl;
337 }
338
339 /**
340 *
341 * @sci_req:
342 * @completion_code:
343 *
344 * This method processes a TC completion. The expected TC completion is for
345 * the transmission of the H2D register FIS containing the SATA/STP non-data
346 * request. This method always successfully processes the TC completion.
347 * SCI_SUCCESS This value is always returned.
348 */
349 static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler(
350 struct scic_sds_request *sci_req,
351 u32 completion_code)
352 {
353 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
354 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
355 scic_sds_request_set_status(
356 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
357 );
358
359 sci_base_state_machine_change_state(
360 &sci_req->started_substate_machine,
361 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
362 );
363 break;
364
365 default:
366 /*
367 * All other completion status cause the IO to be complete. If a NAK
368 * was received, then it is up to the user to retry the request. */
369 scic_sds_request_set_status(
370 sci_req,
371 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
372 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
373 );
374
375 sci_base_state_machine_change_state(
376 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
377 break;
378 }
379
380 return SCI_SUCCESS;
381 }
382
/**
 * scic_sds_stp_request_non_data_await_d2h_frame_handler() - process frames
 * received while waiting for a device-to-host register FIS.
 * @sci_req: the request for which a frame has been received.
 * @frame_index: the index of the frame that has been received.
 *
 * A D2H register FIS completes the IO (with SCI_FAILURE_IO_RESPONSE_VALID so
 * the response is surfaced to the caller); any other FIS type is treated as a
 * protocol violation. In both cases the request transitions to COMPLETED and
 * the frame is returned to the controller.
 */
static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);

	if (status != SCI_SUCCESS) {
		/* Cannot decode without the header; bail before touching the
		 * frame buffer or changing state. */
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);

		return status;
	}

	switch (frame_header->fis_type) {
	case FIS_REGD2H:
		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
							      frame_index,
							      (void **)&frame_buffer);

		/* Capture the device's response FIS for the caller. */
		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
						       frame_header,
						       frame_buffer);

		/* The command has completed with error */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;

	default:
		/* Anything other than a D2H register FIS here is a protocol
		 * violation from an IO perspective. */
		dev_warn(scic_to_dev(scic),
			 "%s: IO Request:0x%p Frame Id:%d protocol "
			 "violation occurred\n", __func__, stp_req,
			 frame_index);

		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
					    SCI_FAILURE_PROTOCOL_VIOLATION);
		break;
	}

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);

	/* Frame has been decoded return it to the controller */
	scic_sds_controller_release_frame(scic, frame_index);

	return status;
}
452
453 /* --------------------------------------------------------------------------- */
454
/* Handler table for the non-data STP request started substates: first await
 * the TC completion of the transmitted H2D register FIS, then await the
 * device's D2H response FIS. Both substates allow abort. */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler,
	}
};
465
466 static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
467 void *object)
468 {
469 struct scic_sds_request *sci_req = object;
470
471 SET_STATE_HANDLER(
472 sci_req,
473 scic_sds_stp_request_started_non_data_substate_handler_table,
474 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
475 );
476
477 scic_sds_remote_device_set_working_request(
478 sci_req->target_device, sci_req
479 );
480 }
481
482 static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
483 {
484 struct scic_sds_request *sci_req = object;
485
486 SET_STATE_HANDLER(
487 sci_req,
488 scic_sds_stp_request_started_non_data_substate_handler_table,
489 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
490 );
491 }
492
493 /* --------------------------------------------------------------------------- */
494
/* Substate table for non-data STP requests; entry actions install the
 * matching handler sets from the table above. */
static const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
	},
};
503
504 enum sci_status scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req)
505 {
506 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
507
508 scic_sds_stp_non_ncq_request_construct(sci_req);
509
510 /* Build the STP task context structure */
511 scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);
512
513 sci_base_state_machine_construct(&sci_req->started_substate_machine,
514 sci_req,
515 scic_sds_stp_request_started_non_data_substate_table,
516 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE);
517
518 return SCI_SUCCESS;
519 }
520
521 #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
522
/* transmit DATA_FIS from (current sgl + offset) for input
 * parameter length. current sgl and offset are already stored in the IO request
 */
526 static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
527 struct scic_sds_request *sci_req,
528 u32 length)
529 {
530 struct scic_sds_controller *scic = sci_req->owning_controller;
531 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
532 struct scu_task_context *task_context;
533 struct scu_sgl_element *current_sgl;
534
535 /* Recycle the TC and reconstruct it for sending out DATA FIS containing
536 * for the data from current_sgl+offset for the input length
537 */
538 task_context = scic_sds_controller_get_task_context_buffer(scic,
539 sci_req->io_tag);
540
541 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
542 current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
543 else
544 current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
545
546 /* update the TC */
547 task_context->command_iu_upper = current_sgl->address_upper;
548 task_context->command_iu_lower = current_sgl->address_lower;
549 task_context->transfer_length_bytes = length;
550 task_context->type.stp.fis_type = FIS_DATA;
551
552 /* send the new TC out. */
553 return scic_controller_continue_io(sci_req);
554 }
555
556 static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
557 {
558
559 struct scu_sgl_element *current_sgl;
560 u32 sgl_offset;
561 u32 remaining_bytes_in_current_sgl = 0;
562 enum sci_status status = SCI_SUCCESS;
563 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
564
565 sgl_offset = stp_req->type.pio.request_current.sgl_offset;
566
567 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
568 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
569 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
570 } else {
571 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
572 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
573 }
574
575
576 if (stp_req->type.pio.pio_transfer_bytes > 0) {
577 if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
578 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
579 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
580 if (status == SCI_SUCCESS) {
581 stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
582
583 /* update the current sgl, sgl_offset and save for future */
584 current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
585 sgl_offset = 0;
586 }
587 } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
588 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
589 scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
590
591 if (status == SCI_SUCCESS) {
592 /* Sgl offset will be adjusted and saved for future */
593 sgl_offset += stp_req->type.pio.pio_transfer_bytes;
594 current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
595 stp_req->type.pio.pio_transfer_bytes = 0;
596 }
597 }
598 }
599
600 if (status == SCI_SUCCESS) {
601 stp_req->type.pio.request_current.sgl_offset = sgl_offset;
602 }
603
604 return status;
605 }
606
/**
 * scic_sds_stp_request_pio_data_in_copy_data_buffer() - copy data from the
 * supplied buffer into the IO request's scatter-gather region.
 * @stp_req: the request that is used for the SGL processing.
 * @data_buf: the buffer of data to be copied.
 * @len: the length of the data transfer.
 *
 * Return: SCI_SUCCESS — always (BUG_ON fires on an over-length copy in the
 * non-scatter case).
 */
static enum sci_status
scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
						  u8 *data_buf, u32 len)
{
	struct scic_sds_request *sci_req;
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	sci_req = to_sci_req(stp_req);
	ireq = sci_req_to_ireq(sci_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		/* Walk the scatterlist, mapping each page and copying at
		 * most one element's worth per iteration.
		 * NOTE(review): assumes each element fits within the mapped
		 * page (sg->offset + copy_len <= PAGE_SIZE) — confirm, since
		 * only sg_page(sg) is mapped. Also note sg_dma_len() is used
		 * for a CPU copy where sg->length would be customary. */
		while (total_len > 0) {
			struct page *page = sg_page(sg);

			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page, KM_IRQ0);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr, KM_IRQ0);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		/* No scatterlist: task->scatter is used directly as the
		 * destination buffer in this case. */
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}
655
656 /**
657 *
658 * @sci_req: The PIO DATA IN request that is to receive the data.
659 * @data_buffer: The buffer to copy from.
660 *
661 * Copy the data buffer to the io request data region. enum sci_status
662 */
663 static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
664 struct scic_sds_stp_request *sci_req,
665 u8 *data_buffer)
666 {
667 enum sci_status status;
668
669 /*
670 * If there is less than 1K remaining in the transfer request
671 * copy just the data for the transfer */
672 if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
673 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
674 sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
675
676 if (status == SCI_SUCCESS)
677 sci_req->type.pio.pio_transfer_bytes = 0;
678 } else {
679 /* We are transfering the whole frame so copy */
680 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
681 sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
682
683 if (status == SCI_SUCCESS)
684 sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
685 }
686
687 return status;
688 }
689
690 /**
691 *
692 * @sci_req:
693 * @completion_code:
694 *
695 * enum sci_status
696 */
697 static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler(
698 struct scic_sds_request *sci_req,
699 u32 completion_code)
700 {
701 enum sci_status status = SCI_SUCCESS;
702
703 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
704 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
705 scic_sds_request_set_status(
706 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
707 );
708
709 sci_base_state_machine_change_state(
710 &sci_req->started_substate_machine,
711 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
712 );
713 break;
714
715 default:
716 /*
717 * All other completion status cause the IO to be complete. If a NAK
718 * was received, then it is up to the user to retry the request. */
719 scic_sds_request_set_status(
720 sci_req,
721 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
722 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
723 );
724
725 sci_base_state_machine_change_state(
726 &sci_req->state_machine,
727 SCI_BASE_REQUEST_STATE_COMPLETED
728 );
729 break;
730 }
731
732 return status;
733 }
734
735 static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req,
736 u32 frame_index)
737 {
738 struct scic_sds_controller *scic = sci_req->owning_controller;
739 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
740 struct isci_request *ireq = sci_req_to_ireq(sci_req);
741 struct sas_task *task = isci_request_access_task(ireq);
742 struct dev_to_host_fis *frame_header;
743 enum sci_status status;
744 u32 *frame_buffer;
745
746 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
747 frame_index,
748 (void **)&frame_header);
749
750 if (status != SCI_SUCCESS) {
751 dev_err(scic_to_dev(scic),
752 "%s: SCIC IO Request 0x%p could not get frame header "
753 "for frame index %d, status %x\n",
754 __func__, stp_req, frame_index, status);
755 return status;
756 }
757
758 switch (frame_header->fis_type) {
759 case FIS_PIO_SETUP:
760 /* Get from the frame buffer the PIO Setup Data */
761 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
762 frame_index,
763 (void **)&frame_buffer);
764
765 /* Get the data from the PIO Setup The SCU Hardware returns
766 * first word in the frame_header and the rest of the data is in
767 * the frame buffer so we need to back up one dword
768 */
769
770 /* transfer_count: first 16bits in the 4th dword */
771 stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
772
773 /* ending_status: 4th byte in the 3rd dword */
774 stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
775
776 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
777 frame_header,
778 frame_buffer);
779
780 sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
781
782 /* The next state is dependent on whether the
783 * request was PIO Data-in or Data out
784 */
785 if (task->data_dir == DMA_FROM_DEVICE) {
786 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
787 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
788 } else if (task->data_dir == DMA_TO_DEVICE) {
789 /* Transmit data */
790 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
791 if (status != SCI_SUCCESS)
792 break;
793 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
794 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
795 }
796 break;
797 case FIS_SETDEVBITS:
798 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
799 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
800 break;
801 case FIS_REGD2H:
802 if (frame_header->status & ATA_BUSY) {
803 /* Now why is the drive sending a D2H Register FIS when
804 * it is still busy? Do nothing since we are still in
805 * the right state.
806 */
807 dev_dbg(scic_to_dev(scic),
808 "%s: SCIC PIO Request 0x%p received "
809 "D2H Register FIS with BSY status "
810 "0x%x\n", __func__, stp_req,
811 frame_header->status);
812 break;
813 }
814
815 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
816 frame_index,
817 (void **)&frame_buffer);
818
819 scic_sds_controller_copy_sata_response(&sci_req->stp.req,
820 frame_header,
821 frame_buffer);
822
823 scic_sds_request_set_status(sci_req,
824 SCU_TASK_DONE_CHECK_RESPONSE,
825 SCI_FAILURE_IO_RESPONSE_VALID);
826
827 sci_base_state_machine_change_state(&sci_req->state_machine,
828 SCI_BASE_REQUEST_STATE_COMPLETED);
829 break;
830 default:
831 /* FIXME: what do we do here? */
832 break;
833 }
834
835 /* Frame is decoded return it to the controller */
836 scic_sds_controller_release_frame(scic, frame_index);
837
838 return status;
839 }
840
/**
 * scic_sds_stp_request_pio_data_in_await_data_frame_handler() - process a
 * frame received while a PIO request awaits DATA FIS payload.
 * @sci_req: the request for which a frame has been received.
 * @frame_index: the index of the frame that has been received.
 *
 * Copies DATA FIS payload into the request's SGL region. When the transfer
 * count reaches zero, either completes the IO (device no longer busy) or
 * returns to the await-frame substate. A non-DATA FIS here forces a SCSI
 * abort completion.
 */
static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	struct sata_fis_data *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);

	if (status != SCI_SUCCESS) {
		dev_err(scic_to_dev(scic),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);
		return status;
	}

	if (frame_header->fis_type == FIS_DATA) {
		if (stp_req->type.pio.request_current.sgl_pair == NULL) {
			/* No SGL to copy into: record the frame index and
			 * stop counting. NOTE(review): the frame is NOT
			 * released in this path — presumably it is held for
			 * later retrieval via saved_rx_frame_index; confirm
			 * with the completion path. */
			sci_req->saved_rx_frame_index = frame_index;
			stp_req->type.pio.pio_transfer_bytes = 0;
		} else {
			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
								      frame_index,
								      (void **)&frame_buffer);

			status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
									    (u8 *)frame_buffer);

			/* Frame is decoded return it to the controller */
			scic_sds_controller_release_frame(scic, frame_index);
		}

		/* Check for the end of the transfer, are there more
		 * bytes remaining for this data transfer
		 */
		if (status != SCI_SUCCESS ||
		    stp_req->type.pio.pio_transfer_bytes != 0)
			return status;

		if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
			/* Device reported not-busy in the PIO SETUP ending
			 * status: the command is done. */
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);

			sci_base_state_machine_change_state(&sci_req->state_machine,
							    SCI_BASE_REQUEST_STATE_COMPLETED);
		} else {
			/* More PIO bursts expected: wait for the next
			 * PIO SETUP or D2H register FIS. */
			sci_base_state_machine_change_state(&sci_req->started_substate_machine,
							    SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
		}
	} else {
		dev_err(scic_to_dev(scic),
			"%s: SCIC PIO Request 0x%p received frame %d "
			"with fis type 0x%02x when expecting a data "
			"fis.\n", __func__, stp_req, frame_index,
			frame_header->fis_type);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);

		/* Frame is decoded return it to the controller */
		scic_sds_controller_release_frame(scic, frame_index);
	}

	return status;
}
916
917
918 /**
919 *
920 * @sci_req:
921 * @completion_code:
922 *
923 * enum sci_status
924 */
925 static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler(
926
927 struct scic_sds_request *sci_req,
928 u32 completion_code)
929 {
930 enum sci_status status = SCI_SUCCESS;
931 bool all_frames_transferred = false;
932 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
933
934 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
935 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
936 /* Transmit data */
937 if (stp_req->type.pio.pio_transfer_bytes != 0) {
938 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
939 if (status == SCI_SUCCESS) {
940 if (stp_req->type.pio.pio_transfer_bytes == 0)
941 all_frames_transferred = true;
942 }
943 } else if (stp_req->type.pio.pio_transfer_bytes == 0) {
944 /*
945 * this will happen if the all data is written at the
946 * first time after the pio setup fis is received
947 */
948 all_frames_transferred = true;
949 }
950
951 /* all data transferred. */
952 if (all_frames_transferred) {
953 /*
954 * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE
955 * and wait for PIO_SETUP fis / or D2H REg fis. */
956 sci_base_state_machine_change_state(
957 &sci_req->started_substate_machine,
958 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
959 );
960 }
961 break;
962
963 default:
964 /*
965 * All other completion status cause the IO to be complete. If a NAK
966 * was received, then it is up to the user to retry the request. */
967 scic_sds_request_set_status(
968 sci_req,
969 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
970 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
971 );
972
973 sci_base_state_machine_change_state(
974 &sci_req->state_machine,
975 SCI_BASE_REQUEST_STATE_COMPLETED
976 );
977 break;
978 }
979
980 return status;
981 }
982
983 /**
984 *
985 * @request: This is the request which is receiving the event.
986 * @event_code: This is the event code that the request on which the request is
987 * expected to take action.
988 *
989 * This method will handle any link layer events while waiting for the data
990 * frame. enum sci_status SCI_SUCCESS SCI_FAILURE
991 */
992 static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler(
993 struct scic_sds_request *request,
994 u32 event_code)
995 {
996 enum sci_status status;
997
998 switch (scu_get_event_specifier(event_code)) {
999 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
1000 /*
1001 * We are waiting for data and the SCU has R_ERR the data frame.
1002 * Go back to waiting for the D2H Register FIS */
1003 sci_base_state_machine_change_state(
1004 &request->started_substate_machine,
1005 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1006 );
1007
1008 status = SCI_SUCCESS;
1009 break;
1010
1011 default:
1012 dev_err(scic_to_dev(request->owning_controller),
1013 "%s: SCIC PIO Request 0x%p received unexpected "
1014 "event 0x%08x\n",
1015 __func__, request, event_code);
1016
1017 /* / @todo Should we fail the PIO request when we get an unexpected event? */
1018 status = SCI_FAILURE;
1019 break;
1020 }
1021
1022 return status;
1023 }
1024
1025 /* --------------------------------------------------------------------------- */
1026
/* Dispatch table mapping each started-PIO substate to the abort, TC
 * completion, event, and frame handlers that apply in that substate.
 */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler,
		.frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler,
	}
};
1046
1047 static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
1048 void *object)
1049 {
1050 struct scic_sds_request *sci_req = object;
1051
1052 SET_STATE_HANDLER(
1053 sci_req,
1054 scic_sds_stp_request_started_pio_substate_handler_table,
1055 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
1056 );
1057
1058 scic_sds_remote_device_set_working_request(
1059 sci_req->target_device, sci_req);
1060 }
1061
1062 static void scic_sds_stp_request_started_pio_await_frame_enter(void *object)
1063 {
1064 struct scic_sds_request *sci_req = object;
1065
1066 SET_STATE_HANDLER(
1067 sci_req,
1068 scic_sds_stp_request_started_pio_substate_handler_table,
1069 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1070 );
1071 }
1072
1073 static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
1074 void *object)
1075 {
1076 struct scic_sds_request *sci_req = object;
1077
1078 SET_STATE_HANDLER(
1079 sci_req,
1080 scic_sds_stp_request_started_pio_substate_handler_table,
1081 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
1082 );
1083 }
1084
1085 static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
1086 void *object)
1087 {
1088 struct scic_sds_request *sci_req = object;
1089
1090 SET_STATE_HANDLER(
1091 sci_req,
1092 scic_sds_stp_request_started_pio_substate_handler_table,
1093 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
1094 );
1095 }
1096
1097 /* --------------------------------------------------------------------------- */
1098
/* State table binding each started-PIO substate to its entry action. */
static const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_frame_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter,
	}
};
1113
/**
 * scic_sds_stp_pio_request_construct() - construct a SATA/STP PIO request.
 * @sci_req: the request being constructed.
 * @copy_rx_frame: when true, received PIO data is copied into the request's
 *	SGL; when false the unsolicited frame is handed to the user uncopied
 *	(sgl_pair is left NULL as the marker for that mode).
 *
 * Builds the raw H2D task context, resets the PIO bookkeeping fields, and
 * starts the PIO started-substate machine.  Always returns SCI_SUCCESS.
 */
enum sci_status
scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
				   bool copy_rx_frame)
{
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;

	scic_sds_stp_non_ncq_request_construct(sci_req);

	scu_stp_raw_request_construct_task_context(stp_req,
						   sci_req->task_context_buffer);

	pio->current_transfer_bytes = 0;
	pio->ending_error = 0;
	pio->ending_status = 0;

	pio->request_current.sgl_offset = 0;
	pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		scic_sds_request_build_sgl(sci_req);
		/* Since the IO request copy of the TC contains the same data as
		 * the actual TC this pointer is valid for either.
		 */
		pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		pio->request_current.sgl_pair = NULL;
	}

	sci_base_state_machine_construct(&sci_req->started_substate_machine,
					 sci_req,
					 scic_sds_stp_request_started_pio_substate_table,
					 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE);

	return SCI_SUCCESS;
}
1151
1152 static void scic_sds_stp_request_udma_complete_request(
1153 struct scic_sds_request *request,
1154 u32 scu_status,
1155 enum sci_status sci_status)
1156 {
1157 scic_sds_request_set_status(request, scu_status, sci_status);
1158 sci_base_state_machine_change_state(&request->state_machine,
1159 SCI_BASE_REQUEST_STATE_COMPLETED);
1160 }
1161
1162 static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
1163 u32 frame_index)
1164 {
1165 struct scic_sds_controller *scic = sci_req->owning_controller;
1166 struct dev_to_host_fis *frame_header;
1167 enum sci_status status;
1168 u32 *frame_buffer;
1169
1170 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1171 frame_index,
1172 (void **)&frame_header);
1173
1174 if ((status == SCI_SUCCESS) &&
1175 (frame_header->fis_type == FIS_REGD2H)) {
1176 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1177 frame_index,
1178 (void **)&frame_buffer);
1179
1180 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1181 frame_header,
1182 frame_buffer);
1183 }
1184
1185 scic_sds_controller_release_frame(scic, frame_index);
1186
1187 return status;
1188 }
1189
/**
 * scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler() -
 *    process the TC completion for a UDMA request.
 * @sci_req: the UDMA request whose task context completed.
 * @completion_code: completion code reported by the SCU.
 *
 * SCU_TASK_DONE_GOOD completes the IO successfully.  UNEXP_FIS/REG_ERR
 * complete the IO immediately if the D2H register FIS was already received,
 * otherwise switch to waiting for that FIS.  Several link-level errors
 * suspend the remote device and then fall through to the default failure
 * path.  Always returns SCI_SUCCESS (status is never reassigned).
 */
static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_stp_request_udma_complete_request(sci_req,
							   SCU_TASK_DONE_GOOD,
							   SCI_SUCCESS);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
		/*
		 * We must check the response buffer to see if the D2H Register FIS was
		 * received before we got the TC completion. */
		if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
			scic_sds_remote_device_suspend(sci_req->target_device,
				SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));

			scic_sds_stp_request_udma_complete_request(sci_req,
								   SCU_TASK_DONE_CHECK_RESPONSE,
								   SCI_FAILURE_IO_RESPONSE_VALID);
		} else {
			/*
			 * If we have an error completion status for the TC then we can expect a
			 * D2H register FIS from the device so we must change state to wait for it */
			sci_base_state_machine_change_state(&sci_req->started_substate_machine,
				SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE);
		}
		break;

	/*
	 * / @todo Check to see if any of these completion status need to wait for
	 * / the device to host register fis. */
	/* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
		scic_sds_remote_device_suspend(sci_req->target_device,
			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
		/* Fall through to the default case */
	default:
		/* All other completion status cause the IO to be complete. */
		scic_sds_stp_request_udma_complete_request(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	return status;
}
1245
1246 static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler(
1247 struct scic_sds_request *sci_req,
1248 u32 frame_index)
1249 {
1250 enum sci_status status;
1251
1252 /* Use the general frame handler to copy the resposne data */
1253 status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
1254
1255 if (status != SCI_SUCCESS)
1256 return status;
1257
1258 scic_sds_stp_request_udma_complete_request(sci_req,
1259 SCU_TASK_DONE_CHECK_RESPONSE,
1260 SCI_FAILURE_IO_RESPONSE_VALID);
1261
1262 return status;
1263 }
1264
1265 /* --------------------------------------------------------------------------- */
1266
/* Dispatch table mapping each started-UDMA substate to its abort, TC
 * completion, and frame handlers.
 */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler,
		.frame_handler = scic_sds_stp_request_udma_general_frame_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler,
	},
};
1278
1279 static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
1280 void *object)
1281 {
1282 struct scic_sds_request *sci_req = object;
1283
1284 SET_STATE_HANDLER(
1285 sci_req,
1286 scic_sds_stp_request_started_udma_substate_handler_table,
1287 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
1288 );
1289 }
1290
/**
 * scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter() - entry action
 *    for the UDMA await-D2H-register-FIS substate.
 * @object: the request entering the substate (cast from void *).
 *
 * This state is entered when there is an TC completion failure.  The hardware
 * received an unexpected condition while processing the IO request and now
 * will UF the D2H register FIS to complete the IO.
 */
static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_stp_request_started_udma_substate_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
		);
}
1309
1310 /* --------------------------------------------------------------------------- */
1311
/* State table binding each started-UDMA substate to its entry action. */
static const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter,
	},
};
1320
/**
 * scic_sds_stp_udma_request_construct() - construct a SATA/STP UDMA request.
 * @sci_req: the request being constructed.
 * @len: transfer length in bytes.
 * @dir: DMA direction of the transfer.
 *
 * NOTE(review): SCU_TASK_TYPE_DMA_IN is passed for both directions here;
 * presumably scic_sds_stp_optimized_request_construct() derives the actual
 * direction from @dir -- confirm against that constructor.
 *
 * Starts the UDMA started-substate machine.  Always returns SCI_SUCCESS.
 */
enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
						    u32 len,
						    enum dma_data_direction dir)
{
	scic_sds_stp_non_ncq_request_construct(sci_req);

	scic_sds_stp_optimized_request_construct(sci_req, SCU_TASK_TYPE_DMA_IN,
						 len, dir);

	sci_base_state_machine_construct(
		&sci_req->started_substate_machine,
		sci_req,
		scic_sds_stp_request_started_udma_substate_table,
		SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
		);

	return SCI_SUCCESS;
}
1339
1340 /**
1341 *
1342 * @sci_req:
1343 * @completion_code:
1344 *
1345 * This method processes a TC completion. The expected TC completion is for
1346 * the transmission of the H2D register FIS containing the SATA/STP non-data
1347 * request. This method always successfully processes the TC completion.
1348 * SCI_SUCCESS This value is always returned.
1349 */
1350 static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler(
1351 struct scic_sds_request *sci_req,
1352 u32 completion_code)
1353 {
1354 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1355 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1356 scic_sds_request_set_status(
1357 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1358 );
1359
1360 sci_base_state_machine_change_state(
1361 &sci_req->started_substate_machine,
1362 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
1363 );
1364 break;
1365
1366 default:
1367 /*
1368 * All other completion status cause the IO to be complete. If a NAK
1369 * was received, then it is up to the user to retry the request. */
1370 scic_sds_request_set_status(
1371 sci_req,
1372 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1373 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1374 );
1375
1376 sci_base_state_machine_change_state(
1377 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
1378 break;
1379 }
1380
1381 return SCI_SUCCESS;
1382 }
1383
/**
 * scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler()
 * - process the TC completion for the SRST-deasserting (diagnostic) H2D FIS.
 * @sci_req: the soft-reset request whose task context completed.
 * @completion_code: completion code reported by the SCU.
 *
 * On a good completion, advance to waiting for the device's D2H response
 * frame; any other completion fails the request.  This method always
 * successfully processes the TC completion: SCI_SUCCESS is always returned.
 */
static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
			);

		sci_base_state_machine_change_state(
			&sci_req->started_substate_machine,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
			);
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete.  If a NAK
		 * was received, then it is up to the user to retry the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
1427
/**
 * scic_sds_stp_request_soft_reset_await_d2h_frame_handler() - process frames
 *    received while a soft reset waits for the D2H register FIS.
 * @sci_req: the request for which a frame has been received.
 * @frame_index: index of the frame that has been received.
 *
 * A D2H register FIS has its response data copied into the request and the
 * IO completes with a response-valid failure status; any other FIS type is
 * treated as a protocol violation.  In both cases the request is completed
 * and the frame released.  Returns the status of the frame header retrieval.
 */
static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);
	if (status != SCI_SUCCESS) {
		dev_err(scic_to_dev(scic),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);
		return status;
	}

	switch (frame_header->fis_type) {
	case FIS_REGD2H:
		/* Save the device's response registers into the request. */
		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
							      frame_index,
							      (void **)&frame_buffer);

		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
						       frame_header,
						       frame_buffer);

		/* The command has completed with error */
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;

	default:
		/* Only a register D2H FIS is legal here. */
		dev_warn(scic_to_dev(scic),
			 "%s: IO Request:0x%p Frame Id:%d protocol "
			 "violation occurred\n", __func__, stp_req,
			 frame_index);

		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
					    SCI_FAILURE_PROTOCOL_VIOLATION);
		break;
	}

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);

	/* Frame has been decoded return it to the controller */
	scic_sds_controller_release_frame(scic, frame_index);

	return status;
}
1496
1497 /* --------------------------------------------------------------------------- */
1498
/* Dispatch table mapping each started-soft-reset substate to its abort,
 * TC completion, and frame handlers.
 */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler,
	},
};
1513
1514 static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
1515 void *object)
1516 {
1517 struct scic_sds_request *sci_req = object;
1518
1519 SET_STATE_HANDLER(
1520 sci_req,
1521 scic_sds_stp_request_started_soft_reset_substate_handler_table,
1522 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
1523 );
1524
1525 scic_sds_remote_device_set_working_request(
1526 sci_req->target_device, sci_req
1527 );
1528 }
1529
/* Entry action for the soft-reset await-H2D-diagnostic substate: rewrite
 * the H2D FIS with SRST deasserted, mark the task context as a normal (not
 * control) frame, and re-post the IO to the hardware.
 */
static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;
	struct scu_task_context *task_context;
	struct host_to_dev_fis *h2d_fis;
	enum sci_status status;

	/* Clear the SRST bit */
	h2d_fis = &sci_req->stp.cmd;
	h2d_fis->control = 0;

	/* Clear the TC control bit */
	task_context = scic_sds_controller_get_task_context_buffer(
		sci_req->owning_controller, sci_req->io_tag);
	task_context->control_frame = 0;

	status = scic_controller_continue_io(sci_req);
	if (status == SCI_SUCCESS) {
		SET_STATE_HANDLER(
			sci_req,
			scic_sds_stp_request_started_soft_reset_substate_handler_table,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
			);
	}
	/* NOTE(review): if continue_io fails, the substate handlers are left
	 * unchanged and the failure is silently dropped -- confirm whether
	 * an error path is required here. */
}
1556
1557 static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
1558 void *object)
1559 {
1560 struct scic_sds_request *sci_req = object;
1561
1562 SET_STATE_HANDLER(
1563 sci_req,
1564 scic_sds_stp_request_started_soft_reset_substate_handler_table,
1565 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
1566 );
1567 }
1568
/* State table binding each started-soft-reset substate to its entry action. */
static const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter,
	},
};
1580
1581 enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req)
1582 {
1583 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1584
1585 scic_sds_stp_non_ncq_request_construct(sci_req);
1586
1587 /* Build the STP task context structure */
1588 scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);
1589
1590 sci_base_state_machine_construct(&sci_req->started_substate_machine,
1591 sci_req,
1592 scic_sds_stp_request_started_soft_reset_substate_table,
1593 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);
1594
1595 return SCI_SUCCESS;
1596 }
This page took 0.062432 seconds and 4 git commands to generate.