/*
 * drivers/scsi/isci/request.h
 * (patch context: "isci: fix isci_terminate_pending() list management")
 */
1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56 #ifndef _ISCI_REQUEST_H_
57 #define _ISCI_REQUEST_H_
58
59 #include "isci.h"
60 #include "host.h"
61 #include "scu_task_context.h"
62
63 /**
64 * struct isci_request_status - This enum defines the possible states of an I/O
65 * request.
66 *
67 *
68 */
enum isci_request_status {
	unallocated = 0x00,	/* request object is free; querying this state is flagged */
	allocated = 0x01,	/* object claimed but not yet started */
	started = 0x02,		/* request submitted to the hardware */
	completed = 0x03,	/* request finished normally */
	aborting = 0x04,	/* abort in progress (see isci_request_change_started_to_newstate) */
	aborted = 0x05,		/* abort finished */
	terminating = 0x06,	/* terminate in progress */
	dead = 0x07		/* unusable; awaiting teardown */
};
79
/*
 * enum task_type - discriminates the ttype_ptr union in struct isci_request:
 * a normal I/O (sas_task) versus a task management function (isci_tmf).
 */
enum task_type {
	io_task = 0,	/* ttype_ptr.io_task_ptr is valid */
	tmf_task = 1	/* ttype_ptr.tmf_task_ptr is valid */
};
84
/*
 * enum sci_request_protocol - transport protocol carried by a request.
 *
 * Note: the stray second semicolon after the closing-brace comment has been
 * removed; an extra file-scope ';' is not valid ISO C.
 */
enum sci_request_protocol {
	SCIC_NO_PROTOCOL,	/* request carries no transport payload */
	SCIC_SMP_PROTOCOL,	/* SMP (expander management) */
	SCIC_SSP_PROTOCOL,	/* SSP (SAS SCSI) */
	SCIC_STP_PROTOCOL	/* STP/SATA */
}; /* XXX remove me, use sas_task.{dev|task_proto} instead */
91
/*
 * struct scic_sds_stp_request - SATA/STP protocol-specific request state.
 *
 * Exactly one member of the anonymous 'type' union is live at a time,
 * depending on the kind of ATA command being carried (NCQ, UDMA, PIO,
 * or PACKET).
 */
struct scic_sds_stp_request {
	union {
		u32 ncq;

		u32 udma;

		struct scic_sds_stp_pio_request {
			/*
			 * Total transfer for the entire PIO request recorded
			 * at request construction time.
			 *
			 * @todo Should we just decrement this value for each
			 * byte of data transmitted or received to eliminate
			 * the current_transfer_bytes field?
			 */
			u32 total_transfer_bytes;

			/*
			 * Total number of bytes received/transmitted in data
			 * frames since the start of the IO request.  At the
			 * end of the IO request this should equal the
			 * total_transfer_bytes.
			 */
			u32 current_transfer_bytes;

			/*
			 * The number of bytes requested in the PIO
			 * setup.
			 */
			u32 pio_transfer_bytes;

			/*
			 * PIO Setup ending status value to tell us if we need
			 * to wait for another FIS or if the transfer is
			 * complete.  On the receipt of a D2H FIS this will be
			 * the status field of that FIS.
			 */
			u8 ending_status;

			/*
			 * On receipt of a D2H FIS this will be the ending
			 * error field if the ending_status has the
			 * SATA_STATUS_ERR bit set.
			 */
			u8 ending_error;

			/* Cursor over the hardware SGL for the PIO data phase. */
			struct scic_sds_request_pio_sgl {
				struct scu_sgl_element_pair *sgl_pair; /* current SGL pair */
				u8 sgl_set;        /* which element (A/B) of the pair is active */
				u32 sgl_offset;    /* byte offset into the current element */
			} request_current;
		} pio;

		struct {
			/*
			 * The number of bytes requested in the PIO setup
			 * before CDB data frame.
			 */
			u32 device_preferred_cdb_length;
		} packet;
	} type;
};
154
/*
 * struct scic_sds_request - core (SCI) representation of an I/O request.
 *
 * Embedded inside struct isci_request (see sci_req_to_ireq()); the
 * protocol-specific payload lives in the trailing anonymous union.
 */
struct scic_sds_request {
	/*
	 * This field contains the information for the base request state
	 * machine.
	 */
	struct sci_base_state_machine sm;

	/*
	 * This field simply points to the controller to which this IO request
	 * is associated.
	 */
	struct scic_sds_controller *owning_controller;

	/*
	 * This field simply points to the remote device to which this IO
	 * request is associated.
	 */
	struct scic_sds_remote_device *target_device;

	/*
	 * This field is utilized to determine if the SCI user is managing
	 * the IO tag for this request or if the core is managing it.
	 */
	bool was_tag_assigned_by_user;

	/*
	 * This field indicates the IO tag for this request.  The IO tag is
	 * comprised of the task_index and a sequence count.  The sequence count
	 * is utilized to help identify tasks from one life to another.
	 */
	u16 io_tag;

	/*
	 * This field specifies the protocol being utilized for this
	 * IO request.
	 */
	enum sci_request_protocol protocol;

	/*
	 * This field indicates the completion status taken from the SCUs
	 * completion code.  It indicates the completion result for the SCU
	 * hardware.
	 */
	u32 scu_status;

	/*
	 * This field indicates the completion status returned to the SCI user.
	 * It indicates the users view of the io request completion.
	 */
	u32 sci_status;

	/*
	 * This field contains the value to be utilized when posting
	 * (e.g. Post_TC, * Post_TC_Abort) this request to the silicon.
	 */
	u32 post_context;

	/* Pointer to the active task context (may reference 'tc' below). */
	struct scu_task_context *task_context_buffer;
	/* Inline task context storage, cacheline aligned for the hardware. */
	struct scu_task_context tc ____cacheline_aligned;

	/* could be larger with sg chaining */
#define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
	/* Hardware scatter-gather list; 32-byte alignment required by the SCU. */
	struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));

	/*
	 * This field indicates if this request is a task management request or
	 * normal IO request.
	 */
	bool is_task_management_request;

	/*
	 * Index of the saved rx frame (not a pointer).  It is used in STP
	 * internal requests and SMP response frames.  If a frame was saved,
	 * it must be released on IO request completion.
	 *
	 * @todo In the future do we want to keep a list of RX frame buffers?
	 */
	u32 saved_rx_frame_index;

	/*
	 * This field is the recorded device sequence for the io request.
	 * This is recorded during the build operation and is compared in the
	 * start operation.  If the sequence is different then there was a
	 * change of devices from the build to start operations.
	 */
	u8 device_sequence;

	/* Protocol-specific command/response storage; tag is 'protocol' above. */
	union {
		struct {
			union {
				struct ssp_cmd_iu cmd;
				struct ssp_task_iu tmf;
			};
			union {
				struct ssp_response_iu rsp;
				u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
			};
		} ssp;

		struct {
			struct smp_req cmd;
			struct smp_resp rsp;
		} smp;

		struct {
			struct scic_sds_stp_request req;
			struct host_to_dev_fis cmd;
			struct dev_to_host_fis rsp;
		} stp;
	};

};
267
268 static inline struct scic_sds_request *to_sci_req(struct scic_sds_stp_request *stp_req)
269 {
270 struct scic_sds_request *sci_req;
271
272 sci_req = container_of(stp_req, typeof(*sci_req), stp.req);
273 return sci_req;
274 }
275
/*
 * struct isci_request - driver-level wrapper around a core SCI request.
 *
 * Tracks the Linux-side lifecycle (status, list membership, DMA mapping)
 * for both normal I/O and task management requests.
 */
struct isci_request {
	enum isci_request_status status;	/* protected by state_lock */
	enum task_type ttype;			/* selects ttype_ptr member below */
	unsigned short io_tag;
	bool complete_in_target;		/* target has finished this request */
	bool terminated;			/* terminate handshake has completed */

	union ttype_ptr_union {
		struct sas_task *io_task_ptr;   /* When ttype==io_task */
		struct isci_tmf *tmf_task_ptr;  /* When ttype==tmf_task */
	} ttype_ptr;
	struct isci_host *isci_host;
	struct isci_remote_device *isci_device;
	/* For use in the requests_to_{complete|abort} lists: */
	struct list_head completed_node;
	/* For use in the reqs_in_process list: */
	struct list_head dev_node;
	spinlock_t state_lock;			/* serializes 'status' transitions */
	dma_addr_t request_daddr;		/* DMA address of this object (dma_pool) */
	dma_addr_t zero_scatter_daddr;		/* single-buffer mapping when num_scatter==0 */

	unsigned int num_sg_entries;	/* returned by pci_alloc_sg */

	/** Note: "io_request_completion" is completed in two different ways
	 * depending on whether this is a TMF or regular request.
	 * - TMF requests are completed in the thread that started them;
	 * - regular requests are completed in the request completion callback
	 *   function.
	 * This difference in operation allows the aborter of a TMF request
	 * to be sure that once the TMF request completes, the I/O that the
	 * TMF was aborting is guaranteed to have completed.
	 */
	struct completion *io_request_completion;
	struct scic_sds_request sci;	/* embedded core request (see sci_req_to_ireq) */
};
311
312 static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
313 {
314 struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci);
315
316 return ireq;
317 }
318
/**
 * enum sci_base_request_states - This enumeration depicts all the states for
 *    the common request state machine.
 *
 * The SCI_REQ_STP_* states are protocol sub-states entered while a STARTED
 * STP/SATA request progresses; likewise SCI_REQ_TASK_* for task management
 * and SCI_REQ_SMP_* for SMP requests.
 */
enum sci_base_request_states {
	/*
	 * Simply the initial state for the base request state machine.
	 */
	SCI_REQ_INIT,

	/*
	 * This state indicates that the request has been constructed.
	 * This state is entered from the INITIAL state.
	 */
	SCI_REQ_CONSTRUCTED,

	/*
	 * This state indicates that the request has been started.  This state
	 * is entered from the CONSTRUCTED state.
	 */
	SCI_REQ_STARTED,

	/* STP UDMA: waiting for task context completion / D2H FIS. */
	SCI_REQ_STP_UDMA_WAIT_TC_COMP,
	SCI_REQ_STP_UDMA_WAIT_D2H,

	/* STP non-data: waiting for the H2D then the D2H register FIS. */
	SCI_REQ_STP_NON_DATA_WAIT_H2D,
	SCI_REQ_STP_NON_DATA_WAIT_D2H,

	/* STP soft-reset handshake phases (assert, diagnostic, D2H reply). */
	SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
	SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
	SCI_REQ_STP_SOFT_RESET_WAIT_D2H,

	/*
	 * While in this state the IO request object is waiting for the TC
	 * completion notification for the H2D Register FIS
	 */
	SCI_REQ_STP_PIO_WAIT_H2D,

	/*
	 * While in this state the IO request object is waiting for either a
	 * PIO Setup FIS or a D2H register FIS.  The type of frame received is
	 * based on the result of the prior frame and line conditions.
	 */
	SCI_REQ_STP_PIO_WAIT_FRAME,

	/*
	 * While in this state the IO request object is waiting for a DATA
	 * frame from the device.
	 */
	SCI_REQ_STP_PIO_DATA_IN,

	/*
	 * While in this state the IO request object is waiting to transmit
	 * the next data frame to the device.
	 */
	SCI_REQ_STP_PIO_DATA_OUT,

	/*
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
	 * task management request is waiting for the transmission of the
	 * initial frame (i.e. command, task, etc.).
	 */
	SCI_REQ_TASK_WAIT_TC_COMP,

	/*
	 * This sub-state indicates that the started task management request
	 * is waiting for the reception of an unsolicited frame
	 * (i.e. response IU).
	 */
	SCI_REQ_TASK_WAIT_TC_RESP,

	/*
	 * This sub-state indicates that the started SMP request is waiting
	 * for the reception of an unsolicited frame (i.e. response IU).
	 */
	SCI_REQ_SMP_WAIT_RESP,

	/*
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
	 * request is waiting for the transmission of the initial frame
	 * (i.e. command, task, etc.).
	 */
	SCI_REQ_SMP_WAIT_TC_COMP,

	/*
	 * This state indicates that the request has completed.
	 * This state is entered from the STARTED state.  This state is entered
	 * from the ABORTING state.
	 */
	SCI_REQ_COMPLETED,

	/*
	 * This state indicates that the request is in the process of being
	 * terminated/aborted.
	 * This state is entered from the CONSTRUCTED state.
	 * This state is entered from the STARTED state.
	 */
	SCI_REQ_ABORTING,

	/*
	 * Simply the final state for the base request state machine.
	 */
	SCI_REQ_FINAL,
};
426
/**
 * scic_sds_request_get_controller() -
 *
 * This macro will return the controller for this io request object
 */
#define scic_sds_request_get_controller(sci_req) \
	((sci_req)->owning_controller)

/**
 * scic_sds_request_get_device() -
 *
 * This macro will return the device for this io request object
 */
#define scic_sds_request_get_device(sci_req) \
	((sci_req)->target_device)

/**
 * scic_sds_request_get_port() -
 *
 * This macro will return the port for this io request object
 */
#define scic_sds_request_get_port(sci_req) \
	scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req))

/**
 * scic_sds_request_get_post_context() -
 *
 * This macro returns the constructed post context result for the io request.
 */
#define scic_sds_request_get_post_context(sci_req) \
	((sci_req)->post_context)

/**
 * scic_sds_request_get_task_context() -
 *
 * This macro returns the task context buffer for the request object.
 * (The previous comment said "os handle", which did not match the code.)
 */
#define scic_sds_request_get_task_context(request) \
	((request)->task_context_buffer)
466
/**
 * scic_sds_request_set_status() -
 *
 * This macro will set the scu hardware status and sci request completion
 * status for an io request.
 *
 * Wrapped in do { } while (0) so it behaves as a single statement and is
 * safe in unbraced if/else bodies (the bare-brace form broke
 * "if (x) macro(...); else ...").
 */
#define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \
	do { \
		(request)->scu_status = (scu_status_code); \
		(request)->sci_status = (sci_status_code); \
	} while (0)
478
/**
 * SCU_SGL_ZERO() -
 *
 * This macro zeros the hardware SGL element data.
 *
 * Wrapped in do { } while (0) so the multi-statement body is safe in
 * unbraced if/else contexts.
 */
#define SCU_SGL_ZERO(scu_sge) \
	do { \
		(scu_sge).length = 0; \
		(scu_sge).address_lower = 0; \
		(scu_sge).address_upper = 0; \
		(scu_sge).address_modifier = 0; \
	} while (0)
491
/**
 * SCU_SGL_COPY() -
 *
 * This macro copies the SGL element data from the host OS to the hardware
 * SGL element data.
 *
 * Wrapped in do { } while (0) so the multi-statement body is safe in
 * unbraced if/else contexts.
 *
 * NOTE(review): the body reads a variable named 'sg' from the caller's
 * scope and never uses the 'os_sge' parameter — callers must have a
 * scatterlist pointer named 'sg' in scope.  Left as-is to preserve
 * behavior; consider switching to (os_sge) at all call sites.
 */
#define SCU_SGL_COPY(scu_sge, os_sge) \
	do { \
		(scu_sge).length = sg_dma_len(sg); \
		(scu_sge).address_upper = \
			upper_32_bits(sg_dma_address(sg)); \
		(scu_sge).address_lower = \
			lower_32_bits(sg_dma_address(sg)); \
		(scu_sge).address_modifier = 0; \
	} while (0)
507
/* Core request lifecycle entry points (definitions live outside this header). */
enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req);
enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req);
enum sci_status
scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
				  u32 event_code);
enum sci_status
scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
				  u32 frame_index);
enum sci_status
scic_sds_task_request_terminate(struct scic_sds_request *sci_req);
extern enum sci_status
scic_sds_request_complete(struct scic_sds_request *sci_req);
extern enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code);
522
523 /* XXX open code in caller */
524 static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req,
525 dma_addr_t phys_addr)
526 {
527 struct isci_request *ireq = sci_req_to_ireq(sci_req);
528 dma_addr_t offset;
529
530 BUG_ON(phys_addr < ireq->request_daddr);
531
532 offset = phys_addr - ireq->request_daddr;
533
534 BUG_ON(offset >= sizeof(*ireq));
535
536 return (char *)ireq + offset;
537 }
538
539 /* XXX open code in caller */
540 static inline dma_addr_t
541 scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr)
542 {
543 struct isci_request *ireq = sci_req_to_ireq(sci_req);
544
545 char *requested_addr = (char *)virt_addr;
546 char *base_addr = (char *)ireq;
547
548 BUG_ON(requested_addr < base_addr);
549 BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
550
551 return ireq->request_daddr + (requested_addr - base_addr);
552 }
553
554 /**
555 * This function gets the status of the request object.
556 * @request: This parameter points to the isci_request object
557 *
558 * status of the object as a isci_request_status enum.
559 */
560 static inline enum isci_request_status
561 isci_request_get_state(struct isci_request *isci_request)
562 {
563 BUG_ON(isci_request == NULL);
564
565 /*probably a bad sign... */
566 if (isci_request->status == unallocated)
567 dev_warn(&isci_request->isci_host->pdev->dev,
568 "%s: isci_request->status == unallocated\n",
569 __func__);
570
571 return isci_request->status;
572 }
573
574
575 /**
576 * isci_request_change_state() - This function sets the status of the request
577 * object.
578 * @request: This parameter points to the isci_request object
579 * @status: This Parameter is the new status of the object
580 *
581 */
582 static inline enum isci_request_status
583 isci_request_change_state(struct isci_request *isci_request,
584 enum isci_request_status status)
585 {
586 enum isci_request_status old_state;
587 unsigned long flags;
588
589 dev_dbg(&isci_request->isci_host->pdev->dev,
590 "%s: isci_request = %p, state = 0x%x\n",
591 __func__,
592 isci_request,
593 status);
594
595 BUG_ON(isci_request == NULL);
596
597 spin_lock_irqsave(&isci_request->state_lock, flags);
598 old_state = isci_request->status;
599 isci_request->status = status;
600 spin_unlock_irqrestore(&isci_request->state_lock, flags);
601
602 return old_state;
603 }
604
605 /**
606 * isci_request_change_started_to_newstate() - This function sets the status of
607 * the request object.
608 * @request: This parameter points to the isci_request object
609 * @status: This Parameter is the new status of the object
610 *
611 * state previous to any change.
612 */
613 static inline enum isci_request_status
614 isci_request_change_started_to_newstate(struct isci_request *isci_request,
615 struct completion *completion_ptr,
616 enum isci_request_status newstate)
617 {
618 enum isci_request_status old_state;
619 unsigned long flags;
620
621 spin_lock_irqsave(&isci_request->state_lock, flags);
622
623 old_state = isci_request->status;
624
625 if (old_state == started || old_state == aborting) {
626 BUG_ON(isci_request->io_request_completion != NULL);
627
628 isci_request->io_request_completion = completion_ptr;
629 isci_request->status = newstate;
630 }
631
632 spin_unlock_irqrestore(&isci_request->state_lock, flags);
633
634 dev_dbg(&isci_request->isci_host->pdev->dev,
635 "%s: isci_request = %p, old_state = 0x%x\n",
636 __func__,
637 isci_request,
638 old_state);
639
640 return old_state;
641 }
642
643 /**
644 * isci_request_change_started_to_aborted() - This function sets the status of
645 * the request object.
646 * @request: This parameter points to the isci_request object
647 * @completion_ptr: This parameter is saved as the kernel completion structure
648 * signalled when the old request completes.
649 *
650 * state previous to any change.
651 */
652 static inline enum isci_request_status
653 isci_request_change_started_to_aborted(struct isci_request *isci_request,
654 struct completion *completion_ptr)
655 {
656 return isci_request_change_started_to_newstate(isci_request,
657 completion_ptr,
658 aborted);
659 }
660 /**
661 * isci_request_free() - This function frees the request object.
662 * @isci_host: This parameter specifies the ISCI host object
663 * @isci_request: This parameter points to the isci_request object
664 *
665 */
666 static inline void isci_request_free(struct isci_host *isci_host,
667 struct isci_request *isci_request)
668 {
669 if (!isci_request)
670 return;
671
672 /* release the dma memory if we fail. */
673 dma_pool_free(isci_host->dma_pool,
674 isci_request,
675 isci_request->request_daddr);
676 }
677
/* Fetch the sas_task of a request; valid only when ttype == io_task. */
#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)

/* Fetch the isci_tmf of a request; valid only when ttype == tmf_task. */
#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)

int isci_request_alloc_tmf(struct isci_host *isci_host,
			   struct isci_tmf *isci_tmf,
			   struct isci_request **isci_request,
			   struct isci_remote_device *isci_device,
			   gfp_t gfp_flags);


int isci_request_execute(struct isci_host *isci_host,
			 struct sas_task *task,
			 struct isci_request **request,
			 gfp_t gfp_flags);
693
694 /**
695 * isci_request_unmap_sgl() - This function unmaps the DMA address of a given
696 * sgl
697 * @request: This parameter points to the isci_request object
698 * @*pdev: This Parameter is the pci_device struct for the controller
699 *
700 */
701 static inline void
702 isci_request_unmap_sgl(struct isci_request *request, struct pci_dev *pdev)
703 {
704 struct sas_task *task = isci_request_access_task(request);
705
706 dev_dbg(&request->isci_host->pdev->dev,
707 "%s: request = %p, task = %p,\n"
708 "task->data_dir = %d, is_sata = %d\n ",
709 __func__,
710 request,
711 task,
712 task->data_dir,
713 sas_protocol_ata(task->task_proto));
714
715 if ((task->data_dir != PCI_DMA_NONE) &&
716 !sas_protocol_ata(task->task_proto)) {
717 if (task->num_scatter == 0)
718 /* 0 indicates a single dma address */
719 dma_unmap_single(
720 &pdev->dev,
721 request->zero_scatter_daddr,
722 task->total_xfer_len,
723 task->data_dir
724 );
725
726 else /* unmap the sgl dma addresses */
727 dma_unmap_sg(
728 &pdev->dev,
729 task->scatter,
730 request->num_sg_entries,
731 task->data_dir
732 );
733 }
734 }
735
736 /**
737 * isci_request_io_request_get_next_sge() - This function is called by the sci
738 * core to retrieve the next sge for a given request.
739 * @request: This parameter is the isci_request object.
740 * @current_sge_address: This parameter is the last sge retrieved by the sci
741 * core for this request.
742 *
743 * pointer to the next sge for specified request.
744 */
745 static inline void *
746 isci_request_io_request_get_next_sge(struct isci_request *request,
747 void *current_sge_address)
748 {
749 struct sas_task *task = isci_request_access_task(request);
750 void *ret = NULL;
751
752 dev_dbg(&request->isci_host->pdev->dev,
753 "%s: request = %p, "
754 "current_sge_address = %p, "
755 "num_scatter = %d\n",
756 __func__,
757 request,
758 current_sge_address,
759 task->num_scatter);
760
761 if (!current_sge_address) /* First time through.. */
762 ret = task->scatter; /* always task->scatter */
763 else if (task->num_scatter == 0) /* Next element, if num_scatter == 0 */
764 ret = NULL; /* there is only one element. */
765 else
766 ret = sg_next(current_sge_address); /* sg_next returns NULL
767 * for the last element
768 */
769
770 dev_dbg(&request->isci_host->pdev->dev,
771 "%s: next sge address = %p\n",
772 __func__,
773 ret);
774
775 return ret;
776 }
777
/* Termination and task-management construction helpers (definitions live
 * outside this header).
 */
void
isci_terminate_pending_requests(struct isci_host *ihost,
				struct isci_remote_device *idev);
enum sci_status
scic_task_request_construct(struct scic_sds_controller *scic,
			    struct scic_sds_remote_device *sci_dev,
			    u16 io_tag,
			    struct scic_sds_request *sci_req);
enum sci_status
scic_task_request_construct_ssp(struct scic_sds_request *sci_req);
enum sci_status
scic_task_request_construct_sata(struct scic_sds_request *sci_req);
void
scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag);
void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req);
#endif /* !defined(_ISCI_REQUEST_H_) */