drivers/scsi/aic94xx/aic94xx_tmf.c
/*
 * Aic94xx Task Management Functions
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file is part of the aic94xx driver.
 *
 * The aic94xx driver is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * The aic94xx driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the aic94xx driver; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/spinlock.h>
#include <linux/gfp.h>
#include "aic94xx.h"
#include "aic94xx_sas.h"
#include "aic94xx_hwi.h"

/* ---------- Internal enqueue ---------- */

static int asd_enqueue_internal(struct asd_ascb *ascb,
		void (*tasklet_complete)(struct asd_ascb *,
					 struct done_list_struct *),
		void (*timed_out)(unsigned long))
{
	int res;

	ascb->tasklet_complete = tasklet_complete;
	ascb->uldd_timer = 1;

	ascb->timer.data = (unsigned long) ascb;
	ascb->timer.function = timed_out;
	ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;

	add_timer(&ascb->timer);

	res = asd_post_ascb_list(ascb->ha, ascb, 1);
	if (unlikely(res))
		del_timer(&ascb->timer);
	return res;
}
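
/*
 * Pattern used by everything below: the caller allocates an ascb, points
 * ascb->uldd_task at a private status struct and ascb->completion at an
 * on-stack completion, then posts the SCB through asd_enqueue_internal()
 * and waits.  Either the tasklet-complete callback or the timeout handler
 * fills in the status and completes the waiter; the timer armed here is
 * deleted when the response arrives (or immediately if posting fails).
 */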

/* ---------- CLEAR NEXUS ---------- */

struct tasklet_completion_status {
	int dl_opcode;
	int tmf_state;
	u8 tag_valid:1;
	__be16 tag;
};

#define DECLARE_TCS(tcs) \
	struct tasklet_completion_status tcs = { \
		.dl_opcode = 0, \
		.tmf_state = 0, \
		.tag_valid = 0, \
		.tag = 0, \
	}

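/*
 * A DECLARE_TCS() instance lives on the caller's stack and is handed to
 * the completion path via ascb->uldd_task: the tasklet-complete and
 * timed-out handlers below write dl_opcode/tmf_state (and the tag for
 * SSP responses) before completing ascb->completion, after which the
 * waiting caller reads the result.
 */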

static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
					     struct done_list_struct *dl)
{
	struct tasklet_completion_status *tcs = ascb->uldd_task;
	ASD_DPRINTK("%s: here\n", __func__);
	if (!del_timer(&ascb->timer)) {
		ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
		return;
	}
	ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
	tcs->dl_opcode = dl->opcode;
	complete(ascb->completion);
	asd_ascb_free(ascb);
}

static void asd_clear_nexus_timedout(unsigned long data)
{
	struct asd_ascb *ascb = (void *)data;
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	ASD_DPRINTK("%s: here\n", __func__);
	tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}

#define CLEAR_NEXUS_PRE \
	struct asd_ascb *ascb; \
	struct scb *scb; \
	int res; \
	DECLARE_COMPLETION_ONSTACK(completion); \
	DECLARE_TCS(tcs); \
	\
	ASD_DPRINTK("%s: PRE\n", __func__); \
	res = 1; \
	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
	if (!ascb) \
		return -ENOMEM; \
	\
	ascb->completion = &completion; \
	ascb->uldd_task = &tcs; \
	scb = ascb->scb; \
	scb->header.opcode = CLEAR_NEXUS

#define CLEAR_NEXUS_POST \
	ASD_DPRINTK("%s: POST\n", __func__); \
	res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
				   asd_clear_nexus_timedout); \
	if (res) \
		goto out_err; \
	ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
	wait_for_completion(&completion); \
	res = tcs.dl_opcode; \
	if (res == TC_NO_ERROR) \
		res = TMF_RESP_FUNC_COMPLETE; \
	return res; \
out_err: \
	asd_ascb_free(ascb); \
	return res
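
/*
 * CLEAR_NEXUS_PRE/POST bracket each clear-nexus variant below: PRE
 * allocates an ascb and fills in the common SCB header, the variant
 * sets the nexus type and any extra fields, and POST posts the SCB,
 * waits for the completion and maps TC_NO_ERROR to
 * TMF_RESP_FUNC_COMPLETE.  Note that PRE declares the locals and POST
 * contains the function's return statements.
 */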

int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_ADAPTER;
	CLEAR_NEXUS_POST;
}

int asd_clear_nexus_port(struct asd_sas_port *port)
{
	struct asd_ha_struct *asd_ha = port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_PORT;
	scb->clear_nexus.conn_mask = port->phy_mask;
	CLEAR_NEXUS_POST;
}

enum clear_nexus_phase {
	NEXUS_PHASE_PRE,
	NEXUS_PHASE_POST,
	NEXUS_PHASE_RESUME,
};

static int asd_clear_nexus_I_T(struct domain_device *dev,
			       enum clear_nexus_phase phase)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T;
	switch (phase) {
	case NEXUS_PHASE_PRE:
		scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
		break;
	case NEXUS_PHASE_POST:
		scb->clear_nexus.flags = SEND_Q | NOTINQ;
		break;
	case NEXUS_PHASE_RESUME:
		scb->clear_nexus.flags = RESUME_TX;
	}
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}

int asd_I_T_nexus_reset(struct domain_device *dev)
{
	int res, tmp_res, i;
	struct sas_phy *phy = sas_get_local_phy(dev);
	/* Standard mandates link reset for ATA (type 0) and
	 * hard reset for SSP (type 1) */
	int reset_type = (dev->dev_type == SAS_SATA_DEV ||
			  (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;

	asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
	/* send a hard reset */
	ASD_DPRINTK("sending %s reset to %s\n",
		    reset_type ? "hard" : "soft", dev_name(&phy->dev));
	res = sas_phy_reset(phy, reset_type);
	if (res == TMF_RESP_FUNC_COMPLETE || res == -ENODEV) {
		/* wait for the maximum settle time */
		msleep(500);
		/* clear all outstanding commands (keep nexus suspended) */
		asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
	}
	for (i = 0 ; i < 3; i++) {
		tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
		if (tmp_res == TC_RESUME)
			goto out;
		msleep(500);
	}

	/* This is a bit of a problem: the sequencer is still suspended
	 * and is refusing to resume. Hope it will resume on a bigger hammer
	 * or the disk is lost */
	dev_printk(KERN_ERR, &phy->dev,
		   "Failed to resume nexus after reset 0x%x\n", tmp_res);

	res = TMF_RESP_FUNC_FAILED;
 out:
	sas_put_local_phy(phy);
	return res;
}

static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T_L;
	scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
	memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}

static int asd_clear_nexus_tag(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TAG;
	memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
	scb->clear_nexus.ssp_task.tag = tascb->tag;
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							   task->dev->lldd_dev);
	CLEAR_NEXUS_POST;
}

static int asd_clear_nexus_index(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TRANS_CX;
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							   task->dev->lldd_dev);
	scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
	CLEAR_NEXUS_POST;
}

/* ---------- TMFs ---------- */

static void asd_tmf_timedout(unsigned long data)
{
	struct asd_ascb *ascb = (void *) data;
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	ASD_DPRINTK("tmf timed out\n");
	tcs->tmf_state = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}

static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
				    struct done_list_struct *dl)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	unsigned long flags;
	struct tc_resp_sb_struct {
		__le16 index_escb;
		u8 len_lsb;
		u8 flags;
	} __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;

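	/* Bits 6:4 of the status block flags hold the 1-based index of the
	 * empty data buffer (EDB) carrying the response frame; convert it
	 * to an index relative to the parent ESCB's first EDB. */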
	int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
	struct asd_ascb *escb;
	struct asd_dma_tok *edb;
	struct ssp_frame_hdr *fh;
	struct ssp_response_iu *ru;
	int res = TMF_RESP_FUNC_FAILED;

	ASD_DPRINTK("tmf resp tasklet\n");

	spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
	escb = asd_tc_index_find(&asd_ha->seq,
				 (int)le16_to_cpu(resp_sb->index_escb));
	spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);

	if (!escb) {
		ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
		return res;
	}

	edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
	ascb->tag = *(__be16 *)(edb->vaddr+4);
	fh = edb->vaddr + 16;
	ru = edb->vaddr + 16 + sizeof(*fh);
	res = ru->status;
	if (ru->datapres == 1)	/* Response data present */
		res = ru->resp_data[3];
#if 0
	ascb->tag = fh->tag;
#endif
	ascb->tag_valid = 1;

	asd_invalidate_edb(escb, edb_id);
	return res;
}

static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
				     struct done_list_struct *dl)
{
	struct tasklet_completion_status *tcs;

	if (!del_timer(&ascb->timer))
		return;

	tcs = ascb->uldd_task;
	ASD_DPRINTK("tmf tasklet complete\n");

	tcs->dl_opcode = dl->opcode;

	if (dl->opcode == TC_SSP_RESP) {
		tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
		tcs->tag_valid = ascb->tag_valid;
		tcs->tag = ascb->tag;
	}

	complete(ascb->completion);
	asd_ascb_free(ascb);
}

static int asd_clear_nexus(struct sas_task *task)
{
	int res = TMF_RESP_FUNC_FAILED;
	int leftover;
	struct asd_ascb *tascb = task->lldd_task;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;

	tascb->completion = &completion;

	ASD_DPRINTK("task not done, clearing nexus\n");
	if (tascb->tag_valid)
		res = asd_clear_nexus_tag(task);
	else
		res = asd_clear_nexus_index(task);
	leftover = wait_for_completion_timeout(&completion,
					       AIC94XX_SCB_TIMEOUT);
	tascb->completion = NULL;
	ASD_DPRINTK("came back from clear nexus\n");
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (leftover < 1)
		res = TMF_RESP_FUNC_FAILED;
	if (task->task_state_flags & SAS_TASK_STATE_DONE)
		res = TMF_RESP_FUNC_COMPLETE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	return res;
}

/**
 * asd_abort_task -- ABORT TASK TMF
 * @task: the task to be aborted
 *
 * Before calling ABORT TASK the task state flags should be ORed with
 * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
 * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
 *
 * Implements the ABORT TASK TMF, I_T_L_Q nexus.
 * Returns: SAS TMF responses (see sas_task.h),
 *          -ENOMEM,
 *          -SAS_QUEUE_FULL.
 *
 * When ABORT TASK returns, the caller of ABORT TASK checks first the
 * task->task_state_flags, and then the return value of ABORT TASK.
 *
 * If the task has task state bit SAS_TASK_STATE_DONE set, then the
 * task was completed successfully prior to it being aborted. The
 * caller of ABORT TASK has responsibility to call task->task_done()
 * xor free the task, depending on their framework. The return code
 * is TMF_RESP_FUNC_FAILED in this case.
 *
 * Else the SAS_TASK_STATE_DONE bit is not set,
 *	If the return code is TMF_RESP_FUNC_COMPLETE, then
 *		the task was aborted successfully. The caller of
 *		ABORT TASK has responsibility to call task->task_done()
 *		to finish the task, xor free the task depending on their
 *		framework.
 *	else
 *		the ABORT TASK returned some kind of error. The task
 *		was _not_ cancelled. Nothing can be assumed.
 *		The caller of ABORT TASK may wish to retry.
 */
int asd_abort_task(struct sas_task *task)
{
	struct asd_ascb *tascb = task->lldd_task;
	struct asd_ha_struct *asd_ha = tascb->ha;
	int res = 1;
	unsigned long flags;
	struct asd_ascb *ascb = NULL;
	struct scb *scb;
	int leftover;
	DECLARE_TCS(tcs);
	DECLARE_COMPLETION_ONSTACK(completion);
	DECLARE_COMPLETION_ONSTACK(tascb_completion);

	tascb->completion = &tascb_completion;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		res = TMF_RESP_FUNC_COMPLETE;
		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
	if (!ascb)
		return -ENOMEM;

	ascb->uldd_task = &tcs;
	ascb->completion = &completion;
	scb = ascb->scb;
	scb->header.opcode = SCB_ABORT_TASK;

	switch (task->task_proto) {
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
		scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
		break;
	case SAS_PROTOCOL_SSP:
		scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */
		scb->abort_task.proto_conn_rate |= task->dev->linkrate;
		break;
	case SAS_PROTOCOL_SMP:
		break;
	default:
		break;
	}

	if (task->task_proto == SAS_PROTOCOL_SSP) {
		scb->abort_task.ssp_frame.frame_type = SSP_TASK;
		memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
		       task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
		memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
		       task->dev->port->ha->hashed_sas_addr,
		       HASHED_SAS_ADDR_SIZE);
		scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);

		memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
		scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
		scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
	}

	scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
	scb->abort_task.conn_handle = cpu_to_le16(
		(u16)(unsigned long)task->dev->lldd_dev);
	scb->abort_task.retry_count = 1;
	scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
	scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);

	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
				   asd_tmf_timedout);
	if (res)
		goto out_free;
	wait_for_completion(&completion);
	ASD_DPRINTK("tmf came back\n");

	tascb->tag = tcs.tag;
	tascb->tag_valid = tcs.tag_valid;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		res = TMF_RESP_FUNC_COMPLETE;
		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (tcs.dl_opcode == TC_SSP_RESP) {
		/* The task to be aborted has been sent to the device.
		 * We got a Response IU for the ABORT TASK TMF. */
		if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
			res = asd_clear_nexus(task);
		else
			res = tcs.tmf_state;
	} else if (tcs.dl_opcode == TC_NO_ERROR &&
		   tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
		/* timeout */
		res = TMF_RESP_FUNC_FAILED;
	} else {
		/* In the following we assume that the managing layer
		 * will _never_ make a mistake, when issuing ABORT
		 * TASK.
		 */
		switch (tcs.dl_opcode) {
		default:
			res = asd_clear_nexus(task);
			/* fallthrough */
		case TC_NO_ERROR:
			break;
			/* The task hasn't been sent to the device xor
			 * we never got a (sane) Response IU for the
			 * ABORT TASK TMF.
			 */
		case TF_NAK_RECV:
			res = TMF_RESP_INVALID_FRAME;
			break;
		case TF_TMF_TASK_DONE:	/* done but not reported yet */
			res = TMF_RESP_FUNC_FAILED;
			leftover =
				wait_for_completion_timeout(&tascb_completion,
							    AIC94XX_SCB_TIMEOUT);
			spin_lock_irqsave(&task->task_state_lock, flags);
			if (leftover < 1)
				res = TMF_RESP_FUNC_FAILED;
			if (task->task_state_flags & SAS_TASK_STATE_DONE)
				res = TMF_RESP_FUNC_COMPLETE;
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			break;
		case TF_TMF_NO_TAG:
		case TF_TMF_TAG_FREE: /* the tag is in the free list */
		case TF_TMF_NO_CONN_HANDLE: /* no such device */
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
			res = TMF_RESP_FUNC_ESUPP;
			break;
		}
	}
 out_done:
	tascb->completion = NULL;
	if (res == TMF_RESP_FUNC_COMPLETE) {
		task->lldd_task = NULL;
		mb();
		asd_ascb_free(tascb);
	}
	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
	return res;

 out_free:
	asd_ascb_free(ascb);
	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
	return res;
}
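
/*
 * Illustrative caller sketch only (not part of this driver); "flags" and
 * "res" are local placeholders.  Per the comment above asd_abort_task(),
 * the managing layer marks the task aborted under task_state_lock before
 * issuing the TMF and re-checks SAS_TASK_STATE_DONE afterwards:
 *
 *	spin_lock_irqsave(&task->task_state_lock, flags);
 *	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
 *		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
 *	spin_unlock_irqrestore(&task->task_state_lock, flags);
 *
 *	res = asd_abort_task(task);
 *
 * If SAS_TASK_STATE_DONE is now set, the task completed before it could
 * be aborted and the caller completes or frees it; else a return of
 * TMF_RESP_FUNC_COMPLETE means the task was aborted (again complete or
 * free it); any other code means it was not cancelled and the caller may
 * wish to retry.
 */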

/**
 * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
 * @dev: pointer to struct domain_device of interest
 * @lun: pointer to u8[8] which is the LUN
 * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
 * @index: the transaction context of the task to be queried if QT TMF
 *
 * This function is used to send ABORT TASK SET, CLEAR ACA,
 * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
 *
 * No SCBs should be queued to the I_T_L nexus when this SCB is
 * pending.
 *
 * Returns: TMF response code (see sas_task.h or the SAS spec)
 */
static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
				int tmf, int index)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
	struct asd_ascb *ascb;
	int res = 1;
	struct scb *scb;
	DECLARE_COMPLETION_ONSTACK(completion);
	DECLARE_TCS(tcs);

	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
	if (!ascb)
		return -ENOMEM;

	ascb->completion = &completion;
	ascb->uldd_task = &tcs;
	scb = ascb->scb;

	if (tmf == TMF_QUERY_TASK)
		scb->header.opcode = QUERY_SSP_TASK;
	else
		scb->header.opcode = INITIATE_SSP_TMF;

	scb->ssp_tmf.proto_conn_rate = (1 << 4); /* SSP */
	scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
	/* SSP frame header */
	scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
	memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
	       dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
	/* SSP Task IU */
	memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
	scb->ssp_tmf.ssp_task.tmf = tmf;

	scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
	scb->ssp_tmf.conn_handle = cpu_to_le16((u16)(unsigned long)
					       dev->lldd_dev);
	scb->ssp_tmf.retry_count = 1;
	scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
	if (tmf == TMF_QUERY_TASK)
		scb->ssp_tmf.index = cpu_to_le16(index);

	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
				   asd_tmf_timedout);
	if (res)
		goto out_err;
	wait_for_completion(&completion);

	switch (tcs.dl_opcode) {
	case TC_NO_ERROR:
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_NAK_RECV:
		res = TMF_RESP_INVALID_FRAME;
		break;
	case TF_TMF_TASK_DONE:
		res = TMF_RESP_FUNC_FAILED;
		break;
	case TF_TMF_NO_TAG:
	case TF_TMF_TAG_FREE: /* the tag is in the free list */
	case TF_TMF_NO_CONN_HANDLE: /* no such device */
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
		res = TMF_RESP_FUNC_ESUPP;
		break;
	default:
		/* Allow TMF response codes to propagate upwards */
		res = tcs.dl_opcode;
		break;
	}
	return res;
out_err:
	asd_ascb_free(ascb);
	return res;
}
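
/*
 * The wrappers below all follow the same pattern: issue the TMF via
 * asd_initiate_ssp_tmf() and, if the target reports success, clear the
 * I_T_L nexus in the sequencer so any commands affected by the TMF are
 * flushed from the queues.
 */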

int asd_abort_task_set(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

int asd_clear_aca(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

int asd_clear_task_set(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

int asd_lu_reset(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

/**
 * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
 * @task: pointer to sas_task struct of interest
 *
 * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
 * or TMF_RESP_FUNC_SUCC if the task is in the task set.
 *
 * Normally the management layer sets the task to aborted state,
 * and then calls query task and then abort task.
 */
int asd_query_task(struct sas_task *task)
{
	struct asd_ascb *ascb = task->lldd_task;
	int index;

	if (ascb) {
		index = ascb->tc_index;
		return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
					    TMF_QUERY_TASK, index);
	}
	return TMF_RESP_FUNC_COMPLETE;
}
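
/*
 * Illustrative sequence only (not part of this driver): as noted above,
 * an error-handling/management layer typically marks the task aborted
 * and then uses QUERY TASK to decide whether ABORT TASK is still needed.
 * Roughly:
 *
 *	res = asd_query_task(task);
 *	if (res == TMF_RESP_FUNC_SUCC)
 *		res = asd_abort_task(task);
 *
 * A return of TMF_RESP_FUNC_COMPLETE from the query means the task has
 * already left the task set, so no abort is required.
 */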