target: Export transport_generic_request_failure symbol
drivers/target/target_core_transport.c
/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static int sub_api_initialized;

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_t failed\n");
		goto out_free_tg_pt_gp_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_tg_pt_gp_mem_cache;

	return 0;

out_free_tg_pt_gp_mem_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
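
/*
 * Illustrative only (added commentary, not part of the original file):
 * a caller that needs a unique index for a new object simply does, e.g.:
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 *
 * The per-type counter behind scsi_mib_index_lock guarantees the returned
 * values are unique and monotonically increasing for each scsi_index_t.
 */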

static void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}

void transport_subsystem_check_init(void)
{
	int ret;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		pr_err("Unable to load target_core_stgt\n");

	sub_api_initialized = 1;
	return;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
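
/*
 * Illustrative sketch only (added commentary, not part of the original
 * file): a fabric module typically pairs transport_init_session() with one
 * of the registration helpers below once its I_T nexus is established.
 * The variable names here are hypothetical:
 *
 *	struct se_session *sess;
 *
 *	sess = transport_init_session();
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 *	transport_register_session(se_tpg, se_nacl, sess, fabric_sess_ptr);
 */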

/*
 * Called with spin_lock_bh(&se_tpg->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	spin_lock_bh(&se_tpg->session_lock);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_bh(&se_tpg->session_lock);
}
EXPORT_SYMBOL(transport_register_session);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct se_node_acl *se_nacl;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
		if (se_nacl->dynamic_node_acl) {
			if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
					se_tpg)) {
				list_del(&se_nacl->acl_list);
				se_tpg->num_node_acls--;
				spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

				core_tpg_wait_for_nacl_pr_ref(se_nacl);
				core_free_device_list_for_node(se_nacl, se_tpg);
				se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
						se_nacl);
				spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
			}
		}
		spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
	}

	transport_free_session(se_sess);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);

/*
 * Called with cmd->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task;
	unsigned long flags;

	if (!dev)
		return;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (task->task_flags & TF_ACTIVE)
			continue;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		if (task->t_state_active) {
			pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
				cmd->se_tfo->get_task_tag(cmd), dev, task);

			list_del(&task->t_state_list);
			atomic_dec(&cmd->t_task_cdbs_ex_left);
			task->t_state_active = false;
		}
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
	}

}

/* transport_cmd_check_stop():
 *
 * 'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
 * 'transport_off = 2' determines if task_dev_state should be removed.
 *
 * A non-zero u8 t_state sets cmd->t_state.
 * Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if IOCTL context caller is requesting the stopping of this
	 * command for LUN shutdown purposes.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
		 * to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2) {
			transport_all_task_dev_remove_state(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release
			 * their internally allocated I/O reference now and
			 * struct se_cmd now.
			 *
			 * Fabric modules are expected to return '1' here if the
			 * se_cmd being passed is released at this point,
			 * or zero if not being released.
			 */
			if (cmd->se_tfo->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&cmd->t_state_lock, flags);

				return cmd->se_tfo->check_stop_free(cmd);
			}
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (!list_empty(&cmd->se_lun_node))
		list_del_init(&cmd->se_lun_node);
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove) {
		transport_remove_cmd_from_queue(cmd);
		transport_put_cmd(cmd);
	}
}

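/*
 * Descriptive note (added commentary, not in the original file): the helper
 * below hands a command to the per-device processing thread.  It optionally
 * sets a new t_state plus CMD_T_ACTIVE, re-links a command that is already
 * on the queue, can queue at the head for QUEUE_FULL retries, and finally
 * wakes the thread waiting on qobj->thread_wq.
 */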
static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
		bool at_head)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	unsigned long flags;

	if (t_state) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->t_state = t_state;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);

	/* If the cmd is already on the list, remove it before we add it */
	if (!list_empty(&cmd->se_queue_node))
		list_del(&cmd->se_queue_node);
	else
		atomic_inc(&qobj->queue_cnt);

	if (at_head)
		list_add(&cmd->se_queue_node, &qobj->qobj_list);
	else
		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
	cmd->transport_state |= CMD_T_QUEUED;
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	wake_up_interruptible(&qobj->thread_wq);
}

AG
586static struct se_cmd *
587transport_get_cmd_from_queue(struct se_queue_obj *qobj)
c66ac9db 588{
5951146d 589 struct se_cmd *cmd;
c66ac9db
NB
590 unsigned long flags;
591
592 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
593 if (list_empty(&qobj->qobj_list)) {
594 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
595 return NULL;
596 }
5951146d 597 cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
c66ac9db 598
7d680f3b 599 cmd->transport_state &= ~CMD_T_QUEUED;
79a7fef2 600 list_del_init(&cmd->se_queue_node);
c66ac9db
NB
601 atomic_dec(&qobj->queue_cnt);
602 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
603
5951146d 604 return cmd;
c66ac9db
NB
605}
606
3df8d40b 607static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
c66ac9db 608{
3df8d40b 609 struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
c66ac9db
NB
610 unsigned long flags;
611
612 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
7d680f3b 613 if (!(cmd->transport_state & CMD_T_QUEUED)) {
c66ac9db
NB
614 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
615 return;
616 }
7d680f3b 617 cmd->transport_state &= ~CMD_T_QUEUED;
79a7fef2
RD
618 atomic_dec(&qobj->queue_cnt);
619 list_del_init(&cmd->se_queue_node);
c66ac9db 620 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
c66ac9db
NB
621}
622
623/*
624 * Completion function used by TCM subsystem plugins (such as FILEIO)
625 * for queueing up response from struct se_subsystem_api->do_task()
626 */
627void transport_complete_sync_cache(struct se_cmd *cmd, int good)
628{
a1d8b49a 629 struct se_task *task = list_entry(cmd->t_task_list.next,
c66ac9db
NB
630 struct se_task, t_list);
631
632 if (good) {
633 cmd->scsi_status = SAM_STAT_GOOD;
634 task->task_scsi_status = GOOD;
635 } else {
636 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
03e98c9e
NB
637 task->task_se_cmd->scsi_sense_reason =
638 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
639
c66ac9db
NB
640 }
641
642 transport_complete_task(task, good);
643}
644EXPORT_SYMBOL(transport_complete_sync_cache);
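
/*
 * Illustrative only (added commentary, not part of the original file): a
 * backend plugin's SYNCHRONIZE_CACHE completion path would typically report
 * its result as, e.g.:
 *
 *	transport_complete_sync_cache(cmd, ret == 0);
 *
 * where a non-zero 'good' produces SAM_STAT_GOOD and zero produces a
 * CHECK_CONDITION with LOGICAL UNIT COMMUNICATION FAILURE sense.
 */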

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd);
}

/* transport_complete_task():
 *
 * Called from interrupt and non interrupt context depending
 * on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	task->task_flags &= ~TF_ACTIVE;

	/*
	 * See if any sense data exists, if so set the TASK_SENSE flag.
	 * Also check for any other post completion work that needs to be
	 * done by the plugins.
	 */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(task) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			task->task_flags |= TF_HAS_SENSE;
			success = 1;
		}
	}

	/*
	 * See if we are waiting for outstanding struct se_task
	 * to complete for an exception condition
	 */
	if (task->task_flags & TF_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&task->task_stop_comp);
		return;
	}

	if (!success)
		cmd->transport_state |= CMD_T_FAILED;

	/*
	 * Decrement the outstanding t_task_cdbs_left count.  The last
	 * struct se_task from struct se_cmd will complete itself into the
	 * device queue depending upon int success.
	 */
	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	if (cmd->transport_state & CMD_T_FAILED) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		cmd->transport_state |= CMD_T_COMPLETE;
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(transport_complete_task);

/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * struct se_device
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * head of the struct se_device->execute_task_list, and task_prev
	 * after that for each subsequent task
	 */
	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);

		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
				" in execution queue\n",
				task->task_se_cmd->t_task_cdb[0]);
		return 1;
	}
	/*
	 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
	 * transitioned from Dormant -> Active state, and are added to the end
	 * of the struct se_device->execute_task_list
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}

/* __transport_add_task_to_execute_queue():
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	if (task->t_state_active)
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well.  Running with SAM Task Attribute emulation
	 * will always return head_of_queue == 0 here
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
				&task_prev->t_state_list :
				&dev->state_task_list);
	else
		list_add_tail(&task->t_state_list, &dev->state_task_list);

	task->t_state_active = true;

	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
		task, dev);
}

static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		spin_lock(&dev->execute_task_lock);
		if (!task->t_state_active) {
			list_add_tail(&task->t_state_list,
				      &dev->state_task_list);
			task->t_state_active = true;

			pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
				task->task_se_cmd->se_tfo->get_task_tag(
				task->task_se_cmd), task, dev);
		}
		spin_unlock(&dev->execute_task_lock);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task, *task_prev = NULL;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (!list_empty(&task->t_execute_list))
			continue;
		/*
		 * __transport_add_task_to_execute_queue() handles the
		 * SAM Task Attribute emulation if enabled
		 */
		__transport_add_task_to_execute_queue(task, task_prev, dev);
		task_prev = task;
	}
}

static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	struct se_device *dev = cmd->se_dev;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__transport_add_tasks_from_cmd(cmd);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

void __transport_remove_task_from_execute_queue(struct se_task *task,
		struct se_device *dev)
{
	list_del_init(&task->t_execute_list);
	atomic_dec(&dev->execute_tasks);
}

static void transport_remove_task_from_execute_queue(
	struct se_task *task,
	struct se_device *dev)
{
	unsigned long flags;

	if (WARN_ON(list_empty(&task->t_execute_list)))
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__transport_remove_task_from_execute_queue(task, dev);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */

static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
				qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		transport_add_cmd_to_queue(cmd, cmd->t_state, true);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
		atomic_read(&dev->execute_tasks), dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
	*bl += sprintf(b + *bl, "        ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
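
/*
 * Illustrative only (added commentary, not part of the original file): for
 * each designation descriptor 'desc' found in an INQUIRY VPD page 0x83
 * response, a caller would typically decode it with the helpers above in
 * this order:
 *
 *	transport_set_vpd_proto_id(vpd, desc);
 *	transport_set_vpd_assoc(vpd, desc);
 *	transport_set_vpd_ident_type(vpd, desc);
 *	transport_set_vpd_ident(vpd, desc);
 */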

static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", dev->transport->name,
		dev->transport->get_device_rev(dev));
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
	pr_debug("                 ANSI SCSI revision: %02x\n",
		dev->transport->get_device_rev(dev));
}

struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int force_pt;
	struct se_device *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!dev) {
		pr_err("Unable to allocate memory for se_dev_t\n");
		return NULL;
	}

	transport_init_queue_obj(&dev->dev_queue_obj);
	dev->dev_flags = device_flags;
	dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr = transport_dev;
	dev->se_hba = hba;
	dev->se_sub_dev = se_dev;
	dev->transport = transport;
	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->execute_task_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_task_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->dev_status_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	atomic_set(&dev->dev_ordered_id, 0);

	se_dev_set_default_attribs(dev, dev_limits);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();
	spin_lock_init(&dev->stats_lock);

	spin_lock(&hba->device_lock);
	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	/*
	 * Setup the SAM Task Attribute emulation for struct se_device
	 */
	core_setup_task_attr_emulation(dev);
	/*
	 * Force PR and ALUA passthrough emulation with internal object use.
	 */
	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
	/*
	 * Setup the Reservations infrastructure for struct se_device
	 */
	core_setup_reservations(dev, force_pt);
	/*
	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
	 */
	if (core_setup_alua(dev, force_pt) < 0)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->process_thread = kthread_run(transport_processing_thread, dev,
					  "LIO_%s", dev->transport->name);
	if (IS_ERR(dev->process_thread)) {
		pr_err("Unable to create kthread: LIO_%s\n",
			dev->transport->name);
		goto out;
	}
	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 * This is required so that transport_get_inquiry() copies these
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!inquiry_prod || !inquiry_rev) {
			pr_err("All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto out;
		}

		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);

	return dev;
out:
	kthread_stop(dev->process_thread);

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);

/* transport_generic_prepare_cdb():
 *
 * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
 * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
 * The point of this is since we are mapping iSCSI LUNs to
 * SCSI Target IDs having a non-zero LUN in the CDB will throw the
 * devices and HBAs for a loop.
 */
static inline void transport_generic_prepare_cdb(
	unsigned char *cdb)
{
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}
}

static struct se_task *
transport_generic_get_task(struct se_cmd *cmd,
		enum dma_data_direction data_direction)
{
	struct se_task *task;
	struct se_device *dev = cmd->se_dev;

	task = dev->transport->alloc_task(cmd->t_task_cdb);
	if (!task) {
		pr_err("Unable to allocate struct se_task\n");
		return NULL;
	}

	INIT_LIST_HEAD(&task->t_list);
	INIT_LIST_HEAD(&task->t_execute_list);
	INIT_LIST_HEAD(&task->t_state_list);
	init_completion(&task->task_stop_comp);
	task->task_se_cmd = cmd;
	task->task_data_direction = data_direction;

	return task;
}

static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_node);
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_queue_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->t_task_list);
	init_completion(&cmd->transport_lun_fe_stop_comp);
	init_completion(&cmd->transport_lun_stop_comp);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	spin_lock_init(&cmd->t_state_lock);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -EINVAL;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
	smp_mb__after_atomic_inc();
	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			cmd->se_dev->transport->name);
	return 0;
}

/* transport_generic_allocate_tasks():
 *
 * Called from fabric RX Thread.
 */
int transport_generic_allocate_tasks(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	int ret;

	transport_generic_prepare_cdb(cdb);
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason =
					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return -ENOMEM;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
	/*
	 * Setup the received CDB based on SCSI defined opcodes and
	 * perform unit attention, persistent reservations and ALUA
	 * checks for virtual device backends.  The cmd->t_task_cdb
	 * pointer is expected to be setup before we reach this point.
	 */
	ret = transport_generic_cmd_sequencer(cmd, cdb);
	if (ret < 0)
		return ret;
	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(transport_generic_allocate_tasks);
1568
695434e1
NB
1569/*
1570 * Used by fabric module frontends to queue tasks directly.
1571 * Many only be used from process context only
1572 */
1573int transport_handle_cdb_direct(
1574 struct se_cmd *cmd)
1575{
dd8ae59d
NB
1576 int ret;
1577
695434e1
NB
1578 if (!cmd->se_lun) {
1579 dump_stack();
6708bb27 1580 pr_err("cmd->se_lun is NULL\n");
695434e1
NB
1581 return -EINVAL;
1582 }
1583 if (in_interrupt()) {
1584 dump_stack();
6708bb27 1585 pr_err("transport_generic_handle_cdb cannot be called"
695434e1
NB
1586 " from interrupt context\n");
1587 return -EINVAL;
1588 }
dd8ae59d 1589 /*
7d680f3b 1590 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
dd8ae59d
NB
1591 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
1592 * in existing usage to ensure that outstanding descriptors are handled
d14921d6 1593 * correctly during shutdown via transport_wait_for_tasks()
dd8ae59d
NB
1594 *
1595 * Also, we don't take cmd->t_state_lock here as we only expect
1596 * this to be called for initial descriptor submission.
1597 */
1598 cmd->t_state = TRANSPORT_NEW_CMD;
7d680f3b
CH
1599 cmd->transport_state |= CMD_T_ACTIVE;
1600
dd8ae59d
NB
1601 /*
1602 * transport_generic_new_cmd() is already handling QUEUE_FULL,
1603 * so follow TRANSPORT_NEW_CMD processing thread context usage
1604 * and call transport_generic_request_failure() if necessary..
1605 */
1606 ret = transport_generic_new_cmd(cmd);
03e98c9e
NB
1607 if (ret < 0)
1608 transport_generic_request_failure(cmd);
1609
dd8ae59d 1610 return 0;
695434e1
NB
1611}
1612EXPORT_SYMBOL(transport_handle_cdb_direct);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 **/
void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	struct se_portal_group *se_tpg;
	int rc;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation. From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list. A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				se_cmd->scsi_sense_reason, 0);
		target_put_sess_cmd(se_sess, se_cmd);
		return;
	}
	/*
	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
	 * allocate the necessary tasks to complete the received CDB+data
	 */
	rc = transport_generic_allocate_tasks(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd);
		return;
	}
	/*
	 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
	 * for immediate execution of READs, otherwise wait for
	 * transport_generic_handle_data() to be called for WRITEs
	 * when fabric has filled the incoming buffer.
	 */
	transport_handle_cdb_direct(se_cmd);
	return;
}
EXPORT_SYMBOL(target_submit_cmd);
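
/*
 * Example: a minimal sketch (under #if 0, following this file's
 * convention for non-compiled code) of how a fabric driver might
 * dispatch a received SCSI command via target_submit_cmd().  The
 * my_fabric_* names and struct members are hypothetical and for
 * illustration only.
 */
#if 0
static void my_fabric_dispatch_cmd(struct my_fabric_cmd *fcmd)
{
	/* se_cmd is embedded in the fabric's per-command descriptor */
	target_submit_cmd(&fcmd->se_cmd, fcmd->se_sess, fcmd->cdb,
			fcmd->sense_buf, fcmd->unpacked_lun,
			fcmd->data_length, MSG_SIMPLE_TAG,
			fcmd->data_direction, 0);
}
#endif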

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 * for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @flags: flags for TMR submission from target_sc_flags_tables
 *
 * Callable from all contexts.
 **/
void target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u32 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, MSG_SIMPLE_TAG, sense);

	/* See target_submit_cmd for commentary */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));

	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, GFP_KERNEL);
	if (ret < 0) {
		dump_stack();
		/* FIXME XXX */
		return;
	}

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
		transport_send_check_condition_and_sense(se_cmd,
			se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	}
	transport_generic_handle_tmr(se_cmd);
}
EXPORT_SYMBOL(target_submit_tmr);
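
/*
 * Example: a hypothetical sketch (again under #if 0, not compiled) of a
 * fabric driver submitting a LUN reset; my_fabric_tmr and its members
 * are illustrative only.
 */
#if 0
static void my_fabric_handle_lun_reset(struct my_fabric_tmr *ftmr)
{
	target_submit_tmr(&ftmr->se_cmd, ftmr->se_sess, ftmr->sense_buf,
			ftmr->unpacked_lun, ftmr, TMR_LUN_RESET, 0);
}
#endif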

/*
 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
 * complete setup in TCM process context w/ TFO->new_cmd_map().
 */
int transport_generic_handle_cdb_map(
	struct se_cmd *cmd)
{
	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb_map);

/* transport_generic_handle_data():
 *
 *
 */
int transport_generic_handle_data(
	struct se_cmd *cmd)
{
	/*
	 * For the software fabric case, we assume the nexus is being
	 * failed/shutdown when signals are pending from the kthread context
	 * caller, so we return a failure. For the HW target mode case running
	 * in interrupt code, the signal_pending() check is skipped.
	 */
	if (!in_interrupt() && signal_pending(current))
		return -EPERM;
	/*
	 * If the received CDB has already been ABORTED by the generic
	 * target engine, we now call transport_check_aborted_status()
	 * to queue any delayed TASK_ABORTED status for the received CDB to the
	 * fabric module as we are expecting no further incoming DATA OUT
	 * sequences at this point.
	 */
	if (transport_check_aborted_status(cmd, 1) != 0)
		return 0;

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);

/* transport_generic_handle_tmr():
 *
 *
 */
int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

/*
 * If the task is active, request it to be stopped and sleep until it
 * has completed.
 */
bool target_stop_task(struct se_task *task, unsigned long *flags)
{
	struct se_cmd *cmd = task->task_se_cmd;
	bool was_active = false;

	if (task->task_flags & TF_ACTIVE) {
		task->task_flags |= TF_REQUEST_STOP;
		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

		pr_debug("Task %p waiting to complete\n", task);
		wait_for_completion(&task->task_stop_comp);
		pr_debug("Task %p stopped successfully\n", task);

		spin_lock_irqsave(&cmd->t_state_lock, *flags);
		atomic_dec(&cmd->t_task_cdbs_left);
		task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
		was_active = true;
	}

	return was_active;
}

static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int ret = 0;

	pr_debug("ITT[0x%08x] - Stopping tasks\n",
		cmd->se_tfo->get_task_tag(cmd));

	/*
	 * No tasks remain in the execution queue
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		pr_debug("Processing task %p\n", task);
		/*
		 * If the struct se_task has not been sent and is not active,
		 * remove the struct se_task from the execution queue.
		 */
		if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
			spin_unlock_irqrestore(&cmd->t_state_lock,
					flags);
			transport_remove_task_from_execute_queue(task,
					cmd->se_dev);

			pr_debug("Task %p removed from execute queue\n", task);
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			continue;
		}

		if (!target_stop_task(task, &flags)) {
			pr_debug("Task %p - did nothing\n", task);
			ret++;
		}
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return ret;
}

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd)
{
	int ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, cmd->scsi_sense_reason);
	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
		" CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
		cmd->t_task_list_num,
		atomic_read(&cmd->t_task_cdbs_left),
		atomic_read(&cmd->t_task_cdbs_sent),
		atomic_read(&cmd->t_task_cdbs_ex_left),
		(cmd->transport_state & CMD_T_ACTIVE) != 0,
		(cmd->transport_state & CMD_T_STOP) != 0,
		(cmd->transport_state & CMD_T_SENT) != 0);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	switch (cmd->scsi_sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
		break;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}
	/*
	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
	 * make the call to transport_send_check_condition_and_sense()
	 * directly. Otherwise expect the fabric to make the call to
	 * transport_send_check_condition_and_sense() after handling
	 * possible unsolicited write data payloads.
	 */
	ret = transport_send_check_condition_and_sense(cmd,
			cmd->scsi_sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	if (!transport_cmd_check_stop_to_fabric(cmd))
		;
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
EXPORT_SYMBOL(transport_generic_request_failure);
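
/*
 * With the symbol exported, a fabric module can route its own setup
 * failures through the same exception path that target-core uses
 * internally, e.g. (as target_submit_cmd() above already does):
 *
 *	if (transport_generic_allocate_tasks(se_cmd, cdb) < 0)
 *		transport_generic_request_failure(se_cmd);
 */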

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
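
/*
 * Worked example for the helpers above: a READ_10 CDB of
 * 28 00 00 12 34 56 00 00 08 00 carries its LBA big-endian in
 * cdb[2..5], so transport_lba_32() returns 0x00123456 (1193046);
 * the TRANSFER LENGTH (8) lives in cdb[7..8].
 */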

static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}

/*
 * Called from Fabric Module context from transport_execute_tasks()
 *
 * The return of this function determines if the tasks from struct se_cmd
 * get added to the execution queue in transport_execute_tasks(),
 * or are added to the delayed or ordered lists here.
 */
static inline int transport_execute_task_attr(struct se_cmd *cmd)
{
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 1;
	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * to allow the tasks of the passed struct se_cmd to be added to
	 * the front of the list.
	 */
	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		pr_debug("Added HEAD_OF_QUEUE for CDB:"
			" 0x%02x, se_ordered_id: %u\n",
			cmd->t_task_cdb[0],
			cmd->se_ordered_id);
		return 1;
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_inc(&cmd->se_dev->dev_ordered_sync);
		smp_mb__after_atomic_inc();

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
			" list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0],
			cmd->se_ordered_id);
		/*
		 * Add ORDERED command to tail of execution queue if
		 * no other older commands exist that need to be
		 * completed first.
		 */
		if (!atomic_read(&cmd->se_dev->simple_cmds))
			return 1;
	} else {
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc(&cmd->se_dev->simple_cmds);
		smp_mb__after_atomic_inc();
	}
	/*
	 * If one or more outstanding ORDERED task attributes exist, the
	 * dormant task(s) built for the passed struct se_cmd must wait on
	 * the delayed queue instead of becoming Active on this
	 * struct se_device.
	 */
	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
		/*
		 * Add cmd w/ tasks to delayed cmd queue that
		 * will be drained upon completion of HEAD_OF_QUEUE task.
		 */
		spin_lock(&cmd->se_dev->delayed_cmd_lock);
		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
		list_add_tail(&cmd->se_delayed_node,
				&cmd->se_dev->delayed_cmd_list);
		spin_unlock(&cmd->se_dev->delayed_cmd_lock);

		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
			" delayed CMD list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0], cmd->sam_task_attr,
			cmd->se_ordered_id);
		/*
		 * Return zero to let transport_execute_tasks() know
		 * not to add the delayed tasks to the execution list.
		 */
		return 0;
	}
	/*
	 * Otherwise, no ORDERED task attributes exist..
	 */
	return 1;
}
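
/*
 * Example walk-through of the attribute handling above: while an
 * ORDERED command is outstanding (dev_ordered_sync != 0), a newly
 * received SIMPLE command increments dev->simple_cmds but is parked on
 * delayed_cmd_list and this function returns 0; once
 * transport_complete_task_attr() later drains the list, the delayed
 * tasks are added back to the execution queue.
 */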

/*
 * Called from fabric module context in transport_generic_new_cmd() and
 * transport_generic_process_write()
 */
static int transport_execute_tasks(struct se_cmd *cmd)
{
	int add_tasks;
	struct se_device *se_dev = cmd->se_dev;
	/*
	 * Call transport_cmd_check_stop() to see if a fabric exception
	 * has occurred that prevents execution.
	 */
	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
		/*
		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
		 * attribute for the tasks of the received struct se_cmd CDB
		 */
		add_tasks = transport_execute_task_attr(cmd);
		if (!add_tasks)
			goto execute_tasks;
		/*
		 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
		 * adds associated se_tasks while holding dev->execute_task_lock
		 * before I/O dispatch to avoid a double spinlock access.
		 */
		__transport_execute_tasks(se_dev, cmd);
		return 0;
	}

execute_tasks:
	__transport_execute_tasks(se_dev, NULL);
	return 0;
}

/*
 * Called to check the struct se_device tcq depth window, and once open
 * pull a struct se_task from struct se_device->execute_task_list and
 * hand it to the backend for execution.
 *
 * Called from transport_processing_thread()
 */
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
{
	int error;
	struct se_cmd *cmd = NULL;
	struct se_task *task = NULL;
	unsigned long flags;

check_depth:
	spin_lock_irq(&dev->execute_task_lock);
	if (new_cmd != NULL)
		__transport_add_tasks_from_cmd(new_cmd);

	if (list_empty(&dev->execute_task_list)) {
		spin_unlock_irq(&dev->execute_task_lock);
		return 0;
	}
	task = list_first_entry(&dev->execute_task_list,
				struct se_task, t_execute_list);
	__transport_remove_task_from_execute_queue(task, dev);
	spin_unlock_irq(&dev->execute_task_lock);

	cmd = task->task_se_cmd;
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	task->task_flags |= (TF_ACTIVE | TF_SENT);
	atomic_inc(&cmd->t_task_cdbs_sent);

	if (atomic_read(&cmd->t_task_cdbs_sent) ==
	    cmd->t_task_list_num)
		cmd->transport_state |= CMD_T_SENT;

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->execute_task)
		error = cmd->execute_task(task);
	else
		error = dev->transport->do_task(task);
	if (error != 0) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		task->task_flags &= ~TF_ACTIVE;
		cmd->transport_state &= ~CMD_T_SENT;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		transport_stop_tasks_for_cmd(cmd);
		transport_generic_request_failure(cmd);
	}

	new_cmd = NULL;
	goto check_depth;

	return 0;
}

static inline u32 transport_get_sectors_6(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 8-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];

	/*
	 * Everything else assumes the TYPE_DISK Sector CDB location.
	 * Use 8-bit sector value. SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
type_disk:
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 16-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_10 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assumes the TYPE_DISK Sector CDB location.
	 * Use 16-bit sector value.
	 */
type_disk:
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_12 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assumes the TYPE_DISK Sector CDB location.
	 * Use 32-bit sector value.
	 */
type_disk:
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];

type_disk:
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];

}

static inline u32 transport_get_size(
	u32 sectors,
	unsigned char *cdb,
	struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		if (cdb[1] & 1) { /* sectors */
			return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
		} else /* bytes */
			return sectors;
	}
#if 0
	pr_debug("Returning block_size: %u, sectors: %u == %u for"
		" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
		dev->se_sub_dev->se_dev_attrib.block_size * sectors,
		dev->transport->name);
#endif
	return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
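
/*
 * Worked example: for a TYPE_DISK backend with a 512 byte block_size,
 * a WRITE_10 with TRANSFER LENGTH of 8 gives
 * transport_get_size(8, ...) == 8 * 512 == 4096 bytes expected.
 */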

static void transport_xor_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	int i;
	int count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg), KM_USER0);
		if (!addr)
			goto out;

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr, KM_USER0);
	}

out:
	kfree(buf);
}
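
/*
 * Byte-level example of step 3 above: if a block read in step 1
 * contains 0x5a and the data-out buffer contains 0x3c, the data-in
 * buffer receives 0x5a ^ 0x3c == 0x66.
 */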

/*
 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
 */
static int transport_get_sense_data(struct se_cmd *cmd)
{
	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
	struct se_device *dev = cmd->se_dev;
	struct se_task *task = NULL, *task_tmp;
	unsigned long flags;
	u32 offset = 0;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		if (!(task->task_flags & TF_HAS_SENSE))
			continue;

		if (!dev->transport->get_sense_buffer) {
			pr_err("dev->transport->get_sense_buffer"
				" is NULL\n");
			continue;
		}

		sense_buffer = dev->transport->get_sense_buffer(task);
		if (!sense_buffer) {
			pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
				" sense buffer for task with sense\n",
				cmd->se_tfo->get_task_tag(cmd), task);
			continue;
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);

		memcpy(&buffer[offset], sense_buffer,
				TRANSPORT_SENSE_BUFFER);
		cmd->scsi_status = task->task_scsi_status;
		/* Automatically padded */
		cmd->scsi_sense_length =
				(TRANSPORT_SENSE_BUFFER + offset);

		pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
			" and sense\n",
			dev->se_hba->hba_id, dev->transport->name,
			cmd->scsi_status);
		return 0;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return -1;
}

static inline long long transport_dev_end_lba(struct se_device *dev)
{
	return dev->transport->get_blocks(dev) + 1;
}

static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors;

	if (dev->transport->get_device_type(dev) != TYPE_DISK)
		return 0;

	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);

	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
		pr_err("LBA: %llu Sectors: %u exceeds"
			" transport_dev_end_lba(): %llu\n",
			cmd->t_task_lba, sectors,
			transport_dev_end_lba(dev));
		return -EINVAL;
	}

	return 0;
}

static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
{
	/*
	 * Determine if the received WRITE_SAME is used for direct
	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
	 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
	 * emulation for Linux/BLOCK discard with TCM/IBLOCK code.
	 */
	int passthrough = (dev->transport->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);

	if (!passthrough) {
		if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
			pr_err("WRITE_SAME PBDATA and LBDATA"
				" bits not supported for Block Discard"
				" Emulation\n");
			return -ENOSYS;
		}
		/*
		 * Currently for the emulated case we only accept
		 * tpws with the UNMAP=1 bit set.
		 */
		if (!(flags[0] & 0x08)) {
			pr_err("WRITE_SAME w/o UNMAP bit not"
				" supported for Block Discard Emulation\n");
			return -ENOSYS;
		}
	}

	return 0;
}
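
/*
 * Example for the non-passthrough checks above: a WRITE_SAME flags byte
 * of 0x08 (UNMAP=1, PBDATA=0, LBDATA=0) is accepted, while 0x00 (no
 * UNMAP) or 0x02/0x04 (LBDATA/PBDATA set) are rejected with -ENOSYS.
 */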

/* transport_generic_cmd_sequencer():
 *
 * Generic Command Sequencer that should work for most DAS transport
 * drivers.
 *
 * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
 * RX Thread.
 *
 * FIXME: Need to support other SCSI OPCODEs as well.
 */
static int transport_generic_cmd_sequencer(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	int ret = 0, sector_ret = 0, passthrough;
	u32 sectors = 0, size = 0, pr_reg_type = 0;
	u16 service_action;
	u8 alua_ascq = 0;
	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	if (core_scsi3_ua_check(cmd, cdb) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
		return -EINVAL;
	}
	/*
	 * Check status of Asymmetric Logical Unit Assignment port
	 */
	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
	if (ret != 0) {
		/*
		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
		 * The ALUA additional sense code qualifier (ASCQ) is determined
		 * by the ALUA primary or secondary access state.
		 */
		if (ret > 0) {
#if 0
			pr_debug("[%s]: ALUA TG Port not available,"
				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
				cmd->se_tfo->get_fabric_name(), alua_ascq);
#endif
			transport_set_sense_codes(cmd, 0x04, alua_ascq);
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
			return -EINVAL;
		}
		goto out_invalid_cdb_field;
	}
	/*
	 * Check status for SPC-3 Persistent Reservations
	 */
	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
					cmd, cdb, pr_reg_type) != 0) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
			return -EBUSY;
		}
		/*
		 * This means the CDB is allowed for the SCSI Initiator port
		 * when said port is *NOT* holding the legacy SPC-2 or
		 * SPC-3 Persistent Reservation.
		 */
	}

	/*
	 * If we operate in passthrough mode we skip most CDB emulation and
	 * instead hand the commands down to the physical SCSI device.
	 */
	passthrough =
		(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case XDWRITEREAD_10:
		if ((cmd->data_direction != DMA_TO_DEVICE) ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			goto out_invalid_cdb_field;
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

		/*
		 * Do not allow BIDI commands for passthrough mode.
		 */
		if (passthrough)
			goto out_unsupported_cdb;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->transport_complete_callback = &transport_xor_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
		service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;
			size = transport_get_size(sectors, cdb, cmd);
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

			/*
			 * Do not allow BIDI commands for passthrough mode.
			 */
			if (passthrough)
				goto out_unsupported_cdb;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->transport_complete_callback = &transport_xor_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;

			if (sectors)
				size = transport_get_size(1, cdb, cmd);
			else {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				goto out_invalid_cdb_field;
			}

			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

			if (target_check_write_same_discard(&cdb[10], dev) < 0)
				goto out_unsupported_cdb;
			if (!passthrough)
				cmd->execute_task = target_emulate_write_same;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			goto out_unsupported_cdb;
		}
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_IN from SCC-2 */
			/*
			 * Check for emulated MI_REPORT_TARGET_PGS.
			 */
			if (cdb[1] == MI_REPORT_TARGET_PGS &&
			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
				cmd->execute_task =
					target_emulate_report_target_port_groups;
			}
			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_SEND_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_modesense;
		break;
	case MODE_SENSE_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_modesense;
		break;
	case GPCMD_READ_BUFFER_CAPACITY:
	case GPCMD_SEND_OPC:
	case LOG_SELECT:
	case LOG_SENSE:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_BLOCK_LIMITS:
		size = READ_BLOCK_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_GET_CONFIGURATION:
	case GPCMD_READ_FORMAT_CAPACITIES:
	case GPCMD_READ_DISC_INFO:
	case GPCMD_READ_TRACK_RZONE_INFO:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_IN:
		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
			cmd->execute_task = target_scsi3_emulate_pr_in;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_OUT:
		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
			cmd->execute_task = target_scsi3_emulate_pr_out;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_MECHANISM_STATUS:
	case GPCMD_READ_DVD_STRUCTURE:
		size = (cdb[8] << 8) + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_POSITION:
		size = READ_POSITION_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_OUT from SCC-2
			 *
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS &&
			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
				cmd->execute_task =
					target_emulate_set_target_port_groups;
			}

			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_REPORT_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case INQUIRY:
		size = (cdb[3] << 8) + cdb[4];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_inquiry;
		break;
	case READ_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_readcapacity;
		break;
	case READ_MEDIA_SERIAL_NUMBER:
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			if (!passthrough)
				cmd->execute_task =
					target_emulate_readcapacity_16;
			break;
		default:
			if (passthrough)
				break;

			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			goto out_unsupported_cdb;
		}
		/*FALLTHROUGH*/
	case ACCESS_CONTROL_IN:
	case ACCESS_CONTROL_OUT:
	case EXTENDED_COPY:
	case READ_ATTRIBUTE:
	case RECEIVE_COPY_RESULTS:
	case WRITE_ATTRIBUTE:
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		size = (cdb[3] << 8) | cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
#if 0
	case GPCMD_READ_CD:
		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		size = (2336 * sectors);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
#endif
	case READ_TOC:
		size = cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case REQUEST_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_request_sense;
		break;
	case READ_ELEMENT_STATUS:
		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case WRITE_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;

		/*
		 * Setup the legacy emulated handler for SPC-2 and
		 * >= SPC-3 compatible reservation handling (CRH=1)
		 * Otherwise, we assume the underlying SCSI logic is
		 * running in SPC_PASSTHROUGH, and wants reservations
		 * emulation disabled.
		 */
		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
			cmd->execute_task = target_scsi2_reservation_reserve;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case RELEASE:
	case RELEASE_10:
		/*
		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RELEASE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;

		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
			cmd->execute_task = target_scsi2_reservation_release;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (sector_ret)
			goto out_unsupported_cdb;

		size = transport_get_size(sectors, cdb, cmd);
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;

		if (passthrough)
			break;

		/*
		 * Check to ensure that LBA + Range does not exceed past end of
		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
		 */
		if ((cmd->t_task_lba != 0) || (sectors != 0)) {
			if (transport_cmd_get_valid_sectors(cmd) < 0)
				goto out_invalid_cdb_field;
		}
		cmd->execute_task = target_emulate_synchronize_cache;
		break;
	case UNMAP:
		size = get_unaligned_be16(&cdb[7]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_unsupported_cdb;
		if (!passthrough)
			cmd->execute_task = target_emulate_write_same;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of the original reserved field
		 */
		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_unsupported_cdb;
		if (!passthrough)
			cmd->execute_task = target_emulate_write_same;
		break;
	case ALLOW_MEDIUM_REMOVAL:
	case ERASE:
	case REZERO_UNIT:
	case SEEK_10:
	case SPACE:
	case START_STOP:
	case TEST_UNIT_READY:
	case VERIFY:
	case WRITE_FILEMARKS:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_noop;
		break;
	case GPCMD_CLOSE_TRACK:
	case INITIALIZE_ELEMENT_STATUS:
	case GPCMD_LOAD_UNLOAD:
	case GPCMD_SET_SPEED:
	case MOVE_MEDIUM:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case REPORT_LUNS:
		cmd->execute_task = target_report_luns;
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	default:
		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			cmd->se_tfo->get_fabric_name(), cdb[0]);
		goto out_unsupported_cdb;
	}

	if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
			cmd->data_length, size, cdb[0]);

		cmd->cmd_spdtl = size;

		if (cmd->data_direction == DMA_TO_DEVICE) {
			pr_err("Rejecting underflow/overflow"
				" WRITE data\n");
			goto out_invalid_cdb_field;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_SG_IO_CDB.
		 */
		if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			goto out_invalid_cdb_field;
		}

		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
		}
		cmd->data_length = size;
	}

	/* reject any command that we don't have a handler for */
	if (!(passthrough || cmd->execute_task ||
	     (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
		goto out_unsupported_cdb;

	transport_set_supported_SAM_opcode(cmd);
	return ret;

out_unsupported_cdb:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
	return -EINVAL;
out_invalid_cdb_field:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
	return -EINVAL;
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_cmd *cmd_p, *cmd_tmp;
	int new_active_tasks = 0;

	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
		atomic_dec(&dev->simple_cmds);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for"
			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_dec(&dev->dev_ordered_sync);
		smp_mb__after_atomic_dec();

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
	}
	/*
	 * Process all commands up to the last received
	 * ORDERED task attribute which requires another blocking
	 * boundary
	 */
	spin_lock(&dev->delayed_cmd_lock);
	list_for_each_entry_safe(cmd_p, cmd_tmp,
			&dev->delayed_cmd_list, se_delayed_node) {

		list_del(&cmd_p->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		pr_debug("Calling add_tasks() for"
			" cmd_p: 0x%02x Task Attr: 0x%02x"
			" Dormant -> Active, se_ordered_id: %u\n",
			cmd_p->t_task_cdb[0],
			cmd_p->sam_task_attr, cmd_p->se_ordered_id);

		transport_add_tasks_from_cmd(cmd_p);
		new_active_tasks++;

		spin_lock(&dev->delayed_cmd_lock);
		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
			break;
	}
	spin_unlock(&dev->delayed_cmd_lock);
	/*
	 * If new tasks have become active, wake up the transport thread
	 * to do the processing of the Active tasks.
	 */
	if (new_active_tasks != 0)
		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->t_bidi_data_sg) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret < 0)
				break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc(&dev->dev_qf_count);
	smp_mb__after_atomic_inc();
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}
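
/*
 * Example of the QUEUE_FULL flow above: if a fabric ->queue_data_in()
 * or ->queue_status() callback returns -EAGAIN or -ENOMEM, the command
 * is parked on dev->qf_cmd_list here and retried later from the
 * device's qf work queue, which ends up in transport_complete_qf().
 */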

static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int reason = 0, ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);
	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to retrieve a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		if (transport_get_sense_data(cmd) < 0)
			reason = TCM_NON_EXISTENT_LUN;

		/*
		 * Only set when a struct se_task->task_scsi_status returned
		 * a non GOOD status.
		 */
		if (cmd->scsi_status) {
			ret = transport_send_check_condition_and_sense(
					cmd, reason, 1);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}
	/*
	 * Check for a callback, used by, amongst other things,
	 * XDWRITE_READ_10 emulation.
	 */
	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd);

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);

		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->t_bidi_data_sg) {
			spin_lock(&cmd->se_lun->lun_sep_lock);
			if (cmd->se_lun->lun_sep) {
				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
			}
			spin_unlock(&cmd->se_lun->lun_sep_lock);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		" data_direction: %d\n", cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
3342
static void transport_free_dev_tasks(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	LIST_HEAD(dispose_list);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		if (!(task->task_flags & TF_ACTIVE))
			list_move_tail(&task->t_list, &dispose_list);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	while (!list_empty(&dispose_list)) {
		task = list_first_entry(&dispose_list, struct se_task, t_list);

		if (task->task_sg != cmd->t_data_sg &&
		    task->task_sg != cmd->t_bidi_data_sg)
			kfree(task->task_sg);

		list_del(&task->t_list);

		cmd->se_dev->transport->free_task(task);
	}
}

static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
		return;

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

/**
 * transport_release_cmd - free a command
 * @cmd: command to free
 *
 * This routine unconditionally frees a command; reference counting
 * or list removal must be done by the caller.
 */
static void transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	if (cmd->check_release != 0) {
		target_put_sess_cmd(cmd->se_sess, cmd);
		return;
	}
	cmd->se_tfo->release_cmd(cmd);
}

/**
 * transport_put_cmd - release a reference to a command
 * @cmd: command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static void transport_put_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	int free_tasks = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (atomic_read(&cmd->t_fe_count)) {
		if (!atomic_dec_and_test(&cmd->t_fe_count))
			goto out_busy;
	}

	if (atomic_read(&cmd->t_se_count)) {
		if (!atomic_dec_and_test(&cmd->t_se_count))
			goto out_busy;
	}

	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
		free_tasks = 1;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (free_tasks != 0)
		transport_free_dev_tasks(cmd);

	transport_free_pages(cmd);
	transport_release_cmd(cmd);
	return;
out_busy:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

/*
 * transport_generic_map_mem_to_cmd - Use fabric-allocated pages instead of
 * allocating in the core.
 * @cmd: Associated se_cmd descriptor
 * @sgl: SGL style memory for TCM WRITE / READ
 * @sgl_count: Number of SGL elements
 * @sgl_bidi: SGL style memory for TCM BIDI READ
 * @sgl_bidi_count: Number of BIDI READ SGL elements
 *
 * Return: nonzero if the cmd was rejected for -ENOMEM or improper usage
 * of parameters.
 */
int transport_generic_map_mem_to_cmd(
	struct se_cmd *cmd,
	struct scatterlist *sgl,
	u32 sgl_count,
	struct scatterlist *sgl_bidi,
	u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
		/*
		 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
		 * scatterlists already have been set to follow what the fabric
		 * passes for the original expected data transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			pr_warn("Rejecting SCSI DATA overflow for fabric using"
				" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
			return -EINVAL;
		}

		cmd->t_data_sg = sgl;
		cmd->t_data_nents = sgl_count;

		if (sgl_bidi && sgl_bidi_count) {
			cmd->t_bidi_data_sg = sgl_bidi;
			cmd->t_bidi_data_nents = sgl_bidi_count;
		}
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	}

	return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);

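/*
 * Editor's illustrative sketch (not part of this file): a fabric that
 * already owns the data buffer can hand its scatterlist to the core this
 * way instead of letting transport_generic_get_mem() allocate fresh pages.
 * The variables below are hypothetical; a real caller would do this after
 * CDB setup and before transport_generic_new_cmd().
 */
#if 0
	struct scatterlist *fabric_sgl;	/* fabric-owned WRITE/READ SGL */
	u32 fabric_sgl_nents;
	int rc;

	rc = transport_generic_map_mem_to_cmd(se_cmd, fabric_sgl,
			fabric_sgl_nents, NULL, 0);
	if (rc < 0)
		return rc;	/* overflow rejected as TCM_INVALID_CDB_FIELD */
#endif
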
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	BUG_ON(!sg);
	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	if (!cmd->t_data_nents)
		return NULL;
	else if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	/* >1 page. use vmap */
	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
	if (!pages)
		return NULL;

	/* convert sg[] to pages[] */
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);

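/*
 * Editor's illustrative sketch: typical emulation-side usage of the two
 * helpers above. Single-entry SGLs come back via kmap() plus the entry's
 * offset, larger payloads via vmap(); either way the caller must pair the
 * map with transport_kunmap_data_sg().
 */
#if 0
static int example_fill_payload(struct se_cmd *cmd)
{
	unsigned char *buf;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return -ENOMEM;

	/* ... write response bytes into buf[0..cmd->data_length-1] ... */

	transport_kunmap_data_sg(cmd);
	return 0;
}
#endif
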
static int
transport_generic_get_mem(struct se_cmd *cmd)
{
	u32 length = cmd->data_length;
	unsigned int nents;
	struct page *page;
	gfp_t zero_flag;
	int i = 0;

	nents = DIV_ROUND_UP(length, PAGE_SIZE);
	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
	if (!cmd->t_data_sg)
		return -ENOMEM;

	cmd->t_data_nents = nents;
	sg_init_table(cmd->t_data_sg, nents);

	zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	return 0;

out:
	/*
	 * Entry i is the one that failed allocation and was never populated,
	 * so only release the 0..i-1 entries that hold real pages.
	 */
	while (i > 0) {
		i--;
		__free_page(sg_page(&cmd->t_data_sg[i]));
	}
	kfree(cmd->t_data_sg);
	cmd->t_data_sg = NULL;
	return -ENOMEM;
}

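/*
 * Worked example of the allocation above (editor's note): with
 * PAGE_SIZE = 4096 and cmd->data_length = 10240, nents becomes
 * DIV_ROUND_UP(10240, 4096) = 3; the loop then fills two full 4096-byte
 * entries plus one final 2048-byte entry, so the SGL covers exactly
 * data_length bytes with only the tail page partially used.
 */
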
/* Reduce sectors if they are too long for the device */
static inline sector_t transport_limit_task_sectors(
	struct se_device *dev,
	unsigned long long lba,
	sector_t sectors)
{
	sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);

	if (dev->transport->get_device_type(dev) == TYPE_DISK)
		if ((lba + sectors) > transport_dev_end_lba(dev))
			sectors = ((transport_dev_end_lba(dev) - lba) + 1);

	return sectors;
}

/*
 * This function can be used by HW target mode drivers to create a linked
 * scatterlist from all contiguously allocated struct se_task->task_sg[].
 * This is intended to be called during the completion path by TCM Core
 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
 */
void transport_do_task_sg_chain(struct se_cmd *cmd)
{
	struct scatterlist *sg_first = NULL;
	struct scatterlist *sg_prev = NULL;
	int sg_prev_nents = 0;
	struct scatterlist *sg;
	struct se_task *task;
	u32 chained_nents = 0;
	int i;

	BUG_ON(!cmd->se_tfo->task_sg_chaining);

	/*
	 * Walk the struct se_task list and setup scatterlist chains
	 * for each contiguously allocated struct se_task->task_sg[].
	 */
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (!task->task_sg)
			continue;

		if (!sg_first) {
			sg_first = task->task_sg;
			chained_nents = task->task_sg_nents;
		} else {
			sg_chain(sg_prev, sg_prev_nents, task->task_sg);
			chained_nents += task->task_sg_nents;
		}
		/*
		 * For the padded tasks, use the extra SGL vector allocated
		 * in transport_allocate_data_tasks() for the sg_prev_nents
		 * offset into sg_chain() above.
		 *
		 * We do not need the padding for the last task (or a single
		 * task), but in that case we will never use the sg_prev_nents
		 * value below which would be incorrect.
		 */
		sg_prev_nents = (task->task_sg_nents + 1);
		sg_prev = task->task_sg;
	}
	/*
	 * Setup the starting pointer and total t_tasks_sg_linked_no including
	 * padding SGs for linking and to mark the end.
	 */
	cmd->t_tasks_sg_chained = sg_first;
	cmd->t_tasks_sg_chained_no = chained_nents;

	pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
		cmd->t_tasks_sg_chained_no);

	for_each_sg(cmd->t_tasks_sg_chained, sg,
			cmd->t_tasks_sg_chained_no, i) {

		pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
			i, sg, sg_page(sg), sg->length, sg->offset);
		if (sg_is_chain(sg))
			pr_debug("SG: %p sg_is_chain=1\n", sg);
		if (sg_is_last(sg))
			pr_debug("SG: %p sg_is_last=1\n", sg);
	}
}
EXPORT_SYMBOL(transport_do_task_sg_chain);

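/*
 * Editor's illustrative sketch: how a HW fabric would consume the chained
 * SGL built above. sg_next()/for_each_sg() hop over the chain-link entries
 * transparently, so the walk sees one flat list; example_hw_map_segment()
 * is a hypothetical driver helper.
 */
#if 0
	struct scatterlist *sg;
	int i;

	for_each_sg(cmd->t_tasks_sg_chained, sg,
			cmd->t_tasks_sg_chained_no, i)
		example_hw_map_segment(sg_page(sg), sg->offset, sg->length);
#endif
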
/*
 * Break up cmd into chunks transport can handle
 */
static int
transport_allocate_data_tasks(struct se_cmd *cmd,
	enum dma_data_direction data_direction,
	struct scatterlist *cmd_sg, unsigned int sgl_nents)
{
	struct se_device *dev = cmd->se_dev;
	int task_count, i;
	unsigned long long lba;
	sector_t sectors, dev_max_sectors;
	u32 sector_size;

	if (transport_cmd_get_valid_sectors(cmd) < 0)
		return -EINVAL;

	dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
	sector_size = dev->se_sub_dev->se_dev_attrib.block_size;

	WARN_ON(cmd->data_length % sector_size);

	lba = cmd->t_task_lba;
	sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
	task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);

	/*
	 * If we need just a single task reuse the SG list in the command
	 * and avoid a lot of work.
	 */
	if (task_count == 1) {
		struct se_task *task;
		unsigned long flags;

		task = transport_generic_get_task(cmd, data_direction);
		if (!task)
			return -ENOMEM;

		task->task_sg = cmd_sg;
		task->task_sg_nents = sgl_nents;

		task->task_lba = lba;
		task->task_sectors = sectors;
		task->task_size = task->task_sectors * sector_size;

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		list_add_tail(&task->t_list, &cmd->t_task_list);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return task_count;
	}

	for (i = 0; i < task_count; i++) {
		struct se_task *task;
		unsigned int task_size, task_sg_nents_padded;
		struct scatterlist *sg;
		unsigned long flags;
		int count;

		task = transport_generic_get_task(cmd, data_direction);
		if (!task)
			return -ENOMEM;

		task->task_lba = lba;
		task->task_sectors = min(sectors, dev_max_sectors);
		task->task_size = task->task_sectors * sector_size;

		/*
		 * This now assumes that the passed sg_ents are in PAGE_SIZE
		 * chunks in order to calculate the number of per-task SGL
		 * entries.
		 */
		task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
		/*
		 * Check if the fabric module driver is requesting that all
		 * struct se_task->task_sg[] be chained together.  If so,
		 * then allocate an extra padding SG entry for linking and
		 * marking the end of the chained SGL for every task except
		 * the last one for (task_count > 1) operation, or skipping
		 * the extra padding for the (task_count == 1) case.
		 */
		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
			task_sg_nents_padded = (task->task_sg_nents + 1);
		} else
			task_sg_nents_padded = task->task_sg_nents;

		task->task_sg = kmalloc(sizeof(struct scatterlist) *
					task_sg_nents_padded, GFP_KERNEL);
		if (!task->task_sg) {
			cmd->se_dev->transport->free_task(task);
			return -ENOMEM;
		}

		sg_init_table(task->task_sg, task_sg_nents_padded);

		task_size = task->task_size;

		/* Build new sgl, only up to task_size */
		for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
			if (cmd_sg->length > task_size)
				break;

			*sg = *cmd_sg;
			task_size -= cmd_sg->length;
			cmd_sg = sg_next(cmd_sg);
		}

		lba += task->task_sectors;
		sectors -= task->task_sectors;

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		list_add_tail(&task->t_list, &cmd->t_task_list);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	return task_count;
}

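/*
 * Worked example of the split above (editor's note): with block_size = 512,
 * max_sectors = 1024 and cmd->data_length = 1 MiB, sectors =
 * DIV_ROUND_UP(1048576, 512) = 2048 and task_count =
 * DIV_ROUND_UP_SECTOR_T(2048, 1024) = 2, so two se_tasks of 1024 sectors
 * (512 KiB) each are built, the second starting at lba + 1024.
 */
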
static int
transport_allocate_control_task(struct se_cmd *cmd)
{
	struct se_task *task;
	unsigned long flags;

	/* Workaround for handling zero-length control CDBs */
	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
	    !cmd->data_length)
		return 0;

	task = transport_generic_get_task(cmd, cmd->data_direction);
	if (!task)
		return -ENOMEM;

	task->task_sg = cmd->t_data_sg;
	task->task_size = cmd->data_length;
	task->task_sg_nents = cmd->t_data_nents;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_add_tail(&task->t_list, &cmd->t_task_list);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/* Success! Return number of tasks allocated */
	return 1;
}

/*
 * Allocate any required resources to execute the command, and either place
 * it on the execution queue if possible. For writes we might not have the
 * payload yet, thus notify the fabric via a call to ->write_pending instead.
 */
int transport_generic_new_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	int task_cdbs, task_cdbs_bidi = 0;
	int set_counts = 1;
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {
		ret = transport_generic_get_mem(cmd);
		if (ret < 0)
			goto out_fail;
	}

	/*
	 * For BIDI command set up the read tasks first.
	 */
	if (cmd->t_bidi_data_sg &&
	    dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));

		task_cdbs_bidi = transport_allocate_data_tasks(cmd,
				DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
				cmd->t_bidi_data_nents);
		if (task_cdbs_bidi <= 0)
			goto out_fail;

		atomic_inc(&cmd->t_fe_count);
		atomic_inc(&cmd->t_se_count);
		set_counts = 0;
	}

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
		task_cdbs = transport_allocate_data_tasks(cmd,
					cmd->data_direction, cmd->t_data_sg,
					cmd->t_data_nents);
	} else {
		task_cdbs = transport_allocate_control_task(cmd);
	}

	if (task_cdbs < 0)
		goto out_fail;
	else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->t_state = TRANSPORT_COMPLETE;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irq(&cmd->t_state_lock);

		if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
			u8 ua_asc = 0, ua_ascq = 0;

			core_scsi3_ua_clear_for_request_sense(cmd,
					&ua_asc, &ua_ascq);
		}

		INIT_WORK(&cmd->work, target_complete_ok_work);
		queue_work(target_completion_wq, &cmd->work);
		return 0;
	}

	if (set_counts) {
		atomic_inc(&cmd->t_fe_count);
		atomic_inc(&cmd->t_se_count);
	}

	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);

	/*
	 * For WRITEs, let the fabric know its buffer is ready.
	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
	 * will be added to the struct se_device execution queue after its WRITE
	 * data has arrived. (ie: It gets handled by the transport processing
	 * thread a second time)
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		transport_add_tasks_to_state_queue(cmd);
		return transport_generic_write_pending(cmd);
	}
	/*
	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
	 * to the execution queue.
	 */
	transport_execute_tasks(cmd);
	return 0;

out_fail:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	return -EINVAL;
}
EXPORT_SYMBOL(transport_generic_new_cmd);

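/*
 * Editor's illustrative sketch: the usual fabric submission step once a
 * received CDB has been set up on the se_cmd. READs are queued for
 * execution immediately; for WRITEs the call returns through
 * ->write_pending() and the payload is handed back later.
 */
#if 0
	ret = transport_generic_new_cmd(se_cmd);
	if (ret < 0)
		transport_generic_request_failure(se_cmd);
#endif
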
/* transport_generic_process_write():
 *
 * Called from the processing thread once WRITE data has arrived, to add
 * the command's tasks to the execution queue.
 */
void transport_generic_process_write(struct se_cmd *cmd)
{
	transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);

static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}

static int transport_generic_write_pending(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Clear the se_cmd for WRITE_PENDING status in order to set
	 * CMD_T_ACTIVE so that transport_generic_handle_data can be called
	 * from HW target mode interrupt code. This is safe to be called
	 * with transport_off=1 before the cmd->se_tfo->write_pending
	 * because the se_cmd->se_lun pointer is not being cleared.
	 */
	transport_cmd_check_stop(cmd, 1, 0);

	/*
	 * Call the fabric write_pending function here to let the
	 * frontend know that WRITE buffers are ready.
	 */
	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;
	else if (ret < 0)
		return ret;

	return 1;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}

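/*
 * Editor's illustrative sketch of the fabric side of the WRITE handshake:
 * ->write_pending() asks the initiator for data; once the payload has
 * arrived the fabric hands the command back via
 * transport_generic_handle_data(). Returning -EAGAIN/-ENOMEM here lands
 * the command in TRANSPORT_COMPLETE_QF_WP and it is retried through
 * transport_write_pending_qf(). example_fabric_send_r2t() is hypothetical.
 */
#if 0
static int example_write_pending(struct se_cmd *cmd)
{
	if (!example_fabric_send_r2t(cmd))
		return -EAGAIN;	/* retried from the QF work queue */
	return 0;
}
#endif
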
void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);

		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		transport_free_dev_tasks(cmd);

		transport_put_cmd(cmd);
	}
}
EXPORT_SYMBOL(transport_generic_free_cmd);

/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_sess: session to reference
 * @se_cmd: command descriptor to add
 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
 */
void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
			bool ack_kref)
{
	unsigned long flags;

	kref_init(&se_cmd->cmd_kref);
	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */
	if (ack_kref)
		kref_get(&se_cmd->cmd_kref);

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
	se_cmd->check_release = 1;
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_get_sess_cmd);

static void target_release_cmd_kref(struct kref *kref)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (list_empty(&se_cmd->se_cmd_list)) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		WARN_ON(1);
		return;
	}
	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
	list_del(&se_cmd->se_cmd_list);
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	se_cmd->se_tfo->release_cmd(se_cmd);
}

/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_sess: session to reference
 * @se_cmd: command descriptor to drop
 */
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);

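/*
 * Editor's illustrative sketch of the descriptor lifetime these helpers
 * implement: with ack_kref = true the command holds two references, one
 * dropped when the response is sent and one by the fabric's final
 * acknowledgement. The last put fires target_release_cmd_kref(), which
 * either completes ->cmd_wait_comp during session teardown or calls
 * ->release_cmd().
 */
#if 0
	target_get_sess_cmd(se_sess, se_cmd, true);	/* kref == 2 */

	/* ... command executes and the response goes out ... */
	target_put_sess_cmd(se_sess, se_cmd);		/* kref == 1 */

	/* ... frontend acknowledgement arrives ... */
	target_put_sess_cmd(se_sess, se_cmd);		/* 0: ->release_cmd() */
#endif
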
/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
 * @se_sess: session to split
 */
void target_splice_sess_cmd_list(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;

	WARN_ON(!list_empty(&se_sess->sess_wait_list));
	INIT_LIST_HEAD(&se_sess->sess_wait_list);

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	se_sess->sess_tearing_down = 1;

	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
		se_cmd->cmd_wait_set = 1;

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_splice_sess_cmd_list);

/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess: session to wait for active I/O
 * @wait_for_tasks: Make extra transport_wait_for_tasks call
 */
void target_wait_for_sess_cmds(
	struct se_session *se_sess,
	int wait_for_tasks)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	bool rc = false;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
			&se_sess->sess_wait_list, se_cmd_list) {
		list_del(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		if (wait_for_tasks) {
			pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));

			rc = transport_wait_for_tasks(se_cmd);

			pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		if (!rc) {
			wait_for_completion(&se_cmd->cmd_wait_comp);
			pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		se_cmd->se_tfo->release_cmd(se_cmd);
	}
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);

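/*
 * Editor's illustrative sketch: the teardown order a fabric is expected to
 * follow with the two helpers above. example_fabric_stop_conn() is a
 * hypothetical fabric-private step; transport_deregister_session() is the
 * existing core API.
 */
#if 0
	target_splice_sess_cmd_list(se_sess);	/* mark sess_tearing_down */
	example_fabric_stop_conn(conn);		/* stop new I/O arriving */
	target_wait_for_sess_cmds(se_sess, 0);	/* block until all cmds release */
	transport_deregister_session(se_sess);
#endif
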
/* transport_lun_wait_for_tasks():
 *
 * Called from ConfigFS context to stop the passed struct se_cmd to allow
 * a struct se_lun to be successfully shutdown.
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
	unsigned long flags;
	int ret;
	/*
	 * If the frontend has already requested this struct se_cmd to
	 * be stopped, we can safely ignore this struct se_cmd.
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_STOP) {
		cmd->transport_state &= ~CMD_T_LUN_STOP;

		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
			 cmd->se_tfo->get_task_tag(cmd));
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		transport_cmd_check_stop(cmd, 1, 0);
		return -EPERM;
	}
	cmd->transport_state |= CMD_T_LUN_FE_STOP;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	ret = transport_stop_tasks_for_cmd(cmd);

	pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
		" %d\n", cmd, cmd->t_task_list_num, ret);
	if (!ret) {
		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
			cmd->se_tfo->get_task_tag(cmd));
		wait_for_completion(&cmd->transport_lun_stop_comp);
		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
			cmd->se_tfo->get_task_tag(cmd));
	}
	transport_remove_cmd_from_queue(cmd);

	return 0;
}

static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct se_cmd *cmd = NULL;
	unsigned long lun_flags, cmd_flags;
	/*
	 * Do exception processing and return CHECK_CONDITION status to the
	 * Initiator Port.
	 */
	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	while (!list_empty(&lun->lun_cmd_list)) {
		cmd = list_first_entry(&lun->lun_cmd_list,
		       struct se_cmd, se_lun_node);
		list_del_init(&cmd->se_lun_node);

		/*
		 * This will notify iscsi_target_transport.c:
		 * transport_cmd_check_stop() that a LUN shutdown is in
		 * progress for the iscsi_cmd_t.
		 */
		spin_lock(&cmd->t_state_lock);
		pr_debug("SE_LUN[%d] - Setting cmd->transport"
			"_lun_stop for ITT: 0x%08x\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));
		cmd->transport_state |= CMD_T_LUN_STOP;
		spin_unlock(&cmd->t_state_lock);

		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

		if (!cmd->se_lun) {
			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		/*
		 * If the Storage engine still owns the iscsi_cmd_t, determine
		 * and/or stop its context.
		 */
		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}

		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
			"_wait_for_tasks(): SUCCESS\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
			goto check_cond;
		}
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);

		transport_free_dev_tasks(cmd);
		/*
		 * The Storage engine stopped this struct se_cmd before it was
		 * sent to the fabric frontend for delivery back to the
		 * Initiator Node. Return this SCSI CDB back with a
		 * CHECK_CONDITION status.
		 */
check_cond:
		transport_send_check_condition_and_sense(cmd,
				TCM_NON_EXISTENT_LUN, 0);
		/*
		 * If the fabric frontend is waiting for this iscsi_cmd_t to
		 * be released, notify the waiting thread now that LU has
		 * finished accessing it.
		 */
		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
			pr_debug("SE_LUN[%d] - Detected FE stop for"
				" struct se_cmd: %p ITT: 0x%08x\n",
				lun->unpacked_lun,
				cmd, cmd->se_tfo->get_task_tag(cmd));

			spin_unlock_irqrestore(&cmd->t_state_lock,
					cmd_flags);
			transport_cmd_check_stop(cmd, 1, 0);
			complete(&cmd->transport_lun_fe_stop_comp);
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}
		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}

static int transport_clear_lun_thread(void *p)
{
	struct se_lun *lun = p;

	__transport_clear_lun_from_sessions(lun);
	complete(&lun->lun_shutdown_comp);

	return 0;
}

int transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_thread, lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		pr_err("Unable to start clear_lun thread\n");
		return PTR_ERR(kt);
	}
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}

/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd: command to wait
 *
 * Called from frontend fabric context to wait for the storage engine
 * to pause and/or release a frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
	 * has been set in transport_set_supported_SAM_opcode().
	 */
	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * If we are already stopped due to an external event (ie: LUN shutdown)
	 * sleep until the connection can have the passed struct se_cmd back.
	 * The cmd->transport_lun_stopped_sem will be upped by
	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
	 * has completed its operation on the struct se_cmd.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("wait_for_tasks: Stopping"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));
		/*
		 * There is a special case for WRITES where a FE exception +
		 * LUN shutdown means ConfigFS context is still sleeping on
		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and up transport_lun_stop_comp just to be sure
		 * here.
		 */
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->transport_lun_stop_comp);
		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&cmd->t_state_lock, flags);

		transport_all_task_dev_remove_state(cmd);
		/*
		 * At this point, the frontend who was the originator of this
		 * struct se_cmd, now owns the structure and can be released through
		 * normal means below.
		 */
		pr_debug("wait_for_tasks: Stopped"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe_"
			"stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_LUN_STOP;
	}

	if (!(cmd->transport_state & CMD_T_ACTIVE) ||
	    (cmd->transport_state & CMD_T_ABORTED)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state: %d, CMD_T_STOP\n",
		cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion("
		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
		cmd->se_tfo->get_task_tag(cmd));

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

static int transport_get_sense_codes(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	*asc = cmd->scsi_asc;
	*ascq = cmd->scsi_ascq;

	return 0;
}

static int transport_set_sense_codes(
	struct se_cmd *cmd,
	u8 asc,
	u8 ascq)
{
	cmd->scsi_asc = asc;
	cmd->scsi_ascq = ascq;

	return 0;
}

int transport_send_check_condition_and_sense(
	struct se_cmd *cmd,
	u8 reason,
	int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	int offset;
	u8 asc = 0, ascq = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	/*
	 * Data Segment and SenseLength of the fabric response PDU.
	 *
	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
	 * from include/scsi/scsi_cmnd.h
	 */
	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);
	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2; SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NON_EXISTENT_LUN:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT NOT SUPPORTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
		break;
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

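/*
 * Worked example of the fixed-format sense data built above (editor's
 * note) for TCM_WRITE_PROTECTED, with byte positions relative to the
 * fabric sense offset and the SPC_* offsets as defined in
 * include/target/target_core_base.h (SPC_SENSE_KEY_OFFSET = 2,
 * SPC_ADD_SENSE_LEN_OFFSET = 7, SPC_ASC_KEY_OFFSET = 12,
 * SPC_ASCQ_KEY_OFFSET = 13):
 *
 *   byte 0  = 0x70  current error, fixed format
 *   byte 2  = 0x07  sense key DATA PROTECT
 *   byte 7  = 0x0a  additional sense length (10)
 *   byte 12 = 0x27  ASC: WRITE PROTECTED
 *   byte 13 = 0x00  ASCQ
 */
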
int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (!send_status ||
		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;
#if 0
		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			cmd->t_task_cdb[0],
			cmd->se_tfo->get_task_tag(cmd));
#endif
		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		cmd->se_tfo->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response. This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			smp_mb__after_atomic_inc();
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
#if 0
	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));
#endif
	cmd->se_tfo->queue_status(cmd);
}

static int transport_generic_do_tmr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
	return 0;
}

/* transport_processing_thread():
 *
 * Per-device processing thread: dequeues commands from the device queue
 * object and dispatches them based on cmd->t_state.
 */
static int transport_processing_thread(void *param)
{
	int ret;
	struct se_cmd *cmd;
	struct se_device *dev = param;

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			goto out;

get_cmd:
		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
		if (!cmd)
			continue;

		switch (cmd->t_state) {
		case TRANSPORT_NEW_CMD:
			BUG();
			break;
		case TRANSPORT_NEW_CMD_MAP:
			if (!cmd->se_tfo->new_cmd_map) {
				pr_err("cmd->se_tfo->new_cmd_map is"
					" NULL for TRANSPORT_NEW_CMD_MAP\n");
				BUG();
			}
			ret = cmd->se_tfo->new_cmd_map(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			ret = transport_generic_new_cmd(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			break;
		case TRANSPORT_PROCESS_WRITE:
			transport_generic_process_write(cmd);
			break;
		case TRANSPORT_PROCESS_TMR:
			transport_generic_do_tmr(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_WP:
			transport_write_pending_qf(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_OK:
			transport_complete_qf(cmd);
			break;
		default:
			pr_err("Unknown t_state: %d for ITT: 0x%08x "
				"i_state: %d on SE LUN: %u\n",
				cmd->t_state,
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd),
				cmd->se_lun->unpacked_lun);
			BUG();
		}

		goto get_cmd;
	}

out:
	WARN_ON(!list_empty(&dev->state_task_list));
	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
	dev->process_thread = NULL;
	return 0;
}