1 | /******************************************************************************* |
2 | * Filename: target_core_transport.c | |
3 | * | |
4 | * This file contains the Generic Target Engine Core. | |
5 | * | |
6 | * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. | |
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | |
8 | * Copyright (c) 2007-2010 Rising Tide Systems | |
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | |
10 | * | |
11 | * Nicholas A. Bellinger <nab@kernel.org> | |
12 | * | |
13 | * This program is free software; you can redistribute it and/or modify | |
14 | * it under the terms of the GNU General Public License as published by | |
15 | * the Free Software Foundation; either version 2 of the License, or | |
16 | * (at your option) any later version. | |
17 | * | |
18 | * This program is distributed in the hope that it will be useful, | |
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
21 | * GNU General Public License for more details. | |
22 | * | |
23 | * You should have received a copy of the GNU General Public License | |
24 | * along with this program; if not, write to the Free Software | |
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
26 | * | |
27 | ******************************************************************************/ | |
28 | ||
29 | #include <linux/version.h> | |
30 | #include <linux/net.h> | |
31 | #include <linux/delay.h> | |
32 | #include <linux/string.h> | |
33 | #include <linux/timer.h> | |
34 | #include <linux/slab.h> | |
35 | #include <linux/blkdev.h> | |
36 | #include <linux/spinlock.h> | |
37 | #include <linux/kthread.h> |
38 | #include <linux/in.h> | |
39 | #include <linux/cdrom.h> | |
40 | #include <asm/unaligned.h> | |
41 | #include <net/sock.h> | |
42 | #include <net/tcp.h> | |
43 | #include <scsi/scsi.h> | |
44 | #include <scsi/scsi_cmnd.h> | |
45 | #include <scsi/libsas.h> /* For TASK_ATTR_* */ | |
46 | ||
47 | #include <target/target_core_base.h> | |
48 | #include <target/target_core_device.h> | |
49 | #include <target/target_core_tmr.h> | |
50 | #include <target/target_core_tpg.h> | |
51 | #include <target/target_core_transport.h> | |
52 | #include <target/target_core_fabric_ops.h> | |
53 | #include <target/target_core_configfs.h> | |
54 | ||
55 | #include "target_core_alua.h" | |
56 | #include "target_core_hba.h" | |
57 | #include "target_core_pr.h" | |
58 | #include "target_core_scdb.h" | |
59 | #include "target_core_ua.h" | |
60 | ||
61 | /* #define DEBUG_CDB_HANDLER */ | |
62 | #ifdef DEBUG_CDB_HANDLER | |
63 | #define DEBUG_CDB_H(x...) printk(KERN_INFO x) | |
64 | #else | |
65 | #define DEBUG_CDB_H(x...) | |
66 | #endif | |
67 | ||
68 | /* #define DEBUG_CMD_MAP */ | |
69 | #ifdef DEBUG_CMD_MAP | |
70 | #define DEBUG_CMD_M(x...) printk(KERN_INFO x) | |
71 | #else | |
72 | #define DEBUG_CMD_M(x...) | |
73 | #endif | |
74 | ||
75 | /* #define DEBUG_MEM_ALLOC */ | |
76 | #ifdef DEBUG_MEM_ALLOC | |
77 | #define DEBUG_MEM(x...) printk(KERN_INFO x) | |
78 | #else | |
79 | #define DEBUG_MEM(x...) | |
80 | #endif | |
81 | ||
82 | /* #define DEBUG_MEM2_ALLOC */ | |
83 | #ifdef DEBUG_MEM2_ALLOC | |
84 | #define DEBUG_MEM2(x...) printk(KERN_INFO x) | |
85 | #else | |
86 | #define DEBUG_MEM2(x...) | |
87 | #endif | |
88 | ||
89 | /* #define DEBUG_SG_CALC */ | |
90 | #ifdef DEBUG_SG_CALC | |
91 | #define DEBUG_SC(x...) printk(KERN_INFO x) | |
92 | #else | |
93 | #define DEBUG_SC(x...) | |
94 | #endif | |
95 | ||
96 | /* #define DEBUG_SE_OBJ */ | |
97 | #ifdef DEBUG_SE_OBJ | |
98 | #define DEBUG_SO(x...) printk(KERN_INFO x) | |
99 | #else | |
100 | #define DEBUG_SO(x...) | |
101 | #endif | |
102 | ||
103 | /* #define DEBUG_CMD_VOL */ | |
104 | #ifdef DEBUG_CMD_VOL | |
105 | #define DEBUG_VOL(x...) printk(KERN_INFO x) | |
106 | #else | |
107 | #define DEBUG_VOL(x...) | |
108 | #endif | |
109 | ||
110 | /* #define DEBUG_CMD_STOP */ | |
111 | #ifdef DEBUG_CMD_STOP | |
112 | #define DEBUG_CS(x...) printk(KERN_INFO x) | |
113 | #else | |
114 | #define DEBUG_CS(x...) | |
115 | #endif | |
116 | ||
117 | /* #define DEBUG_PASSTHROUGH */ | |
118 | #ifdef DEBUG_PASSTHROUGH | |
119 | #define DEBUG_PT(x...) printk(KERN_INFO x) | |
120 | #else | |
121 | #define DEBUG_PT(x...) | |
122 | #endif | |
123 | ||
124 | /* #define DEBUG_TASK_STOP */ | |
125 | #ifdef DEBUG_TASK_STOP | |
126 | #define DEBUG_TS(x...) printk(KERN_INFO x) | |
127 | #else | |
128 | #define DEBUG_TS(x...) | |
129 | #endif | |
130 | ||
131 | /* #define DEBUG_TRANSPORT_STOP */ | |
132 | #ifdef DEBUG_TRANSPORT_STOP | |
133 | #define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x) | |
134 | #else | |
135 | #define DEBUG_TRANSPORT_S(x...) | |
136 | #endif | |
137 | ||
138 | /* #define DEBUG_TASK_FAILURE */ | |
139 | #ifdef DEBUG_TASK_FAILURE | |
140 | #define DEBUG_TF(x...) printk(KERN_INFO x) | |
141 | #else | |
142 | #define DEBUG_TF(x...) | |
143 | #endif | |
144 | ||
145 | /* #define DEBUG_DEV_OFFLINE */ | |
146 | #ifdef DEBUG_DEV_OFFLINE | |
147 | #define DEBUG_DO(x...) printk(KERN_INFO x) | |
148 | #else | |
149 | #define DEBUG_DO(x...) | |
150 | #endif | |
151 | ||
152 | /* #define DEBUG_TASK_STATE */ | |
153 | #ifdef DEBUG_TASK_STATE | |
154 | #define DEBUG_TSTATE(x...) printk(KERN_INFO x) | |
155 | #else | |
156 | #define DEBUG_TSTATE(x...) | |
157 | #endif | |
158 | ||
159 | /* #define DEBUG_STATUS_THR */ | |
160 | #ifdef DEBUG_STATUS_THR | |
161 | #define DEBUG_ST(x...) printk(KERN_INFO x) | |
162 | #else | |
163 | #define DEBUG_ST(x...) | |
164 | #endif | |
165 | ||
166 | /* #define DEBUG_TASK_TIMEOUT */ | |
167 | #ifdef DEBUG_TASK_TIMEOUT | |
168 | #define DEBUG_TT(x...) printk(KERN_INFO x) | |
169 | #else | |
170 | #define DEBUG_TT(x...) | |
171 | #endif | |
172 | ||
173 | /* #define DEBUG_GENERIC_REQUEST_FAILURE */ | |
174 | #ifdef DEBUG_GENERIC_REQUEST_FAILURE | |
175 | #define DEBUG_GRF(x...) printk(KERN_INFO x) | |
176 | #else | |
177 | #define DEBUG_GRF(x...) | |
178 | #endif | |
179 | ||
180 | /* #define DEBUG_SAM_TASK_ATTRS */ | |
181 | #ifdef DEBUG_SAM_TASK_ATTRS | |
182 | #define DEBUG_STA(x...) printk(KERN_INFO x) | |
183 | #else | |
184 | #define DEBUG_STA(x...) | |
185 | #endif | |
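Each of these guards follows the same pattern: while the corresponding `#define` is commented out (the default), the `DEBUG_*` macro expands to nothing and its call sites vanish during preprocessing; uncommenting the guard turns them into `printk(KERN_INFO ...)` calls. A minimal sketch of a call site (the function and its argument are hypothetical):

```c
/* Hypothetical call site: with DEBUG_CDB_HANDLER defined this prints
 * the opcode; with the guard commented out the whole statement is
 * removed by the preprocessor, so it costs nothing at runtime.
 */
static void example_cdb_trace(unsigned char *cdb)
{
	DEBUG_CDB_H("Handling CDB with opcode: 0x%02x\n", cdb[0]);
}
```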
186 | ||
187 | struct se_global *se_global; | |
188 | ||
189 | static struct kmem_cache *se_cmd_cache; | |
190 | static struct kmem_cache *se_sess_cache; | |
191 | struct kmem_cache *se_tmr_req_cache; | |
192 | struct kmem_cache *se_ua_cache; | |
193 | struct kmem_cache *se_mem_cache; | |
194 | struct kmem_cache *t10_pr_reg_cache; | |
195 | struct kmem_cache *t10_alua_lu_gp_cache; | |
196 | struct kmem_cache *t10_alua_lu_gp_mem_cache; | |
197 | struct kmem_cache *t10_alua_tg_pt_gp_cache; | |
198 | struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; | |
199 | ||
200 | /* Used for transport_dev_get_map_*() */ | |
201 | typedef int (*map_func_t)(struct se_task *, u32); | |
202 | ||
203 | static int transport_generic_write_pending(struct se_cmd *); | |
204 | static int transport_processing_thread(void *); | |
205 | static int __transport_execute_tasks(struct se_device *dev); | |
206 | static void transport_complete_task_attr(struct se_cmd *cmd); | |
207 | static void transport_direct_request_timeout(struct se_cmd *cmd); | |
208 | static void transport_free_dev_tasks(struct se_cmd *cmd); | |
209 | static u32 transport_generic_get_cdb_count(struct se_cmd *cmd, | |
210 | unsigned long long starting_lba, u32 sectors, | |
211 | enum dma_data_direction data_direction, | |
212 | struct list_head *mem_list, int set_counts); | |
213 | static int transport_generic_get_mem(struct se_cmd *cmd, u32 length, | |
214 | u32 dma_size); | |
215 | static int transport_generic_remove(struct se_cmd *cmd, | |
216 | int release_to_pool, int session_reinstatement); | |
217 | static int transport_get_sectors(struct se_cmd *cmd); | |
218 | static struct list_head *transport_init_se_mem_list(void); | |
219 | static int transport_map_sg_to_mem(struct se_cmd *cmd, | |
220 | struct list_head *se_mem_list, void *in_mem, | |
221 | u32 *se_mem_cnt); | |
222 | static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd, | |
223 | unsigned char *dst, struct list_head *se_mem_list); | |
224 | static void transport_release_fe_cmd(struct se_cmd *cmd); | |
225 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | |
226 | struct se_queue_obj *qobj); | |
227 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); | |
228 | static void transport_stop_all_task_timers(struct se_cmd *cmd); | |
229 | ||
230 | int transport_emulate_control_cdb(struct se_task *task); | |
231 | ||
232 | int init_se_global(void) | |
233 | { | |
234 | struct se_global *global; | |
235 | ||
236 | global = kzalloc(sizeof(struct se_global), GFP_KERNEL); | |
237 | if (!(global)) { | |
238 | printk(KERN_ERR "Unable to allocate memory for struct se_global\n"); | |
239 | return -1; | |
240 | } | |
241 | ||
242 | INIT_LIST_HEAD(&global->g_lu_gps_list); | |
243 | INIT_LIST_HEAD(&global->g_se_tpg_list); | |
244 | INIT_LIST_HEAD(&global->g_hba_list); | |
245 | INIT_LIST_HEAD(&global->g_se_dev_list); | |
246 | spin_lock_init(&global->g_device_lock); | |
247 | spin_lock_init(&global->hba_lock); | |
248 | spin_lock_init(&global->se_tpg_lock); | |
249 | spin_lock_init(&global->lu_gps_lock); | |
250 | spin_lock_init(&global->plugin_class_lock); | |
251 | ||
252 | se_cmd_cache = kmem_cache_create("se_cmd_cache", | |
253 | sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); | |
254 | if (!(se_cmd_cache)) { | |
255 | printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n"); | |
256 | goto out; | |
257 | } | |
258 | se_tmr_req_cache = kmem_cache_create("se_tmr_cache", | |
259 | sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), | |
260 | 0, NULL); | |
261 | if (!(se_tmr_req_cache)) { | |
262 | printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req" | |
263 | " failed\n"); | |
264 | goto out; | |
265 | } | |
266 | se_sess_cache = kmem_cache_create("se_sess_cache", | |
267 | sizeof(struct se_session), __alignof__(struct se_session), | |
268 | 0, NULL); | |
269 | if (!(se_sess_cache)) { | |
270 | printk(KERN_ERR "kmem_cache_create() for struct se_session" | |
271 | " failed\n"); | |
272 | goto out; | |
273 | } | |
274 | se_ua_cache = kmem_cache_create("se_ua_cache", | |
275 | sizeof(struct se_ua), __alignof__(struct se_ua), | |
276 | 0, NULL); | |
277 | if (!(se_ua_cache)) { | |
278 | printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n"); | |
279 | goto out; | |
280 | } | |
281 | se_mem_cache = kmem_cache_create("se_mem_cache", | |
282 | sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL); | |
283 | if (!(se_mem_cache)) { | |
284 | printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n"); | |
285 | goto out; | |
286 | } | |
287 | t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", | |
288 | sizeof(struct t10_pr_registration), | |
289 | __alignof__(struct t10_pr_registration), 0, NULL); | |
290 | if (!(t10_pr_reg_cache)) { | |
291 | printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration" | |
292 | " failed\n"); | |
293 | goto out; | |
294 | } | |
295 | t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", | |
296 | sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), | |
297 | 0, NULL); | |
298 | if (!(t10_alua_lu_gp_cache)) { | |
299 | printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache" | |
300 | " failed\n"); | |
301 | goto out; | |
302 | } | |
303 | t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", | |
304 | sizeof(struct t10_alua_lu_gp_member), | |
305 | __alignof__(struct t10_alua_lu_gp_member), 0, NULL); | |
306 | if (!(t10_alua_lu_gp_mem_cache)) { | |
307 | printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_" | |
308 | "cache failed\n"); | |
309 | goto out; | |
310 | } | |
311 | t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", | |
312 | sizeof(struct t10_alua_tg_pt_gp), | |
313 | __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); | |
314 | if (!(t10_alua_tg_pt_gp_cache)) { | |
315 | printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" | |
316 | "cache failed\n"); | |
317 | goto out; | |
318 | } | |
319 | t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( | |
320 | "t10_alua_tg_pt_gp_mem_cache", | |
321 | sizeof(struct t10_alua_tg_pt_gp_member), | |
322 | __alignof__(struct t10_alua_tg_pt_gp_member), | |
323 | 0, NULL); | |
324 | if (!(t10_alua_tg_pt_gp_mem_cache)) { | |
325 | printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" | |
326 | "mem_t failed\n"); | |
327 | goto out; | |
328 | } | |
329 | ||
330 | se_global = global; | |
331 | ||
332 | return 0; | |
333 | out: | |
334 | if (se_cmd_cache) | |
335 | kmem_cache_destroy(se_cmd_cache); | |
336 | if (se_tmr_req_cache) | |
337 | kmem_cache_destroy(se_tmr_req_cache); | |
338 | if (se_sess_cache) | |
339 | kmem_cache_destroy(se_sess_cache); | |
340 | if (se_ua_cache) | |
341 | kmem_cache_destroy(se_ua_cache); | |
342 | if (se_mem_cache) | |
343 | kmem_cache_destroy(se_mem_cache); | |
344 | if (t10_pr_reg_cache) | |
345 | kmem_cache_destroy(t10_pr_reg_cache); | |
346 | if (t10_alua_lu_gp_cache) | |
347 | kmem_cache_destroy(t10_alua_lu_gp_cache); | |
348 | if (t10_alua_lu_gp_mem_cache) | |
349 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | |
350 | if (t10_alua_tg_pt_gp_cache) | |
351 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | |
352 | if (t10_alua_tg_pt_gp_mem_cache) | |
353 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | |
354 | kfree(global); | |
355 | return -1; | |
356 | } | |
357 | ||
358 | void release_se_global(void) | |
359 | { | |
360 | struct se_global *global; | |
361 | ||
362 | global = se_global; | |
363 | if (!(global)) | |
364 | return; | |
365 | ||
366 | kmem_cache_destroy(se_cmd_cache); | |
367 | kmem_cache_destroy(se_tmr_req_cache); | |
368 | kmem_cache_destroy(se_sess_cache); | |
369 | kmem_cache_destroy(se_ua_cache); | |
370 | kmem_cache_destroy(se_mem_cache); | |
371 | kmem_cache_destroy(t10_pr_reg_cache); | |
372 | kmem_cache_destroy(t10_alua_lu_gp_cache); | |
373 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | |
374 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | |
375 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | |
376 | kfree(global); | |
377 | ||
378 | se_global = NULL; | |
379 | } | |
380 | ||
381 | /* SCSI statistics table index */ |
382 | static struct scsi_index_table scsi_index_table; | |
383 | ||
384 | /* | |
385 | * Initialize the index table for allocating unique row indexes to various mib | |
386 | * tables. | |
387 | */ | |
388 | void init_scsi_index_table(void) | |
389 | { | |
390 | memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); | |
391 | spin_lock_init(&scsi_index_table.lock); | |
392 | } | |
393 | ||
394 | /* | |
395 | * Allocate a new row index for the entry type specified | |
396 | */ | |
397 | u32 scsi_get_new_index(scsi_index_t type) | |
398 | { | |
399 | u32 new_index; | |
400 | ||
401 | if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { | |
402 | printk(KERN_ERR "Invalid index type %d\n", type); | |
403 | return -EINVAL; | |
404 | } | |
405 | ||
406 | spin_lock(&scsi_index_table.lock); | |
407 | new_index = ++scsi_index_table.scsi_mib_index[type]; | |
408 | if (new_index == 0) | |
409 | new_index = ++scsi_index_table.scsi_mib_index[type]; | |
410 | spin_unlock(&scsi_index_table.lock); | |
411 | ||
412 | return new_index; | |
413 | } | |
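The double increment above is a wraparound guard: index 0 is reserved to mean "no index assigned", so when the `u32` counter wraps past 0xffffffff the zero value is skipped and callers always receive a non-zero index. A sketch of typical use, mirroring the `SCSI_DEVICE_INDEX` call made later in this file:

```c
static void example_alloc_dev_index(struct se_device *dev)
{
	/* Worked example of the wraparound guard (values illustrative):
	 *   scsi_mib_index[type] == 0xffffffff before the call
	 *   first  ++ -> 0x00000000, reserved, so increment again
	 *   second ++ -> 0x00000001, returned to the caller
	 */
	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
}
```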
414 | ||
415 | void transport_init_queue_obj(struct se_queue_obj *qobj) |
416 | { | |
417 | atomic_set(&qobj->queue_cnt, 0); | |
418 | INIT_LIST_HEAD(&qobj->qobj_list); | |
419 | init_waitqueue_head(&qobj->thread_wq); | |
420 | spin_lock_init(&qobj->cmd_queue_lock); | |
421 | } | |
422 | EXPORT_SYMBOL(transport_init_queue_obj); | |
423 | ||
424 | static int transport_subsystem_reqmods(void) | |
425 | { | |
426 | int ret; | |
427 | ||
428 | ret = request_module("target_core_iblock"); | |
429 | if (ret != 0) | |
430 | printk(KERN_ERR "Unable to load target_core_iblock\n"); | |
431 | ||
432 | ret = request_module("target_core_file"); | |
433 | if (ret != 0) | |
434 | printk(KERN_ERR "Unable to load target_core_file\n"); | |
435 | ||
436 | ret = request_module("target_core_pscsi"); | |
437 | if (ret != 0) | |
438 | printk(KERN_ERR "Unable to load target_core_pscsi\n"); | |
439 | ||
440 | ret = request_module("target_core_stgt"); | |
441 | if (ret != 0) | |
442 | printk(KERN_ERR "Unable to load target_core_stgt\n"); | |
443 | ||
444 | return 0; | |
445 | } | |
446 | ||
447 | int transport_subsystem_check_init(void) | |
448 | { | |
449 | if (se_global->g_sub_api_initialized) | |
450 | return 0; | |
451 | /* | |
452 | * Request the loading of known TCM subsystem plugins.. | |
453 | */ | |
454 | if (transport_subsystem_reqmods() < 0) | |
455 | return -1; | |
456 | ||
457 | se_global->g_sub_api_initialized = 1; | |
458 | return 0; | |
459 | } | |
460 | ||
461 | struct se_session *transport_init_session(void) | |
462 | { | |
463 | struct se_session *se_sess; | |
464 | ||
465 | se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); | |
466 | if (!(se_sess)) { | |
467 | printk(KERN_ERR "Unable to allocate struct se_session from" | |
468 | " se_sess_cache\n"); | |
469 | return ERR_PTR(-ENOMEM); | |
470 | } | |
471 | INIT_LIST_HEAD(&se_sess->sess_list); | |
472 | INIT_LIST_HEAD(&se_sess->sess_acl_list); | |
473 | |
474 | return se_sess; | |
475 | } | |
476 | EXPORT_SYMBOL(transport_init_session); | |
477 | ||
478 | /* | |
479 | * Called with spin_lock_bh(&struct se_portal_group->session_lock) held. | |
480 | */ | |
481 | void __transport_register_session( | |
482 | struct se_portal_group *se_tpg, | |
483 | struct se_node_acl *se_nacl, | |
484 | struct se_session *se_sess, | |
485 | void *fabric_sess_ptr) | |
486 | { | |
487 | unsigned char buf[PR_REG_ISID_LEN]; | |
488 | ||
489 | se_sess->se_tpg = se_tpg; | |
490 | se_sess->fabric_sess_ptr = fabric_sess_ptr; | |
491 | /* | |
492 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session | |
493 | * | |
494 | * Only set for struct se_session's that will actually be moving I/O. | |
495 | * eg: *NOT* discovery sessions. | |
496 | */ | |
497 | if (se_nacl) { | |
498 | /* | |
499 | * If the fabric module supports an ISID based TransportID, | |
500 | * save this value in binary from the fabric I_T Nexus now. | |
501 | */ | |
502 | if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) { | |
503 | memset(&buf[0], 0, PR_REG_ISID_LEN); | |
504 | TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, | |
505 | &buf[0], PR_REG_ISID_LEN); | |
506 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); | |
507 | } | |
508 | spin_lock_irq(&se_nacl->nacl_sess_lock); | |
509 | /* | |
510 | * The se_nacl->nacl_sess pointer will be set to the | |
511 | * last active I_T Nexus for each struct se_node_acl. | |
512 | */ | |
513 | se_nacl->nacl_sess = se_sess; | |
514 | ||
515 | list_add_tail(&se_sess->sess_acl_list, | |
516 | &se_nacl->acl_sess_list); | |
517 | spin_unlock_irq(&se_nacl->nacl_sess_lock); | |
518 | } | |
519 | list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); | |
520 | ||
521 | printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", | |
522 | TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr); | |
523 | } | |
524 | EXPORT_SYMBOL(__transport_register_session); | |
525 | ||
526 | void transport_register_session( | |
527 | struct se_portal_group *se_tpg, | |
528 | struct se_node_acl *se_nacl, | |
529 | struct se_session *se_sess, | |
530 | void *fabric_sess_ptr) | |
531 | { | |
532 | spin_lock_bh(&se_tpg->session_lock); | |
533 | __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); | |
534 | spin_unlock_bh(&se_tpg->session_lock); | |
535 | } | |
536 | EXPORT_SYMBOL(transport_register_session); | |
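A minimal sketch of how a fabric module might use the two calls above to bring up an I_T nexus; the function, its arguments, and the direct `se_node_acl` assignment are hypothetical simplifications of what real fabric drivers do:

```c
static int example_make_nexus(struct se_portal_group *se_tpg,
			      struct se_node_acl *se_nacl,
			      void *fabric_sess_ptr)
{
	struct se_session *se_sess;

	se_sess = transport_init_session();
	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);
	/* Fabric code is expected to set its node ACL before I/O. */
	se_sess->se_node_acl = se_nacl;
	/* Takes se_tpg->session_lock, then links the session into the
	 * TPG and, for non-discovery sessions, the ACL session list. */
	transport_register_session(se_tpg, se_nacl, se_sess,
				   fabric_sess_ptr);
	return 0;
}
```

Teardown is the mirror image, using `transport_deregister_session_configfs()` and `transport_deregister_session()` defined below.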
537 | ||
538 | void transport_deregister_session_configfs(struct se_session *se_sess) | |
539 | { | |
540 | struct se_node_acl *se_nacl; | |
541 | ||
542 | /* | |
543 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session | |
544 | */ | |
545 | se_nacl = se_sess->se_node_acl; | |
546 | if ((se_nacl)) { | |
547 | spin_lock_irq(&se_nacl->nacl_sess_lock); | |
548 | list_del(&se_sess->sess_acl_list); | |
549 | /* | |
550 | * If the session list is empty, then clear the pointer. | |
551 | * Otherwise, set the struct se_session pointer from the tail | |
552 | * element of the per struct se_node_acl active session list. | |
553 | */ | |
554 | if (list_empty(&se_nacl->acl_sess_list)) | |
555 | se_nacl->nacl_sess = NULL; | |
556 | else { | |
557 | se_nacl->nacl_sess = container_of( | |
558 | se_nacl->acl_sess_list.prev, | |
559 | struct se_session, sess_acl_list); | |
560 | } | |
561 | spin_unlock_irq(&se_nacl->nacl_sess_lock); | |
562 | } | |
563 | } | |
564 | EXPORT_SYMBOL(transport_deregister_session_configfs); | |
565 | ||
566 | void transport_free_session(struct se_session *se_sess) | |
567 | { | |
568 | kmem_cache_free(se_sess_cache, se_sess); | |
569 | } | |
570 | EXPORT_SYMBOL(transport_free_session); | |
571 | ||
572 | void transport_deregister_session(struct se_session *se_sess) | |
573 | { | |
574 | struct se_portal_group *se_tpg = se_sess->se_tpg; | |
575 | struct se_node_acl *se_nacl; | |
576 | ||
577 | if (!(se_tpg)) { | |
578 | transport_free_session(se_sess); | |
579 | return; | |
580 | } | |
581 | |
582 | spin_lock_bh(&se_tpg->session_lock); | |
583 | list_del(&se_sess->sess_list); | |
584 | se_sess->se_tpg = NULL; | |
585 | se_sess->fabric_sess_ptr = NULL; | |
586 | spin_unlock_bh(&se_tpg->session_lock); | |
587 | ||
588 | /* | |
589 | * Determine if we need to do extra work for this initiator node's | |
590 | * struct se_node_acl if it had been previously dynamically generated. | |
591 | */ | |
592 | se_nacl = se_sess->se_node_acl; | |
593 | if ((se_nacl)) { | |
594 | spin_lock_bh(&se_tpg->acl_node_lock); | |
595 | if (se_nacl->dynamic_node_acl) { | |
596 | if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache( | |
597 | se_tpg))) { | |
598 | list_del(&se_nacl->acl_list); | |
599 | se_tpg->num_node_acls--; | |
600 | spin_unlock_bh(&se_tpg->acl_node_lock); | |
601 | ||
602 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | |
603 | core_free_device_list_for_node(se_nacl, se_tpg); |
604 | TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, | |
605 | se_nacl); | |
606 | spin_lock_bh(&se_tpg->acl_node_lock); | |
607 | } | |
608 | } | |
609 | spin_unlock_bh(&se_tpg->acl_node_lock); | |
610 | } | |
611 | ||
612 | transport_free_session(se_sess); | |
613 | ||
614 | printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n", | |
615 | TPG_TFO(se_tpg)->get_fabric_name()); | |
616 | } | |
617 | EXPORT_SYMBOL(transport_deregister_session); | |
618 | ||
619 | /* | |
620 | * Called with T_TASK(cmd)->t_state_lock held. | |
621 | */ | |
622 | static void transport_all_task_dev_remove_state(struct se_cmd *cmd) | |
623 | { | |
624 | struct se_device *dev; | |
625 | struct se_task *task; | |
626 | unsigned long flags; | |
627 | ||
628 | if (!T_TASK(cmd)) | |
629 | return; | |
630 | ||
631 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | |
632 | dev = task->se_dev; | |
633 | if (!(dev)) | |
634 | continue; | |
635 | ||
636 | if (atomic_read(&task->task_active)) | |
637 | continue; | |
638 | ||
639 | if (!(atomic_read(&task->task_state_active))) | |
640 | continue; | |
641 | ||
642 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
643 | list_del(&task->t_state_list); | |
644 | DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n", | |
645 | CMD_TFO(cmd)->get_task_tag(cmd), dev, task); | |
646 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
647 | ||
648 | atomic_set(&task->task_state_active, 0); | |
649 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left); | |
650 | } | |
651 | } | |
652 | ||
653 | /* transport_cmd_check_stop(): | |
654 | * | |
655 | * 'transport_off = 1' determines if t_transport_active should be cleared. | |
656 | * 'transport_off = 2' determines if task_dev_state should be removed. | |
657 | * | |
658 | * A non-zero u8 t_state sets cmd->t_state. | |
659 | * Returns 1 when command is stopped, else 0. | |
660 | */ | |
661 | static int transport_cmd_check_stop( | |
662 | struct se_cmd *cmd, | |
663 | int transport_off, | |
664 | u8 t_state) | |
665 | { | |
666 | unsigned long flags; | |
667 | ||
668 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
669 | /* | |
670 | * Determine if IOCTL context caller is requesting the stopping of this | |
671 | * command for LUN shutdown purposes. | |
672 | */ | |
673 | if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { | |
674 | DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)" | |
675 | " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, | |
676 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
677 | ||
678 | cmd->deferred_t_state = cmd->t_state; | |
679 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | |
680 | atomic_set(&T_TASK(cmd)->t_transport_active, 0); | |
681 | if (transport_off == 2) | |
682 | transport_all_task_dev_remove_state(cmd); | |
683 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
684 | ||
685 | complete(&T_TASK(cmd)->transport_lun_stop_comp); | |
686 | return 1; | |
687 | } | |
688 | /* | |
689 | * Determine if frontend context caller is requesting the stopping of | |
690 | * this command for frontend exceptions. | |
691 | */ | |
692 | if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { | |
693 | DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) ==" | |
694 | " TRUE for ITT: 0x%08x\n", __func__, __LINE__, | |
695 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
696 | ||
697 | cmd->deferred_t_state = cmd->t_state; | |
698 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | |
699 | if (transport_off == 2) | |
700 | transport_all_task_dev_remove_state(cmd); | |
701 | ||
702 | /* | |
703 | * Clear struct se_cmd->se_lun before the transport_off == 2 handoff | |
704 | * to FE. | |
705 | */ | |
706 | if (transport_off == 2) | |
707 | cmd->se_lun = NULL; | |
708 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
709 | ||
710 | complete(&T_TASK(cmd)->t_transport_stop_comp); | |
711 | return 1; | |
712 | } | |
713 | if (transport_off) { | |
714 | atomic_set(&T_TASK(cmd)->t_transport_active, 0); | |
715 | if (transport_off == 2) { | |
716 | transport_all_task_dev_remove_state(cmd); | |
717 | /* | |
718 | * Clear struct se_cmd->se_lun before the transport_off == 2 | |
719 | * handoff to fabric module. | |
720 | */ | |
721 | cmd->se_lun = NULL; | |
722 | /* | |
723 | * Some fabric modules like tcm_loop can release | |
724 | * their internally allocated I/O reference and | |
725 | * struct se_cmd now. | |
726 | */ | |
727 | if (CMD_TFO(cmd)->check_stop_free != NULL) { | |
728 | spin_unlock_irqrestore( | |
729 | &T_TASK(cmd)->t_state_lock, flags); | |
730 | ||
731 | CMD_TFO(cmd)->check_stop_free(cmd); | |
732 | return 1; | |
733 | } | |
734 | } | |
735 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
736 | ||
737 | return 0; | |
738 | } else if (t_state) | |
739 | cmd->t_state = t_state; | |
740 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
741 | ||
742 | return 0; | |
743 | } | |
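In short, `transport_off` selects how much teardown happens when the command is not being stopped: 0 only moves `t_state`, 1 also clears `t_transport_active`, and 2 additionally removes per-device task state and clears `cmd->se_lun` for the handoff to the fabric module. The three caller patterns, side by side:

```c
/* Summary of the transport_cmd_check_stop() calling modes:
 *
 *   transport_cmd_check_stop(cmd, 0, t_state); // just set cmd->t_state
 *   transport_cmd_check_stop(cmd, 1, 0);       // + clear t_transport_active
 *   transport_cmd_check_stop(cmd, 2, 0);       // + drop device task state,
 *                                              //   NULL cmd->se_lun, and
 *                                              //   allow check_stop_free()
 */
```

The wrapper below hard-codes the mode-2 case used when handing a command back to the fabric.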
744 | ||
745 | static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) | |
746 | { | |
747 | return transport_cmd_check_stop(cmd, 2, 0); | |
748 | } | |
749 | ||
750 | static void transport_lun_remove_cmd(struct se_cmd *cmd) | |
751 | { | |
752 | struct se_lun *lun = SE_LUN(cmd); | |
753 | unsigned long flags; | |
754 | ||
755 | if (!lun) | |
756 | return; | |
757 | ||
758 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
759 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | |
760 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
761 | goto check_lun; | |
762 | } | |
763 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | |
764 | transport_all_task_dev_remove_state(cmd); | |
765 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
766 | ||
767 | transport_free_dev_tasks(cmd); | |
768 | ||
769 | check_lun: | |
770 | spin_lock_irqsave(&lun->lun_cmd_lock, flags); | |
771 | if (atomic_read(&T_TASK(cmd)->transport_lun_active)) { | |
772 | list_del(&cmd->se_lun_list); | |
773 | atomic_set(&T_TASK(cmd)->transport_lun_active, 0); | |
774 | #if 0 | |
775 | printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" | |
776 | CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun); | |
777 | #endif | |
778 | } | |
779 | spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); | |
780 | } | |
781 | ||
782 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | |
783 | { | |
784 | transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); | |
785 | transport_lun_remove_cmd(cmd); | |
786 | ||
787 | if (transport_cmd_check_stop_to_fabric(cmd)) | |
788 | return; | |
789 | if (remove) | |
790 | transport_generic_remove(cmd, 0, 0); | |
791 | } | |
792 | ||
793 | void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) | |
794 | { | |
795 | transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); | |
796 | ||
797 | if (transport_cmd_check_stop_to_fabric(cmd)) | |
798 | return; | |
799 | ||
800 | transport_generic_remove(cmd, 0, 0); | |
801 | } | |
802 | ||
803 | static int transport_add_cmd_to_queue( | |
804 | struct se_cmd *cmd, | |
805 | int t_state) | |
806 | { | |
807 | struct se_device *dev = cmd->se_dev; | |
808 | struct se_queue_obj *qobj = dev->dev_queue_obj; | |
809 | struct se_queue_req *qr; | |
810 | unsigned long flags; | |
811 | ||
812 | qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC); | |
813 | if (!(qr)) { | |
814 | printk(KERN_ERR "Unable to allocate memory for" | |
815 | " struct se_queue_req\n"); | |
816 | return -1; | |
817 | } | |
818 | INIT_LIST_HEAD(&qr->qr_list); | |
819 | ||
820 | qr->cmd = (void *)cmd; | |
821 | qr->state = t_state; | |
822 | ||
823 | if (t_state) { | |
824 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
825 | cmd->t_state = t_state; | |
826 | atomic_set(&T_TASK(cmd)->t_transport_active, 1); | |
827 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
828 | } | |
829 | ||
830 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | |
831 | list_add_tail(&qr->qr_list, &qobj->qobj_list); | |
832 | atomic_inc(&T_TASK(cmd)->t_transport_queue_active); | |
833 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | |
834 | ||
835 | atomic_inc(&qobj->queue_cnt); | |
836 | wake_up_interruptible(&qobj->thread_wq); | |
837 | return 0; | |
838 | } | |
839 | ||
840 | /* | |
841 | * Called with struct se_queue_obj->cmd_queue_lock held. | |
842 | */ | |
843 | static struct se_queue_req * | |
844 | __transport_get_qr_from_queue(struct se_queue_obj *qobj) | |
845 | { | |
846 | struct se_cmd *cmd; | |
847 | struct se_queue_req *qr = NULL; | |
848 | ||
849 | if (list_empty(&qobj->qobj_list)) | |
850 | return NULL; | |
851 | ||
852 | list_for_each_entry(qr, &qobj->qobj_list, qr_list) | |
853 | break; | |
854 | ||
855 | if (qr->cmd) { | |
856 | cmd = (struct se_cmd *)qr->cmd; | |
857 | atomic_dec(&T_TASK(cmd)->t_transport_queue_active); | |
858 | } | |
859 | list_del(&qr->qr_list); | |
860 | atomic_dec(&qobj->queue_cnt); | |
861 | ||
862 | return qr; | |
863 | } | |
864 | ||
865 | static struct se_queue_req * | |
866 | transport_get_qr_from_queue(struct se_queue_obj *qobj) | |
867 | { | |
868 | struct se_cmd *cmd; | |
869 | struct se_queue_req *qr; | |
870 | unsigned long flags; | |
871 | ||
872 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | |
873 | if (list_empty(&qobj->qobj_list)) { | |
874 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | |
875 | return NULL; | |
876 | } | |
877 | ||
878 | list_for_each_entry(qr, &qobj->qobj_list, qr_list) | |
879 | break; | |
880 | ||
881 | if (qr->cmd) { | |
882 | cmd = (struct se_cmd *)qr->cmd; | |
883 | atomic_dec(&T_TASK(cmd)->t_transport_queue_active); | |
884 | } | |
885 | list_del(&qr->qr_list); | |
886 | atomic_dec(&qobj->queue_cnt); | |
887 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | |
888 | ||
889 | return qr; | |
890 | } | |
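`transport_add_cmd_to_queue()` and the two dequeue helpers form a producer/consumer pair around `struct se_queue_obj`: producers bump `queue_cnt` and wake `thread_wq`, while the processing thread sleeps until work arrives. A simplified, hypothetical consumer loop (the real one is `transport_processing_thread()`, declared near the top of this file):

```c
static int example_consumer_thread(void *arg)
{
	struct se_queue_obj *qobj = arg;
	struct se_queue_req *qr;

	while (!kthread_should_stop()) {
		/* Sleep until transport_add_cmd_to_queue() increments
		 * queue_cnt and wakes thread_wq. */
		wait_event_interruptible(qobj->thread_wq,
				atomic_read(&qobj->queue_cnt));

		qr = transport_get_qr_from_queue(qobj);
		if (!qr)
			continue;
		/* ... dispatch qr->cmd according to qr->state ... */
		kfree(qr);
	}
	return 0;
}
```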
891 | ||
892 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | |
893 | struct se_queue_obj *qobj) | |
894 | { | |
895 | struct se_cmd *q_cmd; | |
896 | struct se_queue_req *qr = NULL, *qr_p = NULL; | |
897 | unsigned long flags; | |
898 | ||
899 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | |
900 | if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) { | |
901 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | |
902 | return; | |
903 | } | |
904 | ||
905 | list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) { | |
906 | q_cmd = (struct se_cmd *)qr->cmd; | |
907 | if (q_cmd != cmd) | |
908 | continue; | |
909 | ||
910 | atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active); | |
911 | atomic_dec(&qobj->queue_cnt); | |
912 | list_del(&qr->qr_list); | |
913 | kfree(qr); | |
914 | } | |
915 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | |
916 | ||
917 | if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) { | |
918 | printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", | |
919 | CMD_TFO(cmd)->get_task_tag(cmd), | |
920 | atomic_read(&T_TASK(cmd)->t_transport_queue_active)); | |
921 | } | |
922 | } | |
923 | ||
924 | /* | |
925 | * Completion function used by TCM subsystem plugins (such as FILEIO) | |
926 | * for queueing up response from struct se_subsystem_api->do_task() | |
927 | */ | |
928 | void transport_complete_sync_cache(struct se_cmd *cmd, int good) | |
929 | { | |
930 | struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next, | |
931 | struct se_task, t_list); | |
932 | ||
933 | if (good) { | |
934 | cmd->scsi_status = SAM_STAT_GOOD; | |
935 | task->task_scsi_status = GOOD; | |
936 | } else { | |
937 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; | |
938 | task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; | |
939 | TASK_CMD(task)->transport_error_status = | |
940 | PYX_TRANSPORT_ILLEGAL_REQUEST; | |
941 | } | |
942 | ||
943 | transport_complete_task(task, good); | |
944 | } | |
945 | EXPORT_SYMBOL(transport_complete_sync_cache); | |
946 | ||
947 | /* transport_complete_task(): | |
948 | * | |
949 | * Called from interrupt and non interrupt context depending | |
950 | * on the transport plugin. | |
951 | */ | |
952 | void transport_complete_task(struct se_task *task, int success) | |
953 | { | |
954 | struct se_cmd *cmd = TASK_CMD(task); | |
955 | struct se_device *dev = task->se_dev; | |
956 | int t_state; | |
957 | unsigned long flags; | |
958 | #if 0 | |
959 | printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, | |
960 | T_TASK(cmd)->t_task_cdb[0], dev); | |
961 | #endif | |
962 | if (dev) { | |
963 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | |
964 | atomic_inc(&dev->depth_left); | |
965 | atomic_inc(&SE_HBA(dev)->left_queue_depth); | |
966 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | |
967 | } | |
968 | ||
969 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
970 | atomic_set(&task->task_active, 0); | |
971 | ||
972 | /* | |
973 | * See if any sense data exists, if so set the TASK_SENSE flag. | |
974 | * Also check for any other post completion work that needs to be | |
975 | * done by the plugins. | |
976 | */ | |
977 | if (dev && dev->transport->transport_complete) { | |
978 | if (dev->transport->transport_complete(task) != 0) { | |
979 | cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; | |
980 | task->task_sense = 1; | |
981 | success = 1; | |
982 | } | |
983 | } | |
984 | ||
985 | /* | |
986 | * See if we are waiting for outstanding struct se_task | |
987 | * to complete for an exception condition | |
988 | */ | |
989 | if (atomic_read(&task->task_stop)) { | |
990 | /* | |
991 | * Decrement T_TASK(cmd)->t_se_count if this task had | |
992 | * previously thrown its timeout exception handler. | |
993 | */ | |
994 | if (atomic_read(&task->task_timeout)) { | |
995 | atomic_dec(&T_TASK(cmd)->t_se_count); | |
996 | atomic_set(&task->task_timeout, 0); | |
997 | } | |
998 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
999 | ||
1000 | complete(&task->task_stop_comp); | |
1001 | return; | |
1002 | } | |
1003 | /* | |
1004 | * If the task's timeout handler has fired, use the t_task_cdbs_timeout | |
1005 | * left counter to determine when the struct se_cmd is ready to be queued to | |
1006 | * the processing thread. | |
1007 | */ | |
1008 | if (atomic_read(&task->task_timeout)) { | |
1009 | if (!(atomic_dec_and_test( | |
1010 | &T_TASK(cmd)->t_task_cdbs_timeout_left))) { | |
1011 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | |
1012 | flags); | |
1013 | return; | |
1014 | } | |
1015 | t_state = TRANSPORT_COMPLETE_TIMEOUT; | |
1016 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
1017 | ||
1018 | transport_add_cmd_to_queue(cmd, t_state); | |
1019 | return; | |
1020 | } | |
1021 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left); | |
1022 | ||
1023 | /* | |
1024 | * Decrement the outstanding t_task_cdbs_left count. The last | |
1025 | * struct se_task from struct se_cmd will complete itself into the | |
1026 | * device queue depending upon the value of success. | |
1027 | */ | |
1028 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { | |
1029 | if (!success) | |
1030 | T_TASK(cmd)->t_tasks_failed = 1; | |
1031 | ||
1032 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
1033 | return; | |
1034 | } | |
1035 | ||
1036 | if (!success || T_TASK(cmd)->t_tasks_failed) { | |
1037 | t_state = TRANSPORT_COMPLETE_FAILURE; | |
1038 | if (!task->task_error_status) { | |
1039 | task->task_error_status = | |
1040 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | |
1041 | cmd->transport_error_status = | |
1042 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | |
1043 | } | |
1044 | } else { | |
1045 | atomic_set(&T_TASK(cmd)->t_transport_complete, 1); | |
1046 | t_state = TRANSPORT_COMPLETE_OK; | |
1047 | } | |
1048 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
1049 | ||
1050 | transport_add_cmd_to_queue(cmd, t_state); | |
1051 | } | |
1052 | EXPORT_SYMBOL(transport_complete_task); | |
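The `atomic_dec_and_test()` on `t_task_cdbs_left` implements a fan-in: a command split into N tasks is queued for post-processing exactly once, by whichever task completes last. A worked example:

```c
/* Fan-in example: a struct se_cmd split into three struct se_task's
 * starts with t_task_cdbs_left == 3.
 *
 *   task A completes -> 2 left, records any failure, returns early
 *   task B completes -> 1 left, returns early
 *   task C completes -> 0 left, atomic_dec_and_test() succeeds, and
 *       this last completion queues the cmd to the processing thread
 *       as TRANSPORT_COMPLETE_OK or TRANSPORT_COMPLETE_FAILURE.
 */
```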
1053 | ||
1054 | /* | |
1055 | * Called by transport_add_tasks_from_cmd() once a struct se_cmd's | |
1056 | * struct se_task list is ready to be added to the active execution list | |
1057 | * of a struct se_device. | |
1058 | * | |
1059 | * Called with se_dev_t->execute_task_lock held. | |
1060 | */ | |
1061 | static inline int transport_add_task_check_sam_attr( | |
1062 | struct se_task *task, | |
1063 | struct se_task *task_prev, | |
1064 | struct se_device *dev) | |
1065 | { | |
1066 | /* | |
1067 | * No SAM Task attribute emulation enabled, add to tail of | |
1068 | * execution queue | |
1069 | */ | |
1070 | if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) { | |
1071 | list_add_tail(&task->t_execute_list, &dev->execute_task_list); | |
1072 | return 0; | |
1073 | } | |
1074 | /* | |
1075 | * HEAD_OF_QUEUE attribute for received CDB, which means | |
1076 | * the first task that is associated with a struct se_cmd goes to | |
1077 | * head of the struct se_device->execute_task_list, and task_prev | |
1078 | * after that for each subsequent task | |
1079 | */ | |
1080 | if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) { | |
1081 | list_add(&task->t_execute_list, | |
1082 | (task_prev != NULL) ? | |
1083 | &task_prev->t_execute_list : | |
1084 | &dev->execute_task_list); | |
1085 | ||
1086 | DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x" | |
1087 | " in execution queue\n", | |
1088 | T_TASK(task->task_se_cmd)->t_task_cdb[0]); | |
1089 | return 1; | |
1090 | } | |
1091 | /* | |
1092 | * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been | |
1093 | * transitioned from Dormant -> Active state, and are added to the end | |
1094 | * of the struct se_device->execute_task_list | |
1095 | */ | |
1096 | list_add_tail(&task->t_execute_list, &dev->execute_task_list); | |
1097 | return 0; | |
1098 | } | |
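To see why `task_prev` matters for HEAD_OF_QUEUE, consider the insert order for a multi-task HOQ command (the list state below is hypothetical). `transport_add_tasks_from_cmd()`, defined further down, passes each newly queued task as `task_prev` for the next:

```c
/* execute_task_list = [T1, T2]; a HOQ command arrives with tasks [A, B, C]:
 *
 *   insert A at the list head       -> [A, T1, T2]
 *   insert B after task_prev == A   -> [A, B, T1, T2]
 *   insert C after task_prev == B   -> [A, B, C, T1, T2]
 *
 * The HOQ command jumps ahead of older work while its own tasks keep
 * their submission order.
 */
```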
1099 | ||
1100 | /* __transport_add_task_to_execute_queue(): | |
1101 | * | |
1102 | * Called with se_dev_t->execute_task_lock held. | |
1103 | */ | |
1104 | static void __transport_add_task_to_execute_queue( | |
1105 | struct se_task *task, | |
1106 | struct se_task *task_prev, | |
1107 | struct se_device *dev) | |
1108 | { | |
1109 | int head_of_queue; | |
1110 | ||
1111 | head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev); | |
1112 | atomic_inc(&dev->execute_tasks); | |
1113 | ||
1114 | if (atomic_read(&task->task_state_active)) | |
1115 | return; | |
1116 | /* | |
1117 | * Determine if this task needs to go to HEAD_OF_QUEUE for the | |
1118 | * state list as well. Running with SAM Task Attribute emulation | |
1119 | * will always return head_of_queue == 0 here | |
1120 | */ | |
1121 | if (head_of_queue) | |
1122 | list_add(&task->t_state_list, (task_prev) ? | |
1123 | &task_prev->t_state_list : | |
1124 | &dev->state_task_list); | |
1125 | else | |
1126 | list_add_tail(&task->t_state_list, &dev->state_task_list); | |
1127 | ||
1128 | atomic_set(&task->task_state_active, 1); | |
1129 | ||
1130 | DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", | |
1131 | CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd), | |
1132 | task, dev); | |
1133 | } | |
1134 | ||
1135 | static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) | |
1136 | { | |
1137 | struct se_device *dev; | |
1138 | struct se_task *task; | |
1139 | unsigned long flags; | |
1140 | ||
1141 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
1142 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | |
1143 | dev = task->se_dev; | |
1144 | ||
1145 | if (atomic_read(&task->task_state_active)) | |
1146 | continue; | |
1147 | ||
1148 | spin_lock(&dev->execute_task_lock); | |
1149 | list_add_tail(&task->t_state_list, &dev->state_task_list); | |
1150 | atomic_set(&task->task_state_active, 1); | |
1151 | ||
1152 | DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", | |
1153 | CMD_TFO(task->task_se_cmd)->get_task_tag( | |
1154 | task->task_se_cmd), task, dev); | |
1155 | ||
1156 | spin_unlock(&dev->execute_task_lock); | |
1157 | } | |
1158 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
1159 | } | |
1160 | ||
1161 | static void transport_add_tasks_from_cmd(struct se_cmd *cmd) | |
1162 | { | |
1163 | struct se_device *dev = SE_DEV(cmd); | |
1164 | struct se_task *task, *task_prev = NULL; | |
1165 | unsigned long flags; | |
1166 | ||
1167 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
1168 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | |
1169 | if (atomic_read(&task->task_execute_queue)) | |
1170 | continue; | |
1171 | /* | |
1172 | * __transport_add_task_to_execute_queue() handles the | |
1173 | * SAM Task Attribute emulation if enabled | |
1174 | */ | |
1175 | __transport_add_task_to_execute_queue(task, task_prev, dev); | |
1176 | atomic_set(&task->task_execute_queue, 1); | |
1177 | task_prev = task; | |
1178 | } | |
1179 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
1180 | ||
1181 | return; | |
1182 | } | |
1183 | ||
1184 | /* transport_get_task_from_execute_queue(): | |
1185 | * | |
1186 | * Called with dev->execute_task_lock held. | |
1187 | */ | |
1188 | static struct se_task * | |
1189 | transport_get_task_from_execute_queue(struct se_device *dev) | |
1190 | { | |
1191 | struct se_task *task; | |
1192 | ||
1193 | if (list_empty(&dev->execute_task_list)) | |
1194 | return NULL; | |
1195 | ||
1196 | list_for_each_entry(task, &dev->execute_task_list, t_execute_list) | |
1197 | break; | |
1198 | ||
1199 | list_del(&task->t_execute_list); | |
1200 | atomic_dec(&dev->execute_tasks); | |
1201 | ||
1202 | return task; | |
1203 | } | |
1204 | ||
1205 | /* transport_remove_task_from_execute_queue(): | |
1206 | * | |
1207 | * | |
1208 | */ | |
1209 | void transport_remove_task_from_execute_queue( |
1210 | struct se_task *task, |
1211 | struct se_device *dev) | |
1212 | { | |
1213 | unsigned long flags; | |
1214 | ||
1215 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
1216 | list_del(&task->t_execute_list); | |
1217 | atomic_dec(&dev->execute_tasks); | |
1218 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
1219 | } | |
1220 | ||
1221 | unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) | |
1222 | { | |
1223 | switch (cmd->data_direction) { | |
1224 | case DMA_NONE: | |
1225 | return "NONE"; | |
1226 | case DMA_FROM_DEVICE: | |
1227 | return "READ"; | |
1228 | case DMA_TO_DEVICE: | |
1229 | return "WRITE"; | |
1230 | case DMA_BIDIRECTIONAL: | |
1231 | return "BIDI"; | |
1232 | default: | |
1233 | break; | |
1234 | } | |
1235 | ||
1236 | return "UNKNOWN"; | |
1237 | } | |
1238 | ||
1239 | void transport_dump_dev_state( | |
1240 | struct se_device *dev, | |
1241 | char *b, | |
1242 | int *bl) | |
1243 | { | |
1244 | *bl += sprintf(b + *bl, "Status: "); | |
1245 | switch (dev->dev_status) { | |
1246 | case TRANSPORT_DEVICE_ACTIVATED: | |
1247 | *bl += sprintf(b + *bl, "ACTIVATED"); | |
1248 | break; | |
1249 | case TRANSPORT_DEVICE_DEACTIVATED: | |
1250 | *bl += sprintf(b + *bl, "DEACTIVATED"); | |
1251 | break; | |
1252 | case TRANSPORT_DEVICE_SHUTDOWN: | |
1253 | *bl += sprintf(b + *bl, "SHUTDOWN"); | |
1254 | break; | |
1255 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: | |
1256 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: | |
1257 | *bl += sprintf(b + *bl, "OFFLINE"); | |
1258 | break; | |
1259 | default: | |
1260 | *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status); | |
1261 | break; | |
1262 | } | |
1263 | ||
1264 | *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d", | |
1265 | atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), | |
1266 | dev->queue_depth); | |
1267 | *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", | |
1268 | DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors); | |
1269 | *bl += sprintf(b + *bl, " "); | |
1270 | } | |
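A hypothetical caller, showing how the `*bl` cursor lets several dump helpers append into one buffer:

```c
static void example_dump_state(struct se_device *dev)
{
	char b[512];	/* hypothetical size; real callers size to taste */
	int bl = 0;

	transport_dump_dev_state(dev, b, &bl);
	/* further transport_dump_*() helpers could append via &bl */
	printk(KERN_INFO "%s\n", b);
}
```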
1271 | ||
1272 | /* transport_release_all_cmds(): | |
1273 | * | |
1274 | * | |
1275 | */ | |
1276 | static void transport_release_all_cmds(struct se_device *dev) | |
1277 | { | |
1278 | struct se_cmd *cmd = NULL; | |
1279 | struct se_queue_req *qr = NULL, *qr_p = NULL; | |
1280 | int bug_out = 0, t_state; | |
1281 | unsigned long flags; | |
1282 | ||
1283 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | |
1284 | list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list, | |
1285 | qr_list) { | |
1286 | ||
1287 | cmd = (struct se_cmd *)qr->cmd; | |
1288 | t_state = qr->state; | |
1289 | list_del(&qr->qr_list); | |
1290 | kfree(qr); | |
1291 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, | |
1292 | flags); | |
1293 | ||
1294 | printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u," | |
1295 | " t_state: %u directly\n", | |
1296 | CMD_TFO(cmd)->get_task_tag(cmd), | |
1297 | CMD_TFO(cmd)->get_cmd_state(cmd), t_state); | |
1298 | ||
1299 | transport_release_fe_cmd(cmd); | |
1300 | bug_out = 1; | |
1301 | ||
1302 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | |
1303 | } | |
1304 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); | |
1305 | #if 0 | |
1306 | if (bug_out) | |
1307 | BUG(); | |
1308 | #endif | |
1309 | } | |
1310 | ||
1311 | void transport_dump_vpd_proto_id( | |
1312 | struct t10_vpd *vpd, | |
1313 | unsigned char *p_buf, | |
1314 | int p_buf_len) | |
1315 | { | |
1316 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
1317 | int len; | |
1318 | ||
1319 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1320 | len = sprintf(buf, "T10 VPD Protocol Identifier: "); | |
1321 | ||
1322 | switch (vpd->protocol_identifier) { | |
1323 | case 0x00: | |
1324 | sprintf(buf+len, "Fibre Channel\n"); | |
1325 | break; | |
1326 | case 0x10: | |
1327 | sprintf(buf+len, "Parallel SCSI\n"); | |
1328 | break; | |
1329 | case 0x20: | |
1330 | sprintf(buf+len, "SSA\n"); | |
1331 | break; | |
1332 | case 0x30: | |
1333 | sprintf(buf+len, "IEEE 1394\n"); | |
1334 | break; | |
1335 | case 0x40: | |
1336 | sprintf(buf+len, "SCSI Remote Direct Memory Access" | |
1337 | " Protocol\n"); | |
1338 | break; | |
1339 | case 0x50: | |
1340 | sprintf(buf+len, "Internet SCSI (iSCSI)\n"); | |
1341 | break; | |
1342 | case 0x60: | |
1343 | sprintf(buf+len, "SAS Serial SCSI Protocol\n"); | |
1344 | break; | |
1345 | case 0x70: | |
1346 | sprintf(buf+len, "Automation/Drive Interface Transport" | |
1347 | " Protocol\n"); | |
1348 | break; | |
1349 | case 0x80: | |
1350 | sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); | |
1351 | break; | |
1352 | default: | |
1353 | sprintf(buf+len, "Unknown 0x%02x\n", | |
1354 | vpd->protocol_identifier); | |
1355 | break; | |
1356 | } | |
1357 | ||
1358 | if (p_buf) | |
1359 | strncpy(p_buf, buf, p_buf_len); | |
1360 | else | |
1361 | printk(KERN_INFO "%s", buf); | |
1362 | } | |
1363 | ||
1364 | void | |
1365 | transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) | |
1366 | { | |
1367 | /* | |
1368 | * Check if the Protocol Identifier Valid (PIV) bit is set.. | |
1369 | * | |
1370 | * from spc3r23.pdf section 7.5.1 | |
1371 | */ | |
1372 | if (page_83[1] & 0x80) { | |
1373 | vpd->protocol_identifier = (page_83[0] & 0xf0); | |
1374 | vpd->protocol_identifier_set = 1; | |
1375 | transport_dump_vpd_proto_id(vpd, NULL, 0); | |
1376 | } | |
1377 | } | |
1378 | EXPORT_SYMBOL(transport_set_vpd_proto_id); | |
1379 | ||
1380 | int transport_dump_vpd_assoc( | |
1381 | struct t10_vpd *vpd, | |
1382 | unsigned char *p_buf, | |
1383 | int p_buf_len) | |
1384 | { | |
1385 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
1386 | int ret = 0, len; | |
1387 | ||
1388 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1389 | len = sprintf(buf, "T10 VPD Identifier Association: "); | |
1390 | ||
1391 | switch (vpd->association) { | |
1392 | case 0x00: | |
1393 | sprintf(buf+len, "addressed logical unit\n"); | |
1394 | break; | |
1395 | case 0x10: | |
1396 | sprintf(buf+len, "target port\n"); | |
1397 | break; | |
1398 | case 0x20: | |
1399 | sprintf(buf+len, "SCSI target device\n"); | |
1400 | break; | |
1401 | default: | |
1402 | sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); | |
1403 | ret = -1; | |
1404 | break; | |
1405 | } | |
1406 | ||
1407 | if (p_buf) | |
1408 | strncpy(p_buf, buf, p_buf_len); | |
1409 | else | |
1410 | printk("%s", buf); | |
1411 | ||
1412 | return ret; | |
1413 | } | |
1414 | ||
1415 | int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) | |
1416 | { | |
1417 | /* | |
1418 | * The VPD identification association.. | |
1419 | * | |
1420 | * from spc3r23.pdf Section 7.6.3.1 Table 297 | |
1421 | */ | |
1422 | vpd->association = (page_83[1] & 0x30); | |
1423 | return transport_dump_vpd_assoc(vpd, NULL, 0); | |
1424 | } | |
1425 | EXPORT_SYMBOL(transport_set_vpd_assoc); | |
1426 | ||
1427 | int transport_dump_vpd_ident_type( | |
1428 | struct t10_vpd *vpd, | |
1429 | unsigned char *p_buf, | |
1430 | int p_buf_len) | |
1431 | { | |
1432 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
1433 | int ret = 0, len; | |
1434 | ||
1435 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1436 | len = sprintf(buf, "T10 VPD Identifier Type: "); | |
1437 | ||
1438 | switch (vpd->device_identifier_type) { | |
1439 | case 0x00: | |
1440 | sprintf(buf+len, "Vendor specific\n"); | |
1441 | break; | |
1442 | case 0x01: | |
1443 | sprintf(buf+len, "T10 Vendor ID based\n"); | |
1444 | break; | |
1445 | case 0x02: | |
1446 | sprintf(buf+len, "EUI-64 based\n"); | |
1447 | break; | |
1448 | case 0x03: | |
1449 | sprintf(buf+len, "NAA\n"); | |
1450 | break; | |
1451 | case 0x04: | |
1452 | sprintf(buf+len, "Relative target port identifier\n"); | |
1453 | break; | |
1454 | case 0x08: | |
1455 | sprintf(buf+len, "SCSI name string\n"); | |
1456 | break; | |
1457 | default: | |
1458 | sprintf(buf+len, "Unsupported: 0x%02x\n", | |
1459 | vpd->device_identifier_type); | |
1460 | ret = -1; | |
1461 | break; | |
1462 | } | |
1463 | ||
1464 | if (p_buf) | |
1465 | strncpy(p_buf, buf, p_buf_len); | |
1466 | else | |
1467 | printk("%s", buf); | |
1468 | ||
1469 | return ret; | |
1470 | } | |
1471 | ||
1472 | int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) | |
1473 | { | |
1474 | /* | |
1475 | * The VPD identifier type.. | |
1476 | * | |
1477 | * from spc3r23.pdf Section 7.6.3.1 Table 298 | |
1478 | */ | |
1479 | vpd->device_identifier_type = (page_83[1] & 0x0f); | |
1480 | return transport_dump_vpd_ident_type(vpd, NULL, 0); | |
1481 | } | |
1482 | EXPORT_SYMBOL(transport_set_vpd_ident_type); | |
1483 | ||
1484 | int transport_dump_vpd_ident( | |
1485 | struct t10_vpd *vpd, | |
1486 | unsigned char *p_buf, | |
1487 | int p_buf_len) | |
1488 | { | |
1489 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
1490 | int ret = 0; | |
1491 | ||
1492 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1493 | ||
1494 | switch (vpd->device_identifier_code_set) { | |
1495 | case 0x01: /* Binary */ | |
1496 | sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", | |
1497 | &vpd->device_identifier[0]); | |
1498 | break; | |
1499 | case 0x02: /* ASCII */ | |
1500 | sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", | |
1501 | &vpd->device_identifier[0]); | |
1502 | break; | |
1503 | case 0x03: /* UTF-8 */ | |
1504 | sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", | |
1505 | &vpd->device_identifier[0]); | |
1506 | break; | |
1507 | default: | |
1508 | sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" | |
1509 | " 0x%02x", vpd->device_identifier_code_set); | |
1510 | ret = -1; | |
1511 | break; | |
1512 | } | |
1513 | ||
1514 | if (p_buf) | |
1515 | strncpy(p_buf, buf, p_buf_len); | |
1516 | else | |
1517 | printk("%s", buf); | |
1518 | ||
1519 | return ret; | |
1520 | } | |
1521 | ||
1522 | int | |
1523 | transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) | |
1524 | { | |
1525 | static const char hex_str[] = "0123456789abcdef"; | |
1526 | int j = 0, i = 4; /* offset to start of the identifier */ | |
1527 | ||
1528 | /* | |
1529 | * The VPD Code Set (encoding) | |
1530 | * | |
1531 | * from spc3r23.pdf Section 7.6.3.1 Table 296 | |
1532 | */ | |
1533 | vpd->device_identifier_code_set = (page_83[0] & 0x0f); | |
1534 | switch (vpd->device_identifier_code_set) { | |
1535 | case 0x01: /* Binary */ | |
1536 | vpd->device_identifier[j++] = | |
1537 | hex_str[vpd->device_identifier_type]; | |
1538 | while (i < (4 + page_83[3])) { | |
1539 | vpd->device_identifier[j++] = | |
1540 | hex_str[(page_83[i] & 0xf0) >> 4]; | |
1541 | vpd->device_identifier[j++] = | |
1542 | hex_str[page_83[i] & 0x0f]; | |
1543 | i++; | |
1544 | } | |
1545 | break; | |
1546 | case 0x02: /* ASCII */ | |
1547 | case 0x03: /* UTF-8 */ | |
1548 | while (i < (4 + page_83[3])) | |
1549 | vpd->device_identifier[j++] = page_83[i++]; | |
1550 | break; | |
1551 | default: | |
1552 | break; | |
1553 | } | |
1554 | ||
1555 | return transport_dump_vpd_ident(vpd, NULL, 0); | |
1556 | } | |
1557 | EXPORT_SYMBOL(transport_set_vpd_ident); | |
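A minimal sketch of consuming one INQUIRY EVPD page 0x83 identification descriptor with the four helpers above; `page_83` points at the start of the descriptor (not the page header), and the wrapper name is hypothetical:

```c
static void example_parse_ident_desc(struct t10_vpd *vpd,
				     unsigned char *page_83)
{
	/* Byte 0, bits 7:4, honoured only when the PIV bit is set */
	transport_set_vpd_proto_id(vpd, page_83);
	/* Byte 1, bits 5:4: logical unit / target port / target device */
	transport_set_vpd_assoc(vpd, page_83);
	/* Byte 1, bits 3:0: NAA, EUI-64, SCSI name string, ... */
	transport_set_vpd_ident_type(vpd, page_83);
	/* Bytes 4 .. 4 + page_83[3] - 1, decoded per the code set */
	transport_set_vpd_ident(vpd, page_83);
}
```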
1558 | ||
1559 | static void core_setup_task_attr_emulation(struct se_device *dev) | |
1560 | { | |
1561 | /* | |
1562 | * If this device is from Target_Core_Mod/pSCSI, disable the | |
1563 | * SAM Task Attribute emulation. | |
1564 | * | |
1565 | * This is currently not available in upstream Linux/SCSI Target | |
1566 | * mode code, and is assumed to be disabled while using TCM/pSCSI. | |
1567 | */ | |
1568 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | |
1569 | dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; | |
1570 | return; | |
1571 | } | |
1572 | ||
1573 | dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; | |
1574 | DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" | |
1575 | " device\n", TRANSPORT(dev)->name, | |
1576 | TRANSPORT(dev)->get_device_rev(dev)); | |
1577 | } | |
1578 | ||
1579 | static void scsi_dump_inquiry(struct se_device *dev) | |
1580 | { | |
1581 | struct t10_wwn *wwn = DEV_T10_WWN(dev); | |
1582 | int i, device_type; | |
1583 | /* | |
1584 | * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer | |
1585 | */ | |
1586 | printk(" Vendor: "); | |
1587 | for (i = 0; i < 8; i++) | |
1588 | if (wwn->vendor[i] >= 0x20) | |
1589 | printk("%c", wwn->vendor[i]); | |
1590 | else | |
1591 | printk(" "); | |
1592 | ||
1593 | printk(" Model: "); | |
1594 | for (i = 0; i < 16; i++) | |
1595 | if (wwn->model[i] >= 0x20) | |
1596 | printk("%c", wwn->model[i]); | |
1597 | else | |
1598 | printk(" "); | |
1599 | ||
1600 | printk(" Revision: "); | |
1601 | for (i = 0; i < 4; i++) | |
1602 | if (wwn->revision[i] >= 0x20) | |
1603 | printk("%c", wwn->revision[i]); | |
1604 | else | |
1605 | printk(" "); | |
1606 | ||
1607 | printk("\n"); | |
1608 | ||
1609 | device_type = TRANSPORT(dev)->get_device_type(dev); | |
1610 | printk(" Type: %s ", scsi_device_type(device_type)); | |
1611 | printk(" ANSI SCSI revision: %02x\n", | |
1612 | TRANSPORT(dev)->get_device_rev(dev)); | |
1613 | } | |
1614 | ||
1615 | struct se_device *transport_add_device_to_core_hba( | |
1616 | struct se_hba *hba, | |
1617 | struct se_subsystem_api *transport, | |
1618 | struct se_subsystem_dev *se_dev, | |
1619 | u32 device_flags, | |
1620 | void *transport_dev, | |
1621 | struct se_dev_limits *dev_limits, | |
1622 | const char *inquiry_prod, | |
1623 | const char *inquiry_rev) | |
1624 | { | |
1625 | int force_pt; | |
1626 | struct se_device *dev; | |
1627 | ||
1628 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); | |
1629 | if (!(dev)) { | |
1630 | printk(KERN_ERR "Unable to allocate memory for se_dev_t\n"); | |
1631 | return NULL; | |
1632 | } | |
1633 | dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL); | |
1634 | if (!(dev->dev_queue_obj)) { | |
1635 | printk(KERN_ERR "Unable to allocate memory for" | |
1636 | " dev->dev_queue_obj\n"); | |
1637 | kfree(dev); | |
1638 | return NULL; | |
1639 | } | |
1640 | transport_init_queue_obj(dev->dev_queue_obj); | |
1641 | ||
1642 | dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj), | |
1643 | GFP_KERNEL); | |
1644 | if (!(dev->dev_status_queue_obj)) { | |
1645 | printk(KERN_ERR "Unable to allocate memory for" | |
1646 | " dev->dev_status_queue_obj\n"); | |
1647 | kfree(dev->dev_queue_obj); | |
1648 | kfree(dev); | |
1649 | return NULL; | |
1650 | } | |
1651 | transport_init_queue_obj(dev->dev_status_queue_obj); | |
1652 | ||
1653 | dev->dev_flags = device_flags; | |
1654 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | |
1655 | dev->dev_ptr = (void *) transport_dev; | |
1656 | dev->se_hba = hba; | |
1657 | dev->se_sub_dev = se_dev; | |
1658 | dev->transport = transport; | |
1659 | atomic_set(&dev->active_cmds, 0); | |
1660 | INIT_LIST_HEAD(&dev->dev_list); | |
1661 | INIT_LIST_HEAD(&dev->dev_sep_list); | |
1662 | INIT_LIST_HEAD(&dev->dev_tmr_list); | |
1663 | INIT_LIST_HEAD(&dev->execute_task_list); | |
1664 | INIT_LIST_HEAD(&dev->delayed_cmd_list); | |
1665 | INIT_LIST_HEAD(&dev->ordered_cmd_list); | |
1666 | INIT_LIST_HEAD(&dev->state_task_list); | |
1667 | spin_lock_init(&dev->execute_task_lock); | |
1668 | spin_lock_init(&dev->delayed_cmd_lock); | |
1669 | spin_lock_init(&dev->ordered_cmd_lock); | |
1670 | spin_lock_init(&dev->state_task_lock); | |
1671 | spin_lock_init(&dev->dev_alua_lock); | |
1672 | spin_lock_init(&dev->dev_reservation_lock); | |
1673 | spin_lock_init(&dev->dev_status_lock); | |
1674 | spin_lock_init(&dev->dev_status_thr_lock); | |
1675 | spin_lock_init(&dev->se_port_lock); | |
1676 | spin_lock_init(&dev->se_tmr_lock); | |
1677 | ||
1678 | dev->queue_depth = dev_limits->queue_depth; | |
1679 | atomic_set(&dev->depth_left, dev->queue_depth); | |
1680 | atomic_set(&dev->dev_ordered_id, 0); | |
1681 | ||
1682 | se_dev_set_default_attribs(dev, dev_limits); | |
1683 | ||
1684 | dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); | |
1685 | dev->creation_time = get_jiffies_64(); | |
1686 | spin_lock_init(&dev->stats_lock); | |
1687 | ||
1688 | spin_lock(&hba->device_lock); | |
1689 | list_add_tail(&dev->dev_list, &hba->hba_dev_list); | |
1690 | hba->dev_count++; | |
1691 | spin_unlock(&hba->device_lock); | |
1692 | /* | |
1693 | * Setup the SAM Task Attribute emulation for struct se_device | |
1694 | */ | |
1695 | core_setup_task_attr_emulation(dev); | |
1696 | /* | |
1697 | * Force PR and ALUA passthrough emulation with internal object use. | |
1698 | */ | |
1699 | force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); | |
1700 | /* | |
1701 | * Setup the Reservations infrastructure for struct se_device | |
1702 | */ | |
1703 | core_setup_reservations(dev, force_pt); | |
1704 | /* | |
1705 | * Setup the Asymmetric Logical Unit Assignment for struct se_device | |
1706 | */ | |
1707 | if (core_setup_alua(dev, force_pt) < 0) | |
1708 | goto out; | |
1709 | ||
1710 | /* | |
1711 | * Startup the struct se_device processing thread | |
1712 | */ | |
1713 | dev->process_thread = kthread_run(transport_processing_thread, dev, | |
1714 | "LIO_%s", TRANSPORT(dev)->name); | |
1715 | if (IS_ERR(dev->process_thread)) { | |
1716 | printk(KERN_ERR "Unable to create kthread: LIO_%s\n", | |
1717 | TRANSPORT(dev)->name); | |
1718 | goto out; | |
1719 | } | |
1720 | ||
1721 | /* | |
1722 | * Preload the initial INQUIRY const values if we are doing | |
1723 | * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI | |
1724 | * passthrough because this is being provided by the backend LLD. | |
1725 | * This is required so that transport_get_inquiry() copies these | |
1726 | * originals once back into DEV_T10_WWN(dev) for the virtual device | |
1727 | * setup. | |
1728 | */ | |
1729 | if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { | |
1730 | if (!(inquiry_prod) || !(inquiry_rev)) { | |
1731 | printk(KERN_ERR "All non TCM/pSCSI plugins require" | |
1732 | " INQUIRY consts\n"); | |
1733 | goto out; | |
1734 | } | |
1735 | ||
1736 | strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8); | |
1737 | strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16); | |
1738 | strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4); | |
1739 | } | |
1740 | scsi_dump_inquiry(dev); | |
1741 | ||
1742 | return dev; | |
1743 | out: | |
1744 | if (!IS_ERR_OR_NULL(dev->process_thread)) | |
1745 | kthread_stop(dev->process_thread); | |
1746 | ||
1747 | spin_lock(&hba->device_lock); | |
1748 | list_del(&dev->dev_list); | |
1749 | hba->dev_count--; | |
1750 | spin_unlock(&hba->device_lock); | |
1751 | ||
1752 | se_release_vpd_for_dev(dev); | |
1753 | ||
1754 | kfree(dev->dev_status_queue_obj); | |
1755 | kfree(dev->dev_queue_obj); | |
1756 | kfree(dev); | |
1757 | ||
1758 | return NULL; | |
1759 | } | |
1760 | EXPORT_SYMBOL(transport_add_device_to_core_hba); | |
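/*
 * Usage sketch (hypothetical, not from an in-tree backend): a virtual
 * subsystem plugin would typically call the function above from its
 * create-virtdevice path along these lines. The demo_* names, queue
 * depth, and INQUIRY strings are assumptions for illustration only;
 * note that non TCM/pSCSI plugins must pass the INQUIRY consts.
 */
#if 0
static struct se_subsystem_api demo_subsystem_api;

static struct se_device *demo_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *demo_dev_priv)
{
	struct se_dev_limits dev_limits;

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
	dev_limits.queue_depth = 32;

	return transport_add_device_to_core_hba(hba, &demo_subsystem_api,
			se_dev, 0, demo_dev_priv, &dev_limits,
			"DEMO", "0001");
}
#endif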
1761 | ||
1762 | /* transport_generic_prepare_cdb(): | |
1763 | * | |
1764 | * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will | |
1765 | * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2. | |
1766 | * The point of this is that, since we are mapping iSCSI LUNs to | |
1767 | * SCSI Target IDs, a non-zero LUN in the CDB will throw the | |
1768 | * devices and HBAs for a loop. | |
1769 | */ | |
1770 | static inline void transport_generic_prepare_cdb( | |
1771 | unsigned char *cdb) | |
1772 | { | |
1773 | switch (cdb[0]) { | |
1774 | case READ_10: /* SBC - RDProtect */ | |
1775 | case READ_12: /* SBC - RDProtect */ | |
1776 | case READ_16: /* SBC - RDProtect */ | |
1777 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ | |
1778 | case VERIFY: /* SBC - VRProtect */ | |
1779 | case VERIFY_16: /* SBC - VRProtect */ | |
1780 | case WRITE_VERIFY: /* SBC - VRProtect */ | |
1781 | case WRITE_VERIFY_12: /* SBC - VRProtect */ | |
1782 | break; | |
1783 | default: | |
1784 | cdb[1] &= 0x1f; /* clear logical unit number */ | |
1785 | break; | |
1786 | } | |
1787 | } | |
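/*
 * Worked example (editorial, illustrative only): a legacy initiator may
 * encode LUN bits in byte 1 of the CDB per SAM-2, e.g. TEST UNIT READY
 * arriving as { 0x00, 0x20, 0x00, 0x00, 0x00, 0x00 } for LUN 1. The
 * default case above masks byte 1 with 0x1f so the backend sees
 * { 0x00, 0x00, ... }, while opcodes such as READ_10 are left alone
 * because their byte 1 carries RDProtect/VRProtect and other flag bits.
 */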
1788 | ||
1789 | static struct se_task * | |
1790 | transport_generic_get_task(struct se_cmd *cmd, | |
1791 | enum dma_data_direction data_direction) | |
1792 | { | |
1793 | struct se_task *task; | |
1794 | struct se_device *dev = SE_DEV(cmd); | |
1795 | unsigned long flags; | |
1796 | ||
1797 | task = dev->transport->alloc_task(cmd); | |
1798 | if (!task) { | |
1799 | printk(KERN_ERR "Unable to allocate struct se_task\n"); | |
1800 | return NULL; | |
1801 | } | |
1802 | ||
1803 | INIT_LIST_HEAD(&task->t_list); | |
1804 | INIT_LIST_HEAD(&task->t_execute_list); | |
1805 | INIT_LIST_HEAD(&task->t_state_list); | |
1806 | init_completion(&task->task_stop_comp); | |
1807 | task->task_no = T_TASK(cmd)->t_tasks_no++; | |
1808 | task->task_se_cmd = cmd; | |
1809 | task->se_dev = dev; | |
1810 | task->task_data_direction = data_direction; | |
1811 | ||
1812 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
1813 | list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list); | |
1814 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
1815 | ||
1816 | return task; | |
1817 | } | |
1818 | ||
1819 | static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); | |
1820 | ||
1821 | void transport_device_setup_cmd(struct se_cmd *cmd) | |
1822 | { | |
1823 | cmd->se_dev = SE_LUN(cmd)->lun_se_dev; | |
1824 | } | |
1825 | EXPORT_SYMBOL(transport_device_setup_cmd); | |
1826 | ||
1827 | /* | |
1828 | * Used by fabric modules containing a local struct se_cmd within their | |
1829 | * fabric dependent per I/O descriptor. | |
1830 | */ | |
1831 | void transport_init_se_cmd( | |
1832 | struct se_cmd *cmd, | |
1833 | struct target_core_fabric_ops *tfo, | |
1834 | struct se_session *se_sess, | |
1835 | u32 data_length, | |
1836 | int data_direction, | |
1837 | int task_attr, | |
1838 | unsigned char *sense_buffer) | |
1839 | { | |
1840 | INIT_LIST_HEAD(&cmd->se_lun_list); | |
1841 | INIT_LIST_HEAD(&cmd->se_delayed_list); | |
1842 | INIT_LIST_HEAD(&cmd->se_ordered_list); | |
1843 | /* | |
1844 | * Setup t_task pointer to t_task_backstore | |
1845 | */ | |
1846 | cmd->t_task = &cmd->t_task_backstore; | |
1847 | ||
1848 | INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list); | |
1849 | init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); | |
1850 | init_completion(&T_TASK(cmd)->transport_lun_stop_comp); | |
1851 | init_completion(&T_TASK(cmd)->t_transport_stop_comp); | |
1852 | spin_lock_init(&T_TASK(cmd)->t_state_lock); | |
1853 | atomic_set(&T_TASK(cmd)->transport_dev_active, 1); | |
1854 | ||
1855 | cmd->se_tfo = tfo; | |
1856 | cmd->se_sess = se_sess; | |
1857 | cmd->data_length = data_length; | |
1858 | cmd->data_direction = data_direction; | |
1859 | cmd->sam_task_attr = task_attr; | |
1860 | cmd->sense_buffer = sense_buffer; | |
1861 | } | |
1862 | EXPORT_SYMBOL(transport_init_se_cmd); | |
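/*
 * Usage sketch (hypothetical fabric module code): embedding a struct
 * se_cmd inside a fabric dependent per I/O descriptor and initializing
 * it with the function above. demo_io_desc, demo_tfo and demo_init_io
 * are illustrative names only, as is the choice of data direction and
 * task attribute.
 */
#if 0
static struct target_core_fabric_ops demo_tfo;

struct demo_io_desc {
	struct se_cmd se_cmd;
	unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
	/* ... fabric private per I/O state ... */
};

static void demo_init_io(
	struct demo_io_desc *io,
	struct se_session *se_sess,
	u32 data_length)
{
	transport_init_se_cmd(&io->se_cmd, &demo_tfo, se_sess,
			data_length, DMA_FROM_DEVICE, TASK_ATTR_SIMPLE,
			&io->sense_buf[0]);
}
#endif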
1863 | ||
1864 | static int transport_check_alloc_task_attr(struct se_cmd *cmd) | |
1865 | { | |
1866 | /* | |
1867 | * Check if SAM Task Attribute emulation is enabled for this | |
1868 | * struct se_device storage object | |
1869 | */ | |
1870 | if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) | |
1871 | return 0; | |
1872 | ||
1873 | if (cmd->sam_task_attr == TASK_ATTR_ACA) { | |
1874 | DEBUG_STA("SAM Task Attribute ACA" | |
1875 | " emulation is not supported\n"); | |
1876 | return -1; | |
1877 | } | |
1878 | /* | |
1879 | * Used to determine when ORDERED commands should go from | |
1880 | * Dormant to Active status. | |
1881 | */ | |
1882 | cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id); | |
1883 | smp_mb__after_atomic_inc(); | |
1884 | DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", | |
1885 | cmd->se_ordered_id, cmd->sam_task_attr, | |
1886 | TRANSPORT(cmd->se_dev)->name); | |
1887 | return 0; | |
1888 | } | |
1889 | ||
1890 | void transport_free_se_cmd( | |
1891 | struct se_cmd *se_cmd) | |
1892 | { | |
1893 | if (se_cmd->se_tmr_req) | |
1894 | core_tmr_release_req(se_cmd->se_tmr_req); | |
1895 | /* | |
1896 | * Check and free any extended CDB buffer that was allocated | |
1897 | */ | |
1898 | if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb) | |
1899 | kfree(T_TASK(se_cmd)->t_task_cdb); | |
1900 | } | |
1901 | EXPORT_SYMBOL(transport_free_se_cmd); | |
1902 | ||
1903 | static void transport_generic_wait_for_tasks(struct se_cmd *, int, int); | |
1904 | ||
1905 | /* transport_generic_allocate_tasks(): | |
1906 | * | |
1907 | * Called from fabric RX Thread. | |
1908 | */ | |
1909 | int transport_generic_allocate_tasks( | |
1910 | struct se_cmd *cmd, | |
1911 | unsigned char *cdb) | |
1912 | { | |
1913 | int ret; | |
1914 | ||
1915 | transport_generic_prepare_cdb(cdb); | |
1916 | ||
1917 | /* | |
1918 | * This is needed for early exceptions. | |
1919 | */ | |
1920 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | |
1921 | ||
1922 | transport_device_setup_cmd(cmd); | |
1923 | /* | |
1924 | * Ensure that the received CDB does not exceed the max (252 + 8) bytes | |
1925 | * for VARIABLE_LENGTH_CMD | |
1926 | */ | |
1927 | if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { | |
1928 | printk(KERN_ERR "Received SCSI CDB with command_size: %d that" | |
1929 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", | |
1930 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); | |
1931 | return -1; | |
1932 | } | |
1933 | /* | |
1934 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, | |
1935 | * allocate the additional extended CDB buffer now. Otherwise | |
1936 | * setup the pointer from __t_task_cdb to t_task_cdb. | |
1937 | */ | |
1938 | if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) { | |
1939 | T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb), | |
1940 | GFP_KERNEL); | |
1941 | if (!(T_TASK(cmd)->t_task_cdb)) { | |
1942 | printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb" | |
1943 | " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n", | |
1944 | scsi_command_size(cdb), | |
1945 | (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb)); | |
1946 | return -1; | |
1947 | } | |
1948 | } else | |
1949 | T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0]; | |
1950 | /* | |
1951 | * Copy the original CDB into T_TASK(cmd). | |
1952 | */ | |
1953 | memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb)); | |
1954 | /* | |
1955 | * Setup the received CDB based on SCSI defined opcodes and | |
1956 | * perform unit attention, persistent reservations and ALUA | |
1957 | * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb | |
1958 | * pointer is expected to be setup before we reach this point. | |
1959 | */ | |
1960 | ret = transport_generic_cmd_sequencer(cmd, cdb); | |
1961 | if (ret < 0) | |
1962 | return ret; | |
1963 | /* | |
1964 | * Check for SAM Task Attribute Emulation | |
1965 | */ | |
1966 | if (transport_check_alloc_task_attr(cmd) < 0) { | |
1967 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
1968 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
1969 | return -2; | |
1970 | } | |
1971 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
1972 | if (cmd->se_lun->lun_sep) | |
1973 | cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; | |
1974 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
1975 | return 0; | |
1976 | } | |
1977 | EXPORT_SYMBOL(transport_generic_allocate_tasks); | |
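/*
 * Flow sketch (illustrative only): a fabric RX thread that does not
 * define TFO->new_cmd_map() would typically pair the call above with
 * transport_generic_handle_cdb() below. demo_rx_cdb is a hypothetical
 * name; real callers also translate failures into CHECK_CONDITION
 * status back to the initiator.
 */
#if 0
static int demo_rx_cdb(struct se_cmd *se_cmd, unsigned char *cdb)
{
	if (transport_generic_allocate_tasks(se_cmd, cdb) < 0)
		return -1;

	return transport_generic_handle_cdb(se_cmd);
}
#endif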
1978 | ||
1979 | /* | |
1980 | * Used by fabric module frontends not defining a TFO->new_cmd_map() | |
1981 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status | |
1982 | */ | |
1983 | int transport_generic_handle_cdb( | |
1984 | struct se_cmd *cmd) | |
1985 | { | |
1986 | if (!SE_LUN(cmd)) { | |
1987 | dump_stack(); | |
1988 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | |
1989 | return -1; | |
1990 | } | |
1991 | ||
1992 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); | |
1993 | return 0; | |
1994 | } | |
1995 | EXPORT_SYMBOL(transport_generic_handle_cdb); | |
1996 | ||
1997 | /* | |
1998 | * Used by fabric module frontends defining a TFO->new_cmd_map() caller | |
1999 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to | |
2000 | * complete setup in TCM process context w/ TFO->new_cmd_map(). | |
2001 | */ | |
2002 | int transport_generic_handle_cdb_map( | |
2003 | struct se_cmd *cmd) | |
2004 | { | |
2005 | if (!SE_LUN(cmd)) { | |
2006 | dump_stack(); | |
2007 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | |
2008 | return -1; | |
2009 | } | |
2010 | ||
2011 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); | |
2012 | return 0; | |
2013 | } | |
2014 | EXPORT_SYMBOL(transport_generic_handle_cdb_map); | |
2015 | ||
2016 | /* transport_generic_handle_data(): | |
2017 | * | |
2018 | * | |
2019 | */ | |
2020 | int transport_generic_handle_data( | |
2021 | struct se_cmd *cmd) | |
2022 | { | |
2023 | /* | |
2024 | * For the software fabric case, we assume the nexus is being | |
2025 | * failed/shutdown when signals are pending from the kthread context | |
2026 | * caller, so we return a failure. For the HW target mode case running | |
2027 | * in interrupt code, the signal_pending() check is skipped. | |
2028 | */ | |
2029 | if (!in_interrupt() && signal_pending(current)) | |
2030 | return -1; | |
2031 | /* | |
2032 | * If the received CDB has already been ABORTED by the generic | |
2033 | * target engine, we now call transport_check_aborted_status() | |
2034 | * to queue any delayed TASK_ABORTED status for the received CDB to the | |
2035 | * fabric module, as we are expecting no further incoming DATA OUT | |
2036 | * sequences at this point. | |
2037 | */ | |
2038 | if (transport_check_aborted_status(cmd, 1) != 0) | |
2039 | return 0; | |
2040 | ||
2041 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE); | |
2042 | return 0; | |
2043 | } | |
2044 | EXPORT_SYMBOL(transport_generic_handle_data); | |
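/*
 * Usage note (editorial): a fabric module would typically call
 * transport_generic_handle_data() from its DATA OUT completion path,
 * i.e. once the solicited write payload for the command has been
 * received, so that TRANSPORT_PROCESS_WRITE runs in the struct
 * se_device processing thread context rather than fabric RX context.
 */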
2045 | ||
2046 | /* transport_generic_handle_tmr(): | |
2047 | * | |
2048 | * | |
2049 | */ | |
2050 | int transport_generic_handle_tmr( | |
2051 | struct se_cmd *cmd) | |
2052 | { | |
2053 | /* | |
2054 | * This is needed for early exceptions. | |
2055 | */ | |
2056 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | |
2057 | transport_device_setup_cmd(cmd); | |
2058 | ||
2059 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); | |
2060 | return 0; | |
2061 | } | |
2062 | EXPORT_SYMBOL(transport_generic_handle_tmr); | |
2063 | ||
2064 | static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | |
2065 | { | |
2066 | struct se_task *task, *task_tmp; | |
2067 | unsigned long flags; | |
2068 | int ret = 0; | |
2069 | ||
2070 | DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", | |
2071 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
2072 | ||
2073 | /* | |
2074 | * No tasks remain in the execution queue | |
2075 | */ | |
2076 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2077 | list_for_each_entry_safe(task, task_tmp, | |
2078 | &T_TASK(cmd)->t_task_list, t_list) { | |
2079 | DEBUG_TS("task_no[%d] - Processing task %p\n", | |
2080 | task->task_no, task); | |
2081 | /* | |
2082 | * If the struct se_task has not been sent and is not active, | |
2083 | * remove the struct se_task from the execution queue. | |
2084 | */ | |
2085 | if (!atomic_read(&task->task_sent) && | |
2086 | !atomic_read(&task->task_active)) { | |
2087 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | |
2088 | flags); | |
2089 | transport_remove_task_from_execute_queue(task, | |
2090 | task->se_dev); | |
2091 | ||
2092 | DEBUG_TS("task_no[%d] - Removed from execute queue\n", | |
2093 | task->task_no); | |
2094 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2095 | continue; | |
2096 | } | |
2097 | ||
2098 | /* | |
2099 | * If the struct se_task is active, sleep until it is returned | |
2100 | * from the plugin. | |
2101 | */ | |
2102 | if (atomic_read(&task->task_active)) { | |
2103 | atomic_set(&task->task_stop, 1); | |
2104 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | |
2105 | flags); | |
2106 | ||
2107 | DEBUG_TS("task_no[%d] - Waiting to complete\n", | |
2108 | task->task_no); | |
2109 | wait_for_completion(&task->task_stop_comp); | |
2110 | DEBUG_TS("task_no[%d] - Stopped successfully\n", | |
2111 | task->task_no); | |
2112 | ||
2113 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2114 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); | |
2115 | ||
2116 | atomic_set(&task->task_active, 0); | |
2117 | atomic_set(&task->task_stop, 0); | |
2118 | } else { | |
2119 | DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no); | |
2120 | ret++; | |
2121 | } | |
2122 | ||
2123 | __transport_stop_task_timer(task, &flags); | |
2124 | } | |
2125 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2126 | ||
2127 | return ret; | |
2128 | } | |
2129 | ||
2130 | static void transport_failure_reset_queue_depth(struct se_device *dev) | |
2131 | { | |
2132 | unsigned long flags; | |
2133 | ||
2134 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | |
2135 | atomic_inc(&dev->depth_left); |
2136 | atomic_inc(&SE_HBA(dev)->left_queue_depth); | |
2137 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | |
2138 | } | |
2139 | ||
2140 | /* | |
2141 | * Handle SAM-esque emulation for generic transport request failures. | |
2142 | */ | |
2143 | static void transport_generic_request_failure( | |
2144 | struct se_cmd *cmd, | |
2145 | struct se_device *dev, | |
2146 | int complete, | |
2147 | int sc) | |
2148 | { | |
2149 | DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" | |
2150 | " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), | |
2151 | T_TASK(cmd)->t_task_cdb[0]); | |
2152 | DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" | |
2153 | " %d/%d transport_error_status: %d\n", | |
2154 | CMD_TFO(cmd)->get_cmd_state(cmd), | |
2155 | cmd->t_state, cmd->deferred_t_state, | |
2156 | cmd->transport_error_status); | |
2157 | DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" | |
2158 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" | |
2159 | " t_transport_active: %d t_transport_stop: %d" | |
2160 | " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs, | |
2161 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), | |
2162 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), | |
2163 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left), | |
2164 | atomic_read(&T_TASK(cmd)->t_transport_active), | |
2165 | atomic_read(&T_TASK(cmd)->t_transport_stop), | |
2166 | atomic_read(&T_TASK(cmd)->t_transport_sent)); | |
2167 | ||
2168 | transport_stop_all_task_timers(cmd); | |
2169 | ||
2170 | if (dev) | |
2171 | transport_failure_reset_queue_depth(dev); | |
2172 | /* | |
2173 | * For SAM Task Attribute emulation for failed struct se_cmd | |
2174 | */ | |
2175 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | |
2176 | transport_complete_task_attr(cmd); | |
2177 | ||
2178 | if (complete) { | |
2179 | transport_direct_request_timeout(cmd); | |
2180 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; | |
2181 | } | |
2182 | ||
2183 | switch (cmd->transport_error_status) { | |
2184 | case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE: | |
2185 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
2186 | break; | |
2187 | case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS: | |
2188 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; | |
2189 | break; | |
2190 | case PYX_TRANSPORT_INVALID_CDB_FIELD: | |
2191 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
2192 | break; | |
2193 | case PYX_TRANSPORT_INVALID_PARAMETER_LIST: | |
2194 | cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; | |
2195 | break; | |
2196 | case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES: | |
2197 | if (!sc) | |
2198 | transport_new_cmd_failure(cmd); | |
2199 | /* | |
2200 | * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES, | |
2201 | * we force this session to fall back to session | |
2202 | * recovery. | |
2203 | */ | |
2204 | CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess); | |
2205 | CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0); | |
2206 | ||
2207 | goto check_stop; | |
2208 | case PYX_TRANSPORT_LU_COMM_FAILURE: | |
2209 | case PYX_TRANSPORT_ILLEGAL_REQUEST: | |
2210 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
2211 | break; | |
2212 | case PYX_TRANSPORT_UNKNOWN_MODE_PAGE: | |
2213 | cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; | |
2214 | break; | |
2215 | case PYX_TRANSPORT_WRITE_PROTECTED: | |
2216 | cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | |
2217 | break; | |
2218 | case PYX_TRANSPORT_RESERVATION_CONFLICT: | |
2219 | /* | |
2220 | * No SENSE Data payload for this case, set SCSI Status | |
2221 | * and queue the response to $FABRIC_MOD. | |
2222 | * | |
2223 | * Uses linux/include/scsi/scsi.h SAM status codes defs | |
2224 | */ | |
2225 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | |
2226 | /* | |
2227 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | |
2228 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | |
2229 | * CONFLICT STATUS. | |
2230 | * | |
2231 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | |
2232 | */ | |
2233 | if (SE_SESS(cmd) && | |
2234 | DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) | |
2235 | core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, | |
2236 | cmd->orig_fe_lun, 0x2C, | |
2237 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | |
2238 | ||
2239 | CMD_TFO(cmd)->queue_status(cmd); | |
2240 | goto check_stop; | |
2241 | case PYX_TRANSPORT_USE_SENSE_REASON: | |
2242 | /* | |
2243 | * struct se_cmd->scsi_sense_reason already set | |
2244 | */ | |
2245 | break; | |
2246 | default: | |
2247 | printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", | |
2248 | T_TASK(cmd)->t_task_cdb[0], | |
2249 | cmd->transport_error_status); | |
2250 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
2251 | break; | |
2252 | } | |
2253 | ||
2254 | if (!sc) | |
2255 | transport_new_cmd_failure(cmd); | |
2256 | else | |
2257 | transport_send_check_condition_and_sense(cmd, | |
2258 | cmd->scsi_sense_reason, 0); | |
2259 | check_stop: | |
2260 | transport_lun_remove_cmd(cmd); | |
2261 | transport_cmd_check_stop_to_fabric(cmd); | |
2263 | } | |
2264 | ||
2265 | static void transport_direct_request_timeout(struct se_cmd *cmd) | |
2266 | { | |
2267 | unsigned long flags; | |
2268 | ||
2269 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2270 | if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) { | |
2271 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2272 | return; | |
2273 | } | |
2274 | if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) { | |
2275 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2276 | return; | |
2277 | } | |
2278 | ||
2279 | atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout), | |
2280 | &T_TASK(cmd)->t_se_count); | |
2281 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2282 | } | |
2283 | ||
2284 | static void transport_generic_request_timeout(struct se_cmd *cmd) | |
2285 | { | |
2286 | unsigned long flags; | |
2287 | ||
2288 | /* | |
2289 | * Reset T_TASK(cmd)->t_se_count so that the last call to | |
2290 | * transport_generic_remove() can free memory resources. | |
2291 | */ | |
2292 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2293 | if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) { | |
2294 | int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1); | |
2295 | ||
2296 | atomic_sub(tmp, &T_TASK(cmd)->t_se_count); | |
2297 | } | |
2298 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2299 | ||
2300 | transport_generic_remove(cmd, 0, 0); | |
2301 | } | |
2302 | ||
2303 | static int | |
2304 | transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length) | |
2305 | { | |
2306 | unsigned char *buf; | |
2307 | ||
2308 | buf = kzalloc(data_length, GFP_KERNEL); | |
2309 | if (!(buf)) { | |
2310 | printk(KERN_ERR "Unable to allocate memory for buffer\n"); | |
2311 | return -1; | |
2312 | } | |
2313 | ||
2314 | T_TASK(cmd)->t_tasks_se_num = 0; | |
2315 | T_TASK(cmd)->t_task_buf = buf; | |
2316 | ||
2317 | return 0; | |
2318 | } | |
2319 | ||
2320 | static inline u32 transport_lba_21(unsigned char *cdb) | |
2321 | { | |
2322 | return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; | |
2323 | } | |
2324 | ||
2325 | static inline u32 transport_lba_32(unsigned char *cdb) | |
2326 | { | |
2327 | return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | |
2328 | } | |
2329 | ||
2330 | static inline unsigned long long transport_lba_64(unsigned char *cdb) | |
2331 | { | |
2332 | unsigned int __v1, __v2; | |
2333 | ||
2334 | __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | |
2335 | __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
2336 | ||
2337 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | |
2338 | } | |
2339 | ||
2340 | /* | |
2341 | * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs | |
2342 | */ | |
2343 | static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) | |
2344 | { | |
2345 | unsigned int __v1, __v2; | |
2346 | ||
2347 | __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; | |
2348 | __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; | |
2349 | ||
2350 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | |
2351 | } | |
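/*
 * Note (editorial): the open-coded shifts above are equivalent to the
 * unaligned big-endian helpers, e.g. get_unaligned_be32(&cdb[2]) for
 * transport_lba_32() and get_unaligned_be64(&cdb[2]) for
 * transport_lba_64(), assuming the <asm/unaligned.h> helpers are
 * available on all supported architectures.
 */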
2352 | ||
2353 | static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) | |
2354 | { | |
2355 | unsigned long flags; | |
2356 | ||
2357 | spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); | |
2358 | se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; | |
2359 | spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); | |
2360 | } | |
2361 | ||
2362 | /* | |
2363 | * Called from interrupt context. | |
2364 | */ | |
2365 | static void transport_task_timeout_handler(unsigned long data) | |
2366 | { | |
2367 | struct se_task *task = (struct se_task *)data; | |
2368 | struct se_cmd *cmd = TASK_CMD(task); | |
2369 | unsigned long flags; | |
2370 | ||
2371 | DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); | |
2372 | ||
2373 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2374 | if (task->task_flags & TF_STOP) { | |
2375 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2376 | return; | |
2377 | } | |
2378 | task->task_flags &= ~TF_RUNNING; | |
2379 | ||
2380 | /* | |
2381 | * Determine if transport_complete_task() has already been called. | |
2382 | */ | |
2383 | if (!(atomic_read(&task->task_active))) { | |
2384 | DEBUG_TT("transport task: %p cmd: %p timeout task_active" | |
2385 | " == 0\n", task, cmd); | |
2386 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2387 | return; | |
2388 | } | |
2389 | ||
2390 | atomic_inc(&T_TASK(cmd)->t_se_count); | |
2391 | atomic_inc(&T_TASK(cmd)->t_transport_timeout); | |
2392 | T_TASK(cmd)->t_tasks_failed = 1; | |
2393 | ||
2394 | atomic_set(&task->task_timeout, 1); | |
2395 | task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; | |
2396 | task->task_scsi_status = 1; | |
2397 | ||
2398 | if (atomic_read(&task->task_stop)) { | |
2399 | DEBUG_TT("transport task: %p cmd: %p timeout task_stop" | |
2400 | " == 1\n", task, cmd); | |
2401 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2402 | complete(&task->task_stop_comp); | |
2403 | return; | |
2404 | } | |
2405 | ||
2406 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { | |
2407 | DEBUG_TT("transport task: %p cmd: %p timeout non zero" | |
2408 | " t_task_cdbs_left\n", task, cmd); | |
2409 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2410 | return; | |
2411 | } | |
2412 | DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", | |
2413 | task, cmd); | |
2414 | ||
2415 | cmd->t_state = TRANSPORT_COMPLETE_FAILURE; | |
2416 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2417 | ||
2418 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); | |
2419 | } | |
2420 | ||
2421 | /* | |
2422 | * Called with T_TASK(cmd)->t_state_lock held. | |
2423 | */ | |
2424 | static void transport_start_task_timer(struct se_task *task) | |
2425 | { | |
2426 | struct se_device *dev = task->se_dev; | |
2427 | int timeout; | |
2428 | ||
2429 | if (task->task_flags & TF_RUNNING) | |
2430 | return; | |
2431 | /* | |
2432 | * If the task_timeout is disabled, exit now. | |
2433 | */ | |
2434 | timeout = DEV_ATTRIB(dev)->task_timeout; | |
2435 | if (!(timeout)) | |
2436 | return; | |
2437 | ||
2438 | init_timer(&task->task_timer); | |
2439 | task->task_timer.expires = (get_jiffies_64() + timeout * HZ); | |
2440 | task->task_timer.data = (unsigned long) task; | |
2441 | task->task_timer.function = transport_task_timeout_handler; | |
2442 | ||
2443 | task->task_flags |= TF_RUNNING; | |
2444 | add_timer(&task->task_timer); | |
2445 | #if 0 | |
2446 | printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:" | |
2447 | " %d\n", task->task_se_cmd, task, timeout); | |
2448 | #endif | |
2449 | } | |
2450 | ||
2451 | /* | |
2452 | * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held. | |
2453 | */ | |
2454 | void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) | |
2455 | { | |
2456 | struct se_cmd *cmd = TASK_CMD(task); | |
2457 | ||
2458 | if (!(task->task_flags & TF_RUNNING)) | |
2459 | return; | |
2460 | ||
2461 | task->task_flags |= TF_STOP; | |
2462 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags); | |
2463 | ||
2464 | del_timer_sync(&task->task_timer); | |
2465 | ||
2466 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags); | |
2467 | task->task_flags &= ~TF_RUNNING; | |
2468 | task->task_flags &= ~TF_STOP; | |
2469 | } | |
2470 | ||
2471 | static void transport_stop_all_task_timers(struct se_cmd *cmd) | |
2472 | { | |
2473 | struct se_task *task = NULL, *task_tmp; | |
2474 | unsigned long flags; | |
2475 | ||
2476 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2477 | list_for_each_entry_safe(task, task_tmp, | |
2478 | &T_TASK(cmd)->t_task_list, t_list) | |
2479 | __transport_stop_task_timer(task, &flags); | |
2480 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2481 | } | |
2482 | ||
2483 | static inline int transport_tcq_window_closed(struct se_device *dev) | |
2484 | { | |
2485 | if (dev->dev_tcq_window_closed++ < | |
2486 | PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) { | |
2487 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT); | |
2488 | } else | |
2489 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); | |
2490 | ||
2491 | wake_up_interruptible(&dev->dev_queue_obj->thread_wq); | |
2492 | return 0; | |
2493 | } | |
2494 | ||
2495 | /* | |
2496 | * Called from Fabric Module context from transport_execute_tasks() | |
2497 | * | |
2498 | * The return of this function determines whether the tasks from struct se_cmd | |
2499 | * get added to the execution queue in transport_execute_tasks(), | |
2500 | * or are added to the delayed or ordered lists here. | |
2501 | */ | |
2502 | static inline int transport_execute_task_attr(struct se_cmd *cmd) | |
2503 | { | |
2504 | if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) | |
2505 | return 1; | |
2506 | /* | |
2507 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 | |
2508 | * to allow the tasks of the passed struct se_cmd to be added to the front of the list. | |
2509 | */ | |
2510 | if (cmd->sam_task_attr == TASK_ATTR_HOQ) { | |
2511 | atomic_inc(&SE_DEV(cmd)->dev_hoq_count); | |
2512 | smp_mb__after_atomic_inc(); | |
2513 | DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" | |
2514 | " 0x%02x, se_ordered_id: %u\n", | |
2515 | T_TASK(cmd)->t_task_cdb[0], | |
2516 | cmd->se_ordered_id); | |
2517 | return 1; | |
2518 | } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) { | |
2519 | spin_lock(&SE_DEV(cmd)->ordered_cmd_lock); | |
2520 | list_add_tail(&cmd->se_ordered_list, | |
2521 | &SE_DEV(cmd)->ordered_cmd_list); | |
2522 | spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock); | |
2523 | ||
2524 | atomic_inc(&SE_DEV(cmd)->dev_ordered_sync); | |
2525 | smp_mb__after_atomic_inc(); | |
2526 | ||
2527 | DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" | |
2528 | " list, se_ordered_id: %u\n", | |
2529 | T_TASK(cmd)->t_task_cdb[0], | |
2530 | cmd->se_ordered_id); | |
2531 | /* | |
2532 | * Add ORDERED command to tail of execution queue if | |
2533 | * no other older commands exist that need to be | |
2534 | * completed first. | |
2535 | */ | |
2536 | if (!(atomic_read(&SE_DEV(cmd)->simple_cmds))) | |
2537 | return 1; | |
2538 | } else { | |
2539 | /* | |
2540 | * For SIMPLE and UNTAGGED Task Attribute commands | |
2541 | */ | |
2542 | atomic_inc(&SE_DEV(cmd)->simple_cmds); | |
2543 | smp_mb__after_atomic_inc(); | |
2544 | } | |
2545 | /* | |
2546 | * If one or more ORDERED task attribute commands are still | |
2547 | * outstanding, the dormant task(s) built for the passed struct se_cmd | |
2548 | * must not become Active on this struct se_device yet. | |
2549 | */ | |
2550 | if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) { | |
2551 | /* | |
2552 | * Add the cmd w/ tasks to the delayed cmd queue, which will | |
2553 | * be drained upon completion of the HEAD_OF_QUEUE task. | |
2554 | */ | |
2555 | spin_lock(&SE_DEV(cmd)->delayed_cmd_lock); | |
2556 | cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; | |
2557 | list_add_tail(&cmd->se_delayed_list, | |
2558 | &SE_DEV(cmd)->delayed_cmd_list); | |
2559 | spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock); | |
2560 | ||
2561 | DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" | |
2562 | " delayed CMD list, se_ordered_id: %u\n", | |
2563 | T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr, | |
2564 | cmd->se_ordered_id); | |
2565 | /* | |
2566 | * Return zero to let transport_execute_tasks() know | |
2567 | * not to add the delayed tasks to the execution list. | |
2568 | */ | |
2569 | return 0; | |
2570 | } | |
2571 | /* | |
2572 | * Otherwise, no ORDERED task attributes exist. | |
2573 | */ | |
2574 | return 1; | |
2575 | } | |
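/*
 * Worked scenario (editorial, illustrative only): with two SIMPLE
 * commands in flight (dev->simple_cmds == 2), an arriving ORDERED
 * command bumps dev_ordered_sync and is itself parked on
 * dev->delayed_cmd_list (return 0 above); any later SIMPLE command is
 * likewise delayed until the ORDERED command completes and the delayed
 * list is drained.
 */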
2576 | ||
2577 | /* | |
2578 | * Called from fabric module context in transport_generic_new_cmd() and | |
2579 | * transport_generic_process_write() | |
2580 | */ | |
2581 | static int transport_execute_tasks(struct se_cmd *cmd) | |
2582 | { | |
2583 | int add_tasks; | |
2584 | ||
2585 | if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) { | |
2586 | if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { | |
2587 | cmd->transport_error_status = | |
2588 | PYX_TRANSPORT_LU_COMM_FAILURE; | |
2589 | transport_generic_request_failure(cmd, NULL, 0, 1); | |
2590 | return 0; | |
2591 | } | |
2592 | } | |
2593 | /* | |
2594 | * Call transport_cmd_check_stop() to see if a fabric exception | |
2595 | * has occurred that prevents execution. | |
2596 | */ | |
2597 | if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) { | |
2598 | /* | |
2599 | * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE | |
2600 | * attribute for the tasks of the received struct se_cmd CDB | |
2601 | */ | |
2602 | add_tasks = transport_execute_task_attr(cmd); | |
2603 | if (add_tasks == 0) | |
2604 | goto execute_tasks; | |
2605 | /* | |
2606 | * This calls transport_add_tasks_from_cmd() to handle | |
2607 | * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation | |
2608 | * (if enabled) in __transport_add_task_to_execute_queue() and | |
2609 | * transport_add_task_check_sam_attr(). | |
2610 | */ | |
2611 | transport_add_tasks_from_cmd(cmd); | |
2612 | } | |
2613 | /* | |
2614 | * Kick the execution queue for the cmd associated struct se_device | |
2615 | * storage object. | |
2616 | */ | |
2617 | execute_tasks: | |
2618 | __transport_execute_tasks(SE_DEV(cmd)); | |
2619 | return 0; | |
2620 | } | |
2621 | ||
2622 | /* | |
2623 | * Called to check the struct se_device tcq depth window, and once open, | |
2624 | * pull struct se_task from struct se_device->execute_task_list and dispatch it. | |
2625 | * | |
2626 | * Called from transport_processing_thread() | |
2627 | */ | |
2628 | static int __transport_execute_tasks(struct se_device *dev) | |
2629 | { | |
2630 | int error; | |
2631 | struct se_cmd *cmd = NULL; | |
2632 | struct se_task *task; | |
2633 | unsigned long flags; | |
2634 | ||
2635 | /* | |
2636 | * Check if there is enough room in the device and HBA queue to send | |
2637 | * struct se_transport_task's to the selected transport. | |
2638 | */ | |
2639 | check_depth: | |
2640 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | |
2641 | if (!(atomic_read(&dev->depth_left)) || | |
2642 | !(atomic_read(&SE_HBA(dev)->left_queue_depth))) { | |
2643 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | |
2644 | return transport_tcq_window_closed(dev); | |
2645 | } | |
2646 | dev->dev_tcq_window_closed = 0; | |
2647 | ||
2648 | spin_lock(&dev->execute_task_lock); | |
2649 | task = transport_get_task_from_execute_queue(dev); | |
2650 | spin_unlock(&dev->execute_task_lock); | |
2651 | ||
2652 | if (!task) { | |
2653 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | |
2654 | return 0; | |
2655 | } | |
2656 | ||
2657 | atomic_dec(&dev->depth_left); | |
2658 | atomic_dec(&SE_HBA(dev)->left_queue_depth); | |
2659 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | |
2660 | ||
2661 | cmd = TASK_CMD(task); | |
2662 | ||
2663 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2664 | atomic_set(&task->task_active, 1); | |
2665 | atomic_set(&task->task_sent, 1); | |
2666 | atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent); | |
2667 | ||
2668 | if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) == | |
2669 | T_TASK(cmd)->t_task_cdbs) | |
2670 | atomic_set(&cmd->transport_sent, 1); | |
2671 | ||
2672 | transport_start_task_timer(task); | |
2673 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2674 | /* | |
2675 | * The struct se_cmd->transport_emulate_cdb() function pointer is used | |
2676 | * to grab REPORT_LUNS CDBs before they hit the | |
2677 | * struct se_subsystem_api->do_task() caller below. | |
2678 | */ | |
2679 | if (cmd->transport_emulate_cdb) { | |
2680 | error = cmd->transport_emulate_cdb(cmd); | |
2681 | if (error != 0) { | |
2682 | cmd->transport_error_status = error; | |
2683 | atomic_set(&task->task_active, 0); | |
2684 | atomic_set(&cmd->transport_sent, 0); | |
2685 | transport_stop_tasks_for_cmd(cmd); | |
2686 | transport_generic_request_failure(cmd, dev, 0, 1); | |
2687 | goto check_depth; | |
2688 | } | |
2689 | /* | |
2690 | * Handle the successful completion for transport_emulate_cdb() | |
2691 | * for synchronous operation, when SCF_EMULATE_CDB_ASYNC is not set. | |
2692 | * Otherwise the caller is expected to complete the task with | |
2693 | * proper status. | |
2694 | */ | |
2695 | if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { | |
2696 | cmd->scsi_status = SAM_STAT_GOOD; | |
2697 | task->task_scsi_status = GOOD; | |
2698 | transport_complete_task(task, 1); | |
2699 | } | |
2700 | } else { | |
2701 | /* | |
2702 | * Currently for all virtual TCM plugins including IBLOCK, FILEIO and | |
2703 | * RAMDISK we use the internal transport_emulate_control_cdb() logic | |
2704 | * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK | |
2705 | * LUN emulation code. | |
2706 | * | |
2707 | * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we | |
2708 | * call ->do_task() directly and let the underlying TCM subsystem plugin | |
2709 | * code handle the CDB emulation. | |
2710 | */ | |
2711 | if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && | |
2712 | (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) | |
2713 | error = transport_emulate_control_cdb(task); | |
2714 | else | |
2715 | error = TRANSPORT(dev)->do_task(task); | |
2716 | ||
2717 | if (error != 0) { | |
2718 | cmd->transport_error_status = error; | |
2719 | atomic_set(&task->task_active, 0); | |
2720 | atomic_set(&cmd->transport_sent, 0); | |
2721 | transport_stop_tasks_for_cmd(cmd); | |
2722 | transport_generic_request_failure(cmd, dev, 0, 1); | |
2723 | } | |
2724 | } | |
2725 | ||
2726 | goto check_depth; | |
2727 | ||
2728 | return 0; | |
2729 | } | |
2730 | ||
2731 | void transport_new_cmd_failure(struct se_cmd *se_cmd) | |
2732 | { | |
2733 | unsigned long flags; | |
2734 | /* | |
2735 | * Any unsolicited data will get dumped for the failed command inside | |
2736 | * of the fabric plugin. | |
2737 | */ | |
2738 | spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); | |
2739 | se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; | |
2740 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
2741 | spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); | |
2742 | ||
2743 | CMD_TFO(se_cmd)->new_cmd_failure(se_cmd); | |
2744 | } | |
2745 | ||
2746 | static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); | |
2747 | ||
2748 | static inline u32 transport_get_sectors_6( | |
2749 | unsigned char *cdb, | |
2750 | struct se_cmd *cmd, | |
2751 | int *ret) | |
2752 | { | |
2753 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | |
2754 | ||
2755 | /* | |
2756 | * Assume TYPE_DISK for non struct se_device objects. | |
2757 | * Use 8-bit sector value. | |
2758 | */ | |
2759 | if (!dev) | |
2760 | goto type_disk; | |
2761 | ||
2762 | /* | |
2763 | * Use 24-bit allocation length for TYPE_TAPE. | |
2764 | */ | |
2765 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) | |
2766 | return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; | |
2767 | ||
2768 | /* | |
2769 | * Everything else assume TYPE_DISK Sector CDB location. | |
2770 | * Use 8-bit sector value. | |
2771 | */ | |
2772 | type_disk: | |
2773 | return (u32)cdb[4]; | |
2774 | } | |
2775 | ||
2776 | static inline u32 transport_get_sectors_10( | |
2777 | unsigned char *cdb, | |
2778 | struct se_cmd *cmd, | |
2779 | int *ret) | |
2780 | { | |
2781 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | |
2782 | ||
2783 | /* | |
2784 | * Assume TYPE_DISK for non struct se_device objects. | |
2785 | * Use 16-bit sector value. | |
2786 | */ | |
2787 | if (!dev) | |
2788 | goto type_disk; | |
2789 | ||
2790 | /* | |
2791 | * XXX_10 is not defined in SSC, throw an exception | |
2792 | */ | |
2793 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | |
2794 | *ret = -1; | |
2795 | return 0; | |
2796 | } | |
2797 | ||
2798 | /* | |
2799 | * Everything else assume TYPE_DISK Sector CDB location. | |
2800 | * Use 16-bit sector value. | |
2801 | */ | |
2802 | type_disk: | |
2803 | return (u32)(cdb[7] << 8) + cdb[8]; | |
2804 | } | |
2805 | ||
2806 | static inline u32 transport_get_sectors_12( | |
2807 | unsigned char *cdb, | |
2808 | struct se_cmd *cmd, | |
2809 | int *ret) | |
2810 | { | |
2811 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | |
2812 | ||
2813 | /* | |
2814 | * Assume TYPE_DISK for non struct se_device objects. | |
2815 | * Use 32-bit sector value. | |
2816 | */ | |
2817 | if (!dev) | |
2818 | goto type_disk; | |
2819 | ||
2820 | /* | |
2821 | * XXX_12 is not defined in SSC, throw an exception | |
2822 | */ | |
2823 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | |
2824 | *ret = -1; | |
2825 | return 0; | |
2826 | } | |
2827 | ||
2828 | /* | |
2829 | * Everything else assume TYPE_DISK Sector CDB location. | |
2830 | * Use 32-bit sector value. | |
2831 | */ | |
2832 | type_disk: | |
2833 | return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; | |
2834 | } | |
2835 | ||
2836 | static inline u32 transport_get_sectors_16( | |
2837 | unsigned char *cdb, | |
2838 | struct se_cmd *cmd, | |
2839 | int *ret) | |
2840 | { | |
2841 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | |
2842 | ||
2843 | /* | |
2844 | * Assume TYPE_DISK for non struct se_device objects. | |
2845 | * Use 32-bit sector value. | |
2846 | */ | |
2847 | if (!dev) | |
2848 | goto type_disk; | |
2849 | ||
2850 | /* | |
2851 | * Use 24-bit allocation length for TYPE_TAPE. | |
2852 | */ | |
2853 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) | |
2854 | return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; | |
2855 | ||
2856 | type_disk: | |
2857 | return (u32)(cdb[10] << 24) + (cdb[11] << 16) + | |
2858 | (cdb[12] << 8) + cdb[13]; | |
2859 | } | |
2860 | ||
2861 | /* | |
2862 | * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants | |
2863 | */ | |
2864 | static inline u32 transport_get_sectors_32( | |
2865 | unsigned char *cdb, | |
2866 | struct se_cmd *cmd, | |
2867 | int *ret) | |
2868 | { | |
2869 | /* | |
2870 | * Assume TYPE_DISK for non struct se_device objects. | |
2871 | * Use 32-bit sector value. | |
2872 | */ | |
2873 | return (u32)(cdb[28] << 24) + (cdb[29] << 16) + | |
2874 | (cdb[30] << 8) + cdb[31]; | |
2875 | ||
2876 | } | |
2877 | ||
2878 | static inline u32 transport_get_size( | |
2879 | u32 sectors, | |
2880 | unsigned char *cdb, | |
2881 | struct se_cmd *cmd) | |
2882 | { | |
2883 | struct se_device *dev = SE_DEV(cmd); | |
2884 | ||
2885 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | |
2886 | if (cdb[1] & 1) { /* sectors */ | |
2887 | return DEV_ATTRIB(dev)->block_size * sectors; | |
2888 | } else /* bytes */ | |
2889 | return sectors; | |
2890 | } | |
2891 | #if 0 | |
2892 | printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for" | |
2893 | " %s object\n", DEV_ATTRIB(dev)->block_size, sectors, | |
2894 | DEV_ATTRIB(dev)->block_size * sectors, | |
2895 | TRANSPORT(dev)->name); | |
2896 | #endif | |
2897 | return DEV_ATTRIB(dev)->block_size * sectors; | |
2898 | } | |
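/*
 * Worked example (editorial, illustrative only): for a READ_10 of 8
 * sectors on a TYPE_DISK backend with a 512 byte block_size,
 * transport_get_size() returns 8 * 512 = 4096 bytes. For TYPE_TAPE
 * with cdb[1] bit 0 clear, the CDB length field is already a byte
 * count and is returned as-is.
 */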
2899 | ||
2900 | unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]) | |
2901 | { | |
2902 | unsigned char result = 0; | |
2903 | /* | |
2904 | * MSB | |
2905 | */ | |
2906 | if ((val[0] >= 'a') && (val[0] <= 'f')) | |
2907 | result = ((val[0] - 'a' + 10) & 0xf) << 4; | |
2908 | else | |
2909 | if ((val[0] >= 'A') && (val[0] <= 'F')) | |
2910 | result = ((val[0] - 'A' + 10) & 0xf) << 4; | |
2911 | else /* digit */ | |
2912 | result = ((val[0] - '0') & 0xf) << 4; | |
2913 | /* | |
2914 | * LSB | |
2915 | */ | |
2916 | if ((val[1] >= 'a') && (val[1] <= 'f')) | |
2917 | result |= ((val[1] - 'a' + 10) & 0xf); | |
2918 | else | |
2919 | if ((val[1] >= 'A') && (val[1] <= 'F')) | |
2920 | result |= ((val[1] - 'A' + 10) & 0xf); | |
2921 | else /* digit */ | |
2922 | result |= ((val[1] - '0') & 0xf); | |
2923 | ||
2924 | return result; | |
2925 | } | |
2926 | EXPORT_SYMBOL(transport_asciihex_to_binaryhex); | |
2927 | ||
2928 | static void transport_xor_callback(struct se_cmd *cmd) | |
2929 | { | |
2930 | unsigned char *buf, *addr; | |
2931 | struct se_mem *se_mem; | |
2932 | unsigned int offset; | |
2933 | int i; | |
2934 | /* | |
2935 | * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command | |
2936 | * | |
2937 | * 1) read the specified logical block(s); | |
2938 | * 2) transfer logical blocks from the data-out buffer; | |
2939 | * 3) XOR the logical blocks transferred from the data-out buffer with | |
2940 | * the logical blocks read, storing the resulting XOR data in a buffer; | |
2941 | * 4) if the DISABLE WRITE bit is set to zero, then write the logical | |
2942 | * blocks transferred from the data-out buffer; and | |
2943 | * 5) transfer the resulting XOR data to the data-in buffer. | |
2944 | */ | |
2945 | buf = kmalloc(cmd->data_length, GFP_KERNEL); | |
2946 | if (!(buf)) { | |
2947 | printk(KERN_ERR "Unable to allocate xor_callback buf\n"); | |
2948 | return; | |
2949 | } | |
2950 | /* | |
2951 | * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list | |
2952 | * into the locally allocated *buf | |
2953 | */ | |
2954 | transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list); | |
2955 | /* | |
2956 | * Now perform the XOR against the BIDI read memory located at | |
2957 | * T_TASK(cmd)->t_mem_bidi_list | |
2958 | */ | |
2959 | ||
2960 | offset = 0; | |
2961 | list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) { | |
2962 | addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); | |
2963 | if (!(addr)) | |
2964 | goto out; | |
2965 | ||
2966 | for (i = 0; i < se_mem->se_len; i++) | |
2967 | *(addr + se_mem->se_off + i) ^= *(buf + offset + i); | |
2968 | ||
2969 | offset += se_mem->se_len; | |
2970 | kunmap_atomic(addr, KM_USER0); | |
2971 | } | |
2972 | out: | |
2973 | kfree(buf); | |
2974 | } | |
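/*
 * Worked example (editorial, illustrative only): for XDWRITEREAD with
 * DISABLE WRITE set to zero, a data-out payload of { 0xff, 0x0f }
 * XORed against previously read blocks { 0xf0, 0x0f } leaves
 * { 0x0f, 0x00 } in the BIDI data-in buffer updated above.
 */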
2975 | ||
2976 | /* | |
2977 | * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd | |
2978 | */ | |
2979 | static int transport_get_sense_data(struct se_cmd *cmd) | |
2980 | { | |
2981 | unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; | |
2982 | struct se_device *dev; | |
2983 | struct se_task *task = NULL, *task_tmp; | |
2984 | unsigned long flags; | |
2985 | u32 offset = 0; | |
2986 | ||
2987 | if (!SE_LUN(cmd)) { | |
2988 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | |
2989 | return -1; | |
2990 | } | |
2991 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2992 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | |
2993 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2994 | return 0; | |
2995 | } | |
2996 | ||
2997 | list_for_each_entry_safe(task, task_tmp, | |
2998 | &T_TASK(cmd)->t_task_list, t_list) { | |
2999 | ||
3000 | if (!task->task_sense) | |
3001 | continue; | |
3002 | ||
3003 | dev = task->se_dev; | |
3004 | if (!(dev)) | |
3005 | continue; | |
3006 | ||
3007 | if (!TRANSPORT(dev)->get_sense_buffer) { | |
3008 | printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer" | |
3009 | " is NULL\n"); | |
3010 | continue; | |
3011 | } | |
3012 | ||
3013 | sense_buffer = TRANSPORT(dev)->get_sense_buffer(task); | |
3014 | if (!(sense_buffer)) { | |
3015 | printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate" | |
3016 | " sense buffer for task with sense\n", | |
3017 | CMD_TFO(cmd)->get_task_tag(cmd), task->task_no); | |
3018 | continue; | |
3019 | } | |
3020 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
3021 | ||
3022 | offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, | |
3023 | TRANSPORT_SENSE_BUFFER); | |
3024 | ||
3025 | memcpy((void *)&buffer[offset], (void *)sense_buffer, | |
3026 | TRANSPORT_SENSE_BUFFER); | |
3027 | cmd->scsi_status = task->task_scsi_status; | |
3028 | /* Automatically padded */ | |
3029 | cmd->scsi_sense_length = | |
3030 | (TRANSPORT_SENSE_BUFFER + offset); | |
3031 | ||
3032 | printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" | |
3033 | " and sense\n", | |
3034 | dev->se_hba->hba_id, TRANSPORT(dev)->name, | |
3035 | cmd->scsi_status); | |
3036 | return 0; | |
3037 | } | |
3038 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
3039 | ||
3040 | return -1; | |
3041 | } | |
3042 | ||
3043 | static int transport_allocate_resources(struct se_cmd *cmd) | |
3044 | { | |
3045 | u32 length = cmd->data_length; | |
3046 | ||
3047 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || | |
3048 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) | |
3049 | return transport_generic_get_mem(cmd, length, PAGE_SIZE); | |
3050 | else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) | |
3051 | return transport_generic_allocate_buf(cmd, length); | |
3052 | else | |
3053 | return 0; | |
3054 | } | |
3055 | ||
3056 | static int | |
3057 | transport_handle_reservation_conflict(struct se_cmd *cmd) | |
3058 | { | |
3059 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | |
3060 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3061 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; | |
3062 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | |
3063 | /* | |
3064 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | |
3065 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | |
3066 | * CONFLICT STATUS. | |
3067 | * | |
3068 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | |
3069 | */ | |
3070 | if (SE_SESS(cmd) && | |
3071 | DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) | |
3072 | core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, | |
3073 | cmd->orig_fe_lun, 0x2C, | |
3074 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | |
3075 | return -2; | |
3076 | } | |
3077 | ||
3078 | /* transport_generic_cmd_sequencer(): | |
3079 | * | |
3080 | * Generic Command Sequencer that should work for most DAS transport | |
3081 | * drivers. | |
3082 | * | |
3083 | * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD | |
3084 | * RX Thread. | |
3085 | * | |
3086 | * FIXME: Need to support other SCSI OPCODES here as well. | |
3087 | */ | |
3088 | static int transport_generic_cmd_sequencer( | |
3089 | struct se_cmd *cmd, | |
3090 | unsigned char *cdb) | |
3091 | { | |
3092 | struct se_device *dev = SE_DEV(cmd); | |
3093 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; | |
3094 | int ret = 0, sector_ret = 0, passthrough; | |
3095 | u32 sectors = 0, size = 0, pr_reg_type = 0; | |
3096 | u16 service_action; | |
3097 | u8 alua_ascq = 0; | |
3098 | /* | |
3099 | * Check for an existing UNIT ATTENTION condition | |
3100 | */ | |
3101 | if (core_scsi3_ua_check(cmd, cdb) < 0) { | |
3102 | cmd->transport_wait_for_tasks = | |
3103 | &transport_nop_wait_for_tasks; | |
3104 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3105 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; | |
3106 | return -2; | |
3107 | } | |
3108 | /* | |
3109 | * Check status of Asymmetric Logical Unit Assignment port | |
3110 | */ | |
3111 | ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq); | |
3112 | if (ret != 0) { | |
3113 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | |
3114 | /* | |
3115 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; | |
3116 | * the ALUA additional sense code qualifier (ASCQ) is determined | |
3117 | * by the ALUA primary or secondary access state. | |
3118 | */ | |
3119 | if (ret > 0) { | |
3120 | #if 0 | |
3121 | printk(KERN_INFO "[%s]: ALUA TG Port not available," | |
3122 | " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", | |
3123 | CMD_TFO(cmd)->get_fabric_name(), alua_ascq); | |
3124 | #endif | |
3125 | transport_set_sense_codes(cmd, 0x04, alua_ascq); | |
3126 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3127 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; | |
3128 | return -2; | |
3129 | } | |
3130 | goto out_invalid_cdb_field; | |
3131 | } | |
3132 | /* | |
3133 | * Check status for SPC-3 Persistent Reservations | |
3134 | */ | |
3135 | if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) { | |
3136 | if (T10_PR_OPS(su_dev)->t10_seq_non_holder( | |
3137 | cmd, cdb, pr_reg_type) != 0) | |
3138 | return transport_handle_reservation_conflict(cmd); | |
3139 | /* | |
3140 | * This means the CDB is allowed for the SCSI Initiator port | |
3141 | * when said port is *NOT* holding the legacy SPC-2 or | |
3142 | * SPC-3 Persistent Reservation. | |
3143 | */ | |
3144 | } | |
3145 | ||
3146 | switch (cdb[0]) { | |
3147 | case READ_6: | |
3148 | sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); | |
3149 | if (sector_ret) | |
3150 | goto out_unsupported_cdb; | |
3151 | size = transport_get_size(sectors, cdb, cmd); | |
3152 | cmd->transport_split_cdb = &split_cdb_XX_6; | |
3153 | T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); | |
3154 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3155 | break; | |
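/*
 * Sketch of the 6-byte CDB decode that transport_lba_21() is expected to
 * perform for READ_6/WRITE_6 above: the 21-bit LBA spans the low five
 * bits of byte 1 plus bytes 2-3 (big-endian), per SBC. Illustrative
 * only, hypothetical helper name, not wired into the sequencer:
 */
#if 0
static inline unsigned long long example_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}
#endif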
3156 | case READ_10: | |
3157 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | |
3158 | if (sector_ret) | |
3159 | goto out_unsupported_cdb; | |
3160 | size = transport_get_size(sectors, cdb, cmd); | |
3161 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
3162 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | |
3163 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3164 | break; | |
3165 | case READ_12: | |
3166 | sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); | |
3167 | if (sector_ret) | |
3168 | goto out_unsupported_cdb; | |
3169 | size = transport_get_size(sectors, cdb, cmd); | |
3170 | cmd->transport_split_cdb = &split_cdb_XX_12; | |
3171 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | |
3172 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3173 | break; | |
3174 | case READ_16: | |
3175 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | |
3176 | if (sector_ret) | |
3177 | goto out_unsupported_cdb; | |
3178 | size = transport_get_size(sectors, cdb, cmd); | |
3179 | cmd->transport_split_cdb = &split_cdb_XX_16; | |
3180 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | |
3181 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3182 | break; | |
3183 | case WRITE_6: | |
3184 | sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); | |
3185 | if (sector_ret) | |
3186 | goto out_unsupported_cdb; | |
3187 | size = transport_get_size(sectors, cdb, cmd); | |
3188 | cmd->transport_split_cdb = &split_cdb_XX_6; | |
3189 | T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); | |
3190 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3191 | break; | |
3192 | case WRITE_10: | |
3193 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | |
3194 | if (sector_ret) | |
3195 | goto out_unsupported_cdb; | |
3196 | size = transport_get_size(sectors, cdb, cmd); | |
3197 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
3198 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | |
3199 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | |
3200 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3201 | break; | |
3202 | case WRITE_12: | |
3203 | sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); | |
3204 | if (sector_ret) | |
3205 | goto out_unsupported_cdb; | |
3206 | size = transport_get_size(sectors, cdb, cmd); | |
3207 | cmd->transport_split_cdb = &split_cdb_XX_12; | |
3208 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | |
3209 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | |
3210 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3211 | break; | |
3212 | case WRITE_16: | |
3213 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | |
3214 | if (sector_ret) | |
3215 | goto out_unsupported_cdb; | |
3216 | size = transport_get_size(sectors, cdb, cmd); | |
3217 | cmd->transport_split_cdb = &split_cdb_XX_16; | |
3218 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | |
3219 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | |
3220 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3221 | break; | |
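/*
 * The (cdb[1] & 0x8) tests above extract the FUA (Force Unit Access)
 * bit, bit 3 of CDB byte 1 in WRITE(10/12/16), per SBC. A minimal
 * illustration with a hypothetical helper name, not wired in:
 */
#if 0
static inline int example_cdb_fua(unsigned char *cdb)
{
	return (cdb[1] & 0x08) != 0;
}
#endif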
3222 | case XDWRITEREAD_10: | |
3223 | if ((cmd->data_direction != DMA_TO_DEVICE) || | |
3224 | !(T_TASK(cmd)->t_tasks_bidi)) | |
3225 | goto out_invalid_cdb_field; | |
3226 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | |
3227 | if (sector_ret) | |
3228 | goto out_unsupported_cdb; | |
3229 | size = transport_get_size(sectors, cdb, cmd); | |
3230 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
3231 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | |
3232 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3233 | passthrough = (TRANSPORT(dev)->transport_type == | |
3234 | TRANSPORT_PLUGIN_PHBA_PDEV); | |
3235 | /* | |
3236 | * Skip the remaining assignments for TCM/PSCSI passthrough | |
3237 | */ | |
3238 | if (passthrough) | |
3239 | break; | |
3240 | /* | |
3241 | * Setup BIDI XOR callback to be run during transport_generic_complete_ok() | |
3242 | */ | |
3243 | cmd->transport_complete_callback = &transport_xor_callback; | |
3244 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | |
3245 | break; | |
3246 | case VARIABLE_LENGTH_CMD: | |
3247 | service_action = get_unaligned_be16(&cdb[8]); | |
3248 | /* | |
3249 | * Determine if this is TCM/PSCSI device and we should disable | |
3250 | * internal emulation for this CDB. | |
3251 | */ | |
3252 | passthrough = (TRANSPORT(dev)->transport_type == | |
3253 | TRANSPORT_PLUGIN_PHBA_PDEV); | |
3254 | ||
3255 | switch (service_action) { | |
3256 | case XDWRITEREAD_32: | |
3257 | sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); | |
3258 | if (sector_ret) | |
3259 | goto out_unsupported_cdb; | |
3260 | size = transport_get_size(sectors, cdb, cmd); | |
3261 | /* | |
3262 | * Use WRITE_32 and READ_32 opcodes for the emulated | |
3263 | * XDWRITE_READ_32 logic. | |
3264 | */ | |
3265 | cmd->transport_split_cdb = &split_cdb_XX_32; | |
3266 | T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb); | |
3267 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3268 | ||
3269 | /* | |
3270 | * Skip the remaining assignments for TCM/PSCSI passthrough | |
3271 | */ | |
3272 | if (passthrough) | |
3273 | break; | |
3274 | ||
3275 | /* | |
3276 | * Setup BIDI XOR callback to be run during | |
3277 | * transport_generic_complete_ok() | |
3278 | */ | |
3279 | cmd->transport_complete_callback = &transport_xor_callback; | |
3280 | T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8); | |
3281 | break; | |
3282 | case WRITE_SAME_32: | |
3283 | sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); | |
3284 | if (sector_ret) | |
3285 | goto out_unsupported_cdb; | |
3286 | size = transport_get_size(sectors, cdb, cmd); | |
3287 | T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]); | |
3288 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3289 | ||
3290 | /* | |
3291 | * Skip the remaining assignments for TCM/PSCSI passthrough | |
3292 | */ | |
3293 | if (passthrough) | |
3294 | break; | |
3295 | ||
3296 | if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { | |
3297 | printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" | |
3298 | " bits not supported for Block Discard" | |
3299 | " Emulation\n"); | |
3300 | goto out_invalid_cdb_field; | |
3301 | } | |
3302 | /* | |
3303 | * Currently for the emulated case we only accept | |
3304 | * tpws with the UNMAP=1 bit set. | |
3305 | */ | |
3306 | if (!(cdb[10] & 0x08)) { | |
3307 | printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" | |
3308 | " supported for Block Discard Emulation\n"); | |
3309 | goto out_invalid_cdb_field; | |
3310 | } | |
3311 | break; | |
3312 | default: | |
3313 | printk(KERN_ERR "VARIABLE_LENGTH_CMD service action" | |
3314 | " 0x%04x not supported\n", service_action); | |
3315 | goto out_unsupported_cdb; | |
3316 | } | |
3317 | break; | |
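/*
 * Variable-length CDBs (opcode 0x7f) carry a big-endian SERVICE ACTION
 * in bytes 8-9, which is what the get_unaligned_be16(&cdb[8]) call at
 * the top of this case reads. The equivalent open-coded decode, for
 * illustration only:
 */
#if 0
	service_action = (cdb[8] << 8) | cdb[9];
#endif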
3318 | case 0xa3: | |
3319 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { | |
3320 | /* MAINTENANCE_IN from SCC-2 */ | |
3321 | /* | |
3322 | * Check for emulated MI_REPORT_TARGET_PGS. | |
3323 | */ | |
3324 | if (cdb[1] == MI_REPORT_TARGET_PGS) { | |
3325 | cmd->transport_emulate_cdb = | |
3326 | (T10_ALUA(su_dev)->alua_type == | |
3327 | SPC3_ALUA_EMULATED) ? | |
3328 | &core_emulate_report_target_port_groups : | |
3329 | NULL; | |
3330 | } | |
3331 | size = (cdb[6] << 24) | (cdb[7] << 16) | | |
3332 | (cdb[8] << 8) | cdb[9]; | |
3333 | } else { | |
3334 | /* GPCMD_SEND_KEY from multi media commands */ | |
3335 | size = (cdb[8] << 8) + cdb[9]; | |
3336 | } | |
3337 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3338 | break; | |
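/*
 * The manual shift/or of cdb[6..9] above is the open-coded form of the
 * get_unaligned_be32() helper from <asm/unaligned.h>, which this file
 * already includes. Equivalent sketch, not wired in:
 */
#if 0
	size = get_unaligned_be32(&cdb[6]);
#endif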
3339 | case MODE_SELECT: | |
3340 | size = cdb[4]; | |
3341 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3342 | break; | |
3343 | case MODE_SELECT_10: | |
3344 | size = (cdb[7] << 8) + cdb[8]; | |
3345 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3346 | break; | |
3347 | case MODE_SENSE: | |
3348 | size = cdb[4]; | |
3349 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3350 | break; | |
3351 | case MODE_SENSE_10: | |
3352 | case GPCMD_READ_BUFFER_CAPACITY: | |
3353 | case GPCMD_SEND_OPC: | |
3354 | case LOG_SELECT: | |
3355 | case LOG_SENSE: | |
3356 | size = (cdb[7] << 8) + cdb[8]; | |
3357 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3358 | break; | |
3359 | case READ_BLOCK_LIMITS: | |
3360 | size = READ_BLOCK_LEN; | |
3361 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3362 | break; | |
3363 | case GPCMD_GET_CONFIGURATION: | |
3364 | case GPCMD_READ_FORMAT_CAPACITIES: | |
3365 | case GPCMD_READ_DISC_INFO: | |
3366 | case GPCMD_READ_TRACK_RZONE_INFO: | |
3367 | size = (cdb[7] << 8) + cdb[8]; | |
3368 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3369 | break; | |
3370 | case PERSISTENT_RESERVE_IN: | |
3371 | case PERSISTENT_RESERVE_OUT: | |
3372 | cmd->transport_emulate_cdb = | |
3373 | (T10_RES(su_dev)->res_type == | |
3374 | SPC3_PERSISTENT_RESERVATIONS) ? | |
3375 | &core_scsi3_emulate_pr : NULL; | |
3376 | size = (cdb[7] << 8) + cdb[8]; | |
3377 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3378 | break; | |
3379 | case GPCMD_MECHANISM_STATUS: | |
3380 | case GPCMD_READ_DVD_STRUCTURE: | |
3381 | size = (cdb[8] << 8) + cdb[9]; | |
3382 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3383 | break; | |
3384 | case READ_POSITION: | |
3385 | size = READ_POSITION_LEN; | |
3386 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3387 | break; | |
3388 | case 0xa4: | |
3389 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { | |
3390 | /* MAINTENANCE_OUT from SCC-2 | |
3391 | * | |
3392 | * Check for emulated MO_SET_TARGET_PGS. | |
3393 | */ | |
3394 | if (cdb[1] == MO_SET_TARGET_PGS) { | |
3395 | cmd->transport_emulate_cdb = | |
3396 | (T10_ALUA(su_dev)->alua_type == | |
3397 | SPC3_ALUA_EMULATED) ? | |
3398 | &core_emulate_set_target_port_groups : | |
3399 | NULL; | |
3400 | } | |
3401 | ||
3402 | size = (cdb[6] << 24) | (cdb[7] << 16) | | |
3403 | (cdb[8] << 8) | cdb[9]; | |
3404 | } else { | |
3405 | /* GPCMD_REPORT_KEY from multi media commands */ | |
3406 | size = (cdb[8] << 8) + cdb[9]; | |
3407 | } | |
3408 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3409 | break; | |
3410 | case INQUIRY: | |
3411 | size = (cdb[3] << 8) + cdb[4]; | |
3412 | /* | |
3413 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. | |
3414 | * See spc4r17 section 5.3 | |
3415 | */ | |
3416 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | |
3417 | cmd->sam_task_attr = TASK_ATTR_HOQ; | |
3418 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3419 | break; | |
3420 | case READ_BUFFER: | |
3421 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
3422 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3423 | break; | |
3424 | case READ_CAPACITY: | |
3425 | size = READ_CAP_LEN; | |
3426 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3427 | break; | |
3428 | case READ_MEDIA_SERIAL_NUMBER: | |
3429 | case SECURITY_PROTOCOL_IN: | |
3430 | case SECURITY_PROTOCOL_OUT: | |
3431 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
3432 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3433 | break; | |
3434 | case SERVICE_ACTION_IN: | |
3435 | case ACCESS_CONTROL_IN: | |
3436 | case ACCESS_CONTROL_OUT: | |
3437 | case EXTENDED_COPY: | |
3438 | case READ_ATTRIBUTE: | |
3439 | case RECEIVE_COPY_RESULTS: | |
3440 | case WRITE_ATTRIBUTE: | |
3441 | size = (cdb[10] << 24) | (cdb[11] << 16) | | |
3442 | (cdb[12] << 8) | cdb[13]; | |
3443 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3444 | break; | |
3445 | case RECEIVE_DIAGNOSTIC: | |
3446 | case SEND_DIAGNOSTIC: | |
3447 | size = (cdb[3] << 8) | cdb[4]; | |
3448 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3449 | break; | |
3450 | /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ | |
3451 | #if 0 | |
3452 | case GPCMD_READ_CD: | |
3453 | sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
3454 | size = (2336 * sectors); | |
3455 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3456 | break; | |
3457 | #endif | |
3458 | case READ_TOC: | |
3459 | size = cdb[8]; | |
3460 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3461 | break; | |
3462 | case REQUEST_SENSE: | |
3463 | size = cdb[4]; | |
3464 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3465 | break; | |
3466 | case READ_ELEMENT_STATUS: | |
3467 | size = (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; | |
3468 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3469 | break; | |
3470 | case WRITE_BUFFER: | |
3471 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
3472 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3473 | break; | |
3474 | case RESERVE: | |
3475 | case RESERVE_10: | |
3476 | /* | |
3477 | * The SPC-2 RESERVE does not contain a size in the SCSI CDB. | |
3478 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | |
3479 | */ | |
3480 | if (cdb[0] == RESERVE_10) | |
3481 | size = (cdb[7] << 8) | cdb[8]; | |
3482 | else | |
3483 | size = cmd->data_length; | |
3484 | ||
3485 | /* | |
3486 | * Setup the legacy emulated handler for SPC-2 and | |
3487 | * >= SPC-3 compatible reservation handling (CRH=1) | |
3488 | * Otherwise, we assume the underlying SCSI logic is | |
3489 | * is running in SPC_PASSTHROUGH, and wants reservations | |
3490 | * emulation disabled. | |
3491 | */ | |
3492 | cmd->transport_emulate_cdb = | |
3493 | (T10_RES(su_dev)->res_type != | |
3494 | SPC_PASSTHROUGH) ? | |
3495 | &core_scsi2_emulate_crh : NULL; | |
3496 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3497 | break; | |
3498 | case RELEASE: | |
3499 | case RELEASE_10: | |
3500 | /* | |
3501 | * The SPC-2 RELEASE does not contain a size in the SCSI CDB. | |
3502 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | |
3503 | */ | |
3504 | if (cdb[0] == RELEASE_10) | |
3505 | size = (cdb[7] << 8) | cdb[8]; | |
3506 | else | |
3507 | size = cmd->data_length; | |
3508 | ||
3509 | cmd->transport_emulate_cdb = | |
3510 | (T10_RES(su_dev)->res_type != | |
3511 | SPC_PASSTHROUGH) ? | |
3512 | &core_scsi2_emulate_crh : NULL; | |
3513 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3514 | break; | |
3515 | case SYNCHRONIZE_CACHE: | |
3516 | case 0x91: /* SYNCHRONIZE_CACHE_16: */ | |
3517 | /* | |
3518 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE | |
3519 | */ | |
3520 | if (cdb[0] == SYNCHRONIZE_CACHE) { | |
3521 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | |
3522 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | |
3523 | } else { | |
3524 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | |
3525 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | |
3526 | } | |
3527 | if (sector_ret) | |
3528 | goto out_unsupported_cdb; | |
3529 | ||
3530 | size = transport_get_size(sectors, cdb, cmd); | |
3531 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3532 | ||
3533 | /* | |
3534 | * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() | |
3535 | */ | |
3536 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | |
3537 | break; | |
3538 | /* | |
3539 | * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation | |
3540 | * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks() | |
3541 | */ | |
3542 | cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; | |
3543 | /* | |
3544 | * Check to ensure that LBA + Range does not extend past the end | |
3545 | * of the device. | |
3546 | */ | |
3547 | if (transport_get_sectors(cmd) < 0) | |
3548 | goto out_invalid_cdb_field; | |
3549 | break; | |
3550 | case UNMAP: | |
3551 | size = get_unaligned_be16(&cdb[7]); | |
3552 | passthrough = (TRANSPORT(dev)->transport_type == | |
3553 | TRANSPORT_PLUGIN_PHBA_PDEV); | |
3554 | /* | |
3555 | * Determine if the received UNMAP is used for direct passthrough | |
3556 | * into Linux/SCSI with struct request via TCM/pSCSI, or if we are | |
3557 | * signaling the use of internal transport_generic_unmap() emulation | |
3558 | * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO | |
3559 | * subsystem plugin backstores. | |
3560 | */ | |
3561 | if (!(passthrough)) | |
3562 | cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP; | |
3563 | ||
3564 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3565 | break; | |
3566 | case WRITE_SAME_16: | |
3567 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | |
3568 | if (sector_ret) | |
3569 | goto out_unsupported_cdb; | |
3570 | size = transport_get_size(sectors, cdb, cmd); | |
3571 | T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[2]); | |
3572 | passthrough = (TRANSPORT(dev)->transport_type == | |
3573 | TRANSPORT_PLUGIN_PHBA_PDEV); | |
3574 | /* | |
3575 | * Determine if the received WRITE_SAME_16 is used for direct | |
3576 | * passthrough into Linux/SCSI with struct request via TCM/pSCSI | |
3577 | * or we are signaling the use of internal WRITE_SAME + UNMAP=1 | |
3578 | * emulation for Linux/BLOCK discard with TCM/IBLOCK and | |
3579 | * TCM/FILEIO subsystem plugin backstores. | |
3580 | */ | |
3581 | if (!(passthrough)) { | |
3582 | if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { | |
3583 | printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" | |
3584 | " bits not supported for Block Discard" | |
3585 | " Emulation\n"); | |
3586 | goto out_invalid_cdb_field; | |
3587 | } | |
3588 | /* | |
3589 | * Currently for the emulated case we only accept | |
3590 | * tpws with the UNMAP=1 bit set. | |
3591 | */ | |
3592 | if (!(cdb[1] & 0x08)) { | |
3593 | printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not " | |
3594 | " supported for Block Discard Emulation\n"); | |
3595 | goto out_invalid_cdb_field; | |
3596 | } | |
3597 | } | |
3598 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3599 | break; | |
3600 | case ALLOW_MEDIUM_REMOVAL: | |
3601 | case GPCMD_CLOSE_TRACK: | |
3602 | case ERASE: | |
3603 | case INITIALIZE_ELEMENT_STATUS: | |
3604 | case GPCMD_LOAD_UNLOAD: | |
3605 | case REZERO_UNIT: | |
3606 | case SEEK_10: | |
3607 | case GPCMD_SET_SPEED: | |
3608 | case SPACE: | |
3609 | case START_STOP: | |
3610 | case TEST_UNIT_READY: | |
3611 | case VERIFY: | |
3612 | case WRITE_FILEMARKS: | |
3613 | case MOVE_MEDIUM: | |
3614 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3615 | break; | |
3616 | case REPORT_LUNS: | |
3617 | cmd->transport_emulate_cdb = | |
3618 | &transport_core_report_lun_response; | |
3619 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
3620 | /* | |
3621 | * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS | |
3622 | * See spc4r17 section 5.3 | |
3623 | */ | |
3624 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | |
3625 | cmd->sam_task_attr = TASK_ATTR_HOQ; | |
3626 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3627 | break; | |
3628 | default: | |
3629 | printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" | |
3630 | " 0x%02x, sending CHECK_CONDITION.\n", | |
3631 | CMD_TFO(cmd)->get_fabric_name(), cdb[0]); | |
3632 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | |
3633 | goto out_unsupported_cdb; | |
3634 | } | |
3635 | ||
3636 | if (size != cmd->data_length) { | |
3637 | printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:" | |
3638 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" | |
3639 | " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(), | |
3640 | cmd->data_length, size, cdb[0]); | |
3641 | ||
3642 | cmd->cmd_spdtl = size; | |
3643 | ||
3644 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
3645 | printk(KERN_ERR "Rejecting underflow/overflow" | |
3646 | " WRITE data\n"); | |
3647 | goto out_invalid_cdb_field; | |
3648 | } | |
3649 | /* | |
3650 | * Reject READ_* or WRITE_* with overflow/underflow for | |
3651 | * type SCF_SCSI_DATA_SG_IO_CDB. | |
3652 | */ | |
3653 | if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) { | |
3654 | printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" | |
3655 | " CDB on non 512-byte sector setup subsystem" | |
3656 | " plugin: %s\n", TRANSPORT(dev)->name); | |
3657 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ | |
3658 | goto out_invalid_cdb_field; | |
3659 | } | |
3660 | ||
3661 | if (size > cmd->data_length) { | |
3662 | cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; | |
3663 | cmd->residual_count = (size - cmd->data_length); | |
3664 | } else { | |
3665 | cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; | |
3666 | cmd->residual_count = (cmd->data_length - size); | |
3667 | } | |
3668 | cmd->data_length = size; | |
3669 | } | |
3670 | ||
3671 | transport_set_supported_SAM_opcode(cmd); | |
3672 | return ret; | |
3673 | ||
3674 | out_unsupported_cdb: | |
3675 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3676 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
3677 | return -2; | |
3678 | out_invalid_cdb_field: | |
3679 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3680 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
3681 | return -2; | |
3682 | } | |
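/*
 * Worked residual example for the size-mismatch block at the end of
 * transport_generic_cmd_sequencer(): with a fabric-expected
 * cmd->data_length of 4096 and a CDB-described size of 1024 on a READ
 * (WRITE mismatches are rejected outright above), the command is tagged
 * SCF_UNDERFLOW_BIT, residual_count becomes 4096 - 1024 = 3072, and
 * data_length is trimmed to 1024 before execution.
 */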
3683 | ||
3684 | static inline void transport_release_tasks(struct se_cmd *); | |
3685 | ||
3686 | /* | |
3687 | * This function will copy a contiguous *src buffer into a destination | |
3688 | * struct scatterlist array. | |
3689 | */ | |
3690 | static void transport_memcpy_write_contig( | |
3691 | struct se_cmd *cmd, | |
3692 | struct scatterlist *sg_d, | |
3693 | unsigned char *src) | |
3694 | { | |
3695 | u32 i = 0, length = 0, total_length = cmd->data_length; | |
3696 | void *dst; | |
3697 | ||
3698 | while (total_length) { | |
3699 | length = sg_d[i].length; | |
3700 | ||
3701 | if (length > total_length) | |
3702 | length = total_length; | |
3703 | ||
3704 | dst = sg_virt(&sg_d[i]); | |
3705 | ||
3706 | memcpy(dst, src, length); | |
3707 | ||
3708 | if (!(total_length -= length)) | |
3709 | return; | |
3710 | ||
3711 | src += length; | |
3712 | i++; | |
3713 | } | |
3714 | } | |
3715 | ||
3716 | /* | |
3717 | * This function will copy a struct scatterlist array *sg_s into a destination | |
3718 | * contiguous *dst buffer. | |
3719 | */ | |
3720 | static void transport_memcpy_read_contig( | |
3721 | struct se_cmd *cmd, | |
3722 | unsigned char *dst, | |
3723 | struct scatterlist *sg_s) | |
3724 | { | |
3725 | u32 i = 0, length = 0, total_length = cmd->data_length; | |
3726 | void *src; | |
3727 | ||
3728 | while (total_length) { | |
3729 | length = sg_s[i].length; | |
3730 | ||
3731 | if (length > total_length) | |
3732 | length = total_length; | |
3733 | ||
3734 | src = sg_virt(&sg_s[i]); | |
3735 | ||
3736 | memcpy(dst, src, length); | |
3737 | ||
3738 | if (!(total_length -= length)) | |
3739 | return; | |
3740 | ||
3741 | dst += length; | |
3742 | i++; | |
3743 | } | |
3744 | } | |
3745 | ||
3746 | static void transport_memcpy_se_mem_read_contig( | |
3747 | struct se_cmd *cmd, | |
3748 | unsigned char *dst, | |
3749 | struct list_head *se_mem_list) | |
3750 | { | |
3751 | struct se_mem *se_mem; | |
3752 | void *src; | |
3753 | u32 length = 0, total_length = cmd->data_length; | |
3754 | ||
3755 | list_for_each_entry(se_mem, se_mem_list, se_list) { | |
3756 | length = se_mem->se_len; | |
3757 | ||
3758 | if (length > total_length) | |
3759 | length = total_length; | |
3760 | ||
3761 | src = page_address(se_mem->se_page) + se_mem->se_off; | |
3762 | ||
3763 | memcpy(dst, src, length); | |
3764 | ||
3765 | if (!(total_length -= length)) | |
3766 | return; | |
3767 | ||
3768 | dst += length; | |
3769 | } | |
3770 | } | |
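/*
 * Usage sketch for the copy helpers above; fabric_sgl and
 * tcm_contig_buf are hypothetical names for a fabric-registered
 * scatterlist and TCM's internal contiguous buffer:
 */
#if 0
	/* push TCM's contiguous READ payload out to the fabric SGL */
	transport_memcpy_write_contig(cmd, fabric_sgl, tcm_contig_buf);
	/* pull a fabric WRITE payload into TCM's contiguous buffer */
	transport_memcpy_read_contig(cmd, tcm_contig_buf, fabric_sgl);
#endif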
3771 | ||
3772 | /* | |
3773 | * Called from transport_generic_complete_ok() and | |
3774 | * transport_generic_request_failure() to determine which dormant/delayed | |
3775 | * and ordered cmds need to have their tasks added to the execution queue. | |
3776 | */ | |
3777 | static void transport_complete_task_attr(struct se_cmd *cmd) | |
3778 | { | |
3779 | struct se_device *dev = SE_DEV(cmd); | |
3780 | struct se_cmd *cmd_p, *cmd_tmp; | |
3781 | int new_active_tasks = 0; | |
3782 | ||
3783 | if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) { | |
3784 | atomic_dec(&dev->simple_cmds); | |
3785 | smp_mb__after_atomic_dec(); | |
3786 | dev->dev_cur_ordered_id++; | |
3787 | DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for" | |
3788 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, | |
3789 | cmd->se_ordered_id); | |
3790 | } else if (cmd->sam_task_attr == TASK_ATTR_HOQ) { | |
3791 | atomic_dec(&dev->dev_hoq_count); | |
3792 | smp_mb__after_atomic_dec(); | |
3793 | dev->dev_cur_ordered_id++; | |
3794 | DEBUG_STA("Incremented dev_cur_ordered_id: %u for" | |
3795 | " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, | |
3796 | cmd->se_ordered_id); | |
3797 | } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) { | |
3798 | spin_lock(&dev->ordered_cmd_lock); | |
3799 | list_del(&cmd->se_ordered_list); | |
3800 | atomic_dec(&dev->dev_ordered_sync); | |
3801 | smp_mb__after_atomic_dec(); | |
3802 | spin_unlock(&dev->ordered_cmd_lock); | |
3803 | ||
3804 | dev->dev_cur_ordered_id++; | |
3805 | DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:" | |
3806 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); | |
3807 | } | |
3808 | /* | |
3809 | * Process all commands up to the last received | |
3810 | * ORDERED task attribute which requires another blocking | |
3811 | * boundary | |
3812 | */ | |
3813 | spin_lock(&dev->delayed_cmd_lock); | |
3814 | list_for_each_entry_safe(cmd_p, cmd_tmp, | |
3815 | &dev->delayed_cmd_list, se_delayed_list) { | |
3816 | ||
3817 | list_del(&cmd_p->se_delayed_list); | |
3818 | spin_unlock(&dev->delayed_cmd_lock); | |
3819 | ||
3820 | DEBUG_STA("Calling add_tasks() for" | |
3821 | " cmd_p: 0x%02x Task Attr: 0x%02x" | |
3822 | " Dormant -> Active, se_ordered_id: %u\n", | |
3823 | T_TASK(cmd_p)->t_task_cdb[0], | |
3824 | cmd_p->sam_task_attr, cmd_p->se_ordered_id); | |
3825 | ||
3826 | transport_add_tasks_from_cmd(cmd_p); | |
3827 | new_active_tasks++; | |
3828 | ||
3829 | spin_lock(&dev->delayed_cmd_lock); | |
3830 | if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED) | |
3831 | break; | |
3832 | } | |
3833 | spin_unlock(&dev->delayed_cmd_lock); | |
3834 | /* | |
3835 | * If new tasks have become active, wake up the transport thread | |
3836 | * to do the processing of the Active tasks. | |
3837 | */ | |
3838 | if (new_active_tasks != 0) | |
3839 | wake_up_interruptible(&dev->dev_queue_obj->thread_wq); | |
3840 | } | |
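/*
 * Example of the drain loop above: if delayed_cmd_list holds
 * [SIMPLE, SIMPLE, ORDERED, SIMPLE], completing the current blocking
 * command re-activates the two leading SIMPLE commands plus the ORDERED
 * one (which establishes the next blocking boundary); the trailing
 * SIMPLE command stays delayed until that ORDERED command completes.
 */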
3841 | ||
3842 | static void transport_generic_complete_ok(struct se_cmd *cmd) | |
3843 | { | |
3844 | int reason = 0; | |
3845 | /* | |
3846 | * Check if we need to move delayed/dormant tasks from cmds on the | |
3847 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task | |
3848 | * Attribute. | |
3849 | */ | |
3850 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | |
3851 | transport_complete_task_attr(cmd); | |
3852 | /* | |
3853 | * Check if we need to retrieve a sense buffer from | |
3854 | * the struct se_cmd in question. | |
3855 | */ | |
3856 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { | |
3857 | if (transport_get_sense_data(cmd) < 0) | |
3858 | reason = TCM_NON_EXISTENT_LUN; | |
3859 | ||
3860 | /* | |
3861 | * Only set when a struct se_task->task_scsi_status returned | |
3862 | * a non-GOOD status. | |
3863 | */ | |
3864 | if (cmd->scsi_status) { | |
3865 | transport_send_check_condition_and_sense( | |
3866 | cmd, reason, 1); | |
3867 | transport_lun_remove_cmd(cmd); | |
3868 | transport_cmd_check_stop_to_fabric(cmd); | |
3869 | return; | |
3870 | } | |
3871 | } | |
3872 | /* | |
3873 | * Check for a callback, used amongst other things by | |
3874 | * XDWRITE_READ_10 emulation. | |
3875 | */ | |
3876 | if (cmd->transport_complete_callback) | |
3877 | cmd->transport_complete_callback(cmd); | |
3878 | ||
3879 | switch (cmd->data_direction) { | |
3880 | case DMA_FROM_DEVICE: | |
3881 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
3882 | if (SE_LUN(cmd)->lun_sep) { | |
3883 | SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += | |
3884 | cmd->data_length; | |
3885 | } | |
3886 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
3887 | /* | |
3888 | * If enabled by TCM fabric module pre-registered SGL | |
3889 | * memory, perform the memcpy() from the TCM internal | |
3890 | * contiguous buffer back to the original SGL. | |
3891 | */ | |
3892 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) | |
3893 | transport_memcpy_write_contig(cmd, | |
3894 | T_TASK(cmd)->t_task_pt_sgl, | |
3895 | T_TASK(cmd)->t_task_buf); | |
3896 | ||
3897 | CMD_TFO(cmd)->queue_data_in(cmd); | |
3898 | break; | |
3899 | case DMA_TO_DEVICE: | |
3900 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
3901 | if (SE_LUN(cmd)->lun_sep) { | |
3902 | SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets += | |
3903 | cmd->data_length; | |
3904 | } | |
3905 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
3906 | /* | |
3907 | * Check if we need to send READ payload for BIDI-COMMAND | |
3908 | */ | |
3909 | if (T_TASK(cmd)->t_mem_bidi_list != NULL) { | |
3910 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
3911 | if (SE_LUN(cmd)->lun_sep) { | |
3912 | SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += | |
3913 | cmd->data_length; | |
3914 | } | |
3915 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
3916 | CMD_TFO(cmd)->queue_data_in(cmd); | |
3917 | break; | |
3918 | } | |
3919 | /* Fall through for DMA_TO_DEVICE */ | |
3920 | case DMA_NONE: | |
3921 | CMD_TFO(cmd)->queue_status(cmd); | |
3922 | break; | |
3923 | default: | |
3924 | break; | |
3925 | } | |
3926 | ||
3927 | transport_lun_remove_cmd(cmd); | |
3928 | transport_cmd_check_stop_to_fabric(cmd); | |
3929 | } | |
3930 | ||
3931 | static void transport_free_dev_tasks(struct se_cmd *cmd) | |
3932 | { | |
3933 | struct se_task *task, *task_tmp; | |
3934 | unsigned long flags; | |
3935 | ||
3936 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
3937 | list_for_each_entry_safe(task, task_tmp, | |
3938 | &T_TASK(cmd)->t_task_list, t_list) { | |
3939 | if (atomic_read(&task->task_active)) | |
3940 | continue; | |
3941 | ||
3942 | kfree(task->task_sg_bidi); | |
3943 | kfree(task->task_sg); | |
3944 | ||
3945 | list_del(&task->t_list); | |
3946 | ||
3947 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
3948 | if (task->se_dev) | |
3949 | TRANSPORT(task->se_dev)->free_task(task); | |
3950 | else | |
3951 | printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", | |
3952 | task->task_no); | |
3953 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
3954 | } | |
3955 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
3956 | } | |
3957 | ||
3958 | static inline void transport_free_pages(struct se_cmd *cmd) | |
3959 | { | |
3960 | struct se_mem *se_mem, *se_mem_tmp; | |
3961 | int free_page = 1; | |
3962 | ||
3963 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) | |
3964 | free_page = 0; | |
3965 | if (cmd->se_dev->transport->do_se_mem_map) | |
3966 | free_page = 0; | |
3967 | ||
3968 | if (T_TASK(cmd)->t_task_buf) { | |
3969 | kfree(T_TASK(cmd)->t_task_buf); | |
3970 | T_TASK(cmd)->t_task_buf = NULL; | |
3971 | return; | |
3972 | } | |
3973 | ||
3974 | /* | |
3975 | * Caller will handle releasing of struct se_mem. | |
3976 | */ | |
3977 | if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) | |
3978 | return; | |
3979 | ||
3980 | if (!(T_TASK(cmd)->t_tasks_se_num)) | |
3981 | return; | |
3982 | ||
3983 | list_for_each_entry_safe(se_mem, se_mem_tmp, | |
3984 | T_TASK(cmd)->t_mem_list, se_list) { | |
3985 | /* | |
3986 | * We only call __free_page(struct se_mem->se_page) when | |
3987 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use. | |
3988 | */ | |
3989 | if (free_page) | |
3990 | __free_page(se_mem->se_page); | |
3991 | ||
3992 | list_del(&se_mem->se_list); | |
3993 | kmem_cache_free(se_mem_cache, se_mem); | |
3994 | } | |
3995 | ||
3996 | if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) { | |
3997 | list_for_each_entry_safe(se_mem, se_mem_tmp, | |
3998 | T_TASK(cmd)->t_mem_bidi_list, se_list) { | |
3999 | /* | |
4000 | * We only call __free_page(struct se_mem->se_page) when | |
4001 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use. | |
4002 | */ | |
4003 | if (free_page) | |
4004 | __free_page(se_mem->se_page); | |
4005 | ||
4006 | list_del(&se_mem->se_list); | |
4007 | kmem_cache_free(se_mem_cache, se_mem); | |
4008 | } | |
4009 | } | |
4010 | ||
4011 | kfree(T_TASK(cmd)->t_mem_bidi_list); | |
4012 | T_TASK(cmd)->t_mem_bidi_list = NULL; | |
4013 | kfree(T_TASK(cmd)->t_mem_list); | |
4014 | T_TASK(cmd)->t_mem_list = NULL; | |
4015 | T_TASK(cmd)->t_tasks_se_num = 0; | |
4016 | } | |
4017 | ||
4018 | static inline void transport_release_tasks(struct se_cmd *cmd) | |
4019 | { | |
4020 | transport_free_dev_tasks(cmd); | |
4021 | } | |
4022 | ||
4023 | static inline int transport_dec_and_check(struct se_cmd *cmd) | |
4024 | { | |
4025 | unsigned long flags; | |
4026 | ||
4027 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
4028 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | |
4029 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) { | |
4030 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | |
4031 | flags); | |
4032 | return 1; | |
4033 | } | |
4034 | } | |
4035 | ||
4036 | if (atomic_read(&T_TASK(cmd)->t_se_count)) { | |
4037 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) { | |
4038 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | |
4039 | flags); | |
4040 | return 1; | |
4041 | } | |
4042 | } | |
4043 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
4044 | ||
4045 | return 0; | |
4046 | } | |
4047 | ||
4048 | static void transport_release_fe_cmd(struct se_cmd *cmd) | |
4049 | { | |
4050 | unsigned long flags; | |
4051 | ||
4052 | if (transport_dec_and_check(cmd)) | |
4053 | return; | |
4054 | ||
4055 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
4056 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | |
4057 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
4058 | goto free_pages; | |
4059 | } | |
4060 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | |
4061 | transport_all_task_dev_remove_state(cmd); | |
4062 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
4063 | ||
4064 | transport_release_tasks(cmd); | |
4065 | free_pages: | |
4066 | transport_free_pages(cmd); | |
4067 | transport_free_se_cmd(cmd); | |
4068 | CMD_TFO(cmd)->release_cmd_direct(cmd); | |
4069 | } | |
4070 | ||
4071 | static int transport_generic_remove( | |
4072 | struct se_cmd *cmd, | |
4073 | int release_to_pool, | |
4074 | int session_reinstatement) | |
4075 | { | |
4076 | unsigned long flags; | |
4077 | ||
4078 | if (!(T_TASK(cmd))) | |
4079 | goto release_cmd; | |
4080 | ||
4081 | if (transport_dec_and_check(cmd)) { | |
4082 | if (session_reinstatement) { | |
4083 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
4084 | transport_all_task_dev_remove_state(cmd); | |
4085 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | |
4086 | flags); | |
4087 | } | |
4088 | return 1; | |
4089 | } | |
4090 | ||
4091 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
4092 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | |
4093 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
4094 | goto free_pages; | |
4095 | } | |
4096 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | |
4097 | transport_all_task_dev_remove_state(cmd); | |
4098 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
4099 | ||
4100 | transport_release_tasks(cmd); | |
4101 | free_pages: | |
4102 | transport_free_pages(cmd); | |
4103 | ||
4104 | release_cmd: | |
4105 | if (release_to_pool) { | |
4106 | transport_release_cmd_to_pool(cmd); | |
4107 | } else { | |
4108 | transport_free_se_cmd(cmd); | |
4109 | CMD_TFO(cmd)->release_cmd_direct(cmd); | |
4110 | } | |
4111 | ||
4112 | return 0; | |
4113 | } | |
4114 | ||
4115 | /* | |
4116 | * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map | |
4117 | * @cmd: Associated se_cmd descriptor | |
4118 | * @mem: SGL style memory for TCM WRITE / READ | |
4119 | * @sg_mem_num: Number of SGL elements | |
4120 | * @mem_bidi_in: SGL style memory for TCM BIDI READ | |
4121 | * @sg_mem_bidi_num: Number of BIDI READ SGL elements | |
4122 | * | |
4123 | * Return: nonzero if the cmd was rejected for -ENOMEM or improper | |
4124 | * usage of parameters. | |
4125 | */ | |
4126 | int transport_generic_map_mem_to_cmd( | |
4127 | struct se_cmd *cmd, | |
4128 | struct scatterlist *mem, | |
4129 | u32 sg_mem_num, | |
4130 | struct scatterlist *mem_bidi_in, | |
4131 | u32 sg_mem_bidi_num) | |
4132 | { | |
4133 | u32 se_mem_cnt_out = 0; | |
4134 | int ret; | |
4135 | ||
4136 | if (!(mem) || !(sg_mem_num)) | |
4137 | return 0; | |
4138 | /* | |
4139 | * Passed *mem will contain a list_head containing preformatted | |
4140 | * struct se_mem elements... | |
4141 | */ | |
4142 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) { | |
4143 | if ((mem_bidi_in) || (sg_mem_bidi_num)) { | |
4144 | printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported" | |
4145 | " with BIDI-COMMAND\n"); | |
4146 | return -ENOSYS; | |
4147 | } | |
4148 | ||
4149 | T_TASK(cmd)->t_mem_list = (struct list_head *)mem; | |
4150 | T_TASK(cmd)->t_tasks_se_num = sg_mem_num; | |
4151 | cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC; | |
4152 | return 0; | |
4153 | } | |
4154 | /* | |
4155 | * Otherwise, assume the caller is passing a struct scatterlist | |
4156 | * array from include/linux/scatterlist.h | |
4157 | */ | |
4158 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || | |
4159 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { | |
4160 | /* | |
4161 | * For CDB using TCM struct se_mem linked list scatterlist memory | |
4162 | * processed into a TCM struct se_subsystem_dev, we do the mapping | |
4163 | * from the passed physical memory to struct se_mem->se_page here. | |
4164 | */ | |
4165 | T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); | |
4166 | if (!(T_TASK(cmd)->t_mem_list)) | |
4167 | return -ENOMEM; | |
4168 | ||
4169 | ret = transport_map_sg_to_mem(cmd, | |
4170 | T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out); | |
4171 | if (ret < 0) | |
4172 | return -ENOMEM; | |
4173 | ||
4174 | T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out; | |
4175 | /* | |
4176 | * Setup BIDI READ list of struct se_mem elements | |
4177 | */ | |
4178 | if ((mem_bidi_in) && (sg_mem_bidi_num)) { | |
4179 | T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); | |
4180 | if (!(T_TASK(cmd)->t_mem_bidi_list)) { | |
4181 | kfree(T_TASK(cmd)->t_mem_list); | |
4182 | return -ENOMEM; | |
4183 | } | |
4184 | se_mem_cnt_out = 0; | |
4185 | ||
4186 | ret = transport_map_sg_to_mem(cmd, | |
4187 | T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in, | |
4188 | &se_mem_cnt_out); | |
4189 | if (ret < 0) { | |
4190 | kfree(T_TASK(cmd)->t_mem_list); | |
4191 | return -ENOMEM; | |
4192 | } | |
4193 | ||
4194 | T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out; | |
4195 | } | |
4196 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | |
4197 | ||
4198 | } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { | |
4199 | if (mem_bidi_in || sg_mem_bidi_num) { | |
4200 | printk(KERN_ERR "BIDI-Commands not supported using " | |
4201 | "SCF_SCSI_CONTROL_NONSG_IO_CDB\n"); | |
4202 | return -ENOSYS; | |
4203 | } | |
4204 | /* | |
4205 | * For incoming CDBs using a contiguous buffer internally with TCM, | |
4206 | * save the passed struct scatterlist memory. After TCM storage object | |
4207 | * processing has completed for this struct se_cmd, TCM core will call | |
4208 | * transport_memcpy_[write,read]_contig() as necessary from | |
4209 | * transport_generic_complete_ok() and transport_write_pending() in order | |
4210 | * to copy the TCM buffer to/from the original passed *mem in SGL -> | |
4211 | * struct scatterlist format. | |
4212 | */ | |
4213 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; | |
4214 | T_TASK(cmd)->t_task_pt_sgl = mem; | |
4215 | } | |
4216 | ||
4217 | return 0; | |
4218 | } | |
4219 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); | |
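/*
 * Fabric-side usage sketch for the export above; sgl and sgl_count are
 * hypothetical names for a fabric module's pre-registered WRITE/READ
 * scatterlist, with no BIDI READ payload:
 */
#if 0
	if (transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
					     NULL, 0) != 0)
		return -ENOMEM;	/* cmd rejected: -ENOMEM or bad parameters */
#endif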
4220 | ||
4221 | ||
4222 | static inline long long transport_dev_end_lba(struct se_device *dev) | |
4223 | { | |
4224 | return dev->transport->get_blocks(dev) + 1; | |
4225 | } | |
4226 | ||
4227 | static int transport_get_sectors(struct se_cmd *cmd) | |
4228 | { | |
4229 | struct se_device *dev = SE_DEV(cmd); | |
4230 | ||
4231 | T_TASK(cmd)->t_tasks_sectors = | |
4232 | (cmd->data_length / DEV_ATTRIB(dev)->block_size); | |
4233 | if (!(T_TASK(cmd)->t_tasks_sectors)) | |
4234 | T_TASK(cmd)->t_tasks_sectors = 1; | |
4235 | ||
4236 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK) | |
4237 | return 0; | |
4238 | ||
4239 | if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) > | |
4240 | transport_dev_end_lba(dev)) { | |
4241 | printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" | |
4242 | " transport_dev_end_lba(): %llu\n", | |
4243 | T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, | |
4244 | transport_dev_end_lba(dev)); | |
4245 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
4246 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; | |
4247 | return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS; | |
4248 | } | |
4249 | ||
4250 | return 0; | |
4251 | } | |
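/*
 * Worked example for the end-of-device check above, assuming
 * get_blocks() returns the highest addressable LBA: on a 1000-block
 * device (LBAs 0-999) transport_dev_end_lba() yields 1000, so
 * lba = 990 with 16 sectors fails (990 + 16 = 1006 > 1000), while
 * lba = 990 with 10 sectors passes.
 */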
4252 | ||
4253 | static int transport_new_cmd_obj(struct se_cmd *cmd) | |
4254 | { | |
4255 | struct se_device *dev = SE_DEV(cmd); | |
4256 | u32 task_cdbs = 0, rc; | |
4257 | ||
4258 | if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { | |
4259 | task_cdbs++; | |
4260 | T_TASK(cmd)->t_task_cdbs++; | |
4261 | } else { | |
4262 | int set_counts = 1; | |
4263 | ||
4264 | /* | |
4265 | * Setup any BIDI READ tasks and memory from | |
4266 | * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks | |
4267 | * are queued first for the non pSCSI passthrough case. | |
4268 | */ | |
4269 | if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && | |
4270 | (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { | |
4271 | rc = transport_generic_get_cdb_count(cmd, | |
4272 | T_TASK(cmd)->t_task_lba, | |
4273 | T_TASK(cmd)->t_tasks_sectors, | |
4274 | DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list, | |
4275 | set_counts); | |
4276 | if (!(rc)) { | |
4277 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
4278 | cmd->scsi_sense_reason = | |
4279 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
4280 | return PYX_TRANSPORT_LU_COMM_FAILURE; | |
4281 | } | |
4282 | set_counts = 0; | |
4283 | } | |
4284 | /* | |
4285 | * Setup the tasks and memory from T_TASK(cmd)->t_mem_list | |
4286 | * Note for BIDI transfers this will contain the WRITE payload | |
4287 | */ | |
4288 | task_cdbs = transport_generic_get_cdb_count(cmd, | |
4289 | T_TASK(cmd)->t_task_lba, | |
4290 | T_TASK(cmd)->t_tasks_sectors, | |
4291 | cmd->data_direction, T_TASK(cmd)->t_mem_list, | |
4292 | set_counts); | |
4293 | if (!(task_cdbs)) { | |
4294 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
4295 | cmd->scsi_sense_reason = | |
4296 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
4297 | return PYX_TRANSPORT_LU_COMM_FAILURE; | |
4298 | } | |
4299 | T_TASK(cmd)->t_task_cdbs += task_cdbs; | |
4300 | ||
4301 | #if 0 | |
4302 | printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" | |
4303 | " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length, | |
4304 | T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, | |
4305 | T_TASK(cmd)->t_task_cdbs); | |
4306 | #endif | |
4307 | } | |
4308 | ||
4309 | atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs); | |
4310 | atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs); | |
4311 | atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs); | |
4312 | return 0; | |
4313 | } | |
4314 | ||
4315 | static struct list_head *transport_init_se_mem_list(void) | |
4316 | { | |
4317 | struct list_head *se_mem_list; | |
4318 | ||
4319 | se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL); | |
4320 | if (!(se_mem_list)) { | |
4321 | printk(KERN_ERR "Unable to allocate memory for se_mem_list\n"); | |
4322 | return NULL; | |
4323 | } | |
4324 | INIT_LIST_HEAD(se_mem_list); | |
4325 | ||
4326 | return se_mem_list; | |
4327 | } | |
4328 | ||
4329 | static int | |
4330 | transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) | |
4331 | { | |
4332 | unsigned char *buf; | |
4333 | struct se_mem *se_mem; | |
4334 | ||
4335 | T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); | |
4336 | if (!(T_TASK(cmd)->t_mem_list)) | |
4337 | return -ENOMEM; | |
4338 | ||
4339 | /* | |
4340 | * If the device uses memory mapping this is enough. | |
4341 | */ | |
4342 | if (cmd->se_dev->transport->do_se_mem_map) | |
4343 | return 0; | |
4344 | ||
4345 | /* | |
4346 | * Setup BIDI-COMMAND READ list of struct se_mem elements | |
4347 | */ | |
4348 | if (T_TASK(cmd)->t_tasks_bidi) { | |
4349 | T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); | |
4350 | if (!(T_TASK(cmd)->t_mem_bidi_list)) { | |
4351 | kfree(T_TASK(cmd)->t_mem_list); | |
4352 | return -ENOMEM; | |
4353 | } | |
4354 | } | |
4355 | ||
4356 | while (length) { | |
4357 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | |
4358 | if (!(se_mem)) { | |
4359 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | |
4360 | goto out; | |
4361 | } | |
4362 | INIT_LIST_HEAD(&se_mem->se_list); | |
4363 | se_mem->se_len = (length > dma_size) ? dma_size : length; | |
4364 | ||
4365 | /* #warning FIXME Allocate contiguous pages for struct se_mem elements */ | |
4366 | se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0); | |
4367 | if (!(se_mem->se_page)) { | |
4368 | printk(KERN_ERR "alloc_pages() failed\n"); | |
4369 | goto out; | |
4370 | } | |
4371 | ||
4372 | buf = kmap_atomic(se_mem->se_page, KM_IRQ0); | |
4373 | if (!(buf)) { | |
4374 | printk(KERN_ERR "kmap_atomic() failed\n"); | |
4375 | goto out; | |
4376 | } | |
4377 | memset(buf, 0, se_mem->se_len); | |
4378 | kunmap_atomic(buf, KM_IRQ0); | |
4379 | ||
4380 | list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list); | |
4381 | T_TASK(cmd)->t_tasks_se_num++; | |
4382 | ||
4383 | DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" | |
4384 | " Offset(%u)\n", se_mem->se_page, se_mem->se_len, | |
4385 | se_mem->se_off); | |
4386 | ||
4387 | length -= se_mem->se_len; | |
4388 | } | |
4389 | ||
4390 | DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", | |
4391 | T_TASK(cmd)->t_tasks_se_num); | |
4392 | ||
4393 | return 0; | |
4394 | out: | |
4395 | return -1; | |
4396 | } | |
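/*
 * Chunking example for the allocation loop above: with length = 10240
 * and dma_size = PAGE_SIZE (assumed 4096 here, as passed by
 * transport_allocate_resources()), three struct se_mem elements are
 * built with se_len of 4096, 4096 and 2048, each backed by a single
 * order-0 page.
 */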
4397 | ||
4398 | extern u32 transport_calc_sg_num( | |
4399 | struct se_task *task, | |
4400 | struct se_mem *in_se_mem, | |
4401 | u32 task_offset) | |
4402 | { | |
4403 | struct se_cmd *se_cmd = task->task_se_cmd; | |
4404 | struct se_device *se_dev = SE_DEV(se_cmd); | |
4405 | struct se_mem *se_mem = in_se_mem; | |
4406 | struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd); | |
4407 | u32 sg_length, task_size = task->task_size, task_sg_num_padded; | |
4408 | ||
4409 | while (task_size != 0) { | |
4410 | DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)" | |
4411 | " se_mem->se_off(%u) task_offset(%u)\n", | |
4412 | se_mem->se_page, se_mem->se_len, | |
4413 | se_mem->se_off, task_offset); | |
4414 | ||
4415 | if (task_offset == 0) { | |
4416 | if (task_size >= se_mem->se_len) { | |
4417 | sg_length = se_mem->se_len; | |
4418 | ||
4419 | if (!(list_is_last(&se_mem->se_list, | |
4420 | T_TASK(se_cmd)->t_mem_list))) | |
4421 | se_mem = list_entry(se_mem->se_list.next, | |
4422 | struct se_mem, se_list); | |
4423 | } else { | |
4424 | sg_length = task_size; | |
4425 | task_size -= sg_length; | |
4426 | goto next; | |
4427 | } | |
4428 | ||
4429 | DEBUG_SC("sg_length(%u) task_size(%u)\n", | |
4430 | sg_length, task_size); | |
4431 | } else { | |
4432 | if ((se_mem->se_len - task_offset) > task_size) { | |
4433 | sg_length = task_size; | |
4434 | task_size -= sg_length; | |
4435 | goto next; | |
4436 | } else { | |
4437 | sg_length = (se_mem->se_len - task_offset); | |
4438 | ||
4439 | if (!(list_is_last(&se_mem->se_list, | |
4440 | T_TASK(se_cmd)->t_mem_list))) | |
4441 | se_mem = list_entry(se_mem->se_list.next, | |
4442 | struct se_mem, se_list); | |
4443 | } | |
4444 | ||
4445 | DEBUG_SC("sg_length(%u) task_size(%u)\n", | |
4446 | sg_length, task_size); | |
4447 | ||
4448 | task_offset = 0; | |
4449 | } | |
4450 | task_size -= sg_length; | |
4451 | next: | |
4452 | DEBUG_SC("task[%u] - Reducing task_size to(%u)\n", | |
4453 | task->task_no, task_size); | |
4454 | ||
4455 | task->task_sg_num++; | |
4456 | } | |
4457 | /* | |
4458 | * Check if the fabric module driver is requesting that all | |
4459 | * struct se_task->task_sg[] be chained together. If so, | |
4460 | * then allocate an extra padding SG entry for linking and | |
4461 | * marking the end of the chained SGL. | |
4462 | */ | |
4463 | if (tfo->task_sg_chaining) { | |
4464 | task_sg_num_padded = (task->task_sg_num + 1); | |
4465 | task->task_padded_sg = 1; | |
4466 | } else | |
4467 | task_sg_num_padded = task->task_sg_num; | |
4468 | ||
4469 | task->task_sg = kzalloc(task_sg_num_padded * | |
4470 | sizeof(struct scatterlist), GFP_KERNEL); | |
4471 | if (!(task->task_sg)) { | |
4472 | printk(KERN_ERR "Unable to allocate memory for" | |
4473 | " task->task_sg\n"); | |
4474 | return 0; | |
4475 | } | |
4476 | sg_init_table(&task->task_sg[0], task_sg_num_padded); | |
4477 | /* | |
4478 | * Setup task->task_sg_bidi for SCSI READ payload for | |
4479 | * TCM/pSCSI passthrough if present for BIDI-COMMAND | |
4480 | */ | |
4481 | if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) && | |
4482 | (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { | |
4483 | task->task_sg_bidi = kzalloc(task_sg_num_padded * | |
4484 | sizeof(struct scatterlist), GFP_KERNEL); | |
4485 | if (!(task->task_sg_bidi)) { | |
4486 | printk(KERN_ERR "Unable to allocate memory for" | |
4487 | " task->task_sg_bidi\n"); | |
4488 | return 0; | |
4489 | } | |
4490 | sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded); | |
4491 | } | |
4492 | /* | |
4493 | * For the chaining case, setup the proper end of SGL for the | |
4494 | * initial submission struct task into struct se_subsystem_api. | |
4495 | * This will be cleared later by transport_do_task_sg_chain() | |
4496 | */ | |
4497 | if (task->task_padded_sg) { | |
4498 | sg_mark_end(&task->task_sg[task->task_sg_num - 1]); | |
4499 | /* | |
4500 | * The 'if' check guards marking the end of the bi-directional | |
4501 | * scatterlist, which gets created only in the case of a | |
4502 | * bi-directional (READ + WRITE) request. | |
4503 | */ | |
4504 | if (task->task_sg_bidi) | |
4505 | sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]); | |
4506 | } | |
4507 | ||
4508 | DEBUG_SC("Successfully allocated task->task_sg_num(%u)," | |
4509 | " task_sg_num_padded(%u)\n", task->task_sg_num, | |
4510 | task_sg_num_padded); | |
4511 | ||
4512 | return task->task_sg_num; | |
4513 | } | |
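/*
 * Padding sketch for the chaining case above: with task->task_sg_num = 3
 * and tfo->task_sg_chaining enabled, four scatterlist entries are
 * allocated and the third (index 2) is marked as the temporary end via
 * sg_mark_end(); transport_do_task_sg_chain() later clears that marking
 * when linking the per-task arrays into one chained SGL.
 */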
4514 | ||
4515 | static inline int transport_set_tasks_sectors_disk( | |
4516 | struct se_task *task, | |
4517 | struct se_device *dev, | |
4518 | unsigned long long lba, | |
4519 | u32 sectors, | |
4520 | int *max_sectors_set) | |
4521 | { | |
4522 | if ((lba + sectors) > transport_dev_end_lba(dev)) { | |
4523 | task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1); | |
4524 | ||
4525 | if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) { | |
4526 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | |
4527 | *max_sectors_set = 1; | |
4528 | } | |
4529 | } else { | |
4530 | if (sectors > DEV_ATTRIB(dev)->max_sectors) { | |
4531 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | |
4532 | *max_sectors_set = 1; | |
4533 | } else | |
4534 | task->task_sectors = sectors; | |
4535 | } | |
4536 | ||
4537 | return 0; | |
4538 | } | |
4539 | ||
4540 | static inline int transport_set_tasks_sectors_non_disk( | |
4541 | struct se_task *task, | |
4542 | struct se_device *dev, | |
4543 | unsigned long long lba, | |
4544 | u32 sectors, | |
4545 | int *max_sectors_set) | |
4546 | { | |
4547 | if (sectors > DEV_ATTRIB(dev)->max_sectors) { | |
4548 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | |
4549 | *max_sectors_set = 1; | |
4550 | } else | |
4551 | task->task_sectors = sectors; | |
4552 | ||
4553 | return 0; | |
4554 | } | |
4555 | ||
4556 | static inline int transport_set_tasks_sectors( | |
4557 | struct se_task *task, | |
4558 | struct se_device *dev, | |
4559 | unsigned long long lba, | |
4560 | u32 sectors, | |
4561 | int *max_sectors_set) | |
4562 | { | |
4563 | return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ? | |
4564 | transport_set_tasks_sectors_disk(task, dev, lba, sectors, | |
4565 | max_sectors_set) : | |
4566 | transport_set_tasks_sectors_non_disk(task, dev, lba, sectors, | |
4567 | max_sectors_set); | |
4568 | } | |
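/*
 * Clamp example for the helpers above: with DEV_ATTRIB(dev)->max_sectors
 * = 256, a 300-sector request sets task->task_sectors = 256 and
 * *max_sectors_set = 1, signaling the caller that the remaining 44
 * sectors must be carried by follow-on tasks.
 */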
4569 | ||
4570 | static int transport_map_sg_to_mem( | |
4571 | struct se_cmd *cmd, | |
4572 | struct list_head *se_mem_list, | |
4573 | void *in_mem, | |
4574 | u32 *se_mem_cnt) | |
4575 | { | |
4576 | struct se_mem *se_mem; | |
4577 | struct scatterlist *sg; | |
4578 | u32 sg_count = 1, cmd_size = cmd->data_length; | |
4579 | ||
4580 | if (!in_mem) { | |
4581 | printk(KERN_ERR "No source scatterlist\n"); | |
4582 | return -1; | |
4583 | } | |
4584 | sg = (struct scatterlist *)in_mem; | |
4585 | ||
4586 | while (cmd_size) { | |
4587 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | |
4588 | if (!(se_mem)) { | |
4589 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | |
4590 | return -1; | |
4591 | } | |
4592 | INIT_LIST_HEAD(&se_mem->se_list); | |
4593 | DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u" | |
4594 | " sg_page: %p offset: %d length: %d\n", cmd_size, | |
4595 | sg_page(sg), sg->offset, sg->length); | |
4596 | ||
4597 | se_mem->se_page = sg_page(sg); | |
4598 | se_mem->se_off = sg->offset; | |
4599 | ||
4600 | if (cmd_size > sg->length) { | |
4601 | se_mem->se_len = sg->length; | |
4602 | sg = sg_next(sg); | |
4603 | sg_count++; | |
4604 | } else | |
4605 | se_mem->se_len = cmd_size; | |
4606 | ||
4607 | cmd_size -= se_mem->se_len; | |
4608 | ||
4609 | DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n", | |
4610 | *se_mem_cnt, cmd_size); | |
4611 | DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n", | |
4612 | se_mem->se_page, se_mem->se_off, se_mem->se_len); | |
4613 | ||
4614 | list_add_tail(&se_mem->se_list, se_mem_list); | |
4615 | (*se_mem_cnt)++; | |
4616 | } | |
4617 | ||
4618 | DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)" | |
4619 | " struct se_mem\n", sg_count, *se_mem_cnt); | |
4620 | ||
4621 | if (sg_count != *se_mem_cnt) | |
4622 | BUG(); | |
4623 | ||
4624 | return 0; | |
4625 | } | |
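/*
 * Mapping example for the loop above: a three-element scatterlist of
 * lengths 4096/4096/1024 against cmd->data_length = 6144 produces two
 * struct se_mem entries of 4096 and 2048 bytes; the final scatterlist
 * element is never consumed because cmd_size is exhausted first.
 */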
4626 | ||
4627 | /* transport_map_mem_to_sg(): | |
4628 | * | |
4629 | * | |
4630 | */ | |
4631 | int transport_map_mem_to_sg( | |
4632 | struct se_task *task, | |
4633 | struct list_head *se_mem_list, | |
4634 | void *in_mem, | |
4635 | struct se_mem *in_se_mem, | |
4636 | struct se_mem **out_se_mem, | |
4637 | u32 *se_mem_cnt, | |
4638 | u32 *task_offset) | |
4639 | { | |
4640 | struct se_cmd *se_cmd = task->task_se_cmd; | |
4641 | struct se_mem *se_mem = in_se_mem; | |
4642 | struct scatterlist *sg = (struct scatterlist *)in_mem; | |
4643 | u32 task_size = task->task_size, sg_no = 0; | |
4644 | ||
4645 | if (!sg) { | |
4646 | printk(KERN_ERR "Unable to locate valid struct" | |
4647 | " scatterlist pointer\n"); | |
4648 | return -1; | |
4649 | } | |
4650 | ||
4651 | while (task_size != 0) { | |
4652 | /* | |
4653 | * Set up the contiguous array of scatterlists for |
4654 | * this struct se_task. | |
4655 | */ | |
4656 | sg_assign_page(sg, se_mem->se_page); | |
4657 | ||
4658 | if (*task_offset == 0) { | |
4659 | sg->offset = se_mem->se_off; | |
4660 | ||
4661 | if (task_size >= se_mem->se_len) { | |
4662 | sg->length = se_mem->se_len; | |
4663 | ||
4664 | if (!(list_is_last(&se_mem->se_list, | |
4665 | T_TASK(se_cmd)->t_mem_list))) { | |
4666 | se_mem = list_entry(se_mem->se_list.next, | |
4667 | struct se_mem, se_list); | |
4668 | (*se_mem_cnt)++; | |
4669 | } | |
4670 | } else { | |
4671 | sg->length = task_size; | |
4672 | /* | |
4673 | * Determine if we need to calculate an offset | |
4674 | * into the struct se_mem on the next pass. |
4675 | */ | |
4676 | task_size -= sg->length; | |
4677 | if (!(task_size)) | |
4678 | *task_offset = sg->length; | |
4679 | ||
4680 | goto next; | |
4681 | } | |
4682 | ||
4683 | } else { | |
4684 | sg->offset = (*task_offset + se_mem->se_off); | |
4685 | ||
4686 | if ((se_mem->se_len - *task_offset) > task_size) { | |
4687 | sg->length = task_size; | |
4688 | /* | |
4689 | * Determine if we need to calculate an offset | |
4690 | * into the struct se_mem on the next pass. |
4691 | */ | |
4692 | task_size -= sg->length; | |
4693 | if (!(task_size)) | |
4694 | *task_offset += sg->length; | |
4695 | ||
4696 | goto next; | |
4697 | } else { | |
4698 | sg->length = (se_mem->se_len - *task_offset); | |
4699 | ||
4700 | if (!(list_is_last(&se_mem->se_list, | |
4701 | T_TASK(se_cmd)->t_mem_list))) { | |
4702 | se_mem = list_entry(se_mem->se_list.next, | |
4703 | struct se_mem, se_list); | |
4704 | (*se_mem_cnt)++; | |
4705 | } | |
4706 | } | |
4707 | ||
4708 | *task_offset = 0; | |
4709 | } | |
4710 | task_size -= sg->length; | |
4711 | next: | |
4712 | DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing" | |
4713 | " task_size to(%u), task_offset: %u\n", task->task_no, sg_no, | |
4714 | sg_page(sg), sg->length, sg->offset, task_size, *task_offset); | |
4715 | ||
4716 | sg_no++; | |
4717 | if (!(task_size)) | |
4718 | break; | |
4719 | ||
4720 | sg = sg_next(sg); | |
4721 | ||
4722 | if (task_size > se_cmd->data_length) | |
4723 | BUG(); | |
4724 | } | |
4725 | *out_se_mem = se_mem; | |
4726 | ||
4727 | DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)" | |
4728 | " SGs\n", task->task_no, *se_mem_cnt, sg_no); | |
4729 | ||
4730 | return 0; | |
4731 | } | |
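| /* |
| * Editorial example (not in the original source): when an 8192-byte |
| * struct se_mem spans two tasks of task_size == 4096, the first call |
| * maps 4096 bytes and leaves *task_offset == 4096; the second call |
| * starts its sg->offset at (*task_offset + se_mem->se_off), maps the |
| * remaining 4096 bytes, and resets *task_offset to 0 once the se_mem |
| * is exhausted. |
| */ |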
4732 | ||
4733 | /* | |
4734 | * This function can be used by HW target mode drivers to create a linked | |
4735 | * scatterlist from all contiguously allocated struct se_task->task_sg[]. | |
4736 | * This is intended to be called during the completion path by TCM Core | |
4737 | * when struct target_core_fabric_ops->check_task_sg_chaining is enabled. | |
4738 | */ | |
4739 | void transport_do_task_sg_chain(struct se_cmd *cmd) | |
4740 | { | |
4741 | struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL; | |
4742 | struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL; | |
4743 | struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL; | |
4744 | struct se_task *task; | |
4745 | struct target_core_fabric_ops *tfo = CMD_TFO(cmd); | |
4746 | u32 task_sg_num = 0, sg_count = 0; | |
4747 | int i; | |
4748 | ||
4749 | if (tfo->task_sg_chaining == 0) { | |
4750 | printk(KERN_ERR "task_sg_chaining is diabled for fabric module:" | |
4751 | " %s\n", tfo->get_fabric_name()); | |
4752 | dump_stack(); | |
4753 | return; | |
4754 | } | |
4755 | /* | |
4756 | * Walk the struct se_task list and set up scatterlist chains |
4757 | * for each contiguously allocated struct se_task->task_sg[]. |
4758 | */ | |
4759 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | |
4760 | if (!(task->task_sg) || !(task->task_padded_sg)) | |
4761 | continue; | |
4762 | ||
4763 | if (sg_head && sg_link) { | |
4764 | sg_head_cur = &task->task_sg[0]; | |
4765 | sg_link_cur = &task->task_sg[task->task_sg_num]; | |
4766 | /* | |
4767 | * Either add chain or mark end of scatterlist | |
4768 | */ | |
4769 | if (!(list_is_last(&task->t_list, | |
4770 | &T_TASK(cmd)->t_task_list))) { | |
4771 | /* | |
4772 | * Clear existing SGL termination bit set in | |
4773 | * transport_calc_sg_num(), see sg_mark_end() | |
4774 | */ | |
4775 | sg_end_cur = &task->task_sg[task->task_sg_num - 1]; | |
4776 | sg_end_cur->page_link &= ~0x02; | |
4777 | ||
4778 | sg_chain(sg_head, task_sg_num, sg_head_cur); | |
4779 | sg_count += (task->task_sg_num + 1); | |
4780 | } else | |
4781 | sg_count += task->task_sg_num; | |
4782 | ||
4783 | sg_head = sg_head_cur; | |
4784 | sg_link = sg_link_cur; | |
4785 | task_sg_num = task->task_sg_num; | |
4786 | continue; | |
4787 | } | |
4788 | sg_head = sg_first = &task->task_sg[0]; | |
4789 | sg_link = &task->task_sg[task->task_sg_num]; | |
4790 | task_sg_num = task->task_sg_num; | |
4791 | /* | |
4792 | * Check for single task.. | |
4793 | */ | |
4794 | if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) { | |
4795 | /* | |
4796 | * Clear existing SGL termination bit set in | |
4797 | * transport_calc_sg_num(), see sg_mark_end() | |
4798 | */ | |
4799 | sg_end = &task->task_sg[task->task_sg_num - 1]; | |
4800 | sg_end->page_link &= ~0x02; | |
4801 | sg_count += (task->task_sg_num + 1); | |
4802 | } else | |
4803 | sg_count += task->task_sg_num; | |
4804 | } | |
4805 | /* | |
4806 | * Setup the starting pointer and total t_tasks_sg_linked_no including | |
4807 | * padding SGs for linking and to mark the end. | |
4808 | */ | |
4809 | T_TASK(cmd)->t_tasks_sg_chained = sg_first; | |
4810 | T_TASK(cmd)->t_tasks_sg_chained_no = sg_count; | |
4811 | ||
4812 | DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and" | |
4813 | " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained, | |
4814 | T_TASK(cmd)->t_tasks_sg_chained_no); | |
4815 | ||
4816 | for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg, | |
4817 | T_TASK(cmd)->t_tasks_sg_chained_no, i) { | |
4818 | ||
4819 | DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n", | |
4820 | sg, sg_page(sg), sg->length, sg->offset); | |
4821 | if (sg_is_chain(sg)) | |
4822 | DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); | |
4823 | if (sg_is_last(sg)) | |
4824 | DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg); | |
4825 | } | |
4826 | ||
4827 | } | |
4828 | EXPORT_SYMBOL(transport_do_task_sg_chain); | |
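| /* |
| * Editorial sketch of the intended caller (hypothetical fabric code, |
| * not part of this file): a HW target driver with task_sg_chaining |
| * enabled can hand the single chained SGL straight to its DMA setup: |
| * |
| *	struct scatterlist *sg; |
| *	int count; |
| * |
| *	transport_do_task_sg_chain(se_cmd); |
| *	for_each_sg(T_TASK(se_cmd)->t_tasks_sg_chained, sg, |
| *		    T_TASK(se_cmd)->t_tasks_sg_chained_no, count) |
| *		queue_dma_segment(sg); |
| * |
| * where queue_dma_segment() stands in for the driver's own mapping |
| * primitive. |
| */ |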
4829 | ||
4830 | static int transport_do_se_mem_map( | |
4831 | struct se_device *dev, | |
4832 | struct se_task *task, | |
4833 | struct list_head *se_mem_list, | |
4834 | void *in_mem, | |
4835 | struct se_mem *in_se_mem, | |
4836 | struct se_mem **out_se_mem, | |
4837 | u32 *se_mem_cnt, | |
4838 | u32 *task_offset_in) | |
4839 | { | |
4840 | u32 task_offset = *task_offset_in; | |
4841 | int ret = 0; | |
4842 | /* | |
4843 | * se_subsystem_api_t->do_se_mem_map is used when internal allocation | |
4844 | * has been done by the transport plugin. | |
4845 | */ | |
4846 | if (TRANSPORT(dev)->do_se_mem_map) { | |
4847 | ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list, | |
4848 | in_mem, in_se_mem, out_se_mem, se_mem_cnt, | |
4849 | task_offset_in); | |
4850 | if (ret == 0) | |
4851 | T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; | |
4852 | ||
4853 | return ret; | |
4854 | } | |
e63af958 NB |
4855 | |
4856 | BUG_ON(list_empty(se_mem_list)); | |
c66ac9db NB |
4857 | /* |
4858 | * This is the normal path for all non-BIDI and BIDI-COMMAND |
4859 | * WRITE payloads. If we need to do BIDI READ passthrough for |
4860 | * TCM/pSCSI, the first call to transport_do_se_mem_map() -> |
4861 | * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the |
4862 | * allocation for task->task_sg_bidi, and the subsequent call to |
4863 | * transport_do_se_mem_map() from transport_generic_get_cdb_count() will map the extra BIDI READ payload into it. |
4864 | */ | |
4865 | if (!(task->task_sg_bidi)) { | |
4866 | /* | |
4867 | * Assume default that transport plugin speaks preallocated | |
4868 | * scatterlists. | |
4869 | */ | |
4870 | if (!(transport_calc_sg_num(task, in_se_mem, task_offset))) | |
4871 | return -1; | |
4872 | /* | |
4873 | * struct se_task->task_sg now contains the struct scatterlist array. | |
4874 | */ | |
4875 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, | |
4876 | in_se_mem, out_se_mem, se_mem_cnt, | |
4877 | task_offset_in); | |
4878 | } | |
4879 | /* | |
4880 | * Handle the se_mem_list -> struct task->task_sg_bidi | |
4881 | * memory map for the extra BIDI READ payload | |
4882 | */ | |
4883 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi, | |
4884 | in_se_mem, out_se_mem, se_mem_cnt, | |
4885 | task_offset_in); | |
4886 | } | |
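| /* |
| * Editorial summary (derived from the comments above, not in the |
| * original source) of the TCM/pSCSI BIDI passthrough call order: |
| * |
| *	1st transport_do_se_mem_map() -> allocates task->task_sg and |
| *	    task->task_sg_bidi, maps the WRITE payload into task->task_sg |
| *	2nd transport_do_se_mem_map() -> maps the extra BIDI READ payload |
| *	    into task->task_sg_bidi |
| */ |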
4887 | ||
4888 | static u32 transport_generic_get_cdb_count( | |
4889 | struct se_cmd *cmd, | |
4890 | unsigned long long lba, | |
4891 | u32 sectors, | |
4892 | enum dma_data_direction data_direction, | |
4893 | struct list_head *mem_list, | |
4894 | int set_counts) | |
4895 | { | |
4896 | unsigned char *cdb = NULL; | |
4897 | struct se_task *task; | |
4898 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; | |
4899 | struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL; | |
4900 | struct se_device *dev = SE_DEV(cmd); | |
4901 | int max_sectors_set = 0, ret; | |
4902 | u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0; | |
4903 | ||
4904 | if (!mem_list) { | |
4905 | printk(KERN_ERR "mem_list is NULL in transport_generic_get" | |
4906 | "_cdb_count()\n"); | |
4907 | return 0; | |
4908 | } | |
4909 | /* | |
4910 | * Using RAMDISK_DR backstores is the only case where mem_list |
4911 | * will ever be empty at this point. |
4912 | */ | |
4913 | if (!(list_empty(mem_list))) | |
4914 | se_mem = list_entry(mem_list->next, struct se_mem, se_list); | |
4915 | /* | |
4916 | * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to | |
4917 | * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation | |
4918 | */ | |
4919 | if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && | |
4920 | !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) && | |
4921 | (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) | |
4922 | se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next, | |
4923 | struct se_mem, se_list); | |
4924 | ||
4925 | while (sectors) { | |
4926 | DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", | |
4927 | CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors, | |
4928 | transport_dev_end_lba(dev)); | |
4929 | ||
4930 | task = transport_generic_get_task(cmd, data_direction); | |
4931 | if (!(task)) | |
4932 | goto out; | |
4933 | ||
4934 | transport_set_tasks_sectors(task, dev, lba, sectors, | |
4935 | &max_sectors_set); | |
4936 | ||
4937 | task->task_lba = lba; | |
4938 | lba += task->task_sectors; | |
4939 | sectors -= task->task_sectors; | |
4940 | task->task_size = (task->task_sectors * | |
4941 | DEV_ATTRIB(dev)->block_size); | |
4942 | ||
4943 | cdb = TRANSPORT(dev)->get_cdb(task); | |
4944 | if ((cdb)) { | |
4945 | memcpy(cdb, T_TASK(cmd)->t_task_cdb, | |
4946 | scsi_command_size(T_TASK(cmd)->t_task_cdb)); | |
4947 | cmd->transport_split_cdb(task->task_lba, | |
4948 | &task->task_sectors, cdb); | |
4949 | } | |
4950 | ||
4951 | /* | |
4952 | * Perform the SE OBJ plugin and/or Transport plugin specific |
4953 | * mapping for T_TASK(cmd)->t_mem_list, and set up |
4954 | * task->task_sg and, if necessary, task->task_sg_bidi. |
4955 | */ | |
4956 | ret = transport_do_se_mem_map(dev, task, mem_list, | |
4957 | NULL, se_mem, &se_mem_lout, &se_mem_cnt, | |
4958 | &task_offset_in); | |
4959 | if (ret < 0) | |
4960 | goto out; | |
4961 | ||
4962 | se_mem = se_mem_lout; | |
4963 | /* | |
4964 | * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi | |
4965 | * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI | |
4966 | * | |
4967 | * Note that the first call to transport_do_se_mem_map() above will | |
4968 | * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map() | |
4969 | * -> transport_calc_sg_num(), and the second here will do the | |
4970 | * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI. | |
4971 | */ | |
4972 | if (task->task_sg_bidi != NULL) { | |
4973 | ret = transport_do_se_mem_map(dev, task, | |
4974 | T_TASK(cmd)->t_mem_bidi_list, NULL, | |
4975 | se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, | |
4976 | &task_offset_in); | |
4977 | if (ret < 0) | |
4978 | goto out; | |
4979 | ||
4980 | se_mem_bidi = se_mem_bidi_lout; | |
4981 | } | |
4982 | task_cdbs++; | |
4983 | ||
4984 | DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n", | |
4985 | task_cdbs, task->task_sg_num); | |
4986 | ||
4987 | if (max_sectors_set) { | |
4988 | max_sectors_set = 0; | |
4989 | continue; | |
4990 | } | |
4991 | ||
4992 | if (!sectors) | |
4993 | break; | |
4994 | } | |
4995 | ||
4996 | if (set_counts) { | |
4997 | atomic_inc(&T_TASK(cmd)->t_fe_count); | |
4998 | atomic_inc(&T_TASK(cmd)->t_se_count); | |
4999 | } | |
5000 | ||
5001 | DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", | |
5002 | CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE) | |
5003 | ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs); | |
5004 | ||
5005 | return task_cdbs; | |
5006 | out: | |
5007 | return 0; | |
5008 | } | |
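| /* |
| * Editorial example (not in the original source): with |
| * DEV_ATTRIB(dev)->max_sectors == 256 and block_size == 512, a |
| * 1024-sector request is carved into four struct se_task descriptors |
| * of 256 sectors (131072 bytes) each, and |
| * transport_generic_get_cdb_count() returns task_cdbs == 4. |
| */ |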
5009 | ||
5010 | static int | |
5011 | transport_map_control_cmd_to_task(struct se_cmd *cmd) | |
5012 | { | |
5013 | struct se_device *dev = SE_DEV(cmd); | |
5014 | unsigned char *cdb; | |
5015 | struct se_task *task; | |
5016 | int ret; | |
5017 | ||
5018 | task = transport_generic_get_task(cmd, cmd->data_direction); | |
5019 | if (!task) | |
5020 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | |
5021 | ||
5022 | cdb = TRANSPORT(dev)->get_cdb(task); | |
5023 | if (cdb) | |
5024 | memcpy(cdb, cmd->t_task->t_task_cdb, | |
5025 | scsi_command_size(cmd->t_task->t_task_cdb)); | |
5026 | ||
5027 | task->task_size = cmd->data_length; | |
5028 | task->task_sg_num = | |
5029 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0; | |
5030 | ||
5031 | atomic_inc(&cmd->t_task->t_fe_count); | |
5032 | atomic_inc(&cmd->t_task->t_se_count); | |
5033 | ||
5034 | if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { | |
5035 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; | |
5036 | u32 se_mem_cnt = 0, task_offset = 0; | |
5037 | ||
e63af958 NB |
5038 | if (!list_empty(T_TASK(cmd)->t_mem_list)) |
5039 | se_mem = list_entry(T_TASK(cmd)->t_mem_list->next, | |
5040 | struct se_mem, se_list); | |
c66ac9db NB |
5041 | |
5042 | ret = transport_do_se_mem_map(dev, task, | |
5043 | cmd->t_task->t_mem_list, NULL, se_mem, | |
5044 | &se_mem_lout, &se_mem_cnt, &task_offset); | |
5045 | if (ret < 0) | |
5046 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | |
5047 | ||
5048 | if (dev->transport->map_task_SG) | |
5049 | return dev->transport->map_task_SG(task); | |
5050 | return 0; | |
5051 | } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { | |
5052 | if (dev->transport->map_task_non_SG) | |
5053 | return dev->transport->map_task_non_SG(task); | |
5054 | return 0; | |
5055 | } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { | |
5056 | if (dev->transport->cdb_none) | |
5057 | return dev->transport->cdb_none(task); | |
5058 | return 0; | |
5059 | } else { | |
5060 | BUG(); | |
5061 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | |
5062 | } | |
5063 | } | |
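| /* |
| * Editorial summary (not in the original source) of the dispatch above |
| * by control CDB class: |
| * |
| *	SCF_SCSI_CONTROL_SG_IO_CDB    -> map se_mem list, map_task_SG() |
| *	SCF_SCSI_CONTROL_NONSG_IO_CDB -> map_task_non_SG() |
| *	SCF_SCSI_NON_DATA_CDB         -> cdb_none() |
| */ |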
5064 | ||
5065 | /* transport_generic_new_cmd(): Called from transport_processing_thread() | |
5066 | * | |
5067 | * Allocate storage transport resources from a set of values predefined | |
5068 | * by transport_generic_cmd_sequencer() from the iSCSI Target RX process. | |
5069 | * Any non-zero return is treated as an "out of resource" exception. |
5070 | */ | |
5071 | /* | |
5072 | * Generate struct se_task(s) and/or their payloads for this CDB. | |
5073 | */ | |
5074 | static int transport_generic_new_cmd(struct se_cmd *cmd) | |
5075 | { | |
5076 | struct se_portal_group *se_tpg; | |
5077 | struct se_task *task; | |
5078 | struct se_device *dev = SE_DEV(cmd); | |
5079 | int ret = 0; | |
5080 | ||
5081 | /* | |
5082 | * Determine if the TCM fabric module has already allocated physical |
5083 | * memory, and is directly calling transport_generic_map_mem_to_cmd() |
5084 | * to set up beforehand the linked list of physical memory at |
5085 | * T_TASK(cmd)->t_mem_list of struct se_mem->se_page | |
5086 | */ | |
5087 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { | |
5088 | ret = transport_allocate_resources(cmd); | |
5089 | if (ret < 0) | |
5090 | return ret; | |
5091 | } | |
5092 | ||
5093 | ret = transport_get_sectors(cmd); | |
5094 | if (ret < 0) | |
5095 | return ret; | |
5096 | ||
5097 | ret = transport_new_cmd_obj(cmd); | |
5098 | if (ret < 0) | |
5099 | return ret; | |
5100 | ||
5101 | /* | |
5102 | * Determine if the calling TCM fabric module is talking to | |
5103 | * Linux/NET via kernel sockets and needs to allocate a | |
5104 | * struct iovec array to complete the struct se_cmd | |
5105 | */ | |
5106 | se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg; | |
5107 | if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) { | |
5108 | ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd); | |
5109 | if (ret < 0) | |
5110 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | |
5111 | } | |
5112 | ||
5113 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { | |
5114 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | |
5115 | if (atomic_read(&task->task_sent)) | |
5116 | continue; | |
5117 | if (!dev->transport->map_task_SG) | |
5118 | continue; | |
5119 | ||
5120 | ret = dev->transport->map_task_SG(task); | |
5121 | if (ret < 0) | |
5122 | return ret; | |
5123 | } | |
5124 | } else { | |
5125 | ret = transport_map_control_cmd_to_task(cmd); | |
5126 | if (ret < 0) | |
5127 | return ret; | |
5128 | } | |
5129 | ||
5130 | /* | |
5131 | * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready. |
5132 | * This WRITE struct se_cmd (and all of its associated struct se_task's) |
5133 | * will be added to the struct se_device execution queue after its WRITE |
5134 | * data has arrived (i.e. it gets handled by the transport processing |
5135 | * thread a second time). |
5136 | */ | |
5137 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
5138 | transport_add_tasks_to_state_queue(cmd); | |
5139 | return transport_generic_write_pending(cmd); | |
5140 | } | |
5141 | /* | |
5142 | * Everything else but a WRITE, add the struct se_cmd's struct se_task's | |
5143 | * to the execution queue. | |
5144 | */ | |
5145 | transport_execute_tasks(cmd); | |
5146 | return 0; | |
5147 | } | |
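| /* |
| * Editorial summary (not in the original source) of the net effect of |
| * transport_generic_new_cmd() per data direction: |
| * |
| *	READ / non-data: allocate -> map -> transport_execute_tasks() |
| *	WRITE:           allocate -> map -> transport_generic_write_pending() |
| * |
| * so a WRITE is only queued for execution after its data has arrived |
| * and transport_generic_process_write() runs. |
| */ |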
5148 | ||
5149 | /* transport_generic_process_write(): |
5150 | * |
5151 | * Called once all expected fabric WRITE data has arrived to add the command's tasks to the execution queue. |
5152 | */ |
5153 | void transport_generic_process_write(struct se_cmd *cmd) | |
5154 | { | |
5155 | #if 0 | |
5156 | /* | |
5157 | * Copy SCSI Presented DTL sector(s) from received buffers allocated to | |
5158 | * original EDTL | |
5159 | */ | |
5160 | if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { | |
5161 | if (!T_TASK(cmd)->t_tasks_se_num) { | |
5162 | unsigned char *dst, *buf = | |
5163 | (unsigned char *)T_TASK(cmd)->t_task_buf; | |
5164 | ||
5165 | dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL); |
5166 | if (!(dst)) { | |
5167 | printk(KERN_ERR "Unable to allocate memory for" | |
5168 | " WRITE underflow\n"); | |
5169 | transport_generic_request_failure(cmd, NULL, | |
5170 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); | |
5171 | return; | |
5172 | } | |
5173 | memcpy(dst, buf, cmd->cmd_spdtl); | |
5174 | ||
5175 | kfree(T_TASK(cmd)->t_task_buf); | |
5176 | T_TASK(cmd)->t_task_buf = dst; | |
5177 | } else { | |
5178 | struct scatterlist *sg = | |
5179 | (struct scatterlist *)T_TASK(cmd)->t_task_buf; |
5180 | struct scatterlist *orig_sg; | |
5181 | ||
5182 | orig_sg = kzalloc(sizeof(struct scatterlist) * | |
5183 | T_TASK(cmd)->t_tasks_se_num, | |
5184 | GFP_KERNEL); |
5185 | if (!(orig_sg)) { | |
5186 | printk(KERN_ERR "Unable to allocate memory" | |
5187 | " for WRITE underflow\n"); | |
5188 | transport_generic_request_failure(cmd, NULL, | |
5189 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); | |
5190 | return; | |
5191 | } | |
5192 | ||
5193 | memcpy(orig_sg, T_TASK(cmd)->t_task_buf, | |
5194 | sizeof(struct scatterlist) * | |
5195 | T_TASK(cmd)->t_tasks_se_num); | |
5196 | ||
5197 | cmd->data_length = cmd->cmd_spdtl; | |
5198 | /* | |
5199 | * FIXME, clear out original struct se_task and state | |
5200 | * information. | |
5201 | */ | |
5202 | if (transport_generic_new_cmd(cmd) < 0) { | |
5203 | transport_generic_request_failure(cmd, NULL, | |
5204 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); | |
5205 | kfree(orig_sg); | |
5206 | return; | |
5207 | } | |
5208 | ||
5209 | transport_memcpy_write_sg(cmd, orig_sg); | |
5210 | } | |
5211 | } | |
5212 | #endif | |
5213 | transport_execute_tasks(cmd); | |
5214 | } | |
5215 | EXPORT_SYMBOL(transport_generic_process_write); | |
5216 | ||
5217 | /* transport_generic_write_pending(): |
5218 | * |
5219 | * Set the command to TRANSPORT_WRITE_PENDING state and signal the fabric that its WRITE buffers are ready. |
5220 | */ |
5221 | static int transport_generic_write_pending(struct se_cmd *cmd) | |
5222 | { | |
5223 | unsigned long flags; | |
5224 | int ret; | |
5225 | ||
5226 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
5227 | cmd->t_state = TRANSPORT_WRITE_PENDING; | |
5228 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
5229 | /* | |
5230 | * For the TCM control CDBs using a contiguous buffer, do the memcpy | |
5231 | * from the passed Linux/SCSI struct scatterlist located at | |
5232 | * T_TASK(se_cmd)->t_task_pt_sgl to the contiguous buffer at |
5233 | * T_TASK(se_cmd)->t_task_buf. | |
5234 | */ | |
5235 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) | |
5236 | transport_memcpy_read_contig(cmd, | |
5237 | T_TASK(cmd)->t_task_buf, | |
5238 | T_TASK(cmd)->t_task_pt_sgl); | |
5239 | /* | |
5240 | * Clear the se_cmd for WRITE_PENDING status in order to set | |
5241 | * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data | |
5242 | * can be called from HW target mode interrupt code. This is safe | |
5243 | * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending | |
5244 | * because the se_cmd->se_lun pointer is not being cleared. | |
5245 | */ | |
5246 | transport_cmd_check_stop(cmd, 1, 0); | |
5247 | ||
5248 | /* | |
5249 | * Call the fabric write_pending function here to let the | |
5250 | * frontend know that WRITE buffers are ready. | |
5251 | */ | |
5252 | ret = CMD_TFO(cmd)->write_pending(cmd); | |
5253 | if (ret < 0) | |
5254 | return ret; | |
5255 | ||
5256 | return PYX_TRANSPORT_WRITE_PENDING; | |
5257 | } | |
5258 | ||
5259 | /* transport_release_cmd_to_pool(): |
5260 | * |
5261 | * Free storage engine resources and return the descriptor to the fabric module's command pool. |
5262 | */ |
5263 | void transport_release_cmd_to_pool(struct se_cmd *cmd) | |
5264 | { | |
5265 | BUG_ON(!T_TASK(cmd)); | |
5266 | BUG_ON(!CMD_TFO(cmd)); | |
5267 | ||
5268 | transport_free_se_cmd(cmd); | |
5269 | CMD_TFO(cmd)->release_cmd_to_pool(cmd); | |
5270 | } | |
5271 | EXPORT_SYMBOL(transport_release_cmd_to_pool); | |
5272 | ||
5273 | /* transport_generic_free_cmd(): | |
5274 | * | |
5275 | * Called from processing frontend to release storage engine resources | |
5276 | */ | |
5277 | void transport_generic_free_cmd( | |
5278 | struct se_cmd *cmd, | |
5279 | int wait_for_tasks, | |
5280 | int release_to_pool, | |
5281 | int session_reinstatement) | |
5282 | { | |
5283 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd)) | |
5284 | transport_release_cmd_to_pool(cmd); | |
5285 | else { | |
5286 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); | |
5287 | ||
5288 | if (SE_LUN(cmd)) { | |
5289 | #if 0 | |
5290 | printk(KERN_INFO "cmd: %p ITT: 0x%08x contains" | |
5291 | " SE_LUN(cmd)\n", cmd, | |
5292 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
5293 | #endif | |
5294 | transport_lun_remove_cmd(cmd); | |
5295 | } | |
5296 | ||
5297 | if (wait_for_tasks && cmd->transport_wait_for_tasks) | |
5298 | cmd->transport_wait_for_tasks(cmd, 0, 0); | |
5299 | ||
5300 | transport_generic_remove(cmd, release_to_pool, | |
5301 | session_reinstatement); | |
5302 | } | |
5303 | } | |
5304 | EXPORT_SYMBOL(transport_generic_free_cmd); | |
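| /* |
| * Editorial sketch (hypothetical caller, not in this file): a fabric |
| * module would typically release a command from its response-complete |
| * callback with: |
| * |
| *	transport_generic_free_cmd(se_cmd, 1, 1, 0); |
| * |
| * i.e. wait for outstanding tasks, release to the fabric pool, and |
| * skip session reinstatement. |
| */ |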
5305 | ||
5306 | static void transport_nop_wait_for_tasks( | |
5307 | struct se_cmd *cmd, | |
5308 | int remove_cmd, | |
5309 | int session_reinstatement) | |
5310 | { | |
5311 | return; | |
5312 | } | |
5313 | ||
5314 | /* transport_lun_wait_for_tasks(): | |
5315 | * | |
5316 | * Called from ConfigFS context to stop the passed struct se_cmd to allow | |
5317 | * a struct se_lun to be successfully shut down. |
5318 | */ | |
5319 | static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | |
5320 | { | |
5321 | unsigned long flags; | |
5322 | int ret; | |
5323 | /* | |
5324 | * If the frontend has already requested this struct se_cmd to | |
5325 | * be stopped, we can safely ignore this struct se_cmd. | |
5326 | */ | |
5327 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
5328 | if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { | |
5329 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); | |
5330 | DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" | |
5331 | " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd)); | |
5332 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
5333 | transport_cmd_check_stop(cmd, 1, 0); | |
5334 | return -1; | |
5335 | } | |
5336 | atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1); | |
5337 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
5338 | ||
5339 | wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); | |
5340 | ||
5341 | ret = transport_stop_tasks_for_cmd(cmd); | |
5342 | ||
5343 | DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" | |
5344 | " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret); | |
5345 | if (!ret) { | |
5346 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", | |
5347 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
5348 | wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp); | |
5349 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", | |
5350 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
5351 | } | |
5352 | transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); | |
5353 | ||
5354 | return 0; | |
5355 | } | |
5356 | ||
5357 | /* #define DEBUG_CLEAR_LUN */ | |
5358 | #ifdef DEBUG_CLEAR_LUN | |
5359 | #define DEBUG_CLEAR_L(x...) printk(KERN_INFO x) | |
5360 | #else | |
5361 | #define DEBUG_CLEAR_L(x...) | |
5362 | #endif | |
5363 | ||
5364 | static void __transport_clear_lun_from_sessions(struct se_lun *lun) | |
5365 | { | |
5366 | struct se_cmd *cmd = NULL; | |
5367 | unsigned long lun_flags, cmd_flags; | |
5368 | /* | |
5369 | * Do exception processing and return CHECK_CONDITION status to the | |
5370 | * Initiator Port. | |
5371 | */ | |
5372 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | |
5373 | while (!list_empty_careful(&lun->lun_cmd_list)) { | |
5374 | cmd = list_entry(lun->lun_cmd_list.next, | |
5375 | struct se_cmd, se_lun_list); | |
5376 | list_del(&cmd->se_lun_list); | |
5377 | ||
5378 | if (!(T_TASK(cmd))) { | |
5379 | printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL" | |
5380 | "[i,t]_state: %u/%u\n", | |
5381 | CMD_TFO(cmd)->get_task_tag(cmd), | |
5382 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); | |
5383 | BUG(); | |
5384 | } | |
5385 | atomic_set(&T_TASK(cmd)->transport_lun_active, 0); | |
5386 | /* | |
5387 | * This will notify transport_cmd_check_stop() |
5388 | * that a LUN shutdown is in |
5389 | * progress for this struct se_cmd. |
5390 | */ | |
5391 | spin_lock(&T_TASK(cmd)->t_state_lock); | |
5392 | DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport" | |
5393 | "_lun_stop for ITT: 0x%08x\n", | |
5394 | SE_LUN(cmd)->unpacked_lun, | |
5395 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
5396 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 1); | |
5397 | spin_unlock(&T_TASK(cmd)->t_state_lock); | |
5398 | ||
5399 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | |
5400 | ||
5401 | if (!(SE_LUN(cmd))) { | |
5402 | printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n", | |
5403 | CMD_TFO(cmd)->get_task_tag(cmd), | |
5404 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); | |
5405 | BUG(); | |
5406 | } | |
5407 | /* | |
5408 | * If the Storage engine still owns this struct se_cmd, determine |
5409 | * and/or stop its context. | |
5410 | */ | |
5411 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport" | |
5412 | "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun, | |
5413 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
5414 | ||
5415 | if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) { | |
5416 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | |
5417 | continue; | |
5418 | } | |
5419 | ||
5420 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun" | |
5421 | "_wait_for_tasks(): SUCCESS\n", | |
5422 | SE_LUN(cmd)->unpacked_lun, | |
5423 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
5424 | ||
5425 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); | |
5426 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | |
5427 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); | |
5428 | goto check_cond; | |
5429 | } | |
5430 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | |
5431 | transport_all_task_dev_remove_state(cmd); | |
5432 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); | |
5433 | ||
5434 | transport_free_dev_tasks(cmd); | |
5435 | /* | |
5436 | * The Storage engine stopped this struct se_cmd before it was | |
5437 | * sent to the fabric frontend for delivery back to the |
5438 | * Initiator Node. Return this SCSI CDB back with a |
5439 | * CHECK_CONDITION status. |
5440 | */ | |
5441 | check_cond: | |
5442 | transport_send_check_condition_and_sense(cmd, | |
5443 | TCM_NON_EXISTENT_LUN, 0); | |
5444 | /* | |
5445 | * If the fabric frontend is waiting for this struct se_cmd to |
5446 | * be released, notify the waiting thread now that the LU has |
5447 | * finished accessing it. | |
5448 | */ | |
5449 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); | |
5450 | if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) { | |
5451 | DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" | |
5452 | " struct se_cmd: %p ITT: 0x%08x\n", | |
5453 | lun->unpacked_lun, | |
5454 | cmd, CMD_TFO(cmd)->get_task_tag(cmd)); | |
5455 | ||
5456 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | |
5457 | cmd_flags); | |
5458 | transport_cmd_check_stop(cmd, 1, 0); | |
5459 | complete(&T_TASK(cmd)->transport_lun_fe_stop_comp); | |
5460 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | |
5461 | continue; | |
5462 | } | |
5463 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", | |
5464 | lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd)); | |
5465 | ||
5466 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); | |
5467 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | |
5468 | } | |
5469 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | |
5470 | } | |
5471 | ||
5472 | static int transport_clear_lun_thread(void *p) | |
5473 | { | |
5474 | struct se_lun *lun = (struct se_lun *)p; | |
5475 | ||
5476 | __transport_clear_lun_from_sessions(lun); | |
5477 | complete(&lun->lun_shutdown_comp); | |
5478 | ||
5479 | return 0; | |
5480 | } | |
5481 | ||
5482 | int transport_clear_lun_from_sessions(struct se_lun *lun) | |
5483 | { | |
5484 | struct task_struct *kt; | |
5485 | ||
5486 | kt = kthread_run(transport_clear_lun_thread, (void *)lun, | |
5487 | "tcm_cl_%u", lun->unpacked_lun); | |
5488 | if (IS_ERR(kt)) { | |
5489 | printk(KERN_ERR "Unable to start clear_lun thread\n"); | |
5490 | return -1; | |
5491 | } | |
5492 | wait_for_completion(&lun->lun_shutdown_comp); | |
5493 | ||
5494 | return 0; | |
5495 | } | |
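| /* |
| * Editorial note (not in the original source): despite spawning a |
| * kthread, the helper above is synchronous for its caller; |
| * kthread_run() only provides a separate context for |
| * __transport_clear_lun_from_sessions(), and the |
| * wait_for_completion(&lun->lun_shutdown_comp) blocks the ConfigFS |
| * caller until every command on the LUN list has been drained. |
| */ |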
5496 | ||
5497 | /* transport_generic_wait_for_tasks(): | |
5498 | * | |
5499 | * Called from frontend or passthrough context to wait for storage engine | |
5500 | * to pause and/or release frontend generated struct se_cmd. | |
5501 | */ | |
5502 | static void transport_generic_wait_for_tasks( | |
5503 | struct se_cmd *cmd, | |
5504 | int remove_cmd, | |
5505 | int session_reinstatement) | |
5506 | { | |
5507 | unsigned long flags; | |
5508 | ||
5509 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) | |
5510 | return; | |
5511 | ||
5512 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
5513 | /* | |
5514 | * If we are already stopped due to an external event (i.e. LUN shutdown), |
5515 | * sleep until the connection can have the passed struct se_cmd back. |
5516 | * T_TASK(cmd)->transport_lun_fe_stop_comp will be completed by |
5517 | * transport_clear_lun_from_sessions() once the ConfigFS context caller | |
5518 | * has completed its operation on the struct se_cmd. | |
5519 | */ | |
5520 | if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { | |
5521 | ||
5522 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" | |
5523 | " wait_for_completion(&T_TASK(cmd)transport_lun_fe" | |
5524 | "_stop_comp); for ITT: 0x%08x\n", | |
5525 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
5526 | /* | |
5527 | * There is a special case for WRITES where a FE exception + | |
5528 | * LUN shutdown means ConfigFS context is still sleeping on | |
5529 | * transport_lun_stop_comp in transport_lun_wait_for_tasks(). | |
5530 | * We go ahead and complete transport_lun_stop_comp just to |
5531 | * be sure here. |
5532 | */ | |
5533 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
5534 | complete(&T_TASK(cmd)->transport_lun_stop_comp); | |
5535 | wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); | |
5536 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
5537 | ||
5538 | transport_all_task_dev_remove_state(cmd); | |
5539 | /* | |
5540 | * At this point, the frontend who was the originator of this | |
5541 | * struct se_cmd, now owns the structure and can be released through | |
5542 | * normal means below. | |
5543 | */ | |
5544 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped" | |
5545 | " wait_for_completion(&T_TASK(cmd)transport_lun_fe_" | |
5546 | "stop_comp); for ITT: 0x%08x\n", | |
5547 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
5548 | ||
5549 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); | |
5550 | } | |
52208ae3 NB |
5551 | if (!atomic_read(&T_TASK(cmd)->t_transport_active) || |
5552 | atomic_read(&T_TASK(cmd)->t_transport_aborted)) | |
c66ac9db NB |
5553 | goto remove; |
5554 | ||
5555 | atomic_set(&T_TASK(cmd)->t_transport_stop, 1); | |
5556 | ||
5557 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" | |
5558 | " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" | |
5559 | " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), | |
5560 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, | |
5561 | cmd->deferred_t_state); | |
5562 | ||
5563 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
5564 | ||
5565 | wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); | |
5566 | ||
5567 | wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp); | |
5568 | ||
5569 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
5570 | atomic_set(&T_TASK(cmd)->t_transport_active, 0); | |
5571 | atomic_set(&T_TASK(cmd)->t_transport_stop, 0); | |
5572 | ||
5573 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" | |
5574 | "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n", | |
5575 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
5576 | remove: | |
5577 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
5578 | if (!remove_cmd) | |
5579 | return; | |
5580 | ||
5581 | transport_generic_free_cmd(cmd, 0, 0, session_reinstatement); | |
5582 | } | |
5583 | ||
5584 | static int transport_get_sense_codes( | |
5585 | struct se_cmd *cmd, | |
5586 | u8 *asc, | |
5587 | u8 *ascq) | |
5588 | { | |
5589 | *asc = cmd->scsi_asc; | |
5590 | *ascq = cmd->scsi_ascq; | |
5591 | ||
5592 | return 0; | |
5593 | } | |
5594 | ||
5595 | static int transport_set_sense_codes( | |
5596 | struct se_cmd *cmd, | |
5597 | u8 asc, | |
5598 | u8 ascq) | |
5599 | { | |
5600 | cmd->scsi_asc = asc; | |
5601 | cmd->scsi_ascq = ascq; | |
5602 | ||
5603 | return 0; | |
5604 | } | |
5605 | ||
5606 | int transport_send_check_condition_and_sense( | |
5607 | struct se_cmd *cmd, | |
5608 | u8 reason, | |
5609 | int from_transport) | |
5610 | { | |
5611 | unsigned char *buffer = cmd->sense_buffer; | |
5612 | unsigned long flags; | |
5613 | int offset; | |
5614 | u8 asc = 0, ascq = 0; | |
5615 | ||
5616 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
5617 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | |
5618 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
5619 | return 0; | |
5620 | } | |
5621 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; | |
5622 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
5623 | ||
5624 | if (!reason && from_transport) | |
5625 | goto after_reason; | |
5626 | ||
5627 | if (!from_transport) | |
5628 | cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; | |
5629 | /* | |
5630 | * Data Segment and SenseLength of the fabric response PDU. | |
5631 | * | |
5632 | * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE | |
5633 | * from include/scsi/scsi_cmnd.h | |
5634 | */ | |
5635 | offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, | |
5636 | TRANSPORT_SENSE_BUFFER); | |
5637 | /* | |
5638 | * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses | |
5639 | * SENSE KEY values from include/scsi/scsi.h | |
5640 | */ | |
5641 | switch (reason) { | |
5642 | case TCM_NON_EXISTENT_LUN: | |
5643 | case TCM_UNSUPPORTED_SCSI_OPCODE: | |
5644 | case TCM_SECTOR_COUNT_TOO_MANY: | |
5645 | /* CURRENT ERROR */ | |
5646 | buffer[offset] = 0x70; | |
5647 | /* ILLEGAL REQUEST */ | |
5648 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
5649 | /* INVALID COMMAND OPERATION CODE */ | |
5650 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; | |
5651 | break; | |
5652 | case TCM_UNKNOWN_MODE_PAGE: | |
5653 | /* CURRENT ERROR */ | |
5654 | buffer[offset] = 0x70; | |
5655 | /* ILLEGAL REQUEST */ | |
5656 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
5657 | /* INVALID FIELD IN CDB */ | |
5658 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | |
5659 | break; | |
5660 | case TCM_CHECK_CONDITION_ABORT_CMD: | |
5661 | /* CURRENT ERROR */ | |
5662 | buffer[offset] = 0x70; | |
5663 | /* ABORTED COMMAND */ | |
5664 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
5665 | /* BUS DEVICE RESET FUNCTION OCCURRED */ | |
5666 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; | |
5667 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; | |
5668 | break; | |
5669 | case TCM_INCORRECT_AMOUNT_OF_DATA: | |
5670 | /* CURRENT ERROR */ | |
5671 | buffer[offset] = 0x70; | |
5672 | /* ABORTED COMMAND */ | |
5673 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
5674 | /* WRITE ERROR */ | |
5675 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | |
5676 | /* NOT ENOUGH UNSOLICITED DATA */ | |
5677 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; | |
5678 | break; | |
5679 | case TCM_INVALID_CDB_FIELD: | |
5680 | /* CURRENT ERROR */ | |
5681 | buffer[offset] = 0x70; | |
5682 | /* ABORTED COMMAND */ | |
5683 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
5684 | /* INVALID FIELD IN CDB */ | |
5685 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | |
5686 | break; | |
5687 | case TCM_INVALID_PARAMETER_LIST: | |
5688 | /* CURRENT ERROR */ | |
5689 | buffer[offset] = 0x70; | |
5690 | /* ABORTED COMMAND */ | |
5691 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
5692 | /* INVALID FIELD IN PARAMETER LIST */ | |
5693 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; | |
5694 | break; | |
5695 | case TCM_UNEXPECTED_UNSOLICITED_DATA: | |
5696 | /* CURRENT ERROR */ | |
5697 | buffer[offset] = 0x70; | |
5698 | /* ABORTED COMMAND */ | |
5699 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
5700 | /* WRITE ERROR */ | |
5701 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | |
5702 | /* UNEXPECTED_UNSOLICITED_DATA */ | |
5703 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; | |
5704 | break; | |
5705 | case TCM_SERVICE_CRC_ERROR: | |
5706 | /* CURRENT ERROR */ | |
5707 | buffer[offset] = 0x70; | |
5708 | /* ABORTED COMMAND */ | |
5709 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
5710 | /* PROTOCOL SERVICE CRC ERROR */ | |
5711 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; | |
5712 | /* N/A */ | |
5713 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; | |
5714 | break; | |
5715 | case TCM_SNACK_REJECTED: | |
5716 | /* CURRENT ERROR */ | |
5717 | buffer[offset] = 0x70; | |
5718 | /* ABORTED COMMAND */ | |
5719 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
5720 | /* READ ERROR */ | |
5721 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; | |
5722 | /* FAILED RETRANSMISSION REQUEST */ | |
5723 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; | |
5724 | break; | |
5725 | case TCM_WRITE_PROTECTED: | |
5726 | /* CURRENT ERROR */ | |
5727 | buffer[offset] = 0x70; | |
5728 | /* DATA PROTECT */ | |
5729 | buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; | |
5730 | /* WRITE PROTECTED */ | |
5731 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; | |
5732 | break; | |
5733 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: | |
5734 | /* CURRENT ERROR */ | |
5735 | buffer[offset] = 0x70; | |
5736 | /* UNIT ATTENTION */ | |
5737 | buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; | |
5738 | core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); | |
5739 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | |
5740 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | |
5741 | break; | |
5742 | case TCM_CHECK_CONDITION_NOT_READY: | |
5743 | /* CURRENT ERROR */ | |
5744 | buffer[offset] = 0x70; | |
5745 | /* Not Ready */ | |
5746 | buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; | |
5747 | transport_get_sense_codes(cmd, &asc, &ascq); | |
5748 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | |
5749 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | |
5750 | break; | |
5751 | case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: | |
5752 | default: | |
5753 | /* CURRENT ERROR */ | |
5754 | buffer[offset] = 0x70; | |
5755 | /* ILLEGAL REQUEST */ | |
5756 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
5757 | /* LOGICAL UNIT COMMUNICATION FAILURE */ | |
5758 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; | |
5759 | break; | |
5760 | } | |
5761 | /* | |
5762 | * This code uses linux/include/scsi/scsi.h SAM status codes! | |
5763 | */ | |
5764 | cmd->scsi_status = SAM_STAT_CHECK_CONDITION; | |
5765 | /* | |
5766 | * Automatically padded, this value is encoded in the fabric's | |
5767 | * data_length response PDU containing the SCSI defined sense data. | |
5768 | */ | |
5769 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; | |
5770 | ||
5771 | after_reason: | |
5772 | CMD_TFO(cmd)->queue_status(cmd); | |
5773 | return 0; | |
5774 | } | |
5775 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); | |
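| /* |
| * Editorial example (standard SPC fixed-format sense bytes, not in the |
| * original source): for TCM_INVALID_CDB_FIELD the buffer written above |
| * decodes as: |
| * |
| *	buffer[offset]                      = 0x70  (current error) |
| *	buffer[offset+SPC_SENSE_KEY_OFFSET] = 0x0b  (ABORTED COMMAND) |
| *	buffer[offset+SPC_ASC_KEY_OFFSET]   = 0x24  (INVALID FIELD IN CDB) |
| * |
| * with cmd->scsi_sense_length covering TRANSPORT_SENSE_BUFFER plus the |
| * fabric-specific offset. |
| */ |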
5776 | ||
5777 | int transport_check_aborted_status(struct se_cmd *cmd, int send_status) | |
5778 | { | |
5779 | int ret = 0; | |
5780 | ||
5781 | if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) { | |
5782 | if (!(send_status) || | |
5783 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) | |
5784 | return 1; | |
5785 | #if 0 | |
5786 | printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" | |
5787 | " status for CDB: 0x%02x ITT: 0x%08x\n", | |
5788 | T_TASK(cmd)->t_task_cdb[0], | |
5789 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
5790 | #endif | |
5791 | cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; | |
5792 | CMD_TFO(cmd)->queue_status(cmd); | |
5793 | ret = 1; | |
5794 | } | |
5795 | return ret; | |
5796 | } | |
5797 | EXPORT_SYMBOL(transport_check_aborted_status); | |
5798 | ||
5799 | void transport_send_task_abort(struct se_cmd *cmd) | |
5800 | { | |
5801 | /* | |
5802 | * If there are still expected incoming fabric WRITEs, we wait | |
5803 | * until they have completed before sending a TASK_ABORTED |
5804 | * response. This response with TASK_ABORTED status will be | |
5805 | * queued back to fabric module by transport_check_aborted_status(). | |
5806 | */ | |
5807 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
5808 | if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) { | |
5809 | atomic_inc(&T_TASK(cmd)->t_transport_aborted); | |
5810 | smp_mb__after_atomic_inc(); | |
5811 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | |
5812 | transport_new_cmd_failure(cmd); | |
5813 | return; | |
5814 | } | |
5815 | } | |
5816 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | |
5817 | #if 0 | |
5818 | printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," | |
5819 | " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0], | |
5820 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
5821 | #endif | |
5822 | CMD_TFO(cmd)->queue_status(cmd); | |
5823 | } | |
5824 | ||
5825 | /* transport_generic_do_tmr(): |
5826 | * |
5827 | * Execute the requested task management function and queue the TMR response back to the fabric. |
5828 | */ |
5829 | int transport_generic_do_tmr(struct se_cmd *cmd) | |
5830 | { | |
5831 | struct se_cmd *ref_cmd; | |
5832 | struct se_device *dev = SE_DEV(cmd); | |
5833 | struct se_tmr_req *tmr = cmd->se_tmr_req; | |
5834 | int ret; | |
5835 | ||
5836 | switch (tmr->function) { | |
5837 | case ABORT_TASK: | |
5838 | ref_cmd = tmr->ref_cmd; | |
5839 | tmr->response = TMR_FUNCTION_REJECTED; | |
5840 | break; | |
5841 | case ABORT_TASK_SET: | |
5842 | case CLEAR_ACA: | |
5843 | case CLEAR_TASK_SET: | |
5844 | tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; | |
5845 | break; | |
5846 | case LUN_RESET: | |
5847 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); | |
5848 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : | |
5849 | TMR_FUNCTION_REJECTED; | |
5850 | break; | |
5851 | #if 0 | |
5852 | case TARGET_WARM_RESET: | |
5853 | transport_generic_host_reset(dev->se_hba); | |
5854 | tmr->response = TMR_FUNCTION_REJECTED; | |
5855 | break; | |
5856 | case TARGET_COLD_RESET: | |
5857 | transport_generic_host_reset(dev->se_hba); | |
5858 | transport_generic_cold_reset(dev->se_hba); | |
5859 | tmr->response = TMR_FUNCTION_REJECTED; | |
5860 | break; | |
5861 | #endif | |
5862 | default: | |
5863 | printk(KERN_ERR "Uknown TMR function: 0x%02x.\n", | |
5864 | tmr->function); | |
5865 | tmr->response = TMR_FUNCTION_REJECTED; | |
5866 | break; | |
5867 | } | |
5868 | ||
5869 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; | |
5870 | CMD_TFO(cmd)->queue_tm_rsp(cmd); | |
5871 | ||
5872 | transport_cmd_check_stop(cmd, 2, 0); | |
5873 | return 0; | |
5874 | } | |
5875 | ||
5876 | /* | |
5877 | * Called with spin_lock_irq(&dev->execute_task_lock) held. |
5878 | * | |
5879 | */ | |
5880 | static struct se_task * | |
5881 | transport_get_task_from_state_list(struct se_device *dev) | |
5882 | { | |
5883 | struct se_task *task; | |
5884 | ||
5885 | if (list_empty(&dev->state_task_list)) | |
5886 | return NULL; | |
5887 | ||
5888 | list_for_each_entry(task, &dev->state_task_list, t_state_list) | |
5889 | break; | |
5890 | ||
5891 | list_del(&task->t_state_list); | |
5892 | atomic_set(&task->task_state_active, 0); | |
5893 | ||
5894 | return task; | |
5895 | } | |
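| /* |
| * Editorial note (not in the original source): the |
| * list_for_each_entry()/break pair above simply takes the first entry; |
| * with the list already known to be non-empty it is equivalent to: |
| * |
| *	task = list_first_entry(&dev->state_task_list, |
| *			struct se_task, t_state_list); |
| * |
| * from <linux/list.h>. |
| */ |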
5896 | ||
5897 | static void transport_processing_shutdown(struct se_device *dev) | |
5898 | { | |
5899 | struct se_cmd *cmd; | |
5900 | struct se_queue_req *qr; | |
5901 | struct se_task *task; | |
5902 | u8 state; | |
5903 | unsigned long flags; | |
5904 | /* | |
5905 | * Empty the struct se_device's struct se_task state list. | |
5906 | */ | |
5907 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
5908 | while ((task = transport_get_task_from_state_list(dev))) { | |
5909 | if (!(TASK_CMD(task))) { | |
5910 | printk(KERN_ERR "TASK_CMD(task) is NULL!\n"); | |
5911 | continue; | |
5912 | } | |
5913 | cmd = TASK_CMD(task); | |
5914 | ||
5915 | if (!T_TASK(cmd)) { | |
5916 | printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:" | |
5917 | " %p ITT: 0x%08x\n", task, cmd, | |
5918 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
5919 | continue; | |
5920 | } | |
5921 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
5922 | ||
5923 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
5924 | ||
5925 | DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," | |
5926 | " i_state/def_i_state: %d/%d, t_state/def_t_state:" | |
5927 | " %d/%d cdb: 0x%02x\n", cmd, task, | |
5928 | CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn, | |
5929 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state, | |
5930 | cmd->t_state, cmd->deferred_t_state, | |
5931 | T_TASK(cmd)->t_task_cdb[0]); | |
5932 | DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" | |
5933 | " %d t_task_cdbs_sent: %d -- t_transport_active: %d" | |
5934 | " t_transport_stop: %d t_transport_sent: %d\n", | |
5935 | CMD_TFO(cmd)->get_task_tag(cmd), | |
5936 | T_TASK(cmd)->t_task_cdbs, | |
5937 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), | |
5938 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), | |
5939 | atomic_read(&T_TASK(cmd)->t_transport_active), | |
5940 | atomic_read(&T_TASK(cmd)->t_transport_stop), | |
5941 | atomic_read(&T_TASK(cmd)->t_transport_sent)); | |
5942 | ||
5943 | if (atomic_read(&task->task_active)) { | |
5944 | atomic_set(&task->task_stop, 1); | |
5945 | spin_unlock_irqrestore( | |
5946 | &T_TASK(cmd)->t_state_lock, flags); | |
5947 | ||
5948 | DEBUG_DO("Waiting for task: %p to shutdown for dev:" | |
5949 | " %p\n", task, dev); | |
5950 | wait_for_completion(&task->task_stop_comp); | |
5951 | DEBUG_DO("Completed task: %p shutdown for dev: %p\n", | |
5952 | task, dev); | |
5953 | ||
5954 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
5955 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); | |
5956 | ||
5957 | atomic_set(&task->task_active, 0); | |
5958 | atomic_set(&task->task_stop, 0); | |
52208ae3 NB |
5959 | } else { |
5960 | if (atomic_read(&task->task_execute_queue) != 0) | |
5961 | transport_remove_task_from_execute_queue(task, dev); | |
c66ac9db NB |
5962 | } |
5963 | __transport_stop_task_timer(task, &flags); | |
5964 | ||
5965 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) { | |
5966 | spin_unlock_irqrestore( | |
5967 | &T_TASK(cmd)->t_state_lock, flags); | |
5968 | ||
5969 | DEBUG_DO("Skipping task: %p, dev: %p for" | |
5970 | " t_task_cdbs_ex_left: %d\n", task, dev, | |
5971 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left)); | |
5972 | ||
5973 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
5974 | continue; | |
5975 | } | |
5976 | ||
5977 | if (atomic_read(&T_TASK(cmd)->t_transport_active)) { | |
5978 | DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" | |
5979 | " %p\n", task, dev); | |
5980 | ||
5981 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | |
5982 | spin_unlock_irqrestore( | |
5983 | &T_TASK(cmd)->t_state_lock, flags); | |
5984 | transport_send_check_condition_and_sense( | |
5985 | cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, | |
5986 | 0); | |
5987 | transport_remove_cmd_from_queue(cmd, | |
5988 | SE_DEV(cmd)->dev_queue_obj); | |
5989 | ||
5990 | transport_lun_remove_cmd(cmd); | |
5991 | transport_cmd_check_stop(cmd, 1, 0); | |
5992 | } else { | |
5993 | spin_unlock_irqrestore( | |
5994 | &T_TASK(cmd)->t_state_lock, flags); | |
5995 | ||
5996 | transport_remove_cmd_from_queue(cmd, | |
5997 | SE_DEV(cmd)->dev_queue_obj); | |
5998 | ||
5999 | transport_lun_remove_cmd(cmd); | |
6000 | ||
6001 | if (transport_cmd_check_stop(cmd, 1, 0)) | |
6002 | transport_generic_remove(cmd, 0, 0); | |
6003 | } | |
6004 | ||
6005 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
6006 | continue; | |
6007 | } | |
6008 | DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", | |
6009 | task, dev); | |
6010 | ||
6011 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | |
6012 | spin_unlock_irqrestore( | |
6013 | &T_TASK(cmd)->t_state_lock, flags); | |
6014 | transport_send_check_condition_and_sense(cmd, | |
6015 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | |
6016 | transport_remove_cmd_from_queue(cmd, | |
6017 | SE_DEV(cmd)->dev_queue_obj); | |
6018 | ||
6019 | transport_lun_remove_cmd(cmd); | |
6020 | transport_cmd_check_stop(cmd, 1, 0); | |
6021 | } else { | |
6022 | spin_unlock_irqrestore( | |
6023 | &T_TASK(cmd)->t_state_lock, flags); | |
6024 | ||
6025 | transport_remove_cmd_from_queue(cmd, | |
6026 | SE_DEV(cmd)->dev_queue_obj); | |
6027 | transport_lun_remove_cmd(cmd); | |
6028 | ||
6029 | if (transport_cmd_check_stop(cmd, 1, 0)) | |
6030 | transport_generic_remove(cmd, 0, 0); | |
6031 | } | |
6032 | ||
6033 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
6034 | } | |
6035 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
6036 | /* | |
6037 | * Empty the struct se_device's struct se_cmd list. | |
6038 | */ | |
6039 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | |
6040 | while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) { | |
6041 | spin_unlock_irqrestore( | |
6042 | &dev->dev_queue_obj->cmd_queue_lock, flags); | |
6043 | cmd = (struct se_cmd *)qr->cmd; | |
6044 | state = qr->state; | |
6045 | kfree(qr); | |
6046 | ||
6047 | DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", | |
6048 | cmd, state); | |
6049 | ||
6050 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | |
6051 | transport_send_check_condition_and_sense(cmd, | |
6052 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | |
6053 | ||
6054 | transport_lun_remove_cmd(cmd); | |
6055 | transport_cmd_check_stop(cmd, 1, 0); | |
6056 | } else { | |
6057 | transport_lun_remove_cmd(cmd); | |
6058 | if (transport_cmd_check_stop(cmd, 1, 0)) | |
6059 | transport_generic_remove(cmd, 0, 0); | |
6060 | } | |
6061 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | |
6062 | } | |
6063 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); | |
6064 | } | |
6065 | ||
6066 | /* transport_processing_thread(): |
6067 | * |
6068 | * Per struct se_device processing thread: dequeue struct se_cmd and dispatch on t_state. |
6069 | */ |
6070 | static int transport_processing_thread(void *param) | |
6071 | { | |
6072 | int ret, t_state; | |
6073 | struct se_cmd *cmd; | |
6074 | struct se_device *dev = (struct se_device *) param; | |
6075 | struct se_queue_req *qr; | |
6076 | ||
6077 | set_user_nice(current, -20); | |
6078 | ||
6079 | while (!kthread_should_stop()) { | |
6080 | ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq, | |
6081 | atomic_read(&dev->dev_queue_obj->queue_cnt) || | |
6082 | kthread_should_stop()); | |
6083 | if (ret < 0) | |
6084 | goto out; | |
6085 | ||
6086 | spin_lock_irq(&dev->dev_status_lock); | |
6087 | if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) { | |
6088 | spin_unlock_irq(&dev->dev_status_lock); | |
6089 | transport_processing_shutdown(dev); | |
6090 | continue; | |
6091 | } | |
6092 | spin_unlock_irq(&dev->dev_status_lock); | |
6093 | ||
6094 | get_cmd: | |
6095 | __transport_execute_tasks(dev); | |
6096 | ||
6097 | qr = transport_get_qr_from_queue(dev->dev_queue_obj); | |
6098 | if (!(qr)) | |
6099 | continue; | |
6100 | ||
6101 | cmd = (struct se_cmd *)qr->cmd; | |
6102 | t_state = qr->state; | |
6103 | kfree(qr); | |
6104 | ||
6105 | switch (t_state) { | |
6106 | case TRANSPORT_NEW_CMD_MAP: | |
6107 | if (!(CMD_TFO(cmd)->new_cmd_map)) { | |
6108 | printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is" | |
6109 | " NULL for TRANSPORT_NEW_CMD_MAP\n"); | |
6110 | BUG(); | |
6111 | } | |
6112 | ret = CMD_TFO(cmd)->new_cmd_map(cmd); | |
6113 | if (ret < 0) { | |
6114 | cmd->transport_error_status = ret; | |
6115 | transport_generic_request_failure(cmd, NULL, | |
6116 | 0, (cmd->data_direction != | |
6117 | DMA_TO_DEVICE)); | |
6118 | break; | |
6119 | } | |
6120 | /* Fall through */ | |
6121 | case TRANSPORT_NEW_CMD: | |
6122 | ret = transport_generic_new_cmd(cmd); | |
6123 | if (ret < 0) { | |
6124 | cmd->transport_error_status = ret; | |
6125 | transport_generic_request_failure(cmd, NULL, | |
6126 | 0, (cmd->data_direction != | |
6127 | DMA_TO_DEVICE)); | |
6128 | } | |
6129 | break; | |
6130 | case TRANSPORT_PROCESS_WRITE: | |
6131 | transport_generic_process_write(cmd); | |
6132 | break; | |
6133 | case TRANSPORT_COMPLETE_OK: | |
6134 | transport_stop_all_task_timers(cmd); | |
6135 | transport_generic_complete_ok(cmd); | |
6136 | break; | |
6137 | case TRANSPORT_REMOVE: | |
6138 | transport_generic_remove(cmd, 1, 0); | |
6139 | break; | |
6140 | case TRANSPORT_PROCESS_TMR: | |
6141 | transport_generic_do_tmr(cmd); | |
6142 | break; | |
6143 | case TRANSPORT_COMPLETE_FAILURE: | |
6144 | transport_generic_request_failure(cmd, NULL, 1, 1); | |
6145 | break; | |
6146 | case TRANSPORT_COMPLETE_TIMEOUT: | |
6147 | transport_stop_all_task_timers(cmd); | |
6148 | transport_generic_request_timeout(cmd); | |
6149 | break; | |
6150 | default: | |
6151 | printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" | |
6152 | " %d for ITT: 0x%08x i_state: %d on SE LUN:" | |
6153 | " %u\n", t_state, cmd->deferred_t_state, | |
6154 | CMD_TFO(cmd)->get_task_tag(cmd), | |
6155 | CMD_TFO(cmd)->get_cmd_state(cmd), | |
6156 | SE_LUN(cmd)->unpacked_lun); | |
6157 | BUG(); | |
6158 | } | |
6159 | ||
6160 | goto get_cmd; | |
6161 | } | |
6162 | ||
6163 | out: | |
6164 | transport_release_all_cmds(dev); | |
6165 | dev->process_thread = NULL; | |
6166 | return 0; | |
6167 | } |