/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>	/* for usleep_range() in sbp_run_transaction() */
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <asm/unaligned.h>

#include "sbp_target.h"

/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start	= CSR_REGISTER_BASE + 0x10000,
	.end	= 0x1000000000000ULL,
};

static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};

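/*
 * Interval at which the per-session maintenance work polls for bus resets
 * and expired reconnect holds (once per second).
 */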
#define SESSION_MAINTENANCE_INTERVAL HZ

static atomic_t login_id = ATOMIC_INIT(0);

static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);

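/*
 * Read the peer's EUI-64 GUID out of its Configuration ROM: quadlets 3 and 4
 * of the bus information block hold the high and low halves of the GUID.
 */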
static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}

static struct sbp_session *sbp_session_find_by_guid(
		struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, u32 unpacked_lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->login_lun == unpacked_lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}

static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		u32 unpacked_lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_lun != unpacked_lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}

static struct sbp_login_descriptor *sbp_login_find_by_id(
		struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	rcu_read_lock();
	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
		if (se_lun->unpacked_lun == login_lun) {
			rcu_read_unlock();
			*err = 0;
			return login_lun;
		}
	}
	rcu_read_unlock();

	*err = -ENODEV;
	return login_lun;
}

static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];
	struct se_node_acl *se_nacl;

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		pr_err("failed to allocate session descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");

		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
	if (!se_nacl) {
		pr_warn("Node ACL not found for %s\n", guid_str);

		transport_free_session(sess->se_sess);
		kfree(sess);

		return ERR_PTR(-EPERM);
	}

	sess->se_sess->se_node_acl = se_nacl;

	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);

	sess->guid = guid;

	transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);

	return sess;
}

static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}

static void sbp_target_agent_unregister(struct sbp_target_agent *);

static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}

static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *);

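/*
 * Handle a LOGIN management ORB: validate the requested LUN, read the
 * initiator's GUID, apply the exclusive-login and max-logins-per-LUN
 * checks, create (or reuse) a session, register a command block agent for
 * the new login, and write the login response block back to the initiator.
 */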
static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	u64 guid;
	u32 unpacked_lun;
	int login_response_len, ret;

	unpacked_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
	if (ret) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, unpacked_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
	    sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->login_lun = unpacked_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}

static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->login_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

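/*
 * Detect a bus reset: if the card has gone away or its generation has
 * changed, invalidate the session's node ID and start the reconnect_hold
 * countdown during which the initiator may issue a RECONNECT.
 */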
static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}

static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}

static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}

static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	int state;

	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = agent->state;
		spin_unlock_bh(&agent->lock);

		*(__be32 *)data = cpu_to_be32(state);

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		/* ignored */
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
				agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
				agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
		int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

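/*
 * Command block agent register map, as dispatched below (offsets relative
 * to the start of the handler's address block):
 *   0x00  AGENT_STATE                (quadlet)
 *   0x04  AGENT_RESET                (quadlet)
 *   0x08  ORB_POINTER                (two quadlets)
 *   0x10  DOORBELL                   (quadlet)
 *   0x14  UNSOLICITED_STATUS_ENABLE  (quadlet)
 */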
static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *data, size_t length,
		void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);

static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0: /* Format specified by this standard */
		sbp_handle_command(req);
		return;
	case 1: /* Reserved for future standardization */
	case 2: /* Vendor-dependent */
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
		sbp_free_request(req);
		return;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
		sbp_free_request(req);
		return;
	default:
		BUG();
	}
}

/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
	bool active;

	spin_lock_bh(&agent->lock);
	active = (agent->state == AGENT_STATE_ACTIVE);
	spin_unlock_bh(&agent->lock);

	return active;
}

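/*
 * Walk the linked list of ORBs starting at the agent's ORB_POINTER, reading
 * each ORB from initiator memory and queueing per-request work to process
 * it. After a doorbell ring the first ORB is not reprocessed; only its
 * next_ORB field is followed. The agent suspends once next_ORB is marked
 * absent, and goes dead on allocation or fetch failure.
 */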
static void tgt_agent_fetch_work(struct work_struct *work)
{
	struct sbp_target_agent *agent =
		container_of(work, struct sbp_target_agent, work);
	struct sbp_session *sess = agent->login->sess;
	struct sbp_target_request *req;
	int ret;
	bool doorbell = agent->doorbell;
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
			return;
		}

		req->login = agent->login;
		req->orb_pointer = next_orb;

		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
					req->orb_pointer >> 32));
		req->status.orb_low = cpu_to_be32(
				req->orb_pointer & 0xfffffffc);

		/* read in the ORB */
		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
				sess->node_id, sess->generation, sess->speed,
				req->orb_pointer, &req->orb, sizeof(req->orb));
		if (ret != RCODE_COMPLETE) {
			pr_debug("tgt_orb fetch failed: %x\n", ret);
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED) |
					STATUS_BLOCK_RESP(
						STATUS_RESP_TRANSPORT_FAILURE) |
					STATUS_BLOCK_DEAD(1) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_UNSPECIFIED_ERROR));
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
			sbp_free_request(req);
			return;
		}

		/* check the next_ORB field */
		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
			next_orb = 0;
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED));
		} else {
			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_CONTINUING));
		}

		if (tgt_agent_check_active(agent) && !doorbell) {
			INIT_WORK(&req->work, tgt_agent_process_work);
			queue_work(system_unbound_wq, &req->work);
		} else {
			/* don't process this request, just check next_ORB */
			sbp_free_request(req);
		}

		spin_lock_bh(&agent->lock);
		doorbell = agent->doorbell = false;

		/* check if we should carry on processing */
		if (next_orb)
			agent->orb_pointer = next_orb;
		else
			agent->state = AGENT_STATE_SUSPENDED;

		spin_unlock_bh(&agent->lock);
	}
}

static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *login)
{
	struct sbp_target_agent *agent;
	int ret;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);

	agent->handler.length = 0x20;
	agent->handler.address_callback = tgt_agent_rw;
	agent->handler.callback_data = agent;

	agent->login = login;
	agent->state = AGENT_STATE_RESET;
	INIT_WORK(&agent->work, tgt_agent_fetch_work);
	agent->orb_pointer = 0;
	agent->doorbell = false;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

/*
 * Simple wrapper around fw_run_transaction that retries the transaction
 * several times in case of failure, with an increasing delay between
 * attempts.
 */
static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
		int generation, int speed, unsigned long long offset,
		void *payload, size_t length)
{
	int attempt, ret, delay;

	for (attempt = 1; attempt <= 5; attempt++) {
		ret = fw_run_transaction(card, tcode, destination_id,
				generation, speed, offset, payload, length);

		switch (ret) {
		case RCODE_COMPLETE:
		case RCODE_TYPE_ERROR:
		case RCODE_ADDRESS_ERROR:
		case RCODE_GENERATION:
			return ret;

		default:
			delay = 5 * attempt * attempt;
			usleep_range(delay, delay * 2);
		}
	}

	return ret;
}

/*
 * Wrapper around sbp_run_transaction that gets the card, destination,
 * generation and speed out of the request's session.
 */
static int sbp_run_request_transaction(struct sbp_target_request *req,
		int tcode, unsigned long long offset, void *payload,
		size_t length)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	struct fw_card *card;
	int node_id, generation, speed, ret;

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	speed = sess->speed;
	spin_unlock_bh(&sess->lock);

	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
			offset, payload, length);

	fw_card_put(card);

	return ret;
}

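/*
 * Copy the CDB out of the ORB's embedded command block; if the SCSI command
 * is longer than the embedded block, read the remainder from initiator
 * memory immediately following the ORB.
 */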
static int sbp_fetch_command(struct sbp_target_request *req)
{
	int ret, cmd_len, copy_len;

	cmd_len = scsi_command_size(req->orb.command_block);

	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
	if (!req->cmd_buf)
		return -ENOMEM;

	memcpy(req->cmd_buf, req->orb.command_block,
		min_t(int, cmd_len, sizeof(req->orb.command_block)));

	if (cmd_len > sizeof(req->orb.command_block)) {
		pr_debug("sbp_fetch_command: filling in long command\n");
		copy_len = cmd_len - sizeof(req->orb.command_block);

		ret = sbp_run_request_transaction(req,
				TCODE_READ_BLOCK_REQUEST,
				req->orb_pointer + sizeof(req->orb),
				req->cmd_buf + sizeof(req->orb.command_block),
				copy_len);
		if (ret != RCODE_COMPLETE)
			return -EIO;
	}

	return 0;
}

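/*
 * If the ORB's page-table-present bit is set, data_descriptor points at a
 * table of segment entries rather than a single buffer; fetch that table so
 * the data transfer can walk the scattered segments. In that case the ORB's
 * data_size field holds the number of table entries.
 */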
static int sbp_fetch_page_table(struct sbp_target_request *req)
{
	int pg_tbl_sz, ret;
	struct sbp_page_table_entry *pg_tbl;

	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
		return 0;

	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
		sizeof(struct sbp_page_table_entry);

	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
	if (!pg_tbl)
		return -ENOMEM;

	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			pg_tbl, pg_tbl_sz);
	if (ret != RCODE_COMPLETE) {
		kfree(pg_tbl);
		return -EIO;
	}

	req->pg_tbl = pg_tbl;
	return 0;
}

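/*
 * Derive the transfer length and DMA direction from the ORB: with a page
 * table present the total length is the sum of the segment lengths,
 * otherwise data_size is itself the byte count.
 */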
static void sbp_calc_data_length_direction(struct sbp_target_request *req,
	u32 *data_len, enum dma_data_direction *data_dir)
{
	int data_size, direction, idx;

	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));

	if (!data_size) {
		*data_len = 0;
		*data_dir = DMA_NONE;
		return;
	}

	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->pg_tbl) {
		*data_len = 0;
		for (idx = 0; idx < data_size; idx++) {
			*data_len += be16_to_cpu(
					req->pg_tbl[idx].segment_length);
		}
	} else {
		*data_len = data_size;
	}
}

static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		goto err;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		goto err;
	}

	unpacked_lun = req->login->login_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	/* only used for printk until we do TMRs */
	req->se_cmd.tag = req->orb_pointer;
	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			req->sense_buf, unpacked_lun, data_length,
			TCM_SIMPLE_TAG, data_dir, 0))
		goto err;

	return;

err:
	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(1) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
	sbp_free_request(req);
}

/*
 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
 */
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_rw_data: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}

static int sbp_send_status(struct sbp_target_request *req)
{
	int ret, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

	ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
	if (ret != RCODE_COMPLETE) {
		pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
		return -EIO;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);

	return 0;
}

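/*
 * Repack fixed-format SCSI sense data into the compact layout an SBP-2
 * status block carries: one status byte followed by 15 bytes of squeezed
 * sense fields (16 bytes of status data in total).
 */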
static void sbp_sense_mangle(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	u8 *sense = req->sense_buf;
	u8 *status = req->status.data;

	WARN_ON(se_cmd->scsi_sense_length < 18);

	switch (sense[0] & 0x7f) {		/* sfmt */
	case 0x70: /* current, fixed */
		status[0] = 0 << 6;
		break;
	case 0x71: /* deferred, fixed */
		status[0] = 1 << 6;
		break;
	case 0x72: /* current, descriptor */
	case 0x73: /* deferred, descriptor */
	default:
		/*
		 * TODO: SBP-3 specifies what we should do with descriptor
		 * format sense data
		 */
		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
			sense[0]);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
		return;
	}

	status[0] |= se_cmd->scsi_status & 0x3f;	/* status */
	status[1] =
		(sense[0] & 0x80) |		/* valid */
		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
		(sense[2] & 0x0f);		/* sense_key */
	status[2] = se_cmd->scsi_asc;		/* sense_code */
	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */

	/* information */
	status[4] = sense[3];
	status[5] = sense[4];
	status[6] = sense[5];
	status[7] = sense[6];

	/* CDB-dependent */
	status[8] = sense[8];
	status[9] = sense[9];
	status[10] = sense[10];
	status[11] = sense[11];

	/* fru */
	status[12] = sense[14];

	/* sense_key-dependent */
	status[13] = sense[15];
	status[14] = sense[16];
	status[15] = sense[17];

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(5) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static int sbp_send_sense(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;

	if (se_cmd->scsi_sense_length) {
		sbp_sense_mangle(req);
	} else {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
	}

	return sbp_send_status(req);
}

static void sbp_free_request(struct sbp_target_request *req)
{
	kfree(req->pg_tbl);
	kfree(req->cmd_buf);
	kfree(req);
}

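/*
 * Deferred work for the management agent: fetch the management ORB from
 * the initiator, dispatch on its function (LOGIN, RECONNECT, LOGOUT, ...),
 * then write the resulting status block to the ORB's status FIFO address.
 */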
static void sbp_mgt_agent_process(struct work_struct *work)
{
	struct sbp_management_agent *agent =
		container_of(work, struct sbp_management_agent, work);
	struct sbp_management_request *req = agent->request;
	int ret;
	int status_data_len = 0;

	/* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb status write failed: %x\n", ret);
		goto out;
	}

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}

static void sbp_mgt_agent_rw(struct fw_card *card,
	struct fw_request *request, int tcode, int destination, int source,
	int generation, unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_management_agent *agent = callback_data;
	struct sbp2_pointer *ptr = data;
	int rcode = RCODE_ADDRESS_ERROR;

	if (!agent->tport->enable)
		goto out;

	if ((offset != agent->handler.offset) || (length != 8))
		goto out;

	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
		struct sbp_management_request *req;
		int prev_state;

		spin_lock_bh(&agent->lock);
		prev_state = agent->state;
		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
		spin_unlock_bh(&agent->lock);

		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
			pr_notice("ignoring management request while busy\n");
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req->card = fw_card_get(card);
		req->generation = generation;
		req->node_addr = source;
		req->speed = fw_get_request_speed(request);

		agent->orb_offset = sbp2_pointer_to_addr(ptr);
		agent->request = req;

		queue_work(system_unbound_wq, &agent->work);
		rcode = RCODE_COMPLETE;
	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
		addr_to_sbp2_pointer(agent->orb_offset, ptr);
		rcode = RCODE_COMPLETE;
	} else {
		rcode = RCODE_TYPE_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static struct sbp_management_agent *sbp_management_agent_register(
		struct sbp_tport *tport)
{
	int ret;
	struct sbp_management_agent *agent;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);
	agent->tport = tport;
	agent->handler.length = 0x08;
	agent->handler.address_callback = sbp_mgt_agent_rw;
	agent->handler.callback_data = agent;
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	INIT_WORK(&agent->work, sbp_mgt_agent_process);
	agent->orb_offset = 0;
	agent->request = NULL;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *sbp_get_fabric_name(void)
{
	return "sbp";
}

static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}

static int sbp_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void sbp_close_session(struct se_session *se_sess)
{
	return;
}

static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}

static int sbp_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}

/*
 * Called after command (no data transfer) or after the write (to device)
 * operation is completed
 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return sbp_send_sense(req);
}

static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static void sbp_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	transport_generic_free_cmd(&req->se_cmd, 0);
	return 1;
}

static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	struct se_lun *lun;
	int count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
		count++;
	rcu_read_unlock();

	return count;
}

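/*
 * (Re)build the SBP-2 unit directory published in the local Configuration
 * ROM: the static template plus the management agent's CSR offset, the unit
 * characteristics (mgt_ORB_timeout and ORB fetch size), the reconnect
 * timeout, one logical_unit_number entry per LUN, and a trailing unit
 * unique ID leaf holding the target's GUID.
 */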
static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	struct se_lun *lun;
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 * MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
		struct se_device *dev;
		int type;
		/*
		 * rcu_dereference_raw protected by se_lun->lun_group symlink
		 * reference to se_device->dev_group.
		 */
		dev = rcu_dereference_raw(lun->lun_se_dev);
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(lun->unpacked_lun & 0xffff);
	}
	rcu_read_unlock();

	/* unit unique ID leaf */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}

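/*
 * Parse a WWN given as exactly 16 hex digits (an EUI-64), with an optional
 * trailing newline; returns the number of characters consumed, or -1 on a
 * malformed name.
 */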
1936 static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
1937 {
1938 const char *cp;
1939 char c, nibble;
1940 int pos = 0, err;
1941
1942 *wwn = 0;
1943 for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
1944 c = *cp;
1945 if (c == '\n' && cp[1] == '\0')
1946 continue;
1947 if (c == '\0') {
1948 err = 2;
1949 if (pos != 16)
1950 goto fail;
1951 return cp - name;
1952 }
1953 err = 3;
1954 if (isdigit(c))
1955 nibble = c - '0';
1956 else if (isxdigit(c))
1957 nibble = tolower(c) - 'a' + 10;
1958 else
1959 goto fail;
1960 *wwn = (*wwn << 4) | nibble;
1961 pos++;
1962 }
1963 err = 4;
1964 fail:
1965 printk(KERN_INFO "err %u len %zu pos %u\n",
1966 err, cp - name, pos);
1967 return -1;
1968 }
1969
1970 static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
1971 {
1972 return snprintf(buf, len, "%016llx", wwn);
1973 }
1974
static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return -EINVAL;
	return 0;
}

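/*
 * LUN link/unlink callbacks: any change to the set of LUNs must be
 * reflected in the unit directory published in the config ROM, since
 * each LUN gets its own logical_unit_number entry there. Removing the
 * last LUN also disables the target.
 */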
static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}

static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}

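/*
 * Create a TPG named "tpgt_<n>". Only one TPG per unit is possible,
 * since a target port publishes a single SBP-2 unit directory.
 * A minimal configfs sketch, assuming the standard target configfs
 * layout and a placeholder GUID created via sbp_make_tport() below:
 *
 *   mkdir /sys/kernel/config/target/sbp/00112233445566aa/tpgt_1
 */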
static struct se_portal_group *sbp_make_tpg(
		struct se_wwn *wwn,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	struct sbp_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	if (tport->tpg) {
		pr_err("Only one TPG per Unit is possible.\n");
		return ERR_PTR(-EBUSY);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct sbp_tpg\n");
		return ERR_PTR(-ENOMEM);
	}

	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;
	tport->tpg = tpg;

	/* default attribute values */
	tport->enable = 0;
	tport->directory_id = -1;
	tport->mgt_orb_timeout = 15;
	tport->max_reconnect_timeout = 5;
	tport->max_logins_per_lun = 1;

	tport->mgt_agt = sbp_management_agent_register(tport);
	if (IS_ERR(tport->mgt_agt)) {
		ret = PTR_ERR(tport->mgt_agt);
		goto out_free_tpg;
	}

	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
	if (ret < 0)
		goto out_unreg_mgt_agt;

	return &tpg->se_tpg;

out_unreg_mgt_agt:
	sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
	tport->tpg = NULL;
	kfree(tpg);
	return ERR_PTR(ret);
}

static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	core_tpg_deregister(se_tpg);
	sbp_management_agent_unregister(tport->mgt_agt);
	tport->tpg = NULL;
	kfree(tpg);
}

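/*
 * Create a target port. The WWN is the target's EUI-64 GUID given as
 * 16 hex digits; it is re-formatted into tport_name for display.
 */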
static struct se_wwn *sbp_make_tport(
		struct target_fabric_configfs *tf,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport;
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct sbp_tport\n");
		return ERR_PTR(-ENOMEM);
	}

	tport->guid = guid;
	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);

	return &tport->tport_wwn;
}

static void sbp_drop_tport(struct se_wwn *wwn)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	kfree(tport);
}

static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}

CONFIGFS_ATTR_RO(sbp_wwn_, version);

static struct configfs_attribute *sbp_wwn_attrs[] = {
	&sbp_wwn_attr_version,
	NULL,
};

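/*
 * TPG base attributes: directory_id is either a 24-bit hex value to
 * publish as a directory_id entry, or "implicit" (the default) to omit
 * the entry from the unit directory; it may only be changed while the
 * target is disabled. enable publishes or withdraws the unit directory.
 */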
static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	if (tport->directory_id == -1)
		return sprintf(page, "implicit\n");
	else
		return sprintf(page, "%06x\n", tport->directory_id);
}

static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (tport->enable) {
		pr_err("Cannot change the directory_id on an active target.\n");
		return -EBUSY;
	}

	if (strstr(page, "implicit") == page) {
		tport->directory_id = -1;
	} else {
		if (kstrtoul(page, 16, &val) < 0)
			return -EINVAL;
		if (val > 0xffffff)
			return -EINVAL;

		tport->directory_id = val;
	}

	return count;
}

static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->enable);
}

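/*
 * Enabling requires at least one LUN; disabling is refused while any
 * session remains open. Either way the config ROM is rebuilt so that
 * initiators (re-)discover the unit directory. A usage sketch, assuming
 * the standard target configfs layout and a placeholder GUID/TPG:
 *
 *   echo 1 > /sys/kernel/config/target/sbp/<guid>/tpgt_1/enable
 */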
static ssize_t sbp_tpg_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val != 0) && (val != 1))
		return -EINVAL;

	if (tport->enable == val)
		return count;

	if (val) {
		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
			pr_err("Cannot enable a target with no LUNs!\n");
			return -EINVAL;
		}
	} else {
		/* XXX: force-shutdown sessions instead? */
		spin_lock_bh(&se_tpg->session_lock);
		if (!list_empty(&se_tpg->tpg_sess_list)) {
			spin_unlock_bh(&se_tpg->session_lock);
			return -EBUSY;
		}
		spin_unlock_bh(&se_tpg->session_lock);
	}

	tport->enable = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0) {
		pr_err("Could not update Config ROM\n");
		return ret;
	}

	return count;
}

CONFIGFS_ATTR(sbp_tpg_, directory_id);
CONFIGFS_ATTR(sbp_tpg_, enable);

static struct configfs_attribute *sbp_tpg_base_attrs[] = {
	&sbp_tpg_attr_directory_id,
	&sbp_tpg_attr_enable,
	NULL,
};

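/*
 * TPG attrib attributes: mgt_orb_timeout (1-127 seconds, doubled when
 * encoded into unit_characteristics, which presumably counts in 500 ms
 * units), max_reconnect_timeout (1-32767 seconds, advertised via the
 * reconnect_timeout entry) and max_logins_per_lun (1-127, consulted
 * when login requests are processed rather than enforced here).
 */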
static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}

static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	if (tport->mgt_orb_timeout == val)
		return count;

	tport->mgt_orb_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 32767))
		return -EINVAL;

	if (tport->max_reconnect_timeout == val)
		return count;

	tport->max_reconnect_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_logins_per_lun);
}

static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	/* XXX: also check against current count? */

	tport->max_logins_per_lun = val;

	return count;
}

CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);

static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
	&sbp_tpg_attrib_attr_mgt_orb_timeout,
	&sbp_tpg_attrib_attr_max_reconnect_timeout,
	&sbp_tpg_attrib_attr_max_logins_per_lun,
	NULL,
};

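/*
 * Fabric template tying the SBP-specific callbacks into the target
 * core. Demo mode is always allowed (any initiator may log in) and
 * write protection is never applied.
 */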
static const struct target_core_fabric_ops sbp_ops = {
	.module				= THIS_MODULE,
	.name				= "sbp",
	.get_fabric_name		= sbp_get_fabric_name,
	.tpg_get_wwn			= sbp_get_fabric_wwn,
	.tpg_get_tag			= sbp_get_tag,
	.tpg_check_demo_mode		= sbp_check_true,
	.tpg_check_demo_mode_cache	= sbp_check_true,
	.tpg_check_demo_mode_write_protect = sbp_check_false,
	.tpg_check_prod_mode_write_protect = sbp_check_false,
	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
	.release_cmd			= sbp_release_cmd,
	.shutdown_session		= sbp_shutdown_session,
	.close_session			= sbp_close_session,
	.sess_get_index			= sbp_sess_get_index,
	.write_pending			= sbp_write_pending,
	.write_pending_status		= sbp_write_pending_status,
	.set_default_node_attributes	= sbp_set_default_node_attrs,
	.get_cmd_state			= sbp_get_cmd_state,
	.queue_data_in			= sbp_queue_data_in,
	.queue_status			= sbp_queue_status,
	.queue_tm_rsp			= sbp_queue_tm_rsp,
	.aborted_task			= sbp_aborted_task,
	.check_stop_free		= sbp_check_stop_free,

	.fabric_make_wwn		= sbp_make_tport,
	.fabric_drop_wwn		= sbp_drop_tport,
	.fabric_make_tpg		= sbp_make_tpg,
	.fabric_drop_tpg		= sbp_drop_tpg,
	.fabric_post_link		= sbp_post_link_lun,
	.fabric_pre_unlink		= sbp_pre_unlink_lun,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_init_nodeacl		= sbp_init_nodeacl,

	.tfc_wwn_attrs			= sbp_wwn_attrs,
	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
};

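/*
 * Module init/exit: registering the fabric template is what makes the
 * "sbp" fabric appear under the target configfs root.
 */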
static int __init sbp_init(void)
{
	return target_register_template(&sbp_ops);
}

static void __exit sbp_exit(void)
{
	target_unregister_template(&sbp_ops);
}

MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");
module_init(sbp_init);
module_exit(sbp_exit);