[SCSI] libfc: change interface for rport_create
[deliverable/linux.git] / drivers / scsi / libfc / fc_disc.c
1 /*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20 /*
21 * Target Discovery
22 *
23 * This block discovers all FC-4 remote ports, including FCP initiators. It
24 * also handles RSCN events and re-discovery if necessary.
25 */
26
27 /*
28 * DISC LOCKING
29 *
30 * The disc mutex can be locked when acquiring rport locks, but may not
31 * be held when acquiring the lport lock. Refer to fc_lport.c for more
32 * details.
33 */
34
35 #include <linux/timer.h>
36 #include <linux/err.h>
37 #include <asm/unaligned.h>
38
39 #include <scsi/fc/fc_gs.h>
40
41 #include <scsi/libfc.h>
42
43 #define FC_DISC_RETRY_LIMIT 3 /* max retries */
44 #define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
45
46 #define FC_DISC_DELAY 3
47
48 static void fc_disc_gpn_ft_req(struct fc_disc *);
49 static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
50 static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
51 struct fc_rport_identifiers *);
52 static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
53 static void fc_disc_done(struct fc_disc *);
54 static void fc_disc_timeout(struct work_struct *);
55 static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
56 static void fc_disc_restart(struct fc_disc *);
57
58 /**
59 * fc_disc_lookup_rport() - lookup a remote port by port_id
60 * @lport: Fibre Channel host port instance
61 * @port_id: remote port port_id to match
62 */
63 struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
64 u32 port_id)
65 {
66 const struct fc_disc *disc = &lport->disc;
67 struct fc_rport *rport, *found = NULL;
68 struct fc_rport_priv *rdata;
69 int disc_found = 0;
70
71 list_for_each_entry(rdata, &disc->rports, peers) {
72 rport = PRIV_TO_RPORT(rdata);
73 if (rport->port_id == port_id) {
74 disc_found = 1;
75 found = rport;
76 break;
77 }
78 }
79
80 if (!disc_found)
81 found = NULL;
82
83 return found;
84 }
85
86 /**
87 * fc_disc_stop_rports() - delete all the remote ports associated with the lport
88 * @disc: The discovery job to stop rports on
89 *
90 * Locking Note: This function expects that the lport mutex is locked before
91 * calling it.
92 */
93 void fc_disc_stop_rports(struct fc_disc *disc)
94 {
95 struct fc_lport *lport;
96 struct fc_rport *rport;
97 struct fc_rport_priv *rdata, *next;
98
99 lport = disc->lport;
100
101 mutex_lock(&disc->disc_mutex);
102 list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
103 rport = PRIV_TO_RPORT(rdata);
104 list_del(&rdata->peers);
105 lport->tt.rport_logoff(rport);
106 }
107
108 list_for_each_entry_safe(rdata, next, &disc->rogue_rports, peers) {
109 rport = PRIV_TO_RPORT(rdata);
110 lport->tt.rport_logoff(rport);
111 }
112
113 mutex_unlock(&disc->disc_mutex);
114 }
115
116 /**
117 * fc_disc_rport_callback() - Event handler for rport events
118 * @lport: The lport which is receiving the event
119 * @rport: The rport which the event has occured on
120 * @event: The event that occured
121 *
122 * Locking Note: The rport lock should not be held when calling
123 * this function.
124 */
125 static void fc_disc_rport_callback(struct fc_lport *lport,
126 struct fc_rport *rport,
127 enum fc_rport_event event)
128 {
129 struct fc_rport_priv *rdata = rport->dd_data;
130 struct fc_disc *disc = &lport->disc;
131
132 FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event,
133 rport->port_id);
134
135 switch (event) {
136 case RPORT_EV_CREATED:
137 if (disc) {
138 mutex_lock(&disc->disc_mutex);
139 list_add_tail(&rdata->peers, &disc->rports);
140 mutex_unlock(&disc->disc_mutex);
141 }
142 break;
143 case RPORT_EV_LOGO:
144 case RPORT_EV_FAILED:
145 case RPORT_EV_STOP:
146 mutex_lock(&disc->disc_mutex);
147 mutex_lock(&rdata->rp_mutex);
148 if (rdata->trans_state == FC_PORTSTATE_ROGUE)
149 list_del(&rdata->peers);
150 mutex_unlock(&rdata->rp_mutex);
151 mutex_unlock(&disc->disc_mutex);
152 break;
153 default:
154 break;
155 }
156
157 }
158
/**
 * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
 * @sp: Current sequence of the RSCN exchange
 * @fp: RSCN Frame
 * @disc: FC discovery context
 *
 * Validates the RSCN payload, queues a per-port re-discovery for each
 * port-addressed page and falls back to a full rediscovery if any page
 * uses area/domain/fabric addressing (or on allocation failure).  The
 * frame is consumed (freed) on all paths.
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
				  struct fc_disc *disc)
{
	struct fc_lport *lport;
	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
	struct fc_els_rscn *rp;
	struct fc_els_rscn_page *pp;
	struct fc_seq_els_data rjt_data;
	unsigned int len;
	int redisc = 0;		/* set when a full rediscovery is needed */
	enum fc_els_rscn_ev_qual ev_qual;
	enum fc_els_rscn_addr_fmt fmt;
	LIST_HEAD(disc_ports);	/* ports to re-probe individually */
	struct fc_disc_port *dp, *next;

	lport = disc->lport;

	FC_DISC_DBG(disc, "Received an RSCN event\n");

	/* make sure the frame contains an RSCN message */
	rp = fc_frame_payload_get(fp, sizeof(*rp));
	if (!rp)
		goto reject;
	/* make sure the page length is as expected (4 bytes) */
	if (rp->rscn_page_len != sizeof(*pp))
		goto reject;
	/* get the RSCN payload length */
	len = ntohs(rp->rscn_plen);
	if (len < sizeof(*rp))
		goto reject;
	/* make sure the frame contains the expected payload */
	rp = fc_frame_payload_get(fp, len);
	if (!rp)
		goto reject;
	/* payload must be a multiple of the RSCN page size */
	len -= sizeof(*rp);
	if (len % sizeof(*pp))
		goto reject;

	for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
		ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
		ev_qual &= ELS_RSCN_EV_QUAL_MASK;
		fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
		fmt &= ELS_RSCN_ADDR_FMT_MASK;
		/*
		 * if we get an address format other than port
		 * (area, domain, fabric), then do a full discovery
		 */
		switch (fmt) {
		case ELS_ADDR_FMT_PORT:
			FC_DISC_DBG(disc, "Port address format for port "
				    "(%6x)\n", ntoh24(pp->rscn_fid));
			dp = kzalloc(sizeof(*dp), GFP_KERNEL);
			if (!dp) {
				/* allocation failed: rediscover everything */
				redisc = 1;
				break;
			}
			dp->lp = lport;
			dp->ids.port_id = ntoh24(pp->rscn_fid);
			/* WWNs unknown until the port is probed */
			dp->ids.port_name = -1;
			dp->ids.node_name = -1;
			dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
			list_add_tail(&dp->peers, &disc_ports);
			break;
		case ELS_ADDR_FMT_AREA:
		case ELS_ADDR_FMT_DOM:
		case ELS_ADDR_FMT_FAB:
		default:
			FC_DISC_DBG(disc, "Address format is (%d)\n", fmt);
			redisc = 1;
			break;
		}
	}
	/* accept the RSCN before acting on it */
	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
	if (redisc) {
		FC_DISC_DBG(disc, "RSCN received: rediscovering\n");
		fc_disc_restart(disc);
	} else {
		FC_DISC_DBG(disc, "RSCN received: not rediscovering. "
			    "redisc %d state %d in_prog %d\n",
			    redisc, lport->state, disc->pending);
		/* re-probe each affected port individually */
		list_for_each_entry_safe(dp, next, &disc_ports, peers) {
			list_del(&dp->peers);
			rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
			if (rport) {
				/* drop the stale rport before re-probing */
				rdata = rport->dd_data;
				list_del(&rdata->peers);
				lport->tt.rport_logoff(rport);
			}
			fc_disc_single(disc, dp);
		}
	}
	fc_frame_free(fp);
	return;
reject:
	FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
	rjt_data.fp = NULL;
	rjt_data.reason = ELS_RJT_LOGIC;
	rjt_data.explan = ELS_EXPL_NONE;
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(fp);
}
271
272 /**
273 * fc_disc_recv_req() - Handle incoming requests
274 * @sp: Current sequence of the request exchange
275 * @fp: The frame
276 * @lport: The FC local port
277 *
278 * Locking Note: This function is called from the EM and will lock
279 * the disc_mutex before calling the handler for the
280 * request.
281 */
282 static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
283 struct fc_lport *lport)
284 {
285 u8 op;
286 struct fc_disc *disc = &lport->disc;
287
288 op = fc_frame_payload_op(fp);
289 switch (op) {
290 case ELS_RSCN:
291 mutex_lock(&disc->disc_mutex);
292 fc_disc_recv_rscn_req(sp, fp, disc);
293 mutex_unlock(&disc->disc_mutex);
294 break;
295 default:
296 FC_DISC_DBG(disc, "Received an unsupported request, "
297 "the opcode is (%x)\n", op);
298 break;
299 }
300 }
301
302 /**
303 * fc_disc_restart() - Restart discovery
304 * @lport: FC discovery context
305 *
306 * Locking Note: This function expects that the disc mutex
307 * is already locked.
308 */
309 static void fc_disc_restart(struct fc_disc *disc)
310 {
311 struct fc_rport *rport;
312 struct fc_rport_priv *rdata, *next;
313 struct fc_lport *lport = disc->lport;
314
315 FC_DISC_DBG(disc, "Restarting discovery\n");
316
317 list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
318 rport = PRIV_TO_RPORT(rdata);
319 list_del(&rdata->peers);
320 lport->tt.rport_logoff(rport);
321 }
322
323 disc->requested = 1;
324 if (!disc->pending)
325 fc_disc_gpn_ft_req(disc);
326 }
327
328 /**
329 * fc_disc_start() - Fibre Channel Target discovery
330 * @lport: FC local port
331 *
332 * Returns non-zero if discovery cannot be started.
333 */
334 static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
335 enum fc_disc_event),
336 struct fc_lport *lport)
337 {
338 struct fc_rport *rport;
339 struct fc_rport_identifiers ids;
340 struct fc_disc *disc = &lport->disc;
341
342 /*
343 * At this point we may have a new disc job or an existing
344 * one. Either way, let's lock when we make changes to it
345 * and send the GPN_FT request.
346 */
347 mutex_lock(&disc->disc_mutex);
348
349 disc->disc_callback = disc_callback;
350
351 /*
352 * If not ready, or already running discovery, just set request flag.
353 */
354 disc->requested = 1;
355
356 if (disc->pending) {
357 mutex_unlock(&disc->disc_mutex);
358 return;
359 }
360
361 /*
362 * Handle point-to-point mode as a simple discovery
363 * of the remote port. Yucky, yucky, yuck, yuck!
364 */
365 rport = disc->lport->ptp_rp;
366 if (rport) {
367 ids.port_id = rport->port_id;
368 ids.port_name = rport->port_name;
369 ids.node_name = rport->node_name;
370 ids.roles = FC_RPORT_ROLE_UNKNOWN;
371 get_device(&rport->dev);
372
373 if (!fc_disc_new_target(disc, rport, &ids)) {
374 disc->event = DISC_EV_SUCCESS;
375 fc_disc_done(disc);
376 }
377 put_device(&rport->dev);
378 } else {
379 fc_disc_gpn_ft_req(disc); /* get ports by FC-4 type */
380 }
381
382 mutex_unlock(&disc->disc_mutex);
383 }
384
/* Event callbacks installed on every rport created by discovery. */
static struct fc_rport_operations fc_disc_rport_ops = {
	.event_callback = fc_disc_rport_callback,
};
388
/**
 * fc_disc_new_target() - Handle new target found by discovery
 * @disc: FC discovery context
 * @rport: The previous FC remote port (NULL if new remote port)
 * @ids: Identifiers for the new FC remote port
 *
 * Reconciles a newly reported port with any existing rport that shares
 * its FC_ID, then (re-)creates and logs in the rport unless the
 * identifiers refer to the local port itself.
 *
 * Returns 0 on success or -ENOMEM if an rport could not be created.
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static int fc_disc_new_target(struct fc_disc *disc,
			      struct fc_rport *rport,
			      struct fc_rport_identifiers *ids)
{
	struct fc_lport *lport = disc->lport;
	struct fc_rport_priv *rdata;
	int error = 0;

	if (rport && ids->port_name) {
		if (rport->port_name == -1) {
			/*
			 * Set WWN and fall through to notify of create.
			 */
			fc_rport_set_name(rport, ids->port_name,
					  rport->node_name);
		} else if (rport->port_name != ids->port_name) {
			/*
			 * This is a new port with the same FCID as
			 * a previously-discovered port. Presumably the old
			 * port logged out and a new port logged in and was
			 * assigned the same FCID. This should be rare.
			 * Delete the old one and fall thru to re-create.
			 */
			fc_disc_del_target(disc, rport);
			rport = NULL;
		}
	}
	/*
	 * Create and log in the rport only when at least one identifier
	 * is known and the ids do not describe the local port itself.
	 */
	if (((ids->port_name != -1) || (ids->port_id != -1)) &&
	    ids->port_id != fc_host_port_id(lport->host) &&
	    ids->port_name != lport->wwpn) {
		if (!rport) {
			/* reuse an existing rport for this FC_ID if any */
			rport = lport->tt.rport_lookup(lport, ids->port_id);
			if (!rport) {
				rport = lport->tt.rport_create(lport, ids);
			}
			if (!rport)
				error = -ENOMEM;
		}
		if (rport) {
			/* track as rogue until login completes */
			rdata = rport->dd_data;
			rdata->ops = &fc_disc_rport_ops;
			rdata->rp_state = RPORT_ST_INIT;
			list_add_tail(&rdata->peers, &disc->rogue_rports);
			lport->tt.rport_login(rport);
		}
	}
	return error;
}
446
447 /**
448 * fc_disc_del_target() - Delete a target
449 * @disc: FC discovery context
450 * @rport: The remote port to be removed
451 */
452 static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
453 {
454 struct fc_lport *lport = disc->lport;
455 struct fc_rport_priv *rdata = rport->dd_data;
456 list_del(&rdata->peers);
457 lport->tt.rport_logoff(rport);
458 }
459
/**
 * fc_disc_done() - Discovery has been completed
 * @disc: FC discovery context
 *
 * Either starts another GPN_FT pass (if a restart was requested while
 * this one ran) or clears the pending flag, then reports the completion
 * event through the discovery callback.
 *
 * Locking Note: This function expects that the disc mutex is locked before
 * it is called. The discovery callback is then made with the lock released,
 * and the lock is re-taken before returning from this function
 */
static void fc_disc_done(struct fc_disc *disc)
{
	struct fc_lport *lport = disc->lport;
	enum fc_disc_event event;

	FC_DISC_DBG(disc, "Discovery complete\n");

	/* latch the event before the mutex is dropped below */
	event = disc->event;
	disc->event = DISC_EV_NONE;

	if (disc->requested)
		fc_disc_gpn_ft_req(disc);	/* restart was requested */
	else
		disc->pending = 0;

	/*
	 * Drop disc_mutex across the callback: the callback may take the
	 * lport lock, which must not be acquired while disc_mutex is held
	 * (see DISC LOCKING at the top of this file).
	 */
	mutex_unlock(&disc->disc_mutex);
	disc->disc_callback(lport, event);
	mutex_lock(&disc->disc_mutex);
}
486
487 /**
488 * fc_disc_error() - Handle error on dNS request
489 * @disc: FC discovery context
490 * @fp: The frame pointer
491 */
492 static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
493 {
494 struct fc_lport *lport = disc->lport;
495 unsigned long delay = 0;
496
497 FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
498 PTR_ERR(fp), disc->retry_count,
499 FC_DISC_RETRY_LIMIT);
500
501 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
502 /*
503 * Memory allocation failure, or the exchange timed out,
504 * retry after delay.
505 */
506 if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
507 /* go ahead and retry */
508 if (!fp)
509 delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
510 else {
511 delay = msecs_to_jiffies(lport->e_d_tov);
512
513 /* timeout faster first time */
514 if (!disc->retry_count)
515 delay /= 4;
516 }
517 disc->retry_count++;
518 schedule_delayed_work(&disc->disc_work, delay);
519 } else {
520 /* exceeded retries */
521 disc->event = DISC_EV_FAILED;
522 fc_disc_done(disc);
523 }
524 }
525 }
526
527 /**
528 * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
529 * @lport: FC discovery context
530 *
531 * Locking Note: This function expects that the disc_mutex is locked
532 * before it is called.
533 */
534 static void fc_disc_gpn_ft_req(struct fc_disc *disc)
535 {
536 struct fc_frame *fp;
537 struct fc_lport *lport = disc->lport;
538
539 WARN_ON(!fc_lport_test_ready(lport));
540
541 disc->pending = 1;
542 disc->requested = 0;
543
544 disc->buf_len = 0;
545 disc->seq_count = 0;
546 fp = fc_frame_alloc(lport,
547 sizeof(struct fc_ct_hdr) +
548 sizeof(struct fc_ns_gid_ft));
549 if (!fp)
550 goto err;
551
552 if (lport->tt.elsct_send(lport, NULL, fp,
553 FC_NS_GPN_FT,
554 fc_disc_gpn_ft_resp,
555 disc, lport->e_d_tov))
556 return;
557 err:
558 fc_disc_error(disc, fp);
559 }
560
/**
 * fc_disc_gpn_ft_parse() - Parse the list of IDs and names resulting from a request
 * @disc: FC discovery context
 * @buf: GPN_FT response buffer
 * @len: size of response buffer
 *
 * Creates and logs in a rogue rport for every name-server entry that
 * does not describe the local port.  A record that straddles a frame
 * boundary is stashed in disc->partial_buf and completed on the next
 * call.  Seeing the FC_NS_FID_LAST flag completes the discovery pass.
 *
 * Returns 0 (the error local is never set non-zero in this version).
 */
static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
{
	struct fc_lport *lport;
	struct fc_gpn_ft_resp *np;
	char *bp;
	size_t plen;
	size_t tlen;
	int error = 0;
	struct fc_rport_identifiers ids;
	struct fc_rport *rport;
	struct fc_rport_priv *rdata;

	lport = disc->lport;

	/*
	 * Handle partial name record left over from previous call.
	 */
	bp = buf;
	plen = len;
	np = (struct fc_gpn_ft_resp *)bp;
	tlen = disc->buf_len;
	if (tlen) {
		WARN_ON(tlen >= sizeof(*np));
		plen = sizeof(*np) - tlen;	/* bytes still missing */
		WARN_ON(plen <= 0);
		WARN_ON(plen >= sizeof(*np));
		if (plen > len)
			plen = len;
		np = &disc->partial_buf;
		/* complete the stashed record with bytes from this buffer */
		memcpy((char *)np + tlen, bp, plen);

		/*
		 * Set bp so that the loop below will advance it to the
		 * first valid full name element.
		 */
		bp -= tlen;
		len += tlen;
		plen += tlen;
		disc->buf_len = (unsigned char) plen;
		if (plen == sizeof(*np))
			disc->buf_len = 0;
	}

	/*
	 * Handle full name records, including the one filled from above.
	 * Normally, np == bp and plen == len, but from the partial case above,
	 * bp, len describe the overall buffer, and np, plen describe the
	 * partial buffer, which if would usually be full now.
	 * After the first time through the loop, things return to "normal".
	 */
	while (plen >= sizeof(*np)) {
		ids.port_id = ntoh24(np->fp_fid);
		ids.port_name = ntohll(np->fp_wwpn);
		ids.node_name = -1;	/* unknown until the port is probed */
		ids.roles = FC_RPORT_ROLE_UNKNOWN;

		/* skip the entry that describes the local port itself */
		if (ids.port_id != fc_host_port_id(lport->host) &&
		    ids.port_name != lport->wwpn) {
			rport = lport->tt.rport_create(lport, &ids);
			if (rport) {
				/* track as rogue until login completes */
				rdata = rport->dd_data;
				rdata->ops = &fc_disc_rport_ops;
				rdata->local_port = lport;
				list_add_tail(&rdata->peers,
					      &disc->rogue_rports);
				lport->tt.rport_login(rport);
			} else
				printk(KERN_WARNING "libfc: Failed to allocate "
				       "memory for the newly discovered port "
				       "(%6x)\n", ids.port_id);
		}

		if (np->fp_flags & FC_NS_FID_LAST) {
			/* final record: the discovery pass is complete */
			disc->event = DISC_EV_SUCCESS;
			fc_disc_done(disc);
			len = 0;
			break;
		}
		len -= sizeof(*np);
		bp += sizeof(*np);
		np = (struct fc_gpn_ft_resp *)bp;
		plen = len;
	}

	/*
	 * Save any partial record at the end of the buffer for next time.
	 */
	if (error == 0 && len > 0 && len < sizeof(*np)) {
		if (np != &disc->partial_buf) {
			FC_DISC_DBG(disc, "Partial buffer remains "
				    "for discovery\n");
			memcpy(&disc->partial_buf, np, len);
		}
		disc->buf_len = (unsigned char) len;
	} else {
		disc->buf_len = 0;
	}
	return error;
}
666
667 /**
668 * fc_disc_timeout() - Retry handler for the disc component
669 * @work: Structure holding disc obj that needs retry discovery
670 *
671 * Handle retry of memory allocation for remote ports.
672 */
673 static void fc_disc_timeout(struct work_struct *work)
674 {
675 struct fc_disc *disc = container_of(work,
676 struct fc_disc,
677 disc_work.work);
678 mutex_lock(&disc->disc_mutex);
679 if (disc->requested && !disc->pending)
680 fc_disc_gpn_ft_req(disc);
681 mutex_unlock(&disc->disc_mutex);
682 }
683
/**
 * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT)
 * @sp: Current sequence of GPN_FT exchange
 * @fp: response frame (or ERR_PTR on exchange failure)
 * @disc_arg: FC discovery context
 *
 * The response may span multiple frames: the first (SOF_I3, seq_cnt 0)
 * carries the CT header; continuation frames (SOF_N3) carry only name
 * records and must arrive in sequence-count order.
 *
 * Locking Note: This function is called without disc mutex held, and
 *		 should do all its processing with the mutex held
 */
static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *disc_arg)
{
	struct fc_disc *disc = disc_arg;
	struct fc_ct_hdr *cp;
	struct fc_frame_header *fh;
	unsigned int seq_cnt;
	void *buf = NULL;	/* points at name records when parseable */
	unsigned int len;
	int error;

	mutex_lock(&disc->disc_mutex);
	FC_DISC_DBG(disc, "Received a GPN_FT response\n");

	/* an ERR_PTR frame signals an exchange error or timeout */
	if (IS_ERR(fp)) {
		fc_disc_error(disc, fp);
		mutex_unlock(&disc->disc_mutex);
		return;
	}

	WARN_ON(!fc_frame_is_linear(fp));	/* buffer must be contiguous */
	fh = fc_frame_header_get(fp);
	len = fr_len(fp) - sizeof(*fh);
	seq_cnt = ntohs(fh->fh_seq_cnt);
	if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
	    disc->seq_count == 0) {
		/* first frame: validate the CT header before parsing */
		cp = fc_frame_payload_get(fp, sizeof(*cp));
		if (!cp) {
			FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
				    fr_len(fp));
		} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {

			/* Accepted, parse the response. */
			buf = cp + 1;
			len -= sizeof(*cp);
		} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
			FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
				    "(check zoning)\n", cp->ct_reason,
				    cp->ct_explan);
			disc->event = DISC_EV_FAILED;
			fc_disc_done(disc);
		} else {
			FC_DISC_DBG(disc, "GPN_FT unexpected response code "
				    "%x\n", ntohs(cp->ct_cmd));
		}
	} else if (fr_sof(fp) == FC_SOF_N3 &&
		   seq_cnt == disc->seq_count) {
		/* continuation frame: payload is all name records */
		buf = fh + 1;
	} else {
		FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
			    "seq_cnt %x expected %x sof %x eof %x\n",
			    seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
	}
	if (buf) {
		error = fc_disc_gpn_ft_parse(disc, buf, len);
		if (error)
			fc_disc_error(disc, fp);
		else
			disc->seq_count++;	/* expect the next frame */
	}
	fc_frame_free(fp);

	mutex_unlock(&disc->disc_mutex);
}
757
758 /**
759 * fc_disc_single() - Discover the directory information for a single target
760 * @lport: FC local port
761 * @dp: The port to rediscover
762 *
763 * Locking Note: This function expects that the disc_mutex is locked
764 * before it is called.
765 */
766 static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
767 {
768 struct fc_lport *lport;
769 struct fc_rport *new_rport;
770 struct fc_rport_priv *rdata;
771
772 lport = disc->lport;
773
774 if (dp->ids.port_id == fc_host_port_id(lport->host))
775 goto out;
776
777 new_rport = lport->tt.rport_create(lport, &dp->ids);
778 if (new_rport) {
779 rdata = new_rport->dd_data;
780 rdata->ops = &fc_disc_rport_ops;
781 kfree(dp);
782 list_add_tail(&rdata->peers, &disc->rogue_rports);
783 lport->tt.rport_login(new_rport);
784 }
785 return;
786 out:
787 kfree(dp);
788 }
789
790 /**
791 * fc_disc_stop() - Stop discovery for a given lport
792 * @lport: The lport that discovery should stop for
793 */
794 void fc_disc_stop(struct fc_lport *lport)
795 {
796 struct fc_disc *disc = &lport->disc;
797
798 if (disc) {
799 cancel_delayed_work_sync(&disc->disc_work);
800 fc_disc_stop_rports(disc);
801 }
802 }
803
804 /**
805 * fc_disc_stop_final() - Stop discovery for a given lport
806 * @lport: The lport that discovery should stop for
807 *
808 * This function will block until discovery has been
809 * completely stopped and all rports have been deleted.
810 */
811 void fc_disc_stop_final(struct fc_lport *lport)
812 {
813 fc_disc_stop(lport);
814 lport->tt.rport_flush_queue();
815 }
816
817 /**
818 * fc_disc_init() - Initialize the discovery block
819 * @lport: FC local port
820 */
821 int fc_disc_init(struct fc_lport *lport)
822 {
823 struct fc_disc *disc;
824
825 if (!lport->tt.disc_start)
826 lport->tt.disc_start = fc_disc_start;
827
828 if (!lport->tt.disc_stop)
829 lport->tt.disc_stop = fc_disc_stop;
830
831 if (!lport->tt.disc_stop_final)
832 lport->tt.disc_stop_final = fc_disc_stop_final;
833
834 if (!lport->tt.disc_recv_req)
835 lport->tt.disc_recv_req = fc_disc_recv_req;
836
837 if (!lport->tt.rport_lookup)
838 lport->tt.rport_lookup = fc_disc_lookup_rport;
839
840 disc = &lport->disc;
841 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
842 mutex_init(&disc->disc_mutex);
843 INIT_LIST_HEAD(&disc->rports);
844 INIT_LIST_HEAD(&disc->rogue_rports);
845
846 disc->lport = lport;
847 disc->delay = FC_DISC_DELAY;
848 disc->event = DISC_EV_NONE;
849
850 return 0;
851 }
852 EXPORT_SYMBOL(fc_disc_init);
This page took 0.049279 seconds and 5 git commands to generate.