/*
 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * Target Discovery
 *
 * This block discovers all FC-4 remote ports, including FCP initiators. It
 * also handles RSCN events and re-discovery if necessary.
 */

/*
 * DISC LOCKING
 *
 * The disc mutex can be locked when acquiring rport locks, but may not
 * be held when acquiring the lport lock. Refer to fc_lport.c for more
 * details.
 */
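
/*
 * Concretely, the nesting used in this file is disc_mutex -> rp_mutex
 * (see fc_disc_rport_callback()), and fc_disc_done() drops disc_mutex
 * around the discovery callback before re-taking it.
 */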

#include <linux/timer.h>
#include <linux/err.h>
#include <asm/unaligned.h>

#include <scsi/fc/fc_gs.h>

#include <scsi/libfc.h>

#define FC_DISC_RETRY_LIMIT     3       /* max retries */
#define FC_DISC_RETRY_DELAY     500UL   /* (msecs) delay */

#define FC_DISC_DELAY           3

static int fc_disc_debug;

#define FC_DEBUG_DISC(fmt...)                   \
        do {                                    \
                if (fc_disc_debug)              \
                        FC_DBG(fmt);            \
        } while (0)
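
/*
 * FC_DEBUG_DISC() only emits output through FC_DBG() when the static
 * fc_disc_debug flag above is non-zero; it is used throughout this file,
 * e.g. FC_DEBUG_DISC("Received a bad RSCN frame\n").
 */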

static void fc_disc_gpn_ft_req(struct fc_disc *);
static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
                              struct fc_rport_identifiers *);
static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
static void fc_disc_done(struct fc_disc *);
static void fc_disc_timeout(struct work_struct *);
static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
static void fc_disc_restart(struct fc_disc *);

/**
 * fc_disc_lookup_rport() - lookup a remote port by port_id
 * @lport: Fibre Channel host port instance
 * @port_id: remote port port_id to match
 */
struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
                                      u32 port_id)
{
        const struct fc_disc *disc = &lport->disc;
        struct fc_rport *rport, *found = NULL;
        struct fc_rport_libfc_priv *rdata;
        int disc_found = 0;

        list_for_each_entry(rdata, &disc->rports, peers) {
                rport = PRIV_TO_RPORT(rdata);
                if (rport->port_id == port_id) {
                        disc_found = 1;
                        found = rport;
                        break;
                }
        }

        if (!disc_found)
                found = NULL;

        return found;
}

/**
 * fc_disc_stop_rports() - delete all the remote ports associated with the lport
 * @disc: The discovery job to stop rports on
 *
 * Locking Note: This function expects that the lport mutex is locked before
 * calling it.
 */
void fc_disc_stop_rports(struct fc_disc *disc)
{
        struct fc_lport *lport;
        struct fc_rport *rport;
        struct fc_rport_libfc_priv *rdata, *next;

        lport = disc->lport;

        mutex_lock(&disc->disc_mutex);
        list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
                rport = PRIV_TO_RPORT(rdata);
                list_del(&rdata->peers);
                lport->tt.rport_logoff(rport);
        }

        list_for_each_entry_safe(rdata, next, &disc->rogue_rports, peers) {
                rport = PRIV_TO_RPORT(rdata);
                lport->tt.rport_logoff(rport);
        }

        mutex_unlock(&disc->disc_mutex);
}
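
/*
 * Remote ports created by discovery start out on disc->rogue_rports while
 * they log in. fc_disc_rport_callback() adds an rport to disc->rports on
 * RPORT_EV_CREATED, and on LOGO/FAILED/STOP unlinks it from its list if it
 * is still in the ROGUE transport state.
 */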

/**
 * fc_disc_rport_callback() - Event handler for rport events
 * @lport: The lport which is receiving the event
 * @rport: The rport which the event has occurred on
 * @event: The event that occurred
 *
 * Locking Note: The rport lock should not be held when calling
 *               this function.
 */
static void fc_disc_rport_callback(struct fc_lport *lport,
                                   struct fc_rport *rport,
                                   enum fc_rport_event event)
{
        struct fc_rport_libfc_priv *rdata = rport->dd_data;
        struct fc_disc *disc = &lport->disc;

        FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event,
                      rport->port_id);

        switch (event) {
        case RPORT_EV_CREATED:
                if (disc) {
                        mutex_lock(&disc->disc_mutex);
                        list_add_tail(&rdata->peers, &disc->rports);
                        mutex_unlock(&disc->disc_mutex);
                }
                break;
        case RPORT_EV_LOGO:
        case RPORT_EV_FAILED:
        case RPORT_EV_STOP:
                mutex_lock(&disc->disc_mutex);
                mutex_lock(&rdata->rp_mutex);
                if (rdata->trans_state == FC_PORTSTATE_ROGUE)
                        list_del(&rdata->peers);
                mutex_unlock(&rdata->rp_mutex);
                mutex_unlock(&disc->disc_mutex);
                break;
        default:
                break;
        }
}

/**
 * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
 * @sp: Current sequence of the RSCN exchange
 * @fp: RSCN Frame
 * @disc: FC discovery context
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *               before it is called.
 */
static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
                                  struct fc_disc *disc)
{
        struct fc_lport *lport;
        struct fc_rport *rport;
        struct fc_rport_libfc_priv *rdata;
        struct fc_els_rscn *rp;
        struct fc_els_rscn_page *pp;
        struct fc_seq_els_data rjt_data;
        unsigned int len;
        int redisc = 0;
        enum fc_els_rscn_ev_qual ev_qual;
        enum fc_els_rscn_addr_fmt fmt;
        LIST_HEAD(disc_ports);
        struct fc_disc_port *dp, *next;

        lport = disc->lport;

        FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n",
                      fc_host_port_id(lport->host));

        /* make sure the frame contains an RSCN message */
        rp = fc_frame_payload_get(fp, sizeof(*rp));
        if (!rp)
                goto reject;
        /* make sure the page length is as expected (4 bytes) */
        if (rp->rscn_page_len != sizeof(*pp))
                goto reject;
        /* get the RSCN payload length */
        len = ntohs(rp->rscn_plen);
        if (len < sizeof(*rp))
                goto reject;
        /* make sure the frame contains the expected payload */
        rp = fc_frame_payload_get(fp, len);
        if (!rp)
                goto reject;
        /* payload must be a multiple of the RSCN page size */
        len -= sizeof(*rp);
        if (len % sizeof(*pp))
                goto reject;
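
        /*
         * The payload is the fc_els_rscn header followed by an array of
         * fc_els_rscn_page entries; each page carries an affected address
         * in rscn_fid plus an event qualifier and address format packed
         * into rscn_page_flags, which the loop below unpacks.
         */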
        for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
                ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
                ev_qual &= ELS_RSCN_EV_QUAL_MASK;
                fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
                fmt &= ELS_RSCN_ADDR_FMT_MASK;
                /*
                 * if we get an address format other than port
                 * (area, domain, fabric), then do a full discovery
                 */
                switch (fmt) {
                case ELS_ADDR_FMT_PORT:
                        FC_DEBUG_DISC("Port address format for port (%6x)\n",
                                      ntoh24(pp->rscn_fid));
                        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
                        if (!dp) {
                                redisc = 1;
                                break;
                        }
                        dp->lp = lport;
                        dp->ids.port_id = ntoh24(pp->rscn_fid);
                        dp->ids.port_name = -1;
                        dp->ids.node_name = -1;
                        dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
                        list_add_tail(&dp->peers, &disc_ports);
                        break;
                case ELS_ADDR_FMT_AREA:
                case ELS_ADDR_FMT_DOM:
                case ELS_ADDR_FMT_FAB:
                default:
                        FC_DEBUG_DISC("Address format is (%d)\n", fmt);
                        redisc = 1;
                        break;
                }
        }
        lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
        if (redisc) {
                FC_DEBUG_DISC("RSCN received: rediscovering\n");
                fc_disc_restart(disc);
        } else {
                FC_DEBUG_DISC("RSCN received: not rediscovering. "
                              "redisc %d state %d in_prog %d\n",
                              redisc, lport->state, disc->pending);
                list_for_each_entry_safe(dp, next, &disc_ports, peers) {
                        list_del(&dp->peers);
                        rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
                        if (rport) {
                                rdata = rport->dd_data;
                                list_del(&rdata->peers);
                                lport->tt.rport_logoff(rport);
                        }
                        fc_disc_single(disc, dp);
                }
        }
        fc_frame_free(fp);
        return;
reject:
        FC_DEBUG_DISC("Received a bad RSCN frame\n");
        rjt_data.fp = NULL;
        rjt_data.reason = ELS_RJT_LOGIC;
        rjt_data.explan = ELS_EXPL_NONE;
        lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
        fc_frame_free(fp);
}

/**
 * fc_disc_recv_req() - Handle incoming requests
 * @sp: Current sequence of the request exchange
 * @fp: The frame
 * @lport: The FC local port
 *
 * Locking Note: This function is called from the EM and will lock
 *               the disc_mutex before calling the handler for the
 *               request.
 */
static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
                             struct fc_lport *lport)
{
        u8 op;
        struct fc_disc *disc = &lport->disc;

        op = fc_frame_payload_op(fp);
        switch (op) {
        case ELS_RSCN:
                mutex_lock(&disc->disc_mutex);
                fc_disc_recv_rscn_req(sp, fp, disc);
                mutex_unlock(&disc->disc_mutex);
                break;
        default:
                FC_DBG("Received an unsupported request. opcode (%x)\n", op);
                break;
        }
}

/**
 * fc_disc_restart() - Restart discovery
 * @disc: FC discovery context
 *
 * Locking Note: This function expects that the disc mutex
 *               is already locked.
 */
static void fc_disc_restart(struct fc_disc *disc)
{
        struct fc_rport *rport;
        struct fc_rport_libfc_priv *rdata, *next;
        struct fc_lport *lport = disc->lport;

        FC_DEBUG_DISC("Restarting discovery for port (%6x)\n",
                      fc_host_port_id(lport->host));

        list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
                rport = PRIV_TO_RPORT(rdata);
                FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id);
                list_del(&rdata->peers);
                lport->tt.rport_logoff(rport);
        }

        disc->requested = 1;
        if (!disc->pending)
                fc_disc_gpn_ft_req(disc);
}

/**
 * fc_disc_start() - Start Fibre Channel target discovery
 * @disc_callback: Callback routine invoked when discovery completes
 * @lport: FC local port
 */
static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
                                                enum fc_disc_event),
                          struct fc_lport *lport)
{
        struct fc_rport *rport;
        struct fc_rport_identifiers ids;
        struct fc_disc *disc = &lport->disc;

        /*
         * At this point we may have a new disc job or an existing
         * one. Either way, let's lock when we make changes to it
         * and send the GPN_FT request.
         */
        mutex_lock(&disc->disc_mutex);

        disc->disc_callback = disc_callback;

        /*
         * If not ready, or already running discovery, just set request flag.
         */
        disc->requested = 1;

        if (disc->pending) {
                mutex_unlock(&disc->disc_mutex);
                return;
        }

        /*
         * Handle point-to-point mode as a simple discovery
         * of the remote port. Yucky, yucky, yuck, yuck!
         */
        rport = disc->lport->ptp_rp;
        if (rport) {
                ids.port_id = rport->port_id;
                ids.port_name = rport->port_name;
                ids.node_name = rport->node_name;
                ids.roles = FC_RPORT_ROLE_UNKNOWN;
                get_device(&rport->dev);

                if (!fc_disc_new_target(disc, rport, &ids)) {
                        disc->event = DISC_EV_SUCCESS;
                        fc_disc_done(disc);
                }
                put_device(&rport->dev);
        } else {
                fc_disc_gpn_ft_req(disc);       /* get ports by FC-4 type */
        }

        mutex_unlock(&disc->disc_mutex);
}
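
/*
 * Every rport created by discovery is given these ops, so rport login
 * results (created/failed/logo/stop) come back to this file through
 * fc_disc_rport_callback() above.
 */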
static struct fc_rport_operations fc_disc_rport_ops = {
        .event_callback = fc_disc_rport_callback,
};

/**
 * fc_disc_new_target() - Handle new target found by discovery
 * @disc: FC discovery context
 * @rport: The previous FC remote port (NULL if new remote port)
 * @ids: Identifiers for the new FC remote port
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *               before it is called.
 */
static int fc_disc_new_target(struct fc_disc *disc,
                              struct fc_rport *rport,
                              struct fc_rport_identifiers *ids)
{
        struct fc_lport *lport = disc->lport;
        struct fc_rport_libfc_priv *rdata;
        int error = 0;

        if (rport && ids->port_name) {
                if (rport->port_name == -1) {
                        /*
                         * Set WWN and fall through to notify of create.
                         */
                        fc_rport_set_name(rport, ids->port_name,
                                          rport->node_name);
                } else if (rport->port_name != ids->port_name) {
                        /*
                         * This is a new port with the same FCID as
                         * a previously-discovered port. Presumably the old
                         * port logged out and a new port logged in and was
                         * assigned the same FCID. This should be rare.
                         * Delete the old one and fall through to re-create.
                         */
                        fc_disc_del_target(disc, rport);
                        rport = NULL;
                }
        }
        if (((ids->port_name != -1) || (ids->port_id != -1)) &&
            ids->port_id != fc_host_port_id(lport->host) &&
            ids->port_name != lport->wwpn) {
                if (!rport) {
                        rport = lport->tt.rport_lookup(lport, ids->port_id);
                        if (!rport) {
                                struct fc_disc_port dp;
                                dp.lp = lport;
                                dp.ids.port_id = ids->port_id;
                                dp.ids.port_name = ids->port_name;
                                dp.ids.node_name = ids->node_name;
                                dp.ids.roles = ids->roles;
                                rport = lport->tt.rport_create(&dp);
                        }
                        if (!rport)
                                error = -ENOMEM;
                }
                if (rport) {
                        rdata = rport->dd_data;
                        rdata->ops = &fc_disc_rport_ops;
                        rdata->rp_state = RPORT_ST_INIT;
                        list_add_tail(&rdata->peers, &disc->rogue_rports);
                        lport->tt.rport_login(rport);
                }
        }
        return error;
}

/**
 * fc_disc_del_target() - Delete a target
 * @disc: FC discovery context
 * @rport: The remote port to be removed
 */
static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
{
        struct fc_lport *lport = disc->lport;
        struct fc_rport_libfc_priv *rdata = rport->dd_data;
        list_del(&rdata->peers);
        lport->tt.rport_logoff(rport);
}

/**
 * fc_disc_done() - Discovery has been completed
 * @disc: FC discovery context
 *
 * Locking Note: This function expects that the disc mutex is locked before
 * it is called. The discovery callback is then made with the lock released,
 * and the lock is re-taken before returning from this function.
 */
static void fc_disc_done(struct fc_disc *disc)
{
        struct fc_lport *lport = disc->lport;
        enum fc_disc_event event;

        FC_DEBUG_DISC("Discovery complete for port (%6x)\n",
                      fc_host_port_id(lport->host));

        event = disc->event;
        disc->event = DISC_EV_NONE;

        if (disc->requested)
                fc_disc_gpn_ft_req(disc);
        else
                disc->pending = 0;

        mutex_unlock(&disc->disc_mutex);
        disc->disc_callback(lport, event);
        mutex_lock(&disc->disc_mutex);
}

/**
 * fc_disc_error() - Handle error on dNS request
 * @disc: FC discovery context
 * @fp: The frame pointer
 */
static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
{
        struct fc_lport *lport = disc->lport;
        unsigned long delay = 0;
        if (fc_disc_debug)
                FC_DBG("Error %ld, retries %d/%d\n",
                       PTR_ERR(fp), disc->retry_count,
                       FC_DISC_RETRY_LIMIT);

        if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
                /*
                 * Memory allocation failure, or the exchange timed out,
                 * retry after delay.
                 */
                if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
                        /* go ahead and retry */
                        if (!fp)
                                delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
                        else {
                                delay = msecs_to_jiffies(lport->e_d_tov);

                                /* timeout faster first time */
                                if (!disc->retry_count)
                                        delay /= 4;
                        }
                        disc->retry_count++;
                        schedule_delayed_work(&disc->disc_work, delay);
                } else {
                        /* exceeded retries */
                        disc->event = DISC_EV_FAILED;
                        fc_disc_done(disc);
                }
        }
}

/**
 * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
 * @disc: FC discovery context
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *               before it is called.
 */
static void fc_disc_gpn_ft_req(struct fc_disc *disc)
{
        struct fc_frame *fp;
        struct fc_lport *lport = disc->lport;

        WARN_ON(!fc_lport_test_ready(lport));

        disc->pending = 1;
        disc->requested = 0;

        disc->buf_len = 0;
        disc->seq_count = 0;
        fp = fc_frame_alloc(lport,
                            sizeof(struct fc_ct_hdr) +
                            sizeof(struct fc_ns_gid_ft));
        if (!fp)
                goto err;

        if (lport->tt.elsct_send(lport, NULL, fp,
                                 FC_NS_GPN_FT,
                                 fc_disc_gpn_ft_resp,
                                 disc, lport->e_d_tov))
                return;
err:
        fc_disc_error(disc, fp);
}

/**
 * fc_disc_gpn_ft_parse() - Parse the list of IDs and names resulting from a request
 * @disc: FC discovery context
 * @buf: GPN_FT response buffer
 * @len: size of response buffer
 */
static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
{
        struct fc_lport *lport;
        struct fc_gpn_ft_resp *np;
        char *bp;
        size_t plen;
        size_t tlen;
        int error = 0;
        struct fc_disc_port dp;
        struct fc_rport *rport;
        struct fc_rport_libfc_priv *rdata;

        lport = disc->lport;
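
        /*
         * Each GPN_FT response record (struct fc_gpn_ft_resp) pairs a port
         * ID (fp_fid) with its WWPN (fp_wwpn); FC_NS_FID_LAST in fp_flags
         * marks the final record. Records may be split across frames, so a
         * partial record is carried over in disc->partial_buf between calls.
         */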
        /*
         * Handle partial name record left over from previous call.
         */
        bp = buf;
        plen = len;
        np = (struct fc_gpn_ft_resp *)bp;
        tlen = disc->buf_len;
        if (tlen) {
                WARN_ON(tlen >= sizeof(*np));
                plen = sizeof(*np) - tlen;
                WARN_ON(plen <= 0);
                WARN_ON(plen >= sizeof(*np));
                if (plen > len)
                        plen = len;
                np = &disc->partial_buf;
                memcpy((char *)np + tlen, bp, plen);

                /*
                 * Set bp so that the loop below will advance it to the
                 * first valid full name element.
                 */
                bp -= tlen;
                len += tlen;
                plen += tlen;
                disc->buf_len = (unsigned char) plen;
                if (plen == sizeof(*np))
                        disc->buf_len = 0;
        }

        /*
         * Handle full name records, including the one filled from above.
         * Normally, np == bp and plen == len, but from the partial case above,
         * bp, len describe the overall buffer, and np, plen describe the
         * partial buffer, which would usually be full now.
         * After the first time through the loop, things return to "normal".
         */
        while (plen >= sizeof(*np)) {
                dp.lp = lport;
                dp.ids.port_id = ntoh24(np->fp_fid);
                dp.ids.port_name = ntohll(np->fp_wwpn);
                dp.ids.node_name = -1;
                dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;

                if ((dp.ids.port_id != fc_host_port_id(lport->host)) &&
                    (dp.ids.port_name != lport->wwpn)) {
                        rport = lport->tt.rport_create(&dp);
                        if (rport) {
                                rdata = rport->dd_data;
                                rdata->ops = &fc_disc_rport_ops;
                                rdata->local_port = lport;
                                list_add_tail(&rdata->peers,
                                              &disc->rogue_rports);
                                lport->tt.rport_login(rport);
                        } else
                                FC_DBG("Failed to allocate memory for "
                                       "the newly discovered port (%6x)\n",
                                       dp.ids.port_id);
                }

                if (np->fp_flags & FC_NS_FID_LAST) {
                        disc->event = DISC_EV_SUCCESS;
                        fc_disc_done(disc);
                        len = 0;
                        break;
                }
                len -= sizeof(*np);
                bp += sizeof(*np);
                np = (struct fc_gpn_ft_resp *)bp;
                plen = len;
        }

        /*
         * Save any partial record at the end of the buffer for next time.
         */
        if (error == 0 && len > 0 && len < sizeof(*np)) {
                if (np != &disc->partial_buf) {
                        FC_DEBUG_DISC("Partial buffer remains "
                                      "for discovery by (%6x)\n",
                                      fc_host_port_id(lport->host));
                        memcpy(&disc->partial_buf, np, len);
                }
                disc->buf_len = (unsigned char) len;
        } else {
                disc->buf_len = 0;
        }
        return error;
}

/**
 * fc_disc_timeout() - Retry handler for the disc component
 * @work: Structure holding disc obj that needs retry discovery
 *
 * Handle retry of memory allocation for remote ports.
 */
static void fc_disc_timeout(struct work_struct *work)
{
        struct fc_disc *disc = container_of(work,
                                            struct fc_disc,
                                            disc_work.work);
        mutex_lock(&disc->disc_mutex);
        if (disc->requested && !disc->pending)
                fc_disc_gpn_ft_req(disc);
        mutex_unlock(&disc->disc_mutex);
}
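
/*
 * A GPN_FT response may arrive as a multi-frame sequence: the first frame
 * (SOF_I3, seq_cnt 0) carries the CT header, while continuation frames
 * (SOF_N3) are matched against disc->seq_count, so the payload of each
 * frame can be handed to fc_disc_gpn_ft_parse() in order.
 */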
/**
 * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT)
 * @sp: Current sequence of GPN_FT exchange
 * @fp: response frame
 * @disc_arg: FC discovery context
 *
 * Locking Note: This function is called without disc mutex held, and
 *               should do all its processing with the mutex held
 */
static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
                                void *disc_arg)
{
        struct fc_disc *disc = disc_arg;
        struct fc_ct_hdr *cp;
        struct fc_frame_header *fh;
        unsigned int seq_cnt;
        void *buf = NULL;
        unsigned int len;
        int error;

        mutex_lock(&disc->disc_mutex);
        FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n",
                      fc_host_port_id(disc->lport->host));

        if (IS_ERR(fp)) {
                fc_disc_error(disc, fp);
                mutex_unlock(&disc->disc_mutex);
                return;
        }

        WARN_ON(!fc_frame_is_linear(fp));       /* buffer must be contiguous */
        fh = fc_frame_header_get(fp);
        len = fr_len(fp) - sizeof(*fh);
        seq_cnt = ntohs(fh->fh_seq_cnt);
        if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
            disc->seq_count == 0) {
                cp = fc_frame_payload_get(fp, sizeof(*cp));
                if (!cp) {
                        FC_DBG("GPN_FT response too short, len %d\n",
                               fr_len(fp));
                } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {

                        /* Accepted, parse the response. */
                        buf = cp + 1;
                        len -= sizeof(*cp);
                } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
                        FC_DBG("GPN_FT rejected reason %x exp %x "
                               "(check zoning)\n", cp->ct_reason,
                               cp->ct_explan);
                        disc->event = DISC_EV_FAILED;
                        fc_disc_done(disc);
                } else {
                        FC_DBG("GPN_FT unexpected response code %x\n",
                               ntohs(cp->ct_cmd));
                }
        } else if (fr_sof(fp) == FC_SOF_N3 &&
                   seq_cnt == disc->seq_count) {
                buf = fh + 1;
        } else {
                FC_DBG("GPN_FT unexpected frame - out of sequence? "
                       "seq_cnt %x expected %x sof %x eof %x\n",
                       seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
        }
        if (buf) {
                error = fc_disc_gpn_ft_parse(disc, buf, len);
                if (error)
                        fc_disc_error(disc, fp);
                else
                        disc->seq_count++;
        }
        fc_frame_free(fp);

        mutex_unlock(&disc->disc_mutex);
}

/**
 * fc_disc_single() - Discover the directory information for a single target
 * @disc: FC discovery context
 * @dp: The port to rediscover
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *               before it is called.
 */
static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
{
        struct fc_lport *lport;
        struct fc_rport *new_rport;
        struct fc_rport_libfc_priv *rdata;

        lport = disc->lport;

        if (dp->ids.port_id == fc_host_port_id(lport->host))
                goto out;

        new_rport = lport->tt.rport_create(dp);
        if (new_rport) {
                rdata = new_rport->dd_data;
                rdata->ops = &fc_disc_rport_ops;
                kfree(dp);
                list_add_tail(&rdata->peers, &disc->rogue_rports);
                lport->tt.rport_login(new_rport);
        }
        return;
out:
        kfree(dp);
}

/**
 * fc_disc_stop() - Stop discovery for a given lport
 * @lport: The lport that discovery should stop for
 */
void fc_disc_stop(struct fc_lport *lport)
{
        struct fc_disc *disc = &lport->disc;

        if (disc) {
                cancel_delayed_work_sync(&disc->disc_work);
                fc_disc_stop_rports(disc);
        }
}

/**
 * fc_disc_stop_final() - Stop discovery for a given lport
 * @lport: The lport that discovery should stop for
 *
 * This function will block until discovery has been
 * completely stopped and all rports have been deleted.
 */
void fc_disc_stop_final(struct fc_lport *lport)
{
        fc_disc_stop(lport);
        lport->tt.rport_flush_queue();
}

/**
 * fc_disc_init() - Initialize the discovery block
 * @lport: FC local port
 */
int fc_disc_init(struct fc_lport *lport)
{
        struct fc_disc *disc;

        if (!lport->tt.disc_start)
                lport->tt.disc_start = fc_disc_start;

        if (!lport->tt.disc_stop)
                lport->tt.disc_stop = fc_disc_stop;

        if (!lport->tt.disc_stop_final)
                lport->tt.disc_stop_final = fc_disc_stop_final;

        if (!lport->tt.disc_recv_req)
                lport->tt.disc_recv_req = fc_disc_recv_req;

        if (!lport->tt.rport_lookup)
                lport->tt.rport_lookup = fc_disc_lookup_rport;

        disc = &lport->disc;
        INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
        mutex_init(&disc->disc_mutex);
        INIT_LIST_HEAD(&disc->rports);
        INIT_LIST_HEAD(&disc->rogue_rports);

        disc->lport = lport;
        disc->delay = FC_DISC_DELAY;
        disc->event = DISC_EV_NONE;

        return 0;
}
EXPORT_SYMBOL(fc_disc_init);