/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_disc.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);

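/*
 * lpfc_terminate_rport_io - Abort any outstanding FCP I/O for the node
 * that backs @rport before the remote port is torn down.
 */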
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->vport->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}

/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp))
		return;

	spin_lock_irq(&phba->hbalock);
	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);
	return;
}

/*
 * This function is called from the worker thread when dev_loss_tmo
 * expires.
 */
static void
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport   *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba   *phba;
	uint8_t *name;
	int put_node;
	int put_rport;
	int warn_on = 0;

	rport = ndlp->rport;

	if (!rport)
		return;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return;
	}

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	put_node = rdata->pnode != NULL;
	put_rport = ndlp->rport != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	if (put_rport)
		put_device(&rport->dev);

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of events pending and prevents an event storm when there
 * are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are lot of fast event do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt: Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		evt_data_size,
		evt_data,
		LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}

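/*
 * lpfc_work_list_done - Drain phba->work_list and dispatch each queued
 * worker event (ELS retry, devloss, online/offline transitions and
 * fast-path management events).
 */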
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt  *evtp = NULL;
	struct lpfc_nodelist  *ndlp;
	int free_evt;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
					? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}

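/*
 * lpfc_work_done - Main worker-thread service routine: handles host
 * attention bits, SLI4 events, per-vport timer events and the slow
 * (ELS) ring, then drains the completed work list.
 */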
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->hba_flag & HBA_RECEIVE_BUFFER)
			lpfc_sli4_handle_received_buffer(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_RAMP_UP_QUEUE)
				lpfc_ramp_up_queue_handler(phba);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK)
		|| (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			lpfc_sli_handle_slow_ring_event(phba, pring,
							(status &
							 HA_RXMASK));
		}
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok:     cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}

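/*
 * lpfc_do_work - Worker kthread entry point; sleeps until LPFC_DATA_READY
 * is set (or the thread is asked to stop) and then calls lpfc_work_done()
 * to service the pending work.
 */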
static int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, -20);
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt  *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1  = arg1;
	evtp->evt_arg2  = arg2;
	evtp->evt       = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}

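/*
 * lpfc_cleanup_rpis - Walk the vport's node list on link failure and
 * either recover or remove each node, unregistering RPIs where required.
 */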
static void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if (!remove && ndlp->nlp_type & NLP_FABRIC)
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}

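/*
 * lpfc_port_link_failure - Per-vport link failure cleanup: flush RSCN and
 * ELS activity, recover RPIs and stop the discovery timer.
 */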
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(vport);
}

static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down:       state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);
}

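/*
 * lpfc_linkdown - HBA-wide link down handling: mark the link down, notify
 * every vport, clean up firmware default RPIs and reset pt2pt state.
 */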
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t      *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		phba->pport->fc_flag &= ~FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}

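/*
 * lpfc_linkup_cleanup_nodes - On link up, move fabric nodes back to NPR
 * state and unregister RPIs for nodes that will be PLOGIed again.
 */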
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}

static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up:         top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);
}

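/*
 * lpfc_linkup - HBA-wide link up handling: unblock fabric IOCBs, bring up
 * each vport and issue CLEAR_LA when NPIV is enabled on pre-SLI4 hardware.
 */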
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (phba->sli_rev < LPFC_SLI_REV4))
		lpfc_issue_clear_la(phba, phba->pport);

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}

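/*
 * Completion handler for the CONFIG_LINK mailbox command: waits for FAN on
 * public loops or starts discovery by issuing the initial FLOGI.
 */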
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI) {
		lpfc_initial_flogi(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}

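/*
 * Completion handler for the REG_FCFI mailbox command: records the
 * registered FCFI and kicks off FCoE discovery with a FLOGI.
 */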
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	unsigned long flags;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irqsave(&phba->hbalock, flags);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (vport->port_state != LPFC_FLOGI) {
		spin_lock_irqsave(&phba->hbalock, flags);
		phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_initial_flogi(vport);
	}

	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_fab_name_match - Check if the fcf fabric name matches.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if ((fab_name[0] ==
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) &&
	    (fab_name[1] ==
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) &&
	    (fab_name[2] ==
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) &&
	    (fab_name[3] ==
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) &&
	    (fab_name[4] ==
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) &&
	    (fab_name[5] ==
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) &&
	    (fab_name[6] ==
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) &&
	    (fab_name[7] ==
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
		return 1;
	else
		return 0;
}

/**
 * lpfc_sw_name_match - Check if the fcf switch name matches.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if ((sw_name[0] ==
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) &&
	    (sw_name[1] ==
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) &&
	    (sw_name[2] ==
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) &&
	    (sw_name[3] ==
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) &&
	    (sw_name[4] ==
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) &&
	    (sw_name[5] ==
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) &&
	    (sw_name[6] ==
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) &&
	    (sw_name[7] ==
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)))
		return 1;
	else
		return 0;
}

/**
 * lpfc_mac_addr_match - Check if the fcf mac address matches.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
{
	if ((phba->fcf.mac_addr[0] ==
		bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) &&
	    (phba->fcf.mac_addr[1] ==
		bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
	    (phba->fcf.mac_addr[2] ==
		bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
	    (phba->fcf.mac_addr[3] ==
		bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
	    (phba->fcf.mac_addr[4] ==
		bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
	    (phba->fcf.mac_addr[5] ==
		bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
		return 1;
	else
		return 0;
}

/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to the lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
{
	phba->fcf.fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	phba->fcf.fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	phba->fcf.fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	phba->fcf.fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	phba->fcf.fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	phba->fcf.fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	phba->fcf.fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	phba->fcf.fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	phba->fcf.mac_addr[0] =
		bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	phba->fcf.mac_addr[1] =
		bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	phba->fcf.mac_addr[2] =
		bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	phba->fcf.mac_addr[3] =
		bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	phba->fcf.mac_addr[4] =
		bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	phba->fcf.mac_addr[5] =
		bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	phba->fcf.priority = new_fcf_record->fip_priority;
	phba->fcf.switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	phba->fcf.switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	phba->fcf.switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	phba->fcf.switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	phba->fcf.switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	phba->fcf.switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	phba->fcf.switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	phba->fcf.switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}

/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with the HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);

	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		if (phba->pport->port_state != LPFC_FLOGI)
			lpfc_initial_flogi(phba->pport);
		return;
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
		GFP_KERNEL);
	if (!fcf_mbxq)
		return;

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);

	return;
}

/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by boot bios.
 * @addr_mode: The address mode to be used by this FCF.
 * @vlan_id: The vlan id to be used with this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from the
 * config region to decide if this FCF can be used for SAN discovery. It returns
 * 1 if this record can be used for SAN discovery, else it returns zero. If this
 * FCF record can be used for SAN discovery, boot_flag will indicate if this FCF
 * is used by the boot bios and addr_mode will indicate the addressing mode to
 * be used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in vlan_id on return from the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to 0xFFFF.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			struct fcf_record *new_fcf_record,
			uint32_t *boot_flag, uint32_t *addr_mode,
			uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;

	/* If FCF not available return 0 */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
		return 0;

	if (!phba->cfg_enable_fip) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = 0xFFFF;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, the driver connects to
	 * all FCF entries.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		*vlan_id = 0xFFFF;
		return 1;
	}

	list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for the
			 * vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA required but FCF not support this continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA required but FCF not support this continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If user did not specify any addressing mode, or if the
		 * prefered addressing mode specified by user is not supported
		 * by FCF, allow fabric to pick the addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				      FCFCNCT_AM_SPMA) ?
				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a prefered address mode, use the
		 * addr mode only if FCF support the addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;

		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		else
			*vlan_id = 0xFFFF;

		return 1;
	}

	return 0;
}

/**
 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the fcf records available in the
 * HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kicks start
 * discovery.
 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
 * use an FCF record which matches fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 */
void
lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	dma_addr_t phys_addr;
	uint8_t *bytep;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;
	int rc;
	uint32_t boot_flag, addr_mode;
	uint32_t next_fcf_index;
	unsigned long flags;
	uint16_t vlan_id;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		goto out;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &shdr->response);
	/*
	 * The FCF Record was read and there is no reason for the driver
	 * to maintain the FCF record data or memory. Instead, just need
	 * to book keeping the FCFIs can be used.
	 */
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2521 READ_FCF_RECORD mailbox failed "
				"with status x%x add_status x%x, mbx\n",
				shdr_status, shdr_add_status);
		goto out;
	}
	/* Interpreting the returned information of FCF records */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);

	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      sizeof(struct fcf_record));
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);

	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record,
				      &boot_flag, &addr_mode,
				      &vlan_id);
	/*
	 * If the fcf record does not match with connect list entries
	 * read the next entry.
	 */
	if (!rc)
		goto read_next_fcf;
	/*
	 * If this is not the first FCF discovery of the HBA, use last
	 * FCF record for the discovery.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->fcf.fcf_flag & FCF_IN_USE) {
		if (lpfc_fab_name_match(phba->fcf.fabric_name,
					new_fcf_record) &&
		    lpfc_sw_name_match(phba->fcf.switch_name,
					new_fcf_record) &&
		    lpfc_mac_addr_match(phba, new_fcf_record)) {
			phba->fcf.fcf_flag |= FCF_AVAILABLE;
			spin_unlock_irqrestore(&phba->hbalock, flags);
			goto out;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto read_next_fcf;
	}
	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
		/*
		 * If the current FCF record does not have boot flag
		 * set and new fcf record has boot flag set, use the
		 * new fcf record.
		 */
		if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
			/* Use this FCF record */
			lpfc_copy_fcf_record(phba, new_fcf_record);
			phba->fcf.addr_mode = addr_mode;
			phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
			if (vlan_id != 0xFFFF) {
				phba->fcf.fcf_flag |= FCF_VALID_VLAN;
				phba->fcf.vlan_id = vlan_id;
			}
			spin_unlock_irqrestore(&phba->hbalock, flags);
			goto read_next_fcf;
		}
		/*
		 * If the current FCF record has boot flag set and the
		 * new FCF record does not have boot flag, read the next
		 * FCF record.
		 */
		if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			goto read_next_fcf;
		}
		/*
		 * If there is a record with lower priority value for
		 * the current FCF, use that record.
		 */
		if (lpfc_fab_name_match(phba->fcf.fabric_name,
					new_fcf_record) &&
		    (new_fcf_record->fip_priority < phba->fcf.priority)) {
			/* Use this FCF record */
			lpfc_copy_fcf_record(phba, new_fcf_record);
			phba->fcf.addr_mode = addr_mode;
			if (vlan_id != 0xFFFF) {
				phba->fcf.fcf_flag |= FCF_VALID_VLAN;
				phba->fcf.vlan_id = vlan_id;
			}
			spin_unlock_irqrestore(&phba->hbalock, flags);
			goto read_next_fcf;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto read_next_fcf;
	}
	/*
	 * This is the first available FCF record, use this
	 * record.
	 */
	lpfc_copy_fcf_record(phba, new_fcf_record);
	phba->fcf.addr_mode = addr_mode;
	if (boot_flag)
		phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
	phba->fcf.fcf_flag |= FCF_AVAILABLE;
	if (vlan_id != 0xFFFF) {
		phba->fcf.fcf_flag |= FCF_VALID_VLAN;
		phba->fcf.vlan_id = vlan_id;
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	goto read_next_fcf;

read_next_fcf:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
		lpfc_register_fcf(phba);
	else
		lpfc_sli4_read_fcf_record(phba, next_fcf_index);
	return;

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);

	return;
}

/**
 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 */
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There are no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			if (phba->fc_topology == TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_ELS,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

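/*
 * Completion handler for the REG_VFI mailbox command: on success starts
 * FDISCs and the SCR/NameServer PLOGI; on failure falls back to loop
 * discovery or marks the vport failed.
 */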
static void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->context1;
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "2018 REG_VFI mbxStatus error x%x "
			 "HBA state x%x\n",
			 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto fail_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto fail_free_mem;
	}
	/* Mark the vport has registered with its VFI */
	vport->vfi_state |= LPFC_VFI_REGISTERED;

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		lpfc_start_fdiscs(phba);
		lpfc_do_scr_ns_plogi(phba, vport);
	}

fail_free_mem:
	mempool_free(mboxq, phba->mbox_mem_pool);
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}

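/*
 * Completion handler for READ_SPARAM: copies the service parameters into
 * the vport and derives the node and port names from them.
 */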
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport  *vport = pmb->vport;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof(vport->fc_nodename));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof(vport->fc_portname));
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

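/*
 * lpfc_mbx_process_link_up - Process a link-up attention: record link
 * speed and topology, then issue READ_SPARAM and either CONFIG_LINK (FC)
 * or FCF table reads (FCoE) to continue bring-up.
 */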
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
	struct lpfc_dmabuf *mp;
	int i;
	struct fcf_record *fcf_record;
	int rc;

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	spin_lock_irq(&phba->hbalock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	case LA_8GHZ_LINK:
		phba->fc_linkspeed = LA_8GHZ_LINK;
		break;
	case LA_10GHZ_LINK:
		phba->fc_linkspeed = LA_10GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		if (phba->cfg_enable_npiv)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1309 Link Up Event npiv not supported in loop "
				"topology\n");
		/* Get Loop Map information */
		if (la->il)
			vport->fc_flag |= FC_LBIT;

		vport->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			   (phba->sli_rev == 3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		vport->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_linkup(phba);
	if (sparam_mbox) {
		lpfc_read_sparam(phba, sparam_mbox, 0);
		sparam_mbox->vport = vport;
		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
		rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			mempool_free(sparam_mbox, phba->mbox_mem_pool);
			goto out;
		}
	}

	if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		/*
		 * Add the driver's default FCF record at FCF index 0 now. This
		 * is phase 1 implementation that support FCF index 0 and driver
		 * defaults.
		 */
		if (phba->cfg_enable_fip == 0) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2554 Could not allocate memory for "
					"fcf record\n");
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
						LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2013 Could not manually add FCF "
					"record 0, status %d\n", rc);
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		rc = lpfc_sli4_read_fcf_record(phba,
					LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			goto out;
	}

	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
		"0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
		vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}

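/*
 * lpfc_enable_la - Re-enable link attention processing and, on SLI-3 and
 * earlier HBAs, the link attention interrupt.
 */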
static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}

/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Unblock ELS traffic */
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(shost->host_lock);
	if (la->pb)
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(shost->host_lock);

	if ((phba->fc_eventTag  < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (la->attType == AT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;
	if (la->mm)
		phba->sli.sli_flag |= LPFC_MENLO_MAINT;
	else
		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;

	if (la->attType == AT_LINK_UP && (!la->mm)) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					la->granted_AL_PA, la->UlnkSpeed,
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x x%x x%x %d\n",
					la->eventTag, phba->fc_eventTag,
					la->granted_AL_PA, la->UlnkSpeed,
					phba->alpa_map[0],
					la->mm, la->fa,
					phba->wait_4_mlo_maint_flg);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else if (la->attType == AT_LINK_DOWN) {
		phba->fc_stat.LinkDown++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1308 Link Down Event in loop back mode "
				"x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1305 Link Down Event x%x received "
				"Data: x%x x%x x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag,
				la->mm, la->fa);
		}
		lpfc_mbx_issue_link_down(phba);
	}
	if (la->mm && la->attType == AT_LINK_UP) {
		if (phba->link_state != LPFC_LINK_DOWN) {
			phba->fc_stat.LinkDown++;
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1312 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
			lpfc_mbx_issue_link_down(phba);
		} else
			lpfc_enable_la(phba);

		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1310 Menlo Maint Mode Link up Event x%x rcvd "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		/*
		 * The cmnd that triggered this will be waiting for this
		 * signal.
		 */
		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
		if (phba->wait_4_mlo_maint_flg) {
			phba->wait_4_mlo_maint_flg = 0;
			wake_up_interruptible(&phba->wait_4_mlo_m_q);
		}
	}

	if (la->fa) {
		if (la->mm)
			lpfc_issue_clear_la(phba, vport);
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1311 fa %d\n", la->fa);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;

	pmb->context1 = NULL;

	/* Good status, call state machine */
	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);

	return;
}

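/*
 * Completion handler for the UNREG_VPI mailbox command issued by
 * lpfc_mbx_unreg_vpi().
 */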
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	}
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if (vport->load_flag & FC_UNLOADING)
		scsi_host_put(shost);
}
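
/*
 * Allocate and issue an UNREG_VPI mailbox command for this vport. The
 * completion is handled by lpfc_mbx_cmpl_unreg_vpi above; if the command
 * cannot be issued the vport is marked with VPORT_ERROR.
 */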
void
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_vpi(phba, vport->vpi, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1800 Could not issue unreg_vpi\n");
			mempool_free(mbox, phba->mbox_mem_pool);
			vport->unreg_vpi_cmpl = VPORT_ERROR;
		}
	}
}
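
/*
 * Completion handler for the REG_VPI mailbox command. On error the vport
 * is placed in FC_VPORT_FAILED state; on success any NPR nodes are PLOGIed
 * and the vport is moved to LPFC_VPORT_READY.
 */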
static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;
		goto out;
	}

	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 **/
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0542 lpfc_create_static_vport failed to"
			" allocate mailbox memory\n");
		return;
	}

	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0543 lpfc_create_static_vport failed to"
			" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	vport_buff = (uint8_t *) vport_info;
	do {
		lpfc_dump_static_vport(phba, pmb, offset);
		pmb->vport = phba->pport;
		rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);

		if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0544 lpfc_create_static_vport failed to"
				" issue dump mailbox command ret 0x%x "
				"status 0x%x\n",
				rc, mb->mbxStatus);
			goto out;
		}

		if (mb->un.varDmp.word_cnt >
			sizeof(struct static_vport_info) - offset)
			mb->un.varDmp.word_cnt =
				sizeof(struct static_vport_info) - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
			vport_buff + offset,
			mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;

	} while (mb->un.varDmp.word_cnt &&
		offset < sizeof(struct static_vport_info));

	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
			!= VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0545 lpfc_create_static_vport bad"
			" information header 0x%x 0x%x\n",
			le32_to_cpu(vport_info->signature),
			le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0546 lpfc_create_static_vport failed to"
				" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);

	/*
	 * If this is a timed out command, setting NULL to context2 tells the
	 * SLI layer not to use this buffer.
	 */
	spin_lock_irq(&phba->hbalock);
	pmb->context2 = NULL;
	spin_unlock_irq(&phba->hbalock);

	if (rc != MBX_TIMEOUT)
		mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	pmb->context1 = NULL;
	pmb->context2 = NULL;
	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			/* Decrement the reference count to ndlp after the
			 * reference to the ndlp are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		/* Decrement the reference count to ndlp after the reference
		 * to the ndlp are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		lpfc_start_fdiscs(phba);
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Drop the reference count from the mbox at the end after
	 * all the current reference to the ndlp have been done.
	 */
	lpfc_nlp_put(ndlp);
	return;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	if (mb->mbxStatus) {
out:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);
		/* decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		goto out;
	}

	/* decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport  *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba  *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port.  But fc_remote_port_add zeros the space to which
	 * rport->dd_data points.  So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
		lpfc_nlp_put(ndlp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add: did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport delete: did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	fc_remote_port_delete(rport);

	return;
}
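
/*
 * Adjust the per-vport node state counters by <count> for the given node
 * state, under the host lock.
 */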
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}
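
/*
 * Perform the transport-related work implied by a node state transition:
 * register or unregister the remote port, allocate the statistics buffer
 * for new targets, and demote nodes with an unusable SCSI target id.
 */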
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		vport->phba->nport_event_cnt++;
		lpfc_unregister_remote_port(ndlp);
	}

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		vport->phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(vport, ndlp);
	}
	if ((new_state == NLP_STE_MAPPED_NODE) &&
	    (vport->stat_data_enabled)) {
		/*
		 * A new target is discovered, if there is no buffer for
		 * statistical data collection allocate buffer.
		 */
		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_KERNEL);

		if (!ndlp->lat_data)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				"0286 lpfc_nlp_state_cleanup failed to "
				"allocate statistical data buffer DID "
				"0x%x\n", ndlp->nlp_DID);
	}
	/*
	 * if we added to Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range - move the node to the
	 * Unmapped List
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}
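
/* Return a printable name for a node state, formatted into <buffer>. */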
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
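
/*
 * Move a node to a new discovery state: log and trace the transition,
 * update the vport state counters and run the state cleanup actions.
 */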
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
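
/* Add a node to the vport's fc_nodes list if it is not already queued. */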
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
			       NLP_STE_UNUSED_NODE);
}
static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
			       NLP_STE_UNUSED_NODE);
}
/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp. This is because
 * the life-span of the @ndlp might go beyond the existence of @vport, as
 * the final release of ndlp is determined by its reference count, and the
 * operations on @ndlp need the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
}
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did;
	unsigned long flags;

	if (!ndlp)
		return NULL;

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0277 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0278 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}

	/* Keep the original DID */
	did = ndlp->nlp_DID;

	/* re-initialize ndlp except of ndlp linked list pointer */
	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
	lpfc_initialize_node(vport, ndlp, did);

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable: did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;
}
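
/*
 * Release the driver's "last" reference to a node so it can be freed once
 * all other users are done with it.
 */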
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp is marked as UNUSED on the list
	 * until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	lpfc_nlp_put(ndlp);
	return;
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer: tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if it's running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	struct lpfc_sli *psli = &phba->sli;
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	uint32_t rpi, i;

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	rpi = ndlp->nlp_rpi;
	if (ndlp->nlp_flag & NLP_RPI_VALID) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(&phba->hbalock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we
				 * are looking for
				 */
				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
							 ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_move_tail(&iocb->list,
						       &completions);
				}
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;

	if (ndlp->nlp_flag & NLP_RPI_VALID) {
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
			mbox->vport = vport;
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);
		ndlp->nlp_rpi = 0;
		ndlp->nlp_flag &= ~NLP_RPI_VALID;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		return 1;
	}
	return 0;
}
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"1836 Could not issue "
				"unreg_login(all_rpis) status %d\n", rc);
	}
}
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"1815 Could not issue "
				"unreg_did (default rpis) status %d\n",
				rc);
	}
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0280 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0281 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_disable_node(vport, ndlp);
	}

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);

	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
	    !(ndlp->nlp_flag & NLP_RPI_VALID)) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
			!= NULL) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
			    (uint8_t *) &vport->fc_sparam, mbox, 0);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			} else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->context2 = NULL;
				rc = lpfc_sli_issue_mbox(phba, mbox,
							 MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					mempool_free(mbox,
						     phba->mbox_mem_pool);
				}
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * We can get here with a non-NULL ndlp->rport because when we
	 * unregister a rport we don't break the rport/node linkage. So if we
	 * do, make sure we aren't leaving any dangling pointers behind.
	 */
	if (ndlp->rport) {
		rdata = ndlp->rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}
}
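
/*
 * Match a node against an FC_ID, allowing for the area/domain of either
 * side to be zero while the port is still being assigned its address.
 */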
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
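
/*
 * Find or allocate the node for <did> and mark it for discovery (NPR with
 * NLP_NPR_2B_DISC set), honouring any RSCN filtering currently in effect.
 * Returns NULL if the node does not need to be discovered.
 */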
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
	struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
	struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
	int rc;

	/*
	 * If it's not a physical port or if we already sent
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
	    (phba->sli_rev == LPFC_SLI_REV4))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
					== MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}
/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/*
	 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
	 * continue discovery.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}
/*
 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t     *icmd;
	struct lpfc_iocbq    *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_sli  *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
	 * FAN
	 */
		/* FAN timeout */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			lpfc_initial_flogi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
	/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}
/*
 * This routine handles processing an FDMI REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
	 */

	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}
static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param))
			return ndlp;
	}
	return NULL;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If the RPI is
 * found, it returns the node list element pointer, else it returns NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}
/*
 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
 * found, it returns the node list element pointer, else it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
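
/* Initialize a freshly allocated node and record it in the debugfs trace. */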
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init: did:x%x",
		ndlp->nlp_DID, 0, 0);

	return;
}
/* This routine releases all resources associated with a specific NPort's ndlp
 * and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release: did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p "
			"usgmap:x%x refcnt:%d\n",
			(void *)ndlp, ndlp->nlp_usg_map,
			atomic_read(&ndlp->kref.refcount));

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}
/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get: did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));
		/* The check of ndlp usage to prevent incrementing the
		 * ndlp reference count that is in the process of being
		 * released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}
/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, the associated nodelist will be freed. Returning 1
 * indicates the ndlp resource has been released; returning 0 indicates the
 * ndlp resource has not been released yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node put: did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after the previous one has done the ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after the ndlp
	 * is already in the inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* For the last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (atomic_read(&ndlp->kref.refcount) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note: when kref_put decrements a reference count that was 1, it
	 * invokes the release callback function but leaves the reference
	 * count at 1 (it does not actually perform the final decrement).
	 * Otherwise, it decrements the reference count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}
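
/*
 * Reference-count life cycle sketch (illustrative only, based on the
 * routines above): the final lpfc_nlp_put() marks the ndlp inactive
 * (and free-acked when a free was requested) under ndlp_lock before the
 * kref_put() that invokes lpfc_nlp_release():
 *
 *	lpfc_nlp_put(ndlp)
 *	    -> NLP_SET_IACT_REQ(ndlp)            (refcount was 1)
 *	    -> NLP_SET_FREE_ACK(ndlp)            (only if NLP_CHK_FREE_REQ)
 *	    -> kref_put(&ndlp->kref, lpfc_nlp_release)
 *	           -> lpfc_nlp_remove(), NLP_CLR_NODE_ACT(), mempool_free()
 */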
/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the
 * ndlp has been freed. A return value of 0 indicates the ndlp has
 * not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used: did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	if (atomic_read(&ndlp->kref.refcount) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}
/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with an
 * fc_rport associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 */
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	vports = lpfc_create_vport_work_array(phba);
	/* If the work array cannot be allocated, indicate the FCF is in use */
	if (!vports)
		return 1;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			    (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}
/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2555 UNREG_VFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2550 UNREG_FCFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
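
/*
 * Both completion handlers above follow the driver's asynchronous mailbox
 * pattern (sketch, see lpfc_unregister_unused_fcf() below): allocate the
 * mailbox from phba->mbox_mem_pool, fill the command, set mbox->vport and
 * mbox->mbox_cmpl, then issue it with MBX_NOWAIT; the completion handler
 * frees the mailbox, and the caller frees it itself only when
 * lpfc_sli_issue_mbox() returns MBX_NOT_FINISHED.
 */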
/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks if there is any connected remote port for the FCF and,
 * if all the devices are disconnected, unregisters the FCFI.
 * This function also tries to use another FCF for discovery.
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc, i;
	struct lpfc_vport **vports;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If HBA is not running in FIP mode or
	 * If HBA does not support FCoE or
	 * If FCF is not registered,
	 * do nothing.
	 */
	if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    (phba->cfg_enable_fip == 0)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports &&
	    (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_mbx_unreg_vpi(vports[i]);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Unregister VFI */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2556 UNREG_VFI mbox allocation failed "
			"HBA state x%x\n",
			phba->pport->port_state);
		return;
	}

	lpfc_unreg_vfi(mbox, phba->pport->vfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2557 UNREG_VFI issue mbox failed rc x%x "
			"HBA state x%x\n",
			rc, phba->pport->port_state);
		mempool_free(mbox, phba->mbox_mem_pool);
		return;
	}

	/* Unregister FCF */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2551 UNREG_FCFI mbox allocation failed "
			"HBA state x%x\n",
			phba->pport->port_state);
		return;
	}

	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2552 UNREG_FCFI issue mbox failed rc x%x "
			"HBA state x%x\n",
			rc, phba->pport->port_state);
		mempool_free(mbox, phba->mbox_mem_pool);
		return;
	}

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED |
		FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE |
		FCF_VALID_VLAN);
	spin_unlock_irq(&phba->hbalock);

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2553 lpfc_unregister_unused_fcf failed to read FCF"
			" record HBA state x%x\n",
			phba->pport->port_state);
}
/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 *
 * This function creates the driver data structures for the FCF connection
 * record table read from config region 23.
 */
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
	uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list)
		kfree(conn_entry);

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t) /
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
			GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2566 Failed to allocate connection"
				" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
			sizeof(struct lpfc_fcf_conn_rec));
		conn_entry->conn_rec.vlan_tag =
			le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
		conn_entry->conn_rec.flags =
			le16_to_cpu(conn_entry->conn_rec.flags);
		list_add_tail(&conn_entry->list,
			&phba->fcf_conn_rec_list);
	}
}
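
/*
 * Buffer layout assumed by lpfc_read_fcf_conn_tbl() (sketch derived from
 * the parsing code above):
 *
 *	+-------------------------------+  buff
 *	| struct lpfc_fcf_conn_hdr      |  length field is in 32-bit words
 *	+-------------------------------+  buff + sizeof(conn_hdr)
 *	| struct lpfc_fcf_conn_rec [0]  |  only FCFCNCT_VALID records are
 *	| struct lpfc_fcf_conn_rec [1]  |  copied to phba->fcf_conn_rec_list
 *	| ...                           |
 *	+-------------------------------+
 *
 *	record_count = conn_hdr->length * sizeof(uint32_t) /
 *		       sizeof(struct lpfc_fcf_conn_rec)
 */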
/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with config
 * parameters read from config region 23.
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
			uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	/* The FCoE parameters immediately follow the FIP parameter header */
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
	    (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
			FIPP_MODE_ON)
		phba->cfg_enable_fip = 1;

	if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
			FIPP_MODE_OFF)
		phba->cfg_enable_fip = 0;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
	return;
}
/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches config region data to find the beginning
 * of the record specified by record_type. If the record is found, this
 * function returns a pointer to the record; otherwise it returns NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
	    (size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
		<= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}
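
/*
 * TLV record layout assumed by lpfc_get_rec_conf23() (sketch derived from
 * the walk above): each record starts on a word boundary with a one-word
 * header, followed by rec_length data words; LPFC_REGION23_LAST_REC in the
 * type byte terminates the region.
 *
 *	buff[offset]     : record type
 *	buff[offset + 1] : rec_length (number of data words that follow)
 *	buff[offset + 4] : first data word
 *	next record      : offset + (rec_length + 1) * sizeof(uint32_t)
 */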
/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structures with the parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		uint8_t *buff,
		uint32_t size)
{
	uint32_t offset = 0, rec_length;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words, the signature and version
	 * cannot be verified.
	 */
	if (size < 2 * sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	rec_length = buff[offset + 1];

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}
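
/*
 * Config region 23 layout assumed by lpfc_parse_fcoe_conf() (sketch derived
 * from the checks above): a 4-byte LPFC_REGION23_SIGNATURE, a version word
 * whose first byte must equal LPFC_REGION23_VERSION, and then the TLV
 * records searched with lpfc_get_rec_conf23() for FCOE_PARAM_TYPE and
 * FCOE_CONN_TBL_TYPE.
 */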