drivers/scsi/fcoe/fcoe.c
1 /*
2 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20 #include <linux/module.h>
21 #include <linux/version.h>
22 #include <linux/spinlock.h>
23 #include <linux/netdevice.h>
24 #include <linux/etherdevice.h>
25 #include <linux/ethtool.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/crc32.h>
29 #include <linux/cpu.h>
30 #include <linux/fs.h>
31 #include <linux/sysfs.h>
32 #include <linux/ctype.h>
33 #include <scsi/scsi_tcq.h>
34 #include <scsi/scsicam.h>
35 #include <scsi/scsi_transport.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <net/rtnetlink.h>
38
39 #include <scsi/fc/fc_encaps.h>
40 #include <scsi/fc/fc_fip.h>
41
42 #include <scsi/libfc.h>
43 #include <scsi/fc_frame.h>
44 #include <scsi/libfcoe.h>
45
46 #include "fcoe.h"
47
48 MODULE_AUTHOR("Open-FCoE.org");
49 MODULE_DESCRIPTION("FCoE");
50 MODULE_LICENSE("GPL v2");
51
52 /* Performance tuning parameters for fcoe */
53 static unsigned int fcoe_ddp_min;
54 module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
55 MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
56 "Direct Data Placement (DDP).");
57
58 DEFINE_MUTEX(fcoe_config_mutex);
59
60 /* fcoe_percpu_clean completion. Waiter protected by fcoe_config_mutex */
61 static DECLARE_COMPLETION(fcoe_flush_completion);
62
63 /* fcoe host list */
64 /* must only be accessed under the RTNL mutex */
65 LIST_HEAD(fcoe_hostlist);
66 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
67
68 /* Function Prototypes */
69 static int fcoe_reset(struct Scsi_Host *);
70 static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
71 static int fcoe_rcv(struct sk_buff *, struct net_device *,
72 struct packet_type *, struct net_device *);
73 static int fcoe_percpu_receive_thread(void *);
74 static void fcoe_clean_pending_queue(struct fc_lport *);
75 static void fcoe_percpu_clean(struct fc_lport *);
76 static int fcoe_link_ok(struct fc_lport *);
77
78 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
79 static int fcoe_hostlist_add(const struct fc_lport *);
80
81 static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
82 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
83 static void fcoe_dev_setup(void);
84 static void fcoe_dev_cleanup(void);
85 static struct fcoe_interface
86 *fcoe_hostlist_lookup_port(const struct net_device *);
87
88 static int fcoe_fip_recv(struct sk_buff *, struct net_device *,
89 struct packet_type *, struct net_device *);
90
91 static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *);
92 static void fcoe_update_src_mac(struct fc_lport *, u8 *);
93 static u8 *fcoe_get_src_mac(struct fc_lport *);
94 static void fcoe_destroy_work(struct work_struct *);
95
96 static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
97 unsigned int);
98 static int fcoe_ddp_done(struct fc_lport *, u16);
99
100 static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
101
102 static int fcoe_create(const char *, struct kernel_param *);
103 static int fcoe_destroy(const char *, struct kernel_param *);
104 static int fcoe_enable(const char *, struct kernel_param *);
105 static int fcoe_disable(const char *, struct kernel_param *);
106
107 static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
108 u32 did, struct fc_frame *,
109 unsigned int op,
110 void (*resp)(struct fc_seq *,
111 struct fc_frame *,
112 void *),
113 void *, u32 timeout);
114 static void fcoe_recv_frame(struct sk_buff *skb);
115
116 static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
117
118 module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
119 __MODULE_PARM_TYPE(create, "string");
120 MODULE_PARM_DESC(create, " Creates an fcoe instance on an Ethernet interface");
121 module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
122 __MODULE_PARM_TYPE(destroy, "string");
123 MODULE_PARM_DESC(destroy, " Destroys an fcoe instance on an Ethernet interface");
124 module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR);
125 __MODULE_PARM_TYPE(enable, "string");
126 MODULE_PARM_DESC(enable, " Enables fcoe on an Ethernet interface.");
127 module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR);
128 __MODULE_PARM_TYPE(disable, "string");
129 MODULE_PARM_DESC(disable, " Disables fcoe on an Ethernet interface.");
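/*
 * Usage sketch: with the module_param_call() hooks above, instances are
 * driven from userspace through the standard module parameter files
 * (paths follow from the parameter names), for example:
 *
 *   echo eth2 > /sys/module/fcoe/parameters/create
 *   echo eth2 > /sys/module/fcoe/parameters/destroy
 *
 * The handlers below resolve the interface name via fcoe_if_to_netdev()
 * and then create, destroy, enable or disable the FCoE instance.
 */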
130
131 /* notification function for packets from net device */
132 static struct notifier_block fcoe_notifier = {
133 .notifier_call = fcoe_device_notification,
134 };
135
136 /* notification function for CPU hotplug events */
137 static struct notifier_block fcoe_cpu_notifier = {
138 .notifier_call = fcoe_cpu_callback,
139 };
140
141 static struct scsi_transport_template *fcoe_transport_template;
142 static struct scsi_transport_template *fcoe_vport_transport_template;
143
144 static int fcoe_vport_destroy(struct fc_vport *);
145 static int fcoe_vport_create(struct fc_vport *, bool disabled);
146 static int fcoe_vport_disable(struct fc_vport *, bool disable);
147 static void fcoe_set_vport_symbolic_name(struct fc_vport *);
148
149 static struct libfc_function_template fcoe_libfc_fcn_templ = {
150 .frame_send = fcoe_xmit,
151 .ddp_setup = fcoe_ddp_setup,
152 .ddp_done = fcoe_ddp_done,
153 .elsct_send = fcoe_elsct_send,
154 .get_lesb = fcoe_get_lesb,
155 };
156
157 struct fc_function_template fcoe_transport_function = {
158 .show_host_node_name = 1,
159 .show_host_port_name = 1,
160 .show_host_supported_classes = 1,
161 .show_host_supported_fc4s = 1,
162 .show_host_active_fc4s = 1,
163 .show_host_maxframe_size = 1,
164
165 .show_host_port_id = 1,
166 .show_host_supported_speeds = 1,
167 .get_host_speed = fc_get_host_speed,
168 .show_host_speed = 1,
169 .show_host_port_type = 1,
170 .get_host_port_state = fc_get_host_port_state,
171 .show_host_port_state = 1,
172 .show_host_symbolic_name = 1,
173
174 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
175 .show_rport_maxframe_size = 1,
176 .show_rport_supported_classes = 1,
177
178 .show_host_fabric_name = 1,
179 .show_starget_node_name = 1,
180 .show_starget_port_name = 1,
181 .show_starget_port_id = 1,
182 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
183 .show_rport_dev_loss_tmo = 1,
184 .get_fc_host_stats = fc_get_host_stats,
185 .issue_fc_host_lip = fcoe_reset,
186
187 .terminate_rport_io = fc_rport_terminate_io,
188
189 .vport_create = fcoe_vport_create,
190 .vport_delete = fcoe_vport_destroy,
191 .vport_disable = fcoe_vport_disable,
192 .set_vport_symbolic_name = fcoe_set_vport_symbolic_name,
193
194 .bsg_request = fc_lport_bsg_request,
195 };
196
197 struct fc_function_template fcoe_vport_transport_function = {
198 .show_host_node_name = 1,
199 .show_host_port_name = 1,
200 .show_host_supported_classes = 1,
201 .show_host_supported_fc4s = 1,
202 .show_host_active_fc4s = 1,
203 .show_host_maxframe_size = 1,
204
205 .show_host_port_id = 1,
206 .show_host_supported_speeds = 1,
207 .get_host_speed = fc_get_host_speed,
208 .show_host_speed = 1,
209 .show_host_port_type = 1,
210 .get_host_port_state = fc_get_host_port_state,
211 .show_host_port_state = 1,
212 .show_host_symbolic_name = 1,
213
214 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
215 .show_rport_maxframe_size = 1,
216 .show_rport_supported_classes = 1,
217
218 .show_host_fabric_name = 1,
219 .show_starget_node_name = 1,
220 .show_starget_port_name = 1,
221 .show_starget_port_id = 1,
222 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
223 .show_rport_dev_loss_tmo = 1,
224 .get_fc_host_stats = fc_get_host_stats,
225 .issue_fc_host_lip = fcoe_reset,
226
227 .terminate_rport_io = fc_rport_terminate_io,
228
229 .bsg_request = fc_lport_bsg_request,
230 };
231
232 static struct scsi_host_template fcoe_shost_template = {
233 .module = THIS_MODULE,
234 .name = "FCoE Driver",
235 .proc_name = FCOE_NAME,
236 .queuecommand = fc_queuecommand,
237 .eh_abort_handler = fc_eh_abort,
238 .eh_device_reset_handler = fc_eh_device_reset,
239 .eh_host_reset_handler = fc_eh_host_reset,
240 .slave_alloc = fc_slave_alloc,
241 .change_queue_depth = fc_change_queue_depth,
242 .change_queue_type = fc_change_queue_type,
243 .this_id = -1,
244 .cmd_per_lun = 3,
245 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
246 .use_clustering = ENABLE_CLUSTERING,
247 .sg_tablesize = SG_ALL,
248 .max_sectors = 0xffff,
249 };
250
251 /**
252 * fcoe_interface_setup() - Setup a FCoE interface
253 * @fcoe: The new FCoE interface
254 * @netdev: The net device that the fcoe interface is on
255 *
256 * Returns : 0 for success
257 * Locking: must be called with the RTNL mutex held
258 */
259 static int fcoe_interface_setup(struct fcoe_interface *fcoe,
260 struct net_device *netdev)
261 {
262 struct fcoe_ctlr *fip = &fcoe->ctlr;
263 struct netdev_hw_addr *ha;
264 struct net_device *real_dev;
265 u8 flogi_maddr[ETH_ALEN];
266 const struct net_device_ops *ops;
267
268 fcoe->netdev = netdev;
269
270 /* Let LLD initialize for FCoE */
271 ops = netdev->netdev_ops;
272 if (ops->ndo_fcoe_enable) {
273 if (ops->ndo_fcoe_enable(netdev))
274 FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE"
275 " specific feature for LLD.\n");
276 }
277
278 /* Bonding devices are not supported */
279 if ((netdev->priv_flags & IFF_MASTER_ALB) ||
280 (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
281 (netdev->priv_flags & IFF_MASTER_8023AD)) {
282 FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
283 return -EOPNOTSUPP;
284 }
285
286 /* Look for a SAN MAC address; if multiple SAN MACs exist, only
287 * use the first one for SPMA */
288 real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
289 vlan_dev_real_dev(netdev) : netdev;
290 rcu_read_lock();
291 for_each_dev_addr(real_dev, ha) {
292 if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
293 (is_valid_ether_addr(ha->addr))) {
294 memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
295 fip->spma = 1;
296 break;
297 }
298 }
299 rcu_read_unlock();
300
301 /* set up the source MAC address */
302 if (!fip->spma)
303 memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len);
304
305 /*
306 * Add FCoE MAC address as second unicast MAC address
307 * or enter promiscuous mode if not capable of listening
308 * for multiple unicast MACs.
309 */
310 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
311 dev_uc_add(netdev, flogi_maddr);
312 if (fip->spma)
313 dev_uc_add(netdev, fip->ctl_src_addr);
314 dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
315
316 /*
317 * set up the receive handler from the Ethernet driver
318 * for the FCoE ethertype on the given device
319 */
320 fcoe->fcoe_packet_type.func = fcoe_rcv;
321 fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
322 fcoe->fcoe_packet_type.dev = netdev;
323 dev_add_pack(&fcoe->fcoe_packet_type);
324
325 fcoe->fip_packet_type.func = fcoe_fip_recv;
326 fcoe->fip_packet_type.type = htons(ETH_P_FIP);
327 fcoe->fip_packet_type.dev = netdev;
328 dev_add_pack(&fcoe->fip_packet_type);
329
330 return 0;
331 }
332
333 /**
334 * fcoe_interface_create() - Create a FCoE interface on a net device
335 * @netdev: The net device to create the FCoE interface on
336 *
337 * Returns: pointer to a struct fcoe_interface or NULL on error
338 */
339 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev)
340 {
341 struct fcoe_interface *fcoe;
342 int err;
343
344 fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
345 if (!fcoe) {
346 FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
347 return NULL;
348 }
349
350 dev_hold(netdev);
351 kref_init(&fcoe->kref);
352
353 /*
354 * Initialize FIP.
355 */
356 fcoe_ctlr_init(&fcoe->ctlr);
357 fcoe->ctlr.send = fcoe_fip_send;
358 fcoe->ctlr.update_mac = fcoe_update_src_mac;
359 fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
360
361 err = fcoe_interface_setup(fcoe, netdev);
362 if (err) {
363 fcoe_ctlr_destroy(&fcoe->ctlr);
364 kfree(fcoe);
365 dev_put(netdev);
366 return NULL;
367 }
368
369 return fcoe;
370 }
371
372 /**
373 * fcoe_interface_cleanup() - Clean up a FCoE interface
374 * @fcoe: The FCoE interface to be cleaned up
375 *
376 * Caller must be holding the RTNL mutex
377 */
378 void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
379 {
380 struct net_device *netdev = fcoe->netdev;
381 struct fcoe_ctlr *fip = &fcoe->ctlr;
382 u8 flogi_maddr[ETH_ALEN];
383 const struct net_device_ops *ops;
384
385 /*
386 * Don't listen for Ethernet packets anymore.
387 * synchronize_net() ensures that the packet handlers are not running
388 * on another CPU. dev_remove_pack() would do that; here we call the
389 * unsynchronized version __dev_remove_pack() to avoid multiple delays.
390 */
391 __dev_remove_pack(&fcoe->fcoe_packet_type);
392 __dev_remove_pack(&fcoe->fip_packet_type);
393 synchronize_net();
394
395 /* Delete secondary MAC addresses */
396 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
397 dev_uc_del(netdev, flogi_maddr);
398 if (fip->spma)
399 dev_uc_del(netdev, fip->ctl_src_addr);
400 dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
401
402 /* Tell the LLD we are done w/ FCoE */
403 ops = netdev->netdev_ops;
404 if (ops->ndo_fcoe_disable) {
405 if (ops->ndo_fcoe_disable(netdev))
406 FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
407 " specific feature for LLD.\n");
408 }
409 }
410
411 /**
412 * fcoe_interface_release() - fcoe_interface kref release function
413 * @kref: Embedded reference count in an fcoe_interface struct
414 */
415 static void fcoe_interface_release(struct kref *kref)
416 {
417 struct fcoe_interface *fcoe;
418 struct net_device *netdev;
419
420 fcoe = container_of(kref, struct fcoe_interface, kref);
421 netdev = fcoe->netdev;
422 /* tear-down the FCoE controller */
423 fcoe_ctlr_destroy(&fcoe->ctlr);
424 kfree(fcoe);
425 dev_put(netdev);
426 }
427
428 /**
429 * fcoe_interface_get() - Get a reference to a FCoE interface
430 * @fcoe: The FCoE interface to be held
431 */
432 static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
433 {
434 kref_get(&fcoe->kref);
435 }
436
437 /**
438 * fcoe_interface_put() - Put a reference to a FCoE interface
439 * @fcoe: The FCoE interface to be released
440 */
441 static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
442 {
443 kref_put(&fcoe->kref, fcoe_interface_release);
444 }
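/*
 * Reference lifecycle, as used elsewhere in this file: fcoe_if_create()
 * takes a reference with fcoe_interface_get() once the lport is fully
 * configured, and fcoe_if_destroy() drops it with fcoe_interface_put().
 * The final put invokes fcoe_interface_release(), which tears down the
 * FIP controller, frees the structure and releases the netdev reference
 * taken in fcoe_interface_create().
 */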
445
446 /**
447 * fcoe_fip_recv() - Handler for received FIP frames
448 * @skb: The receive skb
449 * @netdev: The associated net device
450 * @ptype: The packet_type structure which was used to register this handler
451 * @orig_dev: The original net_device the skb was received on.
452 * (in case dev is a bond)
453 *
454 * Returns: 0 for success
455 */
456 static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
457 struct packet_type *ptype,
458 struct net_device *orig_dev)
459 {
460 struct fcoe_interface *fcoe;
461
462 fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
463 fcoe_ctlr_recv(&fcoe->ctlr, skb);
464 return 0;
465 }
466
467 /**
468 * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
469 * @fip: The FCoE controller
470 * @skb: The FIP packet to be sent
471 */
472 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
473 {
474 skb->dev = fcoe_from_ctlr(fip)->netdev;
475 dev_queue_xmit(skb);
476 }
477
478 /**
479 * fcoe_update_src_mac() - Update the Ethernet MAC filters
480 * @lport: The local port to update the source MAC on
481 * @addr: Unicast MAC address to add
482 *
483 * Remove any previously-set unicast MAC filter.
484 * Add secondary FCoE MAC address filter for our OUI.
485 */
486 static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
487 {
488 struct fcoe_port *port = lport_priv(lport);
489 struct fcoe_interface *fcoe = port->fcoe;
490
491 rtnl_lock();
492 if (!is_zero_ether_addr(port->data_src_addr))
493 dev_uc_del(fcoe->netdev, port->data_src_addr);
494 if (!is_zero_ether_addr(addr))
495 dev_uc_add(fcoe->netdev, addr);
496 memcpy(port->data_src_addr, addr, ETH_ALEN);
497 rtnl_unlock();
498 }
499
500 /**
501 * fcoe_get_src_mac() - return the Ethernet source address for an lport
502 * @lport: libfc lport
503 */
504 static u8 *fcoe_get_src_mac(struct fc_lport *lport)
505 {
506 struct fcoe_port *port = lport_priv(lport);
507
508 return port->data_src_addr;
509 }
510
511 /**
512 * fcoe_lport_config() - Set up a local port
513 * @lport: The local port to be setup
514 *
515 * Returns: 0 for success
516 */
517 static int fcoe_lport_config(struct fc_lport *lport)
518 {
519 lport->link_up = 0;
520 lport->qfull = 0;
521 lport->max_retry_count = 3;
522 lport->max_rport_retry_count = 3;
523 lport->e_d_tov = 2 * 1000; /* FC-FS default */
524 lport->r_a_tov = 2 * 2 * 1000;
525 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
526 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
527 lport->does_npiv = 1;
528
529 fc_lport_init_stats(lport);
530
531 /* lport fc_lport related configuration */
532 fc_lport_config(lport);
533
534 /* offload related configuration */
535 lport->crc_offload = 0;
536 lport->seq_offload = 0;
537 lport->lro_enabled = 0;
538 lport->lro_xid = 0;
539 lport->lso_max = 0;
540
541 return 0;
542 }
543
544 /**
545 * fcoe_queue_timer() - The fcoe queue timer
546 * @lport: The local port
547 *
548 * Calls fcoe_check_wait_queue on timeout
549 */
550 static void fcoe_queue_timer(ulong lport)
551 {
552 fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
553 }
554
555 /**
556 * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
557 * @netdev: the associated net device
558 * @wwn: the output WWN
559 * @type: the type of WWN (WWPN or WWNN)
560 *
561 * Returns: 0 for success
562 */
563 static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
564 {
565 const struct net_device_ops *ops = netdev->netdev_ops;
566
567 if (ops->ndo_fcoe_get_wwn)
568 return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
569 return -EINVAL;
570 }
571
572 /**
573 * fcoe_netdev_config() - Set up the net device for SW FCoE
574 * @lport: The local port that is associated with the net device
575 * @netdev: The associated net device
576 *
577 * Must be called after fcoe_lport_config() as it will use local port mutex
578 *
579 * Returns: 0 for success
580 */
581 static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
582 {
583 u32 mfs;
584 u64 wwnn, wwpn;
585 struct fcoe_interface *fcoe;
586 struct fcoe_port *port;
587 int vid = 0;
588
589 /* Setup lport private data to point to fcoe softc */
590 port = lport_priv(lport);
591 fcoe = port->fcoe;
592
593 /*
594 * Determine max frame size based on underlying device and optional
595 * user-configured limit. If the MFS is too low, fcoe_link_ok()
596 * will return 0, so do this first.
597 */
598 mfs = netdev->mtu;
599 if (netdev->features & NETIF_F_FCOE_MTU) {
600 mfs = FCOE_MTU;
601 FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs);
602 }
603 mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof));
604 if (fc_set_mfs(lport, mfs))
605 return -EINVAL;
606
607 /* offload features support */
608 if (netdev->features & NETIF_F_SG)
609 lport->sg_supp = 1;
610
611 if (netdev->features & NETIF_F_FCOE_CRC) {
612 lport->crc_offload = 1;
613 FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
614 }
615 if (netdev->features & NETIF_F_FSO) {
616 lport->seq_offload = 1;
617 lport->lso_max = netdev->gso_max_size;
618 FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
619 lport->lso_max);
620 }
621 if (netdev->fcoe_ddp_xid) {
622 lport->lro_enabled = 1;
623 lport->lro_xid = netdev->fcoe_ddp_xid;
624 FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
625 lport->lro_xid);
626 }
627 skb_queue_head_init(&port->fcoe_pending_queue);
628 port->fcoe_pending_queue_active = 0;
629 setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
630
631 if (!lport->vport) {
632 /*
633 * Use NAA 1&2 (FC-FS Rev. 2.0, Sec. 15) to generate WWNN/WWPN:
634 * For WWNN, we use NAA 1 w/ bit 27-16 of word 0 as 0.
635 * For WWPN, we use NAA 2 w/ bit 27-16 of word 0 from VLAN ID
636 */
637 if (netdev->priv_flags & IFF_802_1Q_VLAN)
638 vid = vlan_dev_vlan_id(netdev);
639
640 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
641 wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
642 fc_set_wwnn(lport, wwnn);
643 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
644 wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
645 2, vid);
646 fc_set_wwpn(lport, wwpn);
647 }
648
649 return 0;
650 }
651
652 /**
653 * fcoe_shost_config() - Set up the SCSI host associated with a local port
654 * @lport: The local port
655 * @shost: The SCSI host to associate with the local port
656 * @dev: The device associated with the SCSI host
657 *
658 * Must be called after fcoe_lport_config() and fcoe_netdev_config()
659 *
660 * Returns: 0 for success
661 */
662 static int fcoe_shost_config(struct fc_lport *lport, struct Scsi_Host *shost,
663 struct device *dev)
664 {
665 int rc = 0;
666
667 /* lport scsi host config */
668 lport->host->max_lun = FCOE_MAX_LUN;
669 lport->host->max_id = FCOE_MAX_FCP_TARGET;
670 lport->host->max_channel = 0;
671 if (lport->vport)
672 lport->host->transportt = fcoe_vport_transport_template;
673 else
674 lport->host->transportt = fcoe_transport_template;
675
676 /* add the new host to the SCSI-ml */
677 rc = scsi_add_host(lport->host, dev);
678 if (rc) {
679 FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: "
680 "error on scsi_add_host\n");
681 return rc;
682 }
683
684 if (!lport->vport)
685 fc_host_max_npiv_vports(lport->host) = USHORT_MAX;
686
687 snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
688 "%s v%s over %s", FCOE_NAME, FCOE_VERSION,
689 fcoe_netdev(lport)->name);
690
691 return 0;
692 }
693
694 /**
695 * fcoe_oem_match() - The match routine for the offloaded exchange manager
696 * @fp: The I/O frame
697 *
698 * This routine will be associated with an exchange manager (EM). When
699 * the libfc exchange handling code is looking for an EM to use it will
700 * call this routine and pass it the frame that it wishes to send. This
701 * routine will return True if the associated EM is to be used and False
702 * if the exchange code should continue looking for an EM.
703 *
704 * The offload EM that this routine is associated with will handle any
705 * packets that are for SCSI read requests.
706 *
707 * Returns: True for read-type I/O, otherwise returns false.
708 */
709 bool fcoe_oem_match(struct fc_frame *fp)
710 {
711 return fc_fcp_is_read(fr_fsp(fp)) &&
712 (fr_fsp(fp)->data_len > fcoe_ddp_min);
713 }
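/*
 * For example, with the default ddp_min of 0 every SCSI read is matched
 * to the offload EM; loading the module with ddp_min=4096 would restrict
 * the offload EM (and hence DDP) to reads larger than 4 KiB.
 */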
714
715 /**
716 * fcoe_em_config() - Allocate and configure an exchange manager
717 * @lport: The local port that the new EM will be associated with
718 *
719 * Returns: 0 on success
720 */
721 static inline int fcoe_em_config(struct fc_lport *lport)
722 {
723 struct fcoe_port *port = lport_priv(lport);
724 struct fcoe_interface *fcoe = port->fcoe;
725 struct fcoe_interface *oldfcoe = NULL;
726 struct net_device *old_real_dev, *cur_real_dev;
727 u16 min_xid = FCOE_MIN_XID;
728 u16 max_xid = FCOE_MAX_XID;
729
730 /*
731 * Check if we need to allocate an EM instance for
732 * offload exchange IDs to be shared across all VN_PORTs/lports.
733 */
734 if (!lport->lro_enabled || !lport->lro_xid ||
735 (lport->lro_xid >= max_xid)) {
736 lport->lro_xid = 0;
737 goto skip_oem;
738 }
739
740 /*
741 * Reuse existing offload em instance in case
742 * it is already allocated on real eth device
743 */
744 if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
745 cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
746 else
747 cur_real_dev = fcoe->netdev;
748
749 list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
750 if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
751 old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
752 else
753 old_real_dev = oldfcoe->netdev;
754
755 if (cur_real_dev == old_real_dev) {
756 fcoe->oem = oldfcoe->oem;
757 break;
758 }
759 }
760
761 if (fcoe->oem) {
762 if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) {
763 printk(KERN_ERR "fcoe_em_config: failed to add "
764 "offload em:%p on interface:%s\n",
765 fcoe->oem, fcoe->netdev->name);
766 return -ENOMEM;
767 }
768 } else {
769 fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3,
770 FCOE_MIN_XID, lport->lro_xid,
771 fcoe_oem_match);
772 if (!fcoe->oem) {
773 printk(KERN_ERR "fcoe_em_config: failed to allocate "
774 "em for offload exches on interface:%s\n",
775 fcoe->netdev->name);
776 return -ENOMEM;
777 }
778 }
779
780 /*
781 * Exclude offload EM xid range from next EM xid range.
782 */
783 min_xid += lport->lro_xid + 1;
784
785 skip_oem:
786 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) {
787 printk(KERN_ERR "fcoe_em_config: failed to "
788 "allocate em on interface %s\n", fcoe->netdev->name);
789 return -ENOMEM;
790 }
791
792 return 0;
793 }
794
795 /**
796 * fcoe_if_destroy() - Tear down a SW FCoE instance
797 * @lport: The local port to be destroyed
798 */
799 static void fcoe_if_destroy(struct fc_lport *lport)
800 {
801 struct fcoe_port *port = lport_priv(lport);
802 struct fcoe_interface *fcoe = port->fcoe;
803 struct net_device *netdev = fcoe->netdev;
804
805 FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
806
807 /* Logout of the fabric */
808 fc_fabric_logoff(lport);
809
810 /* Cleanup the fc_lport */
811 fc_lport_destroy(lport);
812 fc_fcp_destroy(lport);
813
814 /* Stop the transmit retry timer */
815 del_timer_sync(&port->timer);
816
817 /* Free existing transmit skbs */
818 fcoe_clean_pending_queue(lport);
819
820 rtnl_lock();
821 if (!is_zero_ether_addr(port->data_src_addr))
822 dev_uc_del(netdev, port->data_src_addr);
823 rtnl_unlock();
824
825 /* receives may not be stopped until after this */
826 fcoe_interface_put(fcoe);
827
828 /* Free queued packets for the per-CPU receive threads */
829 fcoe_percpu_clean(lport);
830
831 /* Detach from the scsi-ml */
832 fc_remove_host(lport->host);
833 scsi_remove_host(lport->host);
834
835 /* There are no more rports or I/O, free the EM */
836 fc_exch_mgr_free(lport);
837
838 /* Free memory used by statistical counters */
839 fc_lport_free_stats(lport);
840
841 /* Release the Scsi_Host */
842 scsi_host_put(lport->host);
843 }
844
845 /**
846 * fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device
847 * @lport: The local port to setup DDP for
848 * @xid: The exchange ID for this DDP transfer
849 * @sgl: The scatterlist describing this transfer
850 * @sgc: The number of sg items
851 *
852 * Returns: 0 if the DDP context was not configured
853 */
854 static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid,
855 struct scatterlist *sgl, unsigned int sgc)
856 {
857 struct net_device *netdev = fcoe_netdev(lport);
858
859 if (netdev->netdev_ops->ndo_fcoe_ddp_setup)
860 return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev,
861 xid, sgl,
862 sgc);
863
864 return 0;
865 }
866
867 /**
868 * fcoe_ddp_done() - Call a LLD's ddp_done through the net device
869 * @lport: The local port to complete DDP on
870 * @xid: The exchange ID for this DDP transfer
871 *
872 * Returns: the length of data that has been completed by DDP
873 */
874 static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
875 {
876 struct net_device *netdev = fcoe_netdev(lport);
877
878 if (netdev->netdev_ops->ndo_fcoe_ddp_done)
879 return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid);
880 return 0;
881 }
882
883 /**
884 * fcoe_if_create() - Create a FCoE instance on an interface
885 * @fcoe: The FCoE interface to create a local port on
886 * @parent: The device pointer to be the parent in sysfs for the SCSI host
887 * @npiv: Indicates if the port is a vport or not
888 *
889 * Creates an fc_lport instance and a Scsi_Host instance and configures them.
890 *
891 * Returns: The allocated fc_lport or an error pointer
892 */
893 static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
894 struct device *parent, int npiv)
895 {
896 struct net_device *netdev = fcoe->netdev;
897 struct fc_lport *lport = NULL;
898 struct fcoe_port *port;
899 struct Scsi_Host *shost;
900 int rc;
901 /*
902 * parent is only a vport if npiv is 1,
903 * but we'll only use vport in that case so go ahead and set it
904 */
905 struct fc_vport *vport = dev_to_vport(parent);
906
907 FCOE_NETDEV_DBG(netdev, "Create Interface\n");
908
909 if (!npiv) {
910 lport = libfc_host_alloc(&fcoe_shost_template,
911 sizeof(struct fcoe_port));
912 } else {
913 lport = libfc_vport_create(vport,
914 sizeof(struct fcoe_port));
915 }
916 if (!lport) {
917 FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
918 rc = -ENOMEM;
919 goto out;
920 }
921 shost = lport->host;
922 port = lport_priv(lport);
923 port->lport = lport;
924 port->fcoe = fcoe;
925 INIT_WORK(&port->destroy_work, fcoe_destroy_work);
926
927 /* configure a fc_lport including the exchange manager */
928 rc = fcoe_lport_config(lport);
929 if (rc) {
930 FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
931 "interface\n");
932 goto out_host_put;
933 }
934
935 if (npiv) {
936 FCOE_NETDEV_DBG(netdev, "Setting vport names, 0x%llX 0x%llX\n",
937 vport->node_name, vport->port_name);
938 fc_set_wwnn(lport, vport->node_name);
939 fc_set_wwpn(lport, vport->port_name);
940 }
941
942 /* configure lport network properties */
943 rc = fcoe_netdev_config(lport, netdev);
944 if (rc) {
945 FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
946 "interface\n");
947 goto out_lp_destroy;
948 }
949
950 /* configure lport scsi host properties */
951 rc = fcoe_shost_config(lport, shost, parent);
952 if (rc) {
953 FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
954 "interface\n");
955 goto out_lp_destroy;
956 }
957
958 /* Initialize the library */
959 rc = fcoe_libfc_config(lport, &fcoe_libfc_fcn_templ);
960 if (rc) {
961 FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
962 "interface\n");
963 goto out_lp_destroy;
964 }
965
966 if (!npiv) {
967 /*
968 * fcoe_em_config() and fcoe_hostlist_add() both
969 * need to be atomic with respect to other changes to the
970 * hostlist since fcoe_em_config() looks for an existing EM
971 * instance on the host list updated by fcoe_hostlist_add().
972 *
973 * This is currently handled through the fcoe_config_mutex
974 * being held.
975 */
976
977 /* lport exch manager allocation */
978 rc = fcoe_em_config(lport);
979 if (rc) {
980 FCOE_NETDEV_DBG(netdev, "Could not configure the EM "
981 "for the interface\n");
982 goto out_lp_destroy;
983 }
984 }
985
986 fcoe_interface_get(fcoe);
987 return lport;
988
989 out_lp_destroy:
990 fc_exch_mgr_free(lport);
991 out_host_put:
992 scsi_host_put(lport->host);
993 out:
994 return ERR_PTR(rc);
995 }
996
997 /**
998 * fcoe_if_init() - Initialization routine for fcoe.ko
999 *
1000 * Attaches the SW FCoE transport to the FC transport
1001 *
1002 * Returns: 0 on success
1003 */
1004 static int __init fcoe_if_init(void)
1005 {
1006 /* attach to scsi transport */
1007 fcoe_transport_template = fc_attach_transport(&fcoe_transport_function);
1008 fcoe_vport_transport_template =
1009 fc_attach_transport(&fcoe_vport_transport_function);
1010
1011 if (!fcoe_transport_template) {
1012 printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
1013 return -ENODEV;
1014 }
1015
1016 return 0;
1017 }
1018
1019 /**
1020 * fcoe_if_exit() - Tear down fcoe.ko
1021 *
1022 * Detaches the SW FCoE transport from the FC transport
1023 *
1024 * Returns: 0 on success
1025 */
1026 int __exit fcoe_if_exit(void)
1027 {
1028 fc_release_transport(fcoe_transport_template);
1029 fc_release_transport(fcoe_vport_transport_template);
1030 fcoe_transport_template = NULL;
1031 fcoe_vport_transport_template = NULL;
1032 return 0;
1033 }
1034
1035 /**
1036 * fcoe_percpu_thread_create() - Create a receive thread for an online CPU
1037 * @cpu: The CPU index of the CPU to create a receive thread for
1038 */
1039 static void fcoe_percpu_thread_create(unsigned int cpu)
1040 {
1041 struct fcoe_percpu_s *p;
1042 struct task_struct *thread;
1043
1044 p = &per_cpu(fcoe_percpu, cpu);
1045
1046 thread = kthread_create(fcoe_percpu_receive_thread,
1047 (void *)p, "fcoethread/%d", cpu);
1048
1049 if (likely(!IS_ERR(thread))) {
1050 kthread_bind(thread, cpu);
1051 wake_up_process(thread);
1052
1053 spin_lock_bh(&p->fcoe_rx_list.lock);
1054 p->thread = thread;
1055 spin_unlock_bh(&p->fcoe_rx_list.lock);
1056 }
1057 }
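/*
 * Binding the thread before waking it guarantees it runs on @cpu, so the
 * per-CPU rx list it drains is the same one that fcoe_rcv() selects for
 * that CPU (by OX_ID for exchange-context frames).
 */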
1058
1059 /**
1060 * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU
1061 * @cpu: The CPU index of the CPU whose receive thread is to be destroyed
1062 *
1063 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
1064 * current CPU's Rx thread. If the thread being destroyed is bound to
1065 * the CPU processing this context the skbs will be freed.
1066 */
1067 static void fcoe_percpu_thread_destroy(unsigned int cpu)
1068 {
1069 struct fcoe_percpu_s *p;
1070 struct task_struct *thread;
1071 struct page *crc_eof;
1072 struct sk_buff *skb;
1073 #ifdef CONFIG_SMP
1074 struct fcoe_percpu_s *p0;
1075 unsigned targ_cpu = smp_processor_id();
1076 #endif /* CONFIG_SMP */
1077
1078 FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
1079
1080 /* Prevent any new skbs from being queued for this CPU. */
1081 p = &per_cpu(fcoe_percpu, cpu);
1082 spin_lock_bh(&p->fcoe_rx_list.lock);
1083 thread = p->thread;
1084 p->thread = NULL;
1085 crc_eof = p->crc_eof_page;
1086 p->crc_eof_page = NULL;
1087 p->crc_eof_offset = 0;
1088 spin_unlock_bh(&p->fcoe_rx_list.lock);
1089
1090 #ifdef CONFIG_SMP
1091 /*
1092 * Don't bother moving the skb's if this context is running
1093 * on the same CPU that is having its thread destroyed. This
1094 * can easily happen when the module is removed.
1095 */
1096 if (cpu != targ_cpu) {
1097 p0 = &per_cpu(fcoe_percpu, targ_cpu);
1098 spin_lock_bh(&p0->fcoe_rx_list.lock);
1099 if (p0->thread) {
1100 FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
1101 cpu, targ_cpu);
1102
1103 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1104 __skb_queue_tail(&p0->fcoe_rx_list, skb);
1105 spin_unlock_bh(&p0->fcoe_rx_list.lock);
1106 } else {
1107 /*
1108 * The targeted CPU is not initialized and cannot accept
1109 * new skbs. Unlock the targeted CPU and drop the skbs
1110 * on the CPU that is going offline.
1111 */
1112 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1113 kfree_skb(skb);
1114 spin_unlock_bh(&p0->fcoe_rx_list.lock);
1115 }
1116 } else {
1117 /*
1118 * This scenario occurs when the module is being removed
1119 * and all threads are being destroyed. skbs will continue
1120 * to be shifted from the CPU thread that is being removed
1121 * to the CPU thread associated with the CPU that is processing
1122 * the module removal. Once there is only one CPU Rx thread it
1123 * will reach this case and we will drop all skbs and later
1124 * stop the thread.
1125 */
1126 spin_lock_bh(&p->fcoe_rx_list.lock);
1127 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1128 kfree_skb(skb);
1129 spin_unlock_bh(&p->fcoe_rx_list.lock);
1130 }
1131 #else
1132 /*
1133 * This is a non-SMP scenario where the singular Rx thread is
1134 * being removed. Free all skbs and stop the thread.
1135 */
1136 spin_lock_bh(&p->fcoe_rx_list.lock);
1137 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1138 kfree_skb(skb);
1139 spin_unlock_bh(&p->fcoe_rx_list.lock);
1140 #endif
1141
1142 if (thread)
1143 kthread_stop(thread);
1144
1145 if (crc_eof)
1146 put_page(crc_eof);
1147 }
1148
1149 /**
1150 * fcoe_cpu_callback() - Handler for CPU hotplug events
1151 * @nfb: The callback data block
1152 * @action: The event triggering the callback
1153 * @hcpu: The index of the CPU that the event is for
1154 *
1155 * This creates or destroys per-CPU data for fcoe
1156 *
1157 * Returns NOTIFY_OK always.
1158 */
1159 static int fcoe_cpu_callback(struct notifier_block *nfb,
1160 unsigned long action, void *hcpu)
1161 {
1162 unsigned cpu = (unsigned long)hcpu;
1163
1164 switch (action) {
1165 case CPU_ONLINE:
1166 case CPU_ONLINE_FROZEN:
1167 FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
1168 fcoe_percpu_thread_create(cpu);
1169 break;
1170 case CPU_DEAD:
1171 case CPU_DEAD_FROZEN:
1172 FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
1173 fcoe_percpu_thread_destroy(cpu);
1174 break;
1175 default:
1176 break;
1177 }
1178 return NOTIFY_OK;
1179 }
1180
1181 /**
1182 * fcoe_rcv() - Receive packets from a net device
1183 * @skb: The received packet
1184 * @netdev: The net device that the packet was received on
1185 * @ptype: The packet type context
1186 * @olddev: The original net device the skb was received on
1187 *
1188 * This routine is called by NET_RX_SOFTIRQ. It receives a packet, builds a
1189 * FC frame and passes the frame to libfc.
1190 *
1191 * Returns: 0 for success
1192 */
1193 int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1194 struct packet_type *ptype, struct net_device *olddev)
1195 {
1196 struct fc_lport *lport;
1197 struct fcoe_rcv_info *fr;
1198 struct fcoe_interface *fcoe;
1199 struct fc_frame_header *fh;
1200 struct fcoe_percpu_s *fps;
1201 unsigned int cpu;
1202
1203 fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
1204 lport = fcoe->ctlr.lp;
1205 if (unlikely(!lport)) {
1206 FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
1207 goto err2;
1208 }
1209 if (!lport->link_up)
1210 goto err2;
1211
1212 FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p "
1213 "data:%p tail:%p end:%p sum:%d dev:%s",
1214 skb->len, skb->data_len, skb->head, skb->data,
1215 skb_tail_pointer(skb), skb_end_pointer(skb),
1216 skb->csum, skb->dev ? skb->dev->name : "<NULL>");
1217
1218 /* check for FCOE packet type */
1219 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
1220 FCOE_NETDEV_DBG(netdev, "Wrong FC type frame");
1221 goto err;
1222 }
1223
1224 /*
1225 * Check for minimum frame length, and make sure required FCoE
1226 * and FC headers are pulled into the linear data area.
1227 */
1228 if (unlikely((skb->len < FCOE_MIN_FRAME) ||
1229 !pskb_may_pull(skb, FCOE_HEADER_LEN)))
1230 goto err;
1231
1232 skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
1233 fh = (struct fc_frame_header *) skb_transport_header(skb);
1234
1235 fr = fcoe_dev_from_skb(skb);
1236 fr->fr_dev = lport;
1237 fr->ptype = ptype;
1238
1239 /*
1240 * In case the incoming frame's exchange is originated from
1241 * the initiator, then received frame's exchange id is ANDed
1242 * with fc_cpu_mask bits to get the same cpu on which exchange
1243 * was originated, otherwise just use the current cpu.
1244 */
1245 if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
1246 cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
1247 else
1248 cpu = smp_processor_id();
1249
1250 fps = &per_cpu(fcoe_percpu, cpu);
1251 spin_lock_bh(&fps->fcoe_rx_list.lock);
1252 if (unlikely(!fps->thread)) {
1253 /*
1254 * The targeted CPU is not ready, let's target
1255 * the first CPU now. For non-SMP systems this
1256 * will check the same CPU twice.
1257 */
1258 FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread "
1259 "ready for incoming skb- using first online "
1260 "CPU.\n");
1261
1262 spin_unlock_bh(&fps->fcoe_rx_list.lock);
1263 cpu = cpumask_first(cpu_online_mask);
1264 fps = &per_cpu(fcoe_percpu, cpu);
1265 spin_lock_bh(&fps->fcoe_rx_list.lock);
1266 if (!fps->thread) {
1267 spin_unlock_bh(&fps->fcoe_rx_list.lock);
1268 goto err;
1269 }
1270 }
1271
1272 /*
1273 * We now have a valid CPU that we're targeting for
1274 * this skb. We also have this receive thread locked,
1275 * so we're free to queue skbs into its queue.
1276 */
1277
1278 /* If this is a SCSI-FCP frame, and this is already executing on the
1279 * correct CPU, and the queue for this CPU is empty, then go ahead
1280 * and process the frame directly in the softirq context.
1281 * This lets us process completions without context switching from the
1282 * NET_RX softirq, to our receive processing thread, and then back to
1283 * BLOCK softirq context.
1284 */
1285 if (fh->fh_type == FC_TYPE_FCP &&
1286 cpu == smp_processor_id() &&
1287 skb_queue_empty(&fps->fcoe_rx_list)) {
1288 spin_unlock_bh(&fps->fcoe_rx_list.lock);
1289 fcoe_recv_frame(skb);
1290 } else {
1291 __skb_queue_tail(&fps->fcoe_rx_list, skb);
1292 if (fps->fcoe_rx_list.qlen == 1)
1293 wake_up_process(fps->thread);
1294 spin_unlock_bh(&fps->fcoe_rx_list.lock);
1295 }
1296
1297 return 0;
1298 err:
1299 fc_lport_get_stats(lport)->ErrorFrames++;
1300
1301 err2:
1302 kfree_skb(skb);
1303 return -1;
1304 }
1305
1306 /**
1307 * fcoe_start_io() - Start FCoE I/O
1308 * @skb: The packet to be transmitted
1309 *
1310 * This routine hands off an FCoE packet to the net device for
1311 * transmission.
1312 *
1313 * Returns: 0 for success
1314 */
1315 static inline int fcoe_start_io(struct sk_buff *skb)
1316 {
1317 struct sk_buff *nskb;
1318 int rc;
1319
1320 	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;
1321 	rc = dev_queue_xmit(nskb);
1322 if (rc != 0)
1323 return rc;
1324 kfree_skb(skb);
1325 return 0;
1326 }
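/*
 * The clone is what goes down to the driver; the original skb is kept so
 * that, on failure, fcoe_xmit() and fcoe_check_wait_queue() can put it on
 * the pending queue and retry it later. Only on success is the original
 * freed here.
 */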
1327
1328 /**
1329 * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
1330 * @skb: The packet to be transmitted
1331 * @tlen: The total length of the trailer
1332 *
1333 * This routine allocates a page for frame trailers. The page is re-used if
1334 * there is enough room left on it for the current trailer. If there isn't
1335 * enough buffer left a new page is allocated for the trailer. Reference to
1336 * the page from this function as well as the skbs using the page fragments
1337 * ensure that the page is freed at the appropriate time.
1338 *
1339 * Returns: 0 for success
1340 */
1341 static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
1342 {
1343 struct fcoe_percpu_s *fps;
1344 struct page *page;
1345
1346 fps = &get_cpu_var(fcoe_percpu);
1347 page = fps->crc_eof_page;
1348 if (!page) {
1349 page = alloc_page(GFP_ATOMIC);
1350 if (!page) {
1351 put_cpu_var(fcoe_percpu);
1352 return -ENOMEM;
1353 }
1354 fps->crc_eof_page = page;
1355 fps->crc_eof_offset = 0;
1356 }
1357
1358 get_page(page);
1359 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
1360 fps->crc_eof_offset, tlen);
1361 skb->len += tlen;
1362 skb->data_len += tlen;
1363 skb->truesize += tlen;
1364 fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
1365
1366 if (fps->crc_eof_offset >= PAGE_SIZE) {
1367 fps->crc_eof_page = NULL;
1368 fps->crc_eof_offset = 0;
1369 put_page(page);
1370 }
1371 put_cpu_var(fcoe_percpu);
1372 return 0;
1373 }
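/*
 * Rough sizing: assuming the usual 8-byte struct fcoe_crc_eof and 4 KiB
 * pages, each crc_eof_page serves about 512 trailers before the offset
 * reaches PAGE_SIZE and the reference held here is dropped; skbs still
 * using fragments of the page keep it alive until they are freed.
 */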
1374
1375 /**
1376 * fcoe_fc_crc() - Calculates the CRC for a given frame
1377 * @fp: The frame to be checksummed
1378 *
1379 * This uses the crc32() routine to calculate the CRC for a frame
1380 *
1381 * Return: The 32 bit CRC value
1382 */
1383 u32 fcoe_fc_crc(struct fc_frame *fp)
1384 {
1385 struct sk_buff *skb = fp_skb(fp);
1386 struct skb_frag_struct *frag;
1387 unsigned char *data;
1388 unsigned long off, len, clen;
1389 u32 crc;
1390 unsigned i;
1391
1392 crc = crc32(~0, skb->data, skb_headlen(skb));
1393
1394 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1395 frag = &skb_shinfo(skb)->frags[i];
1396 off = frag->page_offset;
1397 len = frag->size;
1398 while (len > 0) {
1399 clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
1400 data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
1401 KM_SKB_DATA_SOFTIRQ);
1402 crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
1403 kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
1404 off += clen;
1405 len -= clen;
1406 }
1407 }
1408 return crc;
1409 }
1410
1411 /**
1412 * fcoe_xmit() - Transmit a FCoE frame
1413 * @lport: The local port that the frame is to be transmitted for
1414 * @fp: The frame to be transmitted
1415 *
1416 * Return: 0 for success
1417 */
1418 int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1419 {
1420 int wlen;
1421 u32 crc;
1422 struct ethhdr *eh;
1423 struct fcoe_crc_eof *cp;
1424 struct sk_buff *skb;
1425 struct fcoe_dev_stats *stats;
1426 struct fc_frame_header *fh;
1427 unsigned int hlen; /* header length implies the version */
1428 unsigned int tlen; /* trailer length */
1429 unsigned int elen; /* eth header, may include vlan */
1430 struct fcoe_port *port = lport_priv(lport);
1431 struct fcoe_interface *fcoe = port->fcoe;
1432 u8 sof, eof;
1433 struct fcoe_hdr *hp;
1434
1435 WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
1436
1437 fh = fc_frame_header_get(fp);
1438 skb = fp_skb(fp);
1439 wlen = skb->len / FCOE_WORD_TO_BYTE;
1440
1441 if (!lport->link_up) {
1442 kfree_skb(skb);
1443 return 0;
1444 }
1445
1446 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
1447 fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
1448 return 0;
1449
1450 sof = fr_sof(fp);
1451 eof = fr_eof(fp);
1452
1453 elen = sizeof(struct ethhdr);
1454 hlen = sizeof(struct fcoe_hdr);
1455 tlen = sizeof(struct fcoe_crc_eof);
1456 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1457
1458 /* crc offload */
1459 if (likely(lport->crc_offload)) {
1460 skb->ip_summed = CHECKSUM_PARTIAL;
1461 skb->csum_start = skb_headroom(skb);
1462 skb->csum_offset = skb->len;
1463 crc = 0;
1464 } else {
1465 skb->ip_summed = CHECKSUM_NONE;
1466 crc = fcoe_fc_crc(fp);
1467 }
1468
1469 /* copy port crc and eof to the skb buff */
1470 if (skb_is_nonlinear(skb)) {
1471 skb_frag_t *frag;
1472 if (fcoe_get_paged_crc_eof(skb, tlen)) {
1473 kfree_skb(skb);
1474 return -ENOMEM;
1475 }
1476 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1477 cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
1478 + frag->page_offset;
1479 } else {
1480 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
1481 }
1482
1483 memset(cp, 0, sizeof(*cp));
1484 cp->fcoe_eof = eof;
1485 cp->fcoe_crc32 = cpu_to_le32(~crc);
1486
1487 if (skb_is_nonlinear(skb)) {
1488 kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
1489 cp = NULL;
1490 }
1491
1492 /* adjust skb network/transport offsets to match mac/fcoe/port */
1493 skb_push(skb, elen + hlen);
1494 skb_reset_mac_header(skb);
1495 skb_reset_network_header(skb);
1496 skb->mac_len = elen;
1497 skb->protocol = htons(ETH_P_FCOE);
1498 skb->dev = fcoe->netdev;
1499
1500 /* fill up mac and fcoe headers */
1501 eh = eth_hdr(skb);
1502 eh->h_proto = htons(ETH_P_FCOE);
1503 if (fcoe->ctlr.map_dest)
1504 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
1505 else
1506 /* insert GW address */
1507 memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
1508
1509 if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
1510 memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
1511 else
1512 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
1513
1514 hp = (struct fcoe_hdr *)(eh + 1);
1515 memset(hp, 0, sizeof(*hp));
1516 if (FC_FCOE_VER)
1517 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1518 hp->fcoe_sof = sof;
1519
1520 /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
1521 if (lport->seq_offload && fr_max_payload(fp)) {
1522 skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
1523 skb_shinfo(skb)->gso_size = fr_max_payload(fp);
1524 } else {
1525 skb_shinfo(skb)->gso_type = 0;
1526 skb_shinfo(skb)->gso_size = 0;
1527 }
1528 /* update tx stats: regardless if LLD fails */
1529 stats = fc_lport_get_stats(lport);
1530 stats->TxFrames++;
1531 stats->TxWords += wlen;
1532
1533 /* send down to lld */
1534 fr_dev(fp) = lport;
1535 if (port->fcoe_pending_queue.qlen)
1536 fcoe_check_wait_queue(lport, skb);
1537 else if (fcoe_start_io(skb))
1538 fcoe_check_wait_queue(lport, skb);
1539
1540 return 0;
1541 }
1542
1543 /**
1544 * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion
1545 * @skb: The completed skb (argument required by destructor)
1546 */
1547 static void fcoe_percpu_flush_done(struct sk_buff *skb)
1548 {
1549 complete(&fcoe_flush_completion);
1550 }
1551
1552 /**
1553 * fcoe_recv_frame() - process a single received frame
1554 * @skb: frame to process
1555 */
1556 static void fcoe_recv_frame(struct sk_buff *skb)
1557 {
1558 u32 fr_len;
1559 struct fc_lport *lport;
1560 struct fcoe_rcv_info *fr;
1561 struct fcoe_dev_stats *stats;
1562 struct fc_frame_header *fh;
1563 struct fcoe_crc_eof crc_eof;
1564 struct fc_frame *fp;
1565 u8 *mac = NULL;
1566 struct fcoe_port *port;
1567 struct fcoe_hdr *hp;
1568
1569 fr = fcoe_dev_from_skb(skb);
1570 lport = fr->fr_dev;
1571 if (unlikely(!lport)) {
1572 if (skb->destructor != fcoe_percpu_flush_done)
1573 FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
1574 kfree_skb(skb);
1575 return;
1576 }
1577
1578 FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
1579 "head:%p data:%p tail:%p end:%p sum:%d dev:%s",
1580 skb->len, skb->data_len,
1581 skb->head, skb->data, skb_tail_pointer(skb),
1582 skb_end_pointer(skb), skb->csum,
1583 skb->dev ? skb->dev->name : "<NULL>");
1584
1585 /*
1586 * Save source MAC address before discarding header.
1587 */
1588 port = lport_priv(lport);
1589 if (skb_is_nonlinear(skb))
1590 skb_linearize(skb); /* not ideal */
1591 mac = eth_hdr(skb)->h_source;
1592
1593 /*
1594 * Frame length checks and setting up the header pointers
1595 * was done in fcoe_rcv already.
1596 */
1597 hp = (struct fcoe_hdr *) skb_network_header(skb);
1598 fh = (struct fc_frame_header *) skb_transport_header(skb);
1599
1600 stats = fc_lport_get_stats(lport);
1601 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
1602 if (stats->ErrorFrames < 5)
1603 printk(KERN_WARNING "fcoe: FCoE version "
1604 "mismatch: The frame has "
1605 "version %x, but the "
1606 "initiator supports version "
1607 "%x\n", FC_FCOE_DECAPS_VER(hp),
1608 FC_FCOE_VER);
1609 stats->ErrorFrames++;
1610 kfree_skb(skb);
1611 return;
1612 }
1613
1614 skb_pull(skb, sizeof(struct fcoe_hdr));
1615 fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1616
1617 stats->RxFrames++;
1618 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1619
1620 fp = (struct fc_frame *)skb;
1621 fc_frame_init(fp);
1622 fr_dev(fp) = lport;
1623 fr_sof(fp) = hp->fcoe_sof;
1624
1625 /* Copy out the CRC and EOF trailer for access */
1626 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
1627 kfree_skb(skb);
1628 return;
1629 }
1630 fr_eof(fp) = crc_eof.fcoe_eof;
1631 fr_crc(fp) = crc_eof.fcoe_crc32;
1632 if (pskb_trim(skb, fr_len)) {
1633 kfree_skb(skb);
1634 return;
1635 }
1636
1637 /*
1638 * We only check the CRC here if it was not already verified by
1639 * offload and the frame is not solicited data; for solicited data
1640 * the FCP layer checks it during the copy.
1641 */
1642 if (lport->crc_offload &&
1643 skb->ip_summed == CHECKSUM_UNNECESSARY)
1644 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1645 else
1646 fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
1647
1648 fh = fc_frame_header_get(fp);
1649 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
1650 fh->fh_type == FC_TYPE_FCP) {
1651 fc_exch_recv(lport, fp);
1652 return;
1653 }
1654 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
1655 if (le32_to_cpu(fr_crc(fp)) !=
1656 ~crc32(~0, skb->data, fr_len)) {
1657 if (stats->InvalidCRCCount < 5)
1658 printk(KERN_WARNING "fcoe: dropping "
1659 "frame with CRC error\n");
1660 stats->InvalidCRCCount++;
1661 stats->ErrorFrames++;
1662 fc_frame_free(fp);
1663 return;
1664 }
1665 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1666 }
1667 fc_exch_recv(lport, fp);
1668 }
1669
1670 /**
1671 * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
1672 * @arg: The per-CPU context
1673 *
1674 * Return: 0 for success
1675 */
1676 int fcoe_percpu_receive_thread(void *arg)
1677 {
1678 struct fcoe_percpu_s *p = arg;
1679 struct sk_buff *skb;
1680
1681 set_user_nice(current, -20);
1682
1683 while (!kthread_should_stop()) {
1684
1685 spin_lock_bh(&p->fcoe_rx_list.lock);
1686 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
1687 set_current_state(TASK_INTERRUPTIBLE);
1688 spin_unlock_bh(&p->fcoe_rx_list.lock);
1689 schedule();
1690 set_current_state(TASK_RUNNING);
1691 if (kthread_should_stop())
1692 return 0;
1693 spin_lock_bh(&p->fcoe_rx_list.lock);
1694 }
1695 spin_unlock_bh(&p->fcoe_rx_list.lock);
1696 fcoe_recv_frame(skb);
1697 }
1698 return 0;
1699 }
1700
1701 /**
1702 * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
1703 * @lport: The local port whose backlog is to be cleared
1704 * @skb: An skb to queue, or NULL to only flush the existing backlog
1705 *
1706 * This queues @skb (if any) on the pending queue, then dequeues each
1707 * queued skb in turn and calls fcoe_start_io() on it. An skb that fails
1708 * to transmit is put back at the head of the queue for a later retry.
1709 *
1710 * The wait_queue is used when the skb transmit fails. The failed skb
1711 * will go in the wait_queue which will be emptied by the timer function or
1712 * by the next skb transmit.
1713 */
1714 static void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
1715 {
1716 struct fcoe_port *port = lport_priv(lport);
1717 int rc;
1718
1719 spin_lock_bh(&port->fcoe_pending_queue.lock);
1720
1721 if (skb)
1722 __skb_queue_tail(&port->fcoe_pending_queue, skb);
1723
1724 if (port->fcoe_pending_queue_active)
1725 goto out;
1726 port->fcoe_pending_queue_active = 1;
1727
1728 while (port->fcoe_pending_queue.qlen) {
1729 /* keep qlen > 0 until fcoe_start_io succeeds */
1730 port->fcoe_pending_queue.qlen++;
1731 skb = __skb_dequeue(&port->fcoe_pending_queue);
1732
1733 spin_unlock_bh(&port->fcoe_pending_queue.lock);
1734 rc = fcoe_start_io(skb);
1735 spin_lock_bh(&port->fcoe_pending_queue.lock);
1736
1737 if (rc) {
1738 __skb_queue_head(&port->fcoe_pending_queue, skb);
1739 /* undo temporary increment above */
1740 port->fcoe_pending_queue.qlen--;
1741 break;
1742 }
1743 /* undo temporary increment above */
1744 port->fcoe_pending_queue.qlen--;
1745 }
1746
1747 if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
1748 lport->qfull = 0;
1749 if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
1750 mod_timer(&port->timer, jiffies + 2);
1751 port->fcoe_pending_queue_active = 0;
1752 out:
1753 if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1754 lport->qfull = 1;
1755 spin_unlock_bh(&port->fcoe_pending_queue.lock);
1756 return;
1757 }
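/*
 * Note on the temporary qlen increment above: it keeps qlen non-zero
 * while the queue lock is dropped around fcoe_start_io(), so a concurrent
 * fcoe_xmit() still sees a backlog and queues behind it instead of
 * transmitting out of order.
 */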
1758
1759 /**
1760 * fcoe_dev_setup() - Setup the link change notification interface
1761 */
1762 static void fcoe_dev_setup(void)
1763 {
1764 register_netdevice_notifier(&fcoe_notifier);
1765 }
1766
1767 /**
1768 * fcoe_dev_cleanup() - Cleanup the link change notification interface
1769 */
1770 static void fcoe_dev_cleanup(void)
1771 {
1772 unregister_netdevice_notifier(&fcoe_notifier);
1773 }
1774
1775 /**
1776 * fcoe_device_notification() - Handler for net device events
1777 * @notifier: The context of the notification
1778 * @event: The type of event
1779 * @ptr: The net device that the event was on
1780 *
1781 * This function is called by the Ethernet driver in case of link change event.
1782 *
1783 * Returns: 0 for success
1784 */
1785 static int fcoe_device_notification(struct notifier_block *notifier,
1786 ulong event, void *ptr)
1787 {
1788 struct fc_lport *lport = NULL;
1789 struct net_device *netdev = ptr;
1790 struct fcoe_interface *fcoe;
1791 struct fcoe_port *port;
1792 struct fcoe_dev_stats *stats;
1793 u32 link_possible = 1;
1794 u32 mfs;
1795 int rc = NOTIFY_OK;
1796
1797 list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1798 if (fcoe->netdev == netdev) {
1799 lport = fcoe->ctlr.lp;
1800 break;
1801 }
1802 }
1803 if (!lport) {
1804 rc = NOTIFY_DONE;
1805 goto out;
1806 }
1807
1808 switch (event) {
1809 case NETDEV_DOWN:
1810 case NETDEV_GOING_DOWN:
1811 link_possible = 0;
1812 break;
1813 case NETDEV_UP:
1814 case NETDEV_CHANGE:
1815 break;
1816 case NETDEV_CHANGEMTU:
1817 if (netdev->features & NETIF_F_FCOE_MTU)
1818 break;
1819 mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
1820 sizeof(struct fcoe_crc_eof));
1821 if (mfs >= FC_MIN_MAX_FRAME)
1822 fc_set_mfs(lport, mfs);
1823 break;
1824 case NETDEV_REGISTER:
1825 break;
1826 case NETDEV_UNREGISTER:
1827 list_del(&fcoe->list);
1828 port = lport_priv(fcoe->ctlr.lp);
1829 fcoe_interface_cleanup(fcoe);
1830 schedule_work(&port->destroy_work);
1831 goto out;
1832 break;
1833 default:
1834 FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
1835 "from netdev netlink\n", event);
1836 }
1837 if (link_possible && !fcoe_link_ok(lport))
1838 fcoe_ctlr_link_up(&fcoe->ctlr);
1839 else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
1840 stats = fc_lport_get_stats(lport);
1841 stats->LinkFailureCount++;
1842 fcoe_clean_pending_queue(lport);
1843 }
1844 out:
1845 return rc;
1846 }
1847
1848 /**
1849 * fcoe_if_to_netdev() - Parse a name buffer to get a net device
1850 * @buffer: The name of the net device
1851 *
1852 * Returns: NULL or a ptr to net_device
1853 */
1854 static struct net_device *fcoe_if_to_netdev(const char *buffer)
1855 {
1856 char *cp;
1857 char ifname[IFNAMSIZ + 2];
1858
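/*
 * The name typically comes from a sysfs write and may carry a trailing
 * newline, so strip it before the lookup.  dev_get_by_name() returns the
 * device with a reference held; the caller must drop it with dev_put().
 */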
1859 if (buffer) {
1860 strlcpy(ifname, buffer, IFNAMSIZ);
1861 cp = ifname + strlen(ifname);
1862 while (--cp >= ifname && *cp == '\n')
1863 *cp = '\0';
1864 return dev_get_by_name(&init_net, ifname);
1865 }
1866 return NULL;
1867 }
1868
1869 /**
1870 * fcoe_disable() - Disables a FCoE interface
1871 * @buffer: The name of the Ethernet interface to be disabled
1872 * @kp: The associated kernel parameter
1873 *
1874 * Called from sysfs.
1875 *
1876 * Returns: 0 for success
1877 */
1878 static int fcoe_disable(const char *buffer, struct kernel_param *kp)
1879 {
1880 struct fcoe_interface *fcoe;
1881 struct net_device *netdev;
1882 int rc = 0;
1883
1884 mutex_lock(&fcoe_config_mutex);
1885 #ifdef CONFIG_FCOE_MODULE
1886 /*
1887 * Make sure the module has been initialized, and is not about to be
1888 * removed. Module parameter sysfs files are writable before the
1889 * module_init function is called and after module_exit.
1890 */
1891 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1892 rc = -ENODEV;
1893 goto out_nodev;
1894 }
1895 #endif
1896
1897 netdev = fcoe_if_to_netdev(buffer);
1898 if (!netdev) {
1899 rc = -ENODEV;
1900 goto out_nodev;
1901 }
1902
1903 rtnl_lock();
1904 fcoe = fcoe_hostlist_lookup_port(netdev);
1905 rtnl_unlock();
1906
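/*
 * Disabling only logs the lport out of the fabric; the fcoe instance stays
 * allocated and on the hostlist so fcoe_enable() can log back in later
 * without recreating it.
 */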
1907 if (fcoe)
1908 fc_fabric_logoff(fcoe->ctlr.lp);
1909 else
1910 rc = -ENODEV;
1911
1912 dev_put(netdev);
1913 out_nodev:
1914 mutex_unlock(&fcoe_config_mutex);
1915 return rc;
1916 }
1917
1918 /**
1919 * fcoe_enable() - Enables a FCoE interface
1920 * @buffer: The name of the Ethernet interface to be enabled
1921 * @kp: The associated kernel parameter
1922 *
1923 * Called from sysfs.
1924 *
1925 * Returns: 0 for success
1926 */
1927 static int fcoe_enable(const char *buffer, struct kernel_param *kp)
1928 {
1929 struct fcoe_interface *fcoe;
1930 struct net_device *netdev;
1931 int rc = 0;
1932
1933 mutex_lock(&fcoe_config_mutex);
1934 #ifdef CONFIG_FCOE_MODULE
1935 /*
1936 * Make sure the module has been initialized, and is not about to be
1937 * removed. Module parameter sysfs files are writable before the
1938 * module_init function is called and after module_exit.
1939 */
1940 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1941 rc = -ENODEV;
1942 goto out_nodev;
1943 }
1944 #endif
1945
1946 netdev = fcoe_if_to_netdev(buffer);
1947 if (!netdev) {
1948 rc = -ENODEV;
1949 goto out_nodev;
1950 }
1951
1952 rtnl_lock();
1953 fcoe = fcoe_hostlist_lookup_port(netdev);
1954 rtnl_unlock();
1955
1956 if (fcoe)
1957 rc = fc_fabric_login(fcoe->ctlr.lp);
1958 else
1959 rc = -ENODEV;
1960
1961 dev_put(netdev);
1962 out_nodev:
1963 mutex_unlock(&fcoe_config_mutex);
1964 return rc;
1965 }
1966
1967 /**
1968 * fcoe_destroy() - Destroy a FCoE interface
1969 * @buffer: The name of the Ethernet interface to be destroyed
1970 * @kp: The associated kernel parameter
1971 *
1972 * Called from sysfs.
1973 *
1974 * Returns: 0 for success
1975 */
1976 static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
1977 {
1978 struct fcoe_interface *fcoe;
1979 struct net_device *netdev;
1980 int rc = 0;
1981
1982 mutex_lock(&fcoe_config_mutex);
1983 #ifdef CONFIG_FCOE_MODULE
1984 /*
1985 * Make sure the module has been initialized, and is not about to be
1986 * removed. Module parameter sysfs files are writable before the
1987 * module_init function is called and after module_exit.
1988 */
1989 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1990 rc = -ENODEV;
1991 goto out_nodev;
1992 }
1993 #endif
1994
1995 netdev = fcoe_if_to_netdev(buffer);
1996 if (!netdev) {
1997 rc = -ENODEV;
1998 goto out_nodev;
1999 }
2000
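/*
 * Unlink the interface from the hostlist and run fcoe_interface_cleanup()
 * under RTNL, then destroy the lport outside the lock.
 */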
2001 rtnl_lock();
2002 fcoe = fcoe_hostlist_lookup_port(netdev);
2003 if (!fcoe) {
2004 rtnl_unlock();
2005 rc = -ENODEV;
2006 goto out_putdev;
2007 }
2008 list_del(&fcoe->list);
2009 fcoe_interface_cleanup(fcoe);
2010 rtnl_unlock();
2011 fcoe_if_destroy(fcoe->ctlr.lp);
2012 module_put(THIS_MODULE);
2013
2014 out_putdev:
2015 dev_put(netdev);
2016 out_nodev:
2017 mutex_unlock(&fcoe_config_mutex);
2018 return rc;
2019 }
2020
2021 /**
2022 * fcoe_destroy_work() - Destroy a FCoE port in a deferred work context
2023 * @work: Handle to the FCoE port to be destroyed
2024 */
2025 static void fcoe_destroy_work(struct work_struct *work)
2026 {
2027 struct fcoe_port *port;
2028
2029 port = container_of(work, struct fcoe_port, destroy_work);
2030 mutex_lock(&fcoe_config_mutex);
2031 fcoe_if_destroy(port->lport);
2032 mutex_unlock(&fcoe_config_mutex);
2033 }
2034
2035 /**
2036 * fcoe_create() - Create a fcoe interface
2037 * @buffer: The name of the Ethernet interface to create on
2038 * @kp: The associated kernel param
2039 *
2040 * Called from sysfs.
2041 *
2042 * Returns: 0 for success
2043 */
2044 static int fcoe_create(const char *buffer, struct kernel_param *kp)
2045 {
2046 int rc;
2047 struct fcoe_interface *fcoe;
2048 struct fc_lport *lport;
2049 struct net_device *netdev;
2050
2051 mutex_lock(&fcoe_config_mutex);
2052 #ifdef CONFIG_FCOE_MODULE
2053 /*
2054 * Make sure the module has been initialized, and is not about to be
2055 * removed. Module parameter sysfs files are writable before the
2056 * module_init function is called and after module_exit.
2057 */
2058 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
2059 rc = -ENODEV;
2060 goto out_nodev;
2061 }
2062 #endif
2063
2064 if (!try_module_get(THIS_MODULE)) {
2065 rc = -EINVAL;
2066 goto out_nomod;
2067 }
2068
2069 rtnl_lock();
2070 netdev = fcoe_if_to_netdev(buffer);
2071 if (!netdev) {
2072 rc = -ENODEV;
2073 goto out_nodev;
2074 }
2075
2076 /* look for existing lport */
2077 if (fcoe_hostlist_lookup(netdev)) {
2078 rc = -EEXIST;
2079 goto out_putdev;
2080 }
2081
2082 fcoe = fcoe_interface_create(netdev);
2083 if (!fcoe) {
2084 rc = -ENOMEM;
2085 goto out_putdev;
2086 }
2087
2088 lport = fcoe_if_create(fcoe, &netdev->dev, 0);
2089 if (IS_ERR(lport)) {
2090 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
2091 netdev->name);
2092 rc = -EIO;
2093 fcoe_interface_cleanup(fcoe);
2094 goto out_free;
2095 }
2096
2097 /* Make this the "master" N_Port */
2098 fcoe->ctlr.lp = lport;
2099
2100 /* add to lports list */
2101 fcoe_hostlist_add(lport);
2102
2103 /* start FIP Discovery and FLOGI */
2104 lport->boot_time = jiffies;
2105 fc_fabric_login(lport);
2106 if (!fcoe_link_ok(lport))
2107 fcoe_ctlr_link_up(&fcoe->ctlr);
2108
2109 /*
2110 * Release the initial reference taken in fcoe_interface_create(); on
2111 * success the lport holds a reference taken in fcoe_if_create().
2112 */
2113 fcoe_interface_put(fcoe);
2114 dev_put(netdev);
2115 rtnl_unlock();
2116 mutex_unlock(&fcoe_config_mutex);
2117
2118 return 0;
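/*
 * Error unwind: drop the interface reference, then the netdev reference,
 * then release the RTNL lock and the module reference, roughly in the
 * reverse order they were acquired above.
 */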
2119 out_free:
2120 fcoe_interface_put(fcoe);
2121 out_putdev:
2122 dev_put(netdev);
2123 out_nodev:
2124 rtnl_unlock();
2125 module_put(THIS_MODULE);
2126 out_nomod:
2127 mutex_unlock(&fcoe_config_mutex);
2128 return rc;
2129 }
2130
2131 /**
2132 * fcoe_link_ok() - Check if the link is OK for a local port
2133 * @lport: The local port to check link on
2134 *
2135 * Any permanently-disqualifying conditions have been previously checked.
2136 * This also updates the speed setting, which may change with the link state for 1G/10G links.
2137 *
2138 * This function should probably be checking for PAUSE support at some point
2139 * in the future. Currently Per-priority-pause is not determinable using
2140 * ethtool, so we shouldn't be restrictive until that problem is resolved.
2141 *
2142 * Returns: 0 if link is OK for use by FCoE.
2143 *
2144 */
2145 int fcoe_link_ok(struct fc_lport *lport)
2146 {
2147 struct fcoe_port *port = lport_priv(lport);
2148 struct net_device *netdev = port->fcoe->netdev;
2149 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
2150
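/*
 * The link is usable only if the device is administratively up with
 * carrier present; ethtool (ETHTOOL_GSET) is then used to map the NIC's
 * supported and current speeds onto the FC port-speed bits.
 */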
2151 if ((netdev->flags & IFF_UP) && netif_carrier_ok(netdev) &&
2152 (!dev_ethtool_get_settings(netdev, &ecmd))) {
2153 lport->link_supported_speeds &=
2154 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
2155 if (ecmd.supported & (SUPPORTED_1000baseT_Half |
2156 SUPPORTED_1000baseT_Full))
2157 lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
2158 if (ecmd.supported & SUPPORTED_10000baseT_Full)
2159 lport->link_supported_speeds |=
2160 FC_PORTSPEED_10GBIT;
2161 if (ecmd.speed == SPEED_1000)
2162 lport->link_speed = FC_PORTSPEED_1GBIT;
2163 if (ecmd.speed == SPEED_10000)
2164 lport->link_speed = FC_PORTSPEED_10GBIT;
2165
2166 return 0;
2167 }
2168 return -1;
2169 }
2170
2171 /**
2172 * fcoe_percpu_clean() - Clear all pending skbs for a local port
2173 * @lport: The local port whose skbs are to be cleared
2174 *
2175 * Must be called with fcoe_create_mutex held to single-thread completion.
2176 *
2177 * This flushes the pending skbs by adding a new skb to each queue and
2178 * waiting until they are all freed. This assures us that not only are
2179 * there no packets that will be handled by the lport, but also that any
2180 * threads already handling packets have returned.
2181 */
2182 void fcoe_percpu_clean(struct fc_lport *lport)
2183 {
2184 struct fcoe_percpu_s *pp;
2185 struct fcoe_rcv_info *fr;
2186 struct sk_buff_head *list;
2187 struct sk_buff *skb, *next;
2188 struct sk_buff *head;
2189 unsigned int cpu;
2190
2191 for_each_possible_cpu(cpu) {
2192 pp = &per_cpu(fcoe_percpu, cpu);
2193 spin_lock_bh(&pp->fcoe_rx_list.lock);
2194 list = &pp->fcoe_rx_list;
2195 head = list->next;
2196 for (skb = head; skb != (struct sk_buff *)list;
2197 skb = next) {
2198 next = skb->next;
2199 fr = fcoe_dev_from_skb(skb);
2200 if (fr->fr_dev == lport) {
2201 __skb_unlink(skb, list);
2202 kfree_skb(skb);
2203 }
2204 }
2205
2206 if (!pp->thread || !cpu_online(cpu)) {
2207 spin_unlock_bh(&pp->fcoe_rx_list.lock);
2208 continue;
2209 }
2210
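/*
 * Queue a zero-length skb whose destructor, fcoe_percpu_flush_done(),
 * signals fcoe_flush_completion; once the wait below finishes we know the
 * receive thread has drained everything that was queued ahead of it.
 */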
2211 skb = dev_alloc_skb(0);
2212 if (!skb) {
2213 spin_unlock_bh(&pp->fcoe_rx_list.lock);
2214 continue;
2215 }
2216 skb->destructor = fcoe_percpu_flush_done;
2217
2218 __skb_queue_tail(&pp->fcoe_rx_list, skb);
2219 if (pp->fcoe_rx_list.qlen == 1)
2220 wake_up_process(pp->thread);
2221 spin_unlock_bh(&pp->fcoe_rx_list.lock);
2222
2223 wait_for_completion(&fcoe_flush_completion);
2224 }
2225 }
2226
2227 /**
2228 * fcoe_clean_pending_queue() - Dequeue and free all skbs on a port's pending queue
2229 * @lport: The local port whose pending queue is to be emptied
2230 */
2231 void fcoe_clean_pending_queue(struct fc_lport *lport)
2232 {
2233 struct fcoe_port *port = lport_priv(lport);
2234 struct sk_buff *skb;
2235
2236 spin_lock_bh(&port->fcoe_pending_queue.lock);
2237 while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
2238 spin_unlock_bh(&port->fcoe_pending_queue.lock);
2239 kfree_skb(skb);
2240 spin_lock_bh(&port->fcoe_pending_queue.lock);
2241 }
2242 spin_unlock_bh(&port->fcoe_pending_queue.lock);
2243 }
2244
2245 /**
2246 * fcoe_reset() - Reset a local port
2247 * @shost: The SCSI host associated with the local port to be reset
2248 *
2249 * Returns: Always 0 (return value required by FC transport template)
2250 */
2251 int fcoe_reset(struct Scsi_Host *shost)
2252 {
2253 struct fc_lport *lport = shost_priv(shost);
2254 fc_lport_reset(lport);
2255 return 0;
2256 }
2257
2258 /**
2259 * fcoe_hostlist_lookup_port() - Find the FCoE interface associated with a net device
2260 * @netdev: The net device used as a key
2261 *
2262 * Locking: Must be called with the RTNL mutex held.
2263 *
2264 * Returns: NULL or the FCoE interface
2265 */
2266 static struct fcoe_interface *
2267 fcoe_hostlist_lookup_port(const struct net_device *netdev)
2268 {
2269 struct fcoe_interface *fcoe;
2270
2271 list_for_each_entry(fcoe, &fcoe_hostlist, list) {
2272 if (fcoe->netdev == netdev)
2273 return fcoe;
2274 }
2275 return NULL;
2276 }
2277
2278 /**
2279 * fcoe_hostlist_lookup() - Find the local port associated with a
2280 * given net device
2281 * @netdev: The netdevice used as a key
2282 *
2283 * Locking: Must be called with the RTNL mutex held
2284 *
2285 * Returns: NULL or the local port
2286 */
2287 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
2288 {
2289 struct fcoe_interface *fcoe;
2290
2291 fcoe = fcoe_hostlist_lookup_port(netdev);
2292 return (fcoe) ? fcoe->ctlr.lp : NULL;
2293 }
2294
2295 /**
2296 * fcoe_hostlist_add() - Add the FCoE interface identified by a local
2297 * port to the hostlist
2298 * @lport: The local port that identifies the FCoE interface to be added
2299 *
2300 * Locking: must be called with the RTNL mutex held
2301 *
2302 * Returns: 0 for success
2303 */
2304 static int fcoe_hostlist_add(const struct fc_lport *lport)
2305 {
2306 struct fcoe_interface *fcoe;
2307 struct fcoe_port *port;
2308
2309 fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
2310 if (!fcoe) {
2311 port = lport_priv(lport);
2312 fcoe = port->fcoe;
2313 list_add_tail(&fcoe->list, &fcoe_hostlist);
2314 }
2315 return 0;
2316 }
2317
2318 /**
2319 * fcoe_init() - Initialize fcoe.ko
2320 *
2321 * Returns: 0 on success, or a negative value on failure
2322 */
2323 static int __init fcoe_init(void)
2324 {
2325 struct fcoe_percpu_s *p;
2326 unsigned int cpu;
2327 int rc = 0;
2328
2329 mutex_lock(&fcoe_config_mutex);
2330
2331 for_each_possible_cpu(cpu) {
2332 p = &per_cpu(fcoe_percpu, cpu);
2333 skb_queue_head_init(&p->fcoe_rx_list);
2334 }
2335
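/*
 * Start a receive thread on each CPU that is already online; the hotplug
 * notifier registered below is responsible for CPUs brought online later.
 */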
2336 for_each_online_cpu(cpu)
2337 fcoe_percpu_thread_create(cpu);
2338
2339 /* Initialize per CPU interrupt thread */
2340 rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
2341 if (rc)
2342 goto out_free;
2343
2344 /* Setup link change notification */
2345 fcoe_dev_setup();
2346
2347 rc = fcoe_if_init();
2348 if (rc)
2349 goto out_free;
2350
2351 mutex_unlock(&fcoe_config_mutex);
2352 return 0;
2353
2354 out_free:
2355 for_each_online_cpu(cpu) {
2356 fcoe_percpu_thread_destroy(cpu);
2357 }
2358 mutex_unlock(&fcoe_config_mutex);
2359 return rc;
2360 }
2361 module_init(fcoe_init);
2362
2363 /**
2364 * fcoe_exit() - Clean up fcoe.ko
2367 */
2368 static void __exit fcoe_exit(void)
2369 {
2370 struct fcoe_interface *fcoe, *tmp;
2371 struct fcoe_port *port;
2372 unsigned int cpu;
2373
2374 mutex_lock(&fcoe_config_mutex);
2375
2376 fcoe_dev_cleanup();
2377
2378 /* releases the associated fcoe hosts */
2379 rtnl_lock();
2380 list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
2381 list_del(&fcoe->list);
2382 port = lport_priv(fcoe->ctlr.lp);
2383 fcoe_interface_cleanup(fcoe);
2384 schedule_work(&port->destroy_work);
2385 }
2386 rtnl_unlock();
2387
2388 unregister_hotcpu_notifier(&fcoe_cpu_notifier);
2389
2390 for_each_online_cpu(cpu)
2391 fcoe_percpu_thread_destroy(cpu);
2392
2393 mutex_unlock(&fcoe_config_mutex);
2394
2395 /* flush any asynchronous interface destroys,
2396 * this should happen after the netdev notifier is unregistered */
2397 flush_scheduled_work();
2398 /* That will flush out all the N_Ports on the hostlist, but now we
2399 * may have NPIV VN_Ports scheduled for destruction */
2400 flush_scheduled_work();
2401
2402 /* detach from scsi transport
2403 * must happen after all destroys are done, therefore after the flush */
2404 fcoe_if_exit();
2405 }
2406 module_exit(fcoe_exit);
2407
2408 /**
2409 * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler
2410 * @seq: active sequence in the FLOGI or FDISC exchange
2411 * @fp: response frame, or error encoded in a pointer (timeout)
2412 * @arg: pointer to the fcoe_ctlr structure
2413 *
2414 * This handles MAC address management for FCoE, then passes control on to
2415 * the libfc FLOGI response handler.
2416 */
2417 static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
2418 {
2419 struct fcoe_ctlr *fip = arg;
2420 struct fc_exch *exch = fc_seq_exch(seq);
2421 struct fc_lport *lport = exch->lp;
2422 u8 *mac;
2423
2424 if (IS_ERR(fp))
2425 goto done;
2426
2427 mac = fr_cb(fp)->granted_mac;
2428 if (is_zero_ether_addr(mac)) {
2429 /* pre-FIP */
2430 if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
2431 fc_frame_free(fp);
2432 return;
2433 }
2434 }
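/*
 * When FIP is in use, granted_mac carries the MAC address assigned by the
 * fabric in the FLOGI/FDISC response; adopt it as this lport's source MAC
 * before handing the response to libfc.
 */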
2435 fcoe_update_src_mac(lport, mac);
2436 done:
2437 fc_lport_flogi_resp(seq, fp, lport);
2438 }
2439
2440 /**
2441 * fcoe_logo_resp() - FCoE specific LOGO response handler
2442 * @seq: active sequence in the LOGO exchange
2443 * @fp: response frame, or error encoded in a pointer (timeout)
2444 * @arg: pointer to the fc_lport structure
2445 *
2446 * This handles MAC address management for FCoE, then passes control on to
2447 * the libfc LOGO response handler.
2448 */
2449 static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
2450 {
2451 struct fc_lport *lport = arg;
2452 static u8 zero_mac[ETH_ALEN] = { 0 };
2453
2454 if (!IS_ERR(fp))
2455 fcoe_update_src_mac(lport, zero_mac);
2456 fc_lport_logo_resp(seq, fp, lport);
2457 }
2458
2459 /**
2460 * fcoe_elsct_send() - FCoE specific ELS handler
2461 *
2462 * This does special case handling of FIP encapsulated ELS exchanges for FCoE,
2463 * using FCoE specific response handlers and passing the FIP controller as
2464 * the argument (the lport is still available from the exchange).
2465 *
2466 * Most of the work here is just handed off to the libfc routine.
2467 */
2468 static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
2469 struct fc_frame *fp, unsigned int op,
2470 void (*resp)(struct fc_seq *,
2471 struct fc_frame *,
2472 void *),
2473 void *arg, u32 timeout)
2474 {
2475 struct fcoe_port *port = lport_priv(lport);
2476 struct fcoe_interface *fcoe = port->fcoe;
2477 struct fcoe_ctlr *fip = &fcoe->ctlr;
2478 struct fc_frame_header *fh = fc_frame_header_get(fp);
2479
2480 switch (op) {
2481 case ELS_FLOGI:
2482 case ELS_FDISC:
2483 return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp,
2484 fip, timeout);
2485 case ELS_LOGO:
2486 /* only hook onto fabric logouts, not port logouts */
2487 if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
2488 break;
2489 return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp,
2490 lport, timeout);
2491 }
2492 return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
2493 }
2494
2495 /**
2496 * fcoe_vport_create() - create an fc_host/scsi_host for a vport
2497 * @vport: fc_vport object to create a new fc_host for
2498 * @disabled: start the new fc_host in a disabled state by default?
2499 *
2500 * Returns: 0 for success
2501 */
2502 static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
2503 {
2504 struct Scsi_Host *shost = vport_to_shost(vport);
2505 struct fc_lport *n_port = shost_priv(shost);
2506 struct fcoe_port *port = lport_priv(n_port);
2507 struct fcoe_interface *fcoe = port->fcoe;
2508 struct net_device *netdev = fcoe->netdev;
2509 struct fc_lport *vn_port;
2510
2511 mutex_lock(&fcoe_config_mutex);
2512 vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
2513 mutex_unlock(&fcoe_config_mutex);
2514
2515 if (IS_ERR(vn_port)) {
2516 printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n",
2517 netdev->name);
2518 return -EIO;
2519 }
2520
2521 if (disabled) {
2522 fc_vport_set_state(vport, FC_VPORT_DISABLED);
2523 } else {
2524 vn_port->boot_time = jiffies;
2525 fc_fabric_login(vn_port);
2526 fc_vport_setlink(vn_port);
2527 }
2528 return 0;
2529 }
2530
2531 /**
2532 * fcoe_vport_destroy() - destroy the fc_host/scsi_host for a vport
2533 * @vport: fc_vport object that is being destroyed
2534 *
2535 * Returns: 0 for success
2536 */
2537 static int fcoe_vport_destroy(struct fc_vport *vport)
2538 {
2539 struct Scsi_Host *shost = vport_to_shost(vport);
2540 struct fc_lport *n_port = shost_priv(shost);
2541 struct fc_lport *vn_port = vport->dd_data;
2542 struct fcoe_port *port = lport_priv(vn_port);
2543
2544 mutex_lock(&n_port->lp_mutex);
2545 list_del(&vn_port->list);
2546 mutex_unlock(&n_port->lp_mutex);
2547 schedule_work(&port->destroy_work);
2548 return 0;
2549 }
2550
2551 /**
2552 * fcoe_vport_disable() - change vport state
2553 * @vport: vport to bring online/offline
2554 * @disable: should the vport be disabled?
2555 */
2556 static int fcoe_vport_disable(struct fc_vport *vport, bool disable)
2557 {
2558 struct fc_lport *lport = vport->dd_data;
2559
2560 if (disable) {
2561 fc_vport_set_state(vport, FC_VPORT_DISABLED);
2562 fc_fabric_logoff(lport);
2563 } else {
2564 lport->boot_time = jiffies;
2565 fc_fabric_login(lport);
2566 fc_vport_setlink(lport);
2567 }
2568
2569 return 0;
2570 }
2571
2572 /**
2573 * fcoe_set_vport_symbolic_name() - append vport string to symbolic name
2574 * @vport: fc_vport with a new symbolic name string
2575 *
2576 * After generating a new symbolic name string, a new RSPN_ID request is
2577 * sent to the name server. There is no response handler, so if it fails
2578 * for some reason it will not be retried.
2579 */
2580 static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
2581 {
2582 struct fc_lport *lport = vport->dd_data;
2583 struct fc_frame *fp;
2584 size_t len;
2585
2586 snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
2587 "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION,
2588 fcoe_netdev(lport)->name, vport->symbolic_name);
2589
2590 if (lport->state != LPORT_ST_READY)
2591 return;
2592
2593 len = strnlen(fc_host_symbolic_name(lport->host), 255);
2594 fp = fc_frame_alloc(lport,
2595 sizeof(struct fc_ct_hdr) +
2596 sizeof(struct fc_ns_rspn) + len);
2597 if (!fp)
2598 return;
2599 lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID,
2600 NULL, NULL, 3 * lport->r_a_tov);
2601 }
2602
2603 /**
2604 * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
2605 * @lport: the local port
2606 * @fc_lesb: the link error status block
2607 */
2608 static void fcoe_get_lesb(struct fc_lport *lport,
2609 struct fc_els_lesb *fc_lesb)
2610 {
2611 unsigned int cpu;
2612 u32 lfc, vlfc, mdac;
2613 struct fcoe_dev_stats *devst;
2614 struct fcoe_fc_els_lesb *lesb;
2615 struct net_device *netdev = fcoe_netdev(lport);
2616
2617 lfc = 0;
2618 vlfc = 0;
2619 mdac = 0;
2620 lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
2621 memset(lesb, 0, sizeof(*lesb));
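/*
 * Sum the per-CPU FCoE counters into the LESB; FCS errors are taken from
 * the netdev's rx_crc_errors counter.
 */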
2622 for_each_possible_cpu(cpu) {
2623 devst = per_cpu_ptr(lport->dev_stats, cpu);
2624 lfc += devst->LinkFailureCount;
2625 vlfc += devst->VLinkFailureCount;
2626 mdac += devst->MissDiscAdvCount;
2627 }
2628 lesb->lesb_link_fail = htonl(lfc);
2629 lesb->lesb_vlink_fail = htonl(vlfc);
2630 lesb->lesb_miss_fka = htonl(mdac);
2631 lesb->lesb_fcs_error = htonl(dev_get_stats(netdev)->rx_crc_errors);
2632 }