[SCSI] fcoe: create/destroy fcoe Rx threads on CPU hotplug events
[deliverable/linux.git] / drivers / scsi / fcoe / libfcoe.c
1 /*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20 #include <linux/module.h>
21 #include <linux/version.h>
22 #include <linux/kernel.h>
23 #include <linux/spinlock.h>
24 #include <linux/skbuff.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/if_ether.h>
29 #include <linux/if_vlan.h>
30 #include <linux/kthread.h>
31 #include <linux/crc32.h>
32 #include <linux/cpu.h>
33 #include <linux/fs.h>
34 #include <linux/sysfs.h>
35 #include <linux/ctype.h>
36 #include <scsi/scsi_tcq.h>
37 #include <scsi/scsicam.h>
38 #include <scsi/scsi_transport.h>
39 #include <scsi/scsi_transport_fc.h>
40 #include <net/rtnetlink.h>
41
42 #include <scsi/fc/fc_encaps.h>
43
44 #include <scsi/libfc.h>
45 #include <scsi/fc_frame.h>
46 #include <scsi/libfcoe.h>
47 #include <scsi/fc_transport_fcoe.h>
48
49 static int debug_fcoe;
50
51 #define FCOE_MAX_QUEUE_DEPTH 256
52 #define FCOE_LOW_QUEUE_DEPTH 32
53
54 /* destination address mode */
55 #define FCOE_GW_ADDR_MODE 0x00
56 #define FCOE_FCOUI_ADDR_MODE 0x01
57
58 #define FCOE_WORD_TO_BYTE 4
59
60 MODULE_AUTHOR("Open-FCoE.org");
61 MODULE_DESCRIPTION("FCoE");
62 MODULE_LICENSE("GPL");
63
64 /* fcoe host list */
65 LIST_HEAD(fcoe_hostlist);
66 DEFINE_RWLOCK(fcoe_hostlist_lock);
67 DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
68 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
69
70
/* Function Prototypes */
72 static int fcoe_check_wait_queue(struct fc_lport *);
73 static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
74 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
75 static void fcoe_dev_setup(void);
76 static void fcoe_dev_cleanup(void);
77
78 /* notification function from net device */
79 static struct notifier_block fcoe_notifier = {
80 .notifier_call = fcoe_device_notification,
81 };
82
83 /**
84 * fcoe_percpu_thread_create() - Create a receive thread for an online cpu
85 * @cpu: cpu index for the online cpu
86 */
87 static void fcoe_percpu_thread_create(unsigned int cpu)
88 {
89 struct fcoe_percpu_s *p;
90 struct task_struct *thread;
91
92 p = &per_cpu(fcoe_percpu, cpu);
93
94 thread = kthread_create(fcoe_percpu_receive_thread,
95 (void *)p, "fcoethread/%d", cpu);
96
97 if (likely(!IS_ERR(p->thread))) {
98 kthread_bind(thread, cpu);
99 wake_up_process(thread);
100
101 spin_lock_bh(&p->fcoe_rx_list.lock);
102 p->thread = thread;
103 spin_unlock_bh(&p->fcoe_rx_list.lock);
104 }
105 }
106
/**
 * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu
 * @cpu: cpu index the rx thread is to be removed
 *
 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
 * current CPU's Rx thread. If the thread being destroyed is bound to
 * the CPU processing this context the skbs will be freed.
 */
static void fcoe_percpu_thread_destroy(unsigned int cpu)
{
	struct fcoe_percpu_s *p;
	struct task_struct *thread;
	struct page *crc_eof;
	struct sk_buff *skb;
#ifdef CONFIG_SMP
	struct fcoe_percpu_s *p0;
	/*
	 * NOTE(review): smp_processor_id() in what may be a preemptible
	 * context; assumes either migration is excluded by the caller or
	 * a race with migration is benign here -- confirm.
	 */
	unsigned targ_cpu = smp_processor_id();
#endif /* CONFIG_SMP */

	printk(KERN_DEBUG "fcoe: Destroying receive thread for CPU %d\n", cpu);

	/* Prevent any new skbs from being queued for this CPU. */
	p = &per_cpu(fcoe_percpu, cpu);
	spin_lock_bh(&p->fcoe_rx_list.lock);
	thread = p->thread;
	p->thread = NULL;		/* fcoe_rcv() checks this under the lock */
	crc_eof = p->crc_eof_page;
	p->crc_eof_page = NULL;
	p->crc_eof_offset = 0;
	spin_unlock_bh(&p->fcoe_rx_list.lock);

#ifdef CONFIG_SMP
	/*
	 * Don't bother moving the skb's if this context is running
	 * on the same CPU that is having its thread destroyed. This
	 * can easily happen when the module is removed.
	 */
	if (cpu != targ_cpu) {
		p0 = &per_cpu(fcoe_percpu, targ_cpu);
		spin_lock_bh(&p0->fcoe_rx_list.lock);
		if (p0->thread) {
			FC_DBG("Moving frames from CPU %d to CPU %d\n",
			       cpu, targ_cpu);

			/*
			 * p's list is drained without p's lock: safe because
			 * p->thread was cleared above, so no new producers
			 * can touch p->fcoe_rx_list any more.
			 */
			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				__skb_queue_tail(&p0->fcoe_rx_list, skb);
			spin_unlock_bh(&p0->fcoe_rx_list.lock);
		} else {
			/*
			 * The targeted CPU is not initialized and cannot accept
			 * new skbs. Unlock the targeted CPU and drop the skbs
			 * on the CPU that is going offline.
			 */
			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				kfree_skb(skb);
			spin_unlock_bh(&p0->fcoe_rx_list.lock);
		}
	} else {
		/*
		 * This scenario occurs when the module is being removed
		 * and all threads are being destroyed. skbs will continue
		 * to be shifted from the CPU thread that is being removed
		 * to the CPU thread associated with the CPU that is processing
		 * the module removal. Once there is only one CPU Rx thread it
		 * will reach this case and we will drop all skbs and later
		 * stop the thread.
		 */
		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
			kfree_skb(skb);
		spin_unlock_bh(&p->fcoe_rx_list.lock);
	}
#else
	/*
	 * This a non-SMP scenario where the singluar Rx thread is
	 * being removed. Free all skbs and stop the thread.
	 */
	spin_lock_bh(&p->fcoe_rx_list.lock);
	while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
		kfree_skb(skb);
	spin_unlock_bh(&p->fcoe_rx_list.lock);
#endif

	/* stop the thread outside any lock; kthread_stop() may sleep */
	if (thread)
		kthread_stop(thread);

	/* drop the reference this CPU held on its crc_eof page */
	if (crc_eof)
		put_page(crc_eof);
}
196
197 /**
198 * fcoe_cpu_callback() - fcoe cpu hotplug event callback
199 * @nfb: callback data block
200 * @action: event triggering the callback
201 * @hcpu: index for the cpu of this event
202 *
203 * This creates or destroys per cpu data for fcoe
204 *
205 * Returns NOTIFY_OK always.
206 */
207 static int fcoe_cpu_callback(struct notifier_block *nfb,
208 unsigned long action, void *hcpu)
209 {
210 unsigned cpu = (unsigned long)hcpu;
211
212 switch (action) {
213 case CPU_ONLINE:
214 case CPU_ONLINE_FROZEN:
215 FC_DBG("CPU %x online: Create Rx thread\n", cpu);
216 fcoe_percpu_thread_create(cpu);
217 break;
218 case CPU_DEAD:
219 case CPU_DEAD_FROZEN:
220 FC_DBG("CPU %x offline: Remove Rx thread\n", cpu);
221 fcoe_percpu_thread_destroy(cpu);
222 break;
223 default:
224 break;
225 }
226 return NOTIFY_OK;
227 }
228
/* notifier block registered for CPU hotplug events */
static struct notifier_block fcoe_cpu_notifier = {
	.notifier_call = fcoe_cpu_callback,
};
232
/**
 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
 * @skb: the receive skb
 * @dev: associated net device
 * @ptype: context
 * @olddev: last device
 *
 * Validates the incoming Ethernet frame as FCoE, picks a per-CPU Rx
 * thread based on the exchange id (oxid) and queues the skb to it.
 *
 * Returns: 0 for success, -1 when the frame is dropped
 */
int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
	     struct packet_type *ptype, struct net_device *olddev)
{
	struct fc_lport *lp;
	struct fcoe_rcv_info *fr;
	struct fcoe_softc *fc;
	struct fc_frame_header *fh;
	struct fcoe_percpu_s *fps;
	unsigned short oxid;
	unsigned int cpu = 0;

	/* recover our softc from the packet_type the stack handed us */
	fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
	lp = fc->lp;
	if (unlikely(lp == NULL)) {
		FC_DBG("cannot find hba structure");
		goto err2;
	}

	if (unlikely(debug_fcoe)) {
		FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
		       "end:%p sum:%d dev:%s", skb->len, skb->data_len,
		       skb->head, skb->data, skb_tail_pointer(skb),
		       skb_end_pointer(skb), skb->csum,
		       skb->dev ? skb->dev->name : "<NULL>");

	}

	/* check for FCOE packet type */
	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
		FC_DBG("wrong FC type frame");
		goto err;
	}

	/*
	 * Check for minimum frame length, and make sure required FCoE
	 * and FC headers are pulled into the linear data area.
	 */
	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
	    !pskb_may_pull(skb, FCOE_HEADER_LEN)))
		goto err;

	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
	fh = (struct fc_frame_header *) skb_transport_header(skb);

	oxid = ntohs(fh->fh_ox_id);

	/* stash per-frame context in the skb control block */
	fr = fcoe_dev_from_skb(skb);
	fr->fr_dev = lp;
	fr->ptype = ptype;

#ifdef CONFIG_SMP
	/*
	 * The incoming frame exchange id(oxid) is ANDed with num of online
	 * cpu bits to get cpu and then this cpu is used for selecting
	 * a per cpu kernel thread from fcoe_percpu.
	 *
	 * NOTE(review): this distributes correctly only if the number of
	 * online CPUs is a power of two and CPU ids are contiguous from
	 * 0; otherwise some ids map to CPUs without a thread (handled by
	 * the fallback below) -- confirm this assumption.
	 */
	cpu = oxid & (num_online_cpus() - 1);
#endif

	fps = &per_cpu(fcoe_percpu, cpu);
	spin_lock_bh(&fps->fcoe_rx_list.lock);
	if (unlikely(!fps->thread)) {
		/*
		 * The targeted CPU is not ready, let's target
		 * the first CPU now. For non-SMP systems this
		 * will check the same CPU twice.
		 */
		FC_DBG("CPU is online, but no receive thread ready "
		       "for incoming skb- using first online CPU.\n");

		spin_unlock_bh(&fps->fcoe_rx_list.lock);
		cpu = first_cpu(cpu_online_map);
		fps = &per_cpu(fcoe_percpu, cpu);
		spin_lock_bh(&fps->fcoe_rx_list.lock);
		if (!fps->thread) {
			spin_unlock_bh(&fps->fcoe_rx_list.lock);
			goto err;
		}
	}

	/*
	 * We now have a valid CPU that we're targeting for
	 * this skb. We also have this receive thread locked,
	 * so we're free to queue skbs into it's queue.
	 */
	__skb_queue_tail(&fps->fcoe_rx_list, skb);
	/* only wake the thread on the empty->non-empty transition */
	if (fps->fcoe_rx_list.qlen == 1)
		wake_up_process(fps->thread);

	spin_unlock_bh(&fps->fcoe_rx_list.lock);

	return 0;
err:
	fc_lport_get_stats(lp)->ErrorFrames++;

err2:
	kfree_skb(skb);
	return -1;
}
EXPORT_SYMBOL_GPL(fcoe_rcv);
344
/**
 * fcoe_start_io() - pass to netdev to start xmit for fcoe
 * @skb: the skb to be xmitted
 *
 * Hands @skb to the netdev xmit path while keeping an extra reference
 * so the caller can requeue the skb if the transmit is rejected.
 *
 * Returns: 0 for success
 */
static inline int fcoe_start_io(struct sk_buff *skb)
{
	int err;

	/* hold an extra reference; the caller may need to requeue on failure */
	skb_get(skb);
	err = dev_queue_xmit(skb);
	if (err == 0) {
		/* transmit accepted: drop our extra reference */
		kfree_skb(skb);
		return 0;
	}
	return err;
}
362
/**
 * fcoe_get_paged_crc_eof() - in case we need alloc a page for crc_eof
 * @skb: the skb to be xmitted
 * @tlen: total length of the CRC/EOF trailer to append as page data
 *
 * Appends @tlen bytes from the per-CPU crc_eof page to @skb as a page
 * fragment, allocating a fresh page when the CPU has none. Runs with
 * preemption disabled via get_cpu_var()/put_cpu_var().
 *
 * Returns: 0 for success, -ENOMEM if a page could not be allocated
 */
static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
{
	struct fcoe_percpu_s *fps;
	struct page *page;

	fps = &get_cpu_var(fcoe_percpu);
	page = fps->crc_eof_page;
	if (!page) {
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			put_cpu_var(fcoe_percpu);
			return -ENOMEM;
		}
		fps->crc_eof_page = page;
		fps->crc_eof_offset = 0;
	}

	/* take a page reference for the skb fragment added below */
	get_page(page);
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
			   fps->crc_eof_offset, tlen);
	skb->len += tlen;
	skb->data_len += tlen;
	skb->truesize += tlen;
	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);

	/* page exhausted: drop the per-CPU reference and start fresh next time */
	if (fps->crc_eof_offset >= PAGE_SIZE) {
		fps->crc_eof_page = NULL;
		fps->crc_eof_offset = 0;
		put_page(page);
	}
	put_cpu_var(fcoe_percpu);
	return 0;
}
403
/**
 * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
 * @fp: the fc_frame containing data to be checksummed
 *
 * This uses crc32() over the linear skb data and then over every page
 * fragment, mapping each fragment piecewise with kmap_atomic() so that
 * fragments spanning page boundaries are handled.
 *
 * Return : 32 bit crc
 */
u32 fcoe_fc_crc(struct fc_frame *fp)
{
	struct sk_buff *skb = fp_skb(fp);
	struct skb_frag_struct *frag;
	unsigned char *data;
	unsigned long off, len, clen;
	u32 crc;
	unsigned i;

	/* CRC of the linear part first */
	crc = crc32(~0, skb->data, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		off = frag->page_offset;
		len = frag->size;
		while (len > 0) {
			/* never map past the end of the current page */
			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
					   KM_SKB_DATA_SOFTIRQ);
			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
			off += clen;
			len -= clen;
		}
	}
	return crc;
}
EXPORT_SYMBOL_GPL(fcoe_fc_crc);
439
/**
 * fcoe_xmit() - FCoE frame transmit function
 * @lp: the associated local port
 * @fp: the fc_frame to be transmitted
 *
 * Encapsulates the FC frame in Ethernet + FCoE headers, appends the
 * CRC/EOF trailer (offloaded or computed in software) and hands the
 * skb to the netdev. Frames that cannot be sent immediately are put
 * on the softc's pending queue.
 *
 * Return : 0 for success (the frame is always consumed; a failed
 * transmit is queued for later, not reported to the caller)
 */
int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
{
	int wlen, rc = 0;
	u32 crc;
	struct ethhdr *eh;
	struct fcoe_crc_eof *cp;
	struct sk_buff *skb;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	unsigned int hlen;		/* header length implies the version */
	unsigned int tlen;		/* trailer length */
	unsigned int elen;		/* eth header, may include vlan */
	int flogi_in_progress = 0;
	struct fcoe_softc *fc;
	u8 sof, eof;
	struct fcoe_hdr *hp;

	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);

	fc = lport_priv(lp);
	/*
	 * if it is a flogi then we need to learn gw-addr
	 * and my own fcid
	 */
	fh = fc_frame_header_get(fp);
	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
		if (fc_frame_payload_op(fp) == ELS_FLOGI) {
			fc->flogi_oxid = ntohs(fh->fh_ox_id);
			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
			fc->flogi_progress = 1;
			flogi_in_progress = 1;
		} else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
			/*
			 * Here we must've gotten an SID by accepting an FLOGI
			 * from a point-to-point connection. Switch to using
			 * the source mac based on the SID. The destination
			 * MAC in this case would have been set by receiving the
			 * FLOGI.
			 */
			fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
			fc->flogi_progress = 0;
		}
	}

	skb = fp_skb(fp);
	sof = fr_sof(fp);
	eof = fr_eof(fp);

	elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
		sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
	hlen = sizeof(struct fcoe_hdr);
	tlen = sizeof(struct fcoe_crc_eof);
	/* word count for Tx statistics, including the CRC word */
	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;

	/* crc offload */
	if (likely(lp->crc_offload)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_headroom(skb);
		skb->csum_offset = skb->len;
		crc = 0;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		crc = fcoe_fc_crc(fp);
	}

	/* copy fc crc and eof to the skb buff */
	if (skb_is_nonlinear(skb)) {
		skb_frag_t *frag;
		/* trailer goes into a page fragment for non-linear skbs */
		if (fcoe_get_paged_crc_eof(skb, tlen)) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
			+ frag->page_offset;
	} else {
		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
	}

	memset(cp, 0, sizeof(*cp));
	cp->fcoe_eof = eof;
	cp->fcoe_crc32 = cpu_to_le32(~crc);

	if (skb_is_nonlinear(skb)) {
		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
		cp = NULL;
	}

	/* adjust skb network/transport offsets to match mac/fcoe/fc */
	skb_push(skb, elen + hlen);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->mac_len = elen;
	skb->protocol = htons(ETH_P_FCOE);
	skb->dev = fc->real_dev;

	/* fill up mac and fcoe headers */
	eh = eth_hdr(skb);
	eh->h_proto = htons(ETH_P_FCOE);
	if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
	else
		/* insert GW address */
		memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);

	/* use the control MAC while FLOGI is outstanding */
	if (unlikely(flogi_in_progress))
		memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
	else
		memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);

	hp = (struct fcoe_hdr *)(eh + 1);
	memset(hp, 0, sizeof(*hp));
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
	hp->fcoe_sof = sof;

#ifdef NETIF_F_FSO
	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
	if (lp->seq_offload && fr_max_payload(fp)) {
		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
	} else {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
	}
#endif
	/* update tx stats: regardless if LLD fails */
	stats = fc_lport_get_stats(lp);
	stats->TxFrames++;
	stats->TxWords += wlen;

	/* send down to lld */
	fr_dev(fp) = lp;
	/* flush any previously queued frames first to preserve ordering */
	if (fc->fcoe_pending_queue.qlen)
		rc = fcoe_check_wait_queue(lp);

	if (rc == 0)
		rc = fcoe_start_io(skb);

	/* transmit failed or backlog present: queue for the watchdog timer */
	if (rc) {
		spin_lock_bh(&fc->fcoe_pending_queue.lock);
		__skb_queue_tail(&fc->fcoe_pending_queue, skb);
		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
			lp->qfull = 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_xmit);
597
/**
 * fcoe_percpu_receive_thread() - recv thread per cpu
 * @arg: ptr to the fcoe per cpu struct
 *
 * Dequeues skbs queued by fcoe_rcv(), validates the FCoE encapsulation
 * and CRC, then delivers the frame to libfc via fc_exch_recv(). Sleeps
 * whenever its Rx list is empty; woken by fcoe_rcv() and stopped via
 * kthread_stop().
 *
 * Return: 0 for success
 */
int fcoe_percpu_receive_thread(void *arg)
{
	struct fcoe_percpu_s *p = arg;
	u32 fr_len;
	struct fc_lport *lp;
	struct fcoe_rcv_info *fr;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	struct sk_buff *skb;
	struct fcoe_crc_eof crc_eof;
	struct fc_frame *fp;
	u8 *mac = NULL;
	struct fcoe_softc *fc;
	struct fcoe_hdr *hp;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {

		/* sleep until fcoe_rcv() queues something or we are stopped */
		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			schedule();
			set_current_state(TASK_RUNNING);
			if (kthread_should_stop())
				return 0;
			spin_lock_bh(&p->fcoe_rx_list.lock);
		}
		spin_unlock_bh(&p->fcoe_rx_list.lock);
		fr = fcoe_dev_from_skb(skb);
		lp = fr->fr_dev;
		if (unlikely(lp == NULL)) {
			FC_DBG("invalid HBA Structure");
			kfree_skb(skb);
			continue;
		}

		if (unlikely(debug_fcoe)) {
			FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
			       "tail:%p end:%p sum:%d dev:%s",
			       skb->len, skb->data_len,
			       skb->head, skb->data, skb_tail_pointer(skb),
			       skb_end_pointer(skb), skb->csum,
			       skb->dev ? skb->dev->name : "<NULL>");
		}

		/*
		 * Save source MAC address before discarding header.
		 */
		fc = lport_priv(lp);
		if (unlikely(fc->flogi_progress))
			mac = eth_hdr(skb)->h_source;

		if (skb_is_nonlinear(skb))
			skb_linearize(skb);	/* not ideal */

		/*
		 * Frame length checks and setting up the header pointers
		 * was done in fcoe_rcv already.
		 */
		hp = (struct fcoe_hdr *) skb_network_header(skb);
		fh = (struct fc_frame_header *) skb_transport_header(skb);

		stats = fc_lport_get_stats(lp);
		if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
			/* rate-limit the warning to the first few errors */
			if (stats->ErrorFrames < 5)
				printk(KERN_WARNING "FCoE version "
				       "mismatch: The frame has "
				       "version %x, but the "
				       "initiator supports version "
				       "%x\n", FC_FCOE_DECAPS_VER(hp),
				       FC_FCOE_VER);
			stats->ErrorFrames++;
			kfree_skb(skb);
			continue;
		}

		skb_pull(skb, sizeof(struct fcoe_hdr));
		fr_len = skb->len - sizeof(struct fcoe_crc_eof);

		stats->RxFrames++;
		stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;

		fp = (struct fc_frame *)skb;
		fc_frame_init(fp);
		fr_dev(fp) = lp;
		fr_sof(fp) = hp->fcoe_sof;

		/* Copy out the CRC and EOF trailer for access */
		if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
			kfree_skb(skb);
			continue;
		}
		fr_eof(fp) = crc_eof.fcoe_eof;
		fr_crc(fp) = crc_eof.fcoe_crc32;
		/* trim the trailer off so only FC payload remains */
		if (pskb_trim(skb, fr_len)) {
			kfree_skb(skb);
			continue;
		}

		/*
		 * We only check CRC if no offload is available and if it is
		 * it's solicited data, in which case, the FCP layer would
		 * check it during the copy.
		 */
		if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
		else
			fr_flags(fp) |= FCPHF_CRC_UNCHECKED;

		/* solicited FCP data bypasses the software CRC check here */
		fh = fc_frame_header_get(fp);
		if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
		    fh->fh_type == FC_TYPE_FCP) {
			fc_exch_recv(lp, lp->emp, fp);
			continue;
		}
		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
			if (le32_to_cpu(fr_crc(fp)) !=
			    ~crc32(~0, skb->data, fr_len)) {
				if (debug_fcoe || stats->InvalidCRCCount < 5)
					printk(KERN_WARNING "fcoe: dropping "
					       "frame with CRC error\n");
				stats->InvalidCRCCount++;
				stats->ErrorFrames++;
				fc_frame_free(fp);
				continue;
			}
			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
		}
		/* non flogi and non data exchanges are handled here */
		if (unlikely(fc->flogi_progress))
			fcoe_recv_flogi(fc, fp, mac);
		fc_exch_recv(lp, lp->emp, fp);
	}
	return 0;
}
741
/**
 * fcoe_recv_flogi() - flogi receive function
 * @fc: associated fcoe_softc
 * @fp: the received frame
 * @sa: the source address of this flogi
 *
 * This is responsible for parsing the flogi response and setting the
 * corresponding mac address for the initiator, either OUI based or GW
 * based, and for saving the peer MAC on a point-to-point FLOGI request.
 *
 * Returns: none
 */
static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
{
	struct fc_frame_header *fh;
	u8 op;

	fh = fc_frame_header_get(fp);
	if (fh->fh_type != FC_TYPE_ELS)
		return;
	op = fc_frame_payload_op(fp);
	/* only handle the accept matching our outstanding FLOGI oxid */
	if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
	    fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
		/*
		 * FLOGI accepted.
		 * If the src mac addr is FC_OUI-based, then we mark the
		 * address_mode flag to use FC_OUI-based Ethernet DA.
		 * Otherwise we use the FCoE gateway addr
		 */
		if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
		} else {
			memcpy(fc->dest_addr, sa, ETH_ALEN);
			fc->address_mode = FCOE_GW_ADDR_MODE;
		}

		/*
		 * Remove any previously-set unicast MAC filter.
		 * Add secondary FCoE MAC address filter for our OUI.
		 */
		rtnl_lock();
		if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
			dev_unicast_delete(fc->real_dev, fc->data_src_addr,
					   ETH_ALEN);
		fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
		dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
		rtnl_unlock();

		fc->flogi_progress = 0;
	} else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
		/*
		 * Save source MAC for point-to-point responses.
		 */
		memcpy(fc->dest_addr, sa, ETH_ALEN);
		fc->address_mode = FCOE_GW_ADDR_MODE;
	}
}
798
799 /**
800 * fcoe_watchdog() - fcoe timer callback
801 * @vp:
802 *
803 * This checks the pending queue length for fcoe and set lport qfull
804 * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
805 * fcoe_hostlist.
806 *
807 * Returns: 0 for success
808 */
809 void fcoe_watchdog(ulong vp)
810 {
811 struct fcoe_softc *fc;
812
813 read_lock(&fcoe_hostlist_lock);
814 list_for_each_entry(fc, &fcoe_hostlist, list) {
815 if (fc->lp)
816 fcoe_check_wait_queue(fc->lp);
817 }
818 read_unlock(&fcoe_hostlist_lock);
819
820 fcoe_timer.expires = jiffies + (1 * HZ);
821 add_timer(&fcoe_timer);
822 }
823
824
/**
 * fcoe_check_wait_queue() - attempt to drain the pending xmit queue
 * @lp: the fc_lport whose softc queue is drained
 *
 * This empties the wait_queue, dequeue the head of the wait_queue queue
 * and calls fcoe_start_io() for each packet, if all skb have been
 * transmitted, return qlen or -1 if a error occurs, then restore
 * wait_queue and try again later.
 *
 * The wait_queue is used when the skb transmit fails. skb will go
 * in the wait_queue which will be emptied by the time function OR
 * by the next skb transmit.
 *
 * Returns: the remaining queue length, or -1 if already active
 */
static int fcoe_check_wait_queue(struct fc_lport *lp)
{
	struct fcoe_softc *fc = lport_priv(lp);
	struct sk_buff *skb;
	int rc = -1;

	spin_lock_bh(&fc->fcoe_pending_queue.lock);
	/* only one drainer at a time; others bail out immediately */
	if (fc->fcoe_pending_queue_active)
		goto out;
	fc->fcoe_pending_queue_active = 1;

	while (fc->fcoe_pending_queue.qlen) {
		/* keep qlen > 0 until fcoe_start_io succeeds */
		fc->fcoe_pending_queue.qlen++;
		skb = __skb_dequeue(&fc->fcoe_pending_queue);

		/* drop the lock around the (possibly slow) transmit */
		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		rc = fcoe_start_io(skb);
		spin_lock_bh(&fc->fcoe_pending_queue.lock);

		if (rc) {
			/* transmit rejected: put the skb back and stop draining */
			__skb_queue_head(&fc->fcoe_pending_queue, skb);
			/* undo temporary increment above */
			fc->fcoe_pending_queue.qlen--;
			break;
		}
		/* undo temporary increment above */
		fc->fcoe_pending_queue.qlen--;
	}

	if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
		lp->qfull = 0;
	fc->fcoe_pending_queue_active = 0;
	rc = fc->fcoe_pending_queue.qlen;
out:
	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
	return rc;
}
879
880 /**
881 * fcoe_dev_setup() - setup link change notification interface
882 */
883 static void fcoe_dev_setup()
884 {
885 /*
886 * here setup a interface specific wd time to
887 * monitor the link state
888 */
889 register_netdevice_notifier(&fcoe_notifier);
890 }
891
/**
 * fcoe_dev_cleanup() - cleanup link change notification interface
 *
 * Unregisters the fcoe_notifier installed by fcoe_dev_setup().
 */
static void fcoe_dev_cleanup(void)
{
	unregister_netdevice_notifier(&fcoe_notifier);
}
899
/**
 * fcoe_device_notification() - netdev event notification callback
 * @notifier: context of the notification
 * @event: type of event
 * @ptr: the net_device the event applies to
 *
 * This function is called by the network stack on netdev events; it
 * maps the device to an fcoe lport, updates the link state (and MFS
 * on MTU changes) accordingly.
 *
 * Returns: NOTIFY_OK when the device belongs to fcoe, NOTIFY_DONE otherwise
 */
static int fcoe_device_notification(struct notifier_block *notifier,
				    ulong event, void *ptr)
{
	struct fc_lport *lp = NULL;
	struct net_device *real_dev = ptr;
	struct fcoe_softc *fc;
	struct fcoe_dev_stats *stats;
	u32 new_link_up;
	u32 mfs;
	int rc = NOTIFY_OK;

	/* find the lport (if any) backed by this netdev */
	read_lock(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		if (fc->real_dev == real_dev) {
			lp = fc->lp;
			break;
		}
	}
	read_unlock(&fcoe_hostlist_lock);
	if (lp == NULL) {
		/* not an fcoe device: let other notifiers handle it */
		rc = NOTIFY_DONE;
		goto out;
	}

	new_link_up = lp->link_up;
	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_GOING_DOWN:
		new_link_up = 0;
		break;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		new_link_up = !fcoe_link_ok(lp);
		break;
	case NETDEV_CHANGEMTU:
		/* recompute max FC frame size from the new MTU */
		mfs = fc->real_dev->mtu -
			(sizeof(struct fcoe_hdr) +
			 sizeof(struct fcoe_crc_eof));
		if (mfs >= FC_MIN_MAX_FRAME)
			fc_set_mfs(lp, mfs);
		new_link_up = !fcoe_link_ok(lp);
		break;
	case NETDEV_REGISTER:
		break;
	default:
		FC_DBG("unknown event %ld call", event);
	}
	/* propagate a link state transition to libfc */
	if (lp->link_up != new_link_up) {
		if (new_link_up)
			fc_linkup(lp);
		else {
			stats = fc_lport_get_stats(lp);
			stats->LinkFailureCount++;
			fc_linkdown(lp);
			fcoe_clean_pending_queue(lp);
		}
	}
out:
	return rc;
}
970
971 /**
972 * fcoe_if_to_netdev() - parse a name buffer to get netdev
973 * @ifname: fixed array for output parsed ifname
974 * @buffer: incoming buffer to be copied
975 *
976 * Returns: NULL or ptr to netdeive
977 */
978 static struct net_device *fcoe_if_to_netdev(const char *buffer)
979 {
980 char *cp;
981 char ifname[IFNAMSIZ + 2];
982
983 if (buffer) {
984 strlcpy(ifname, buffer, IFNAMSIZ);
985 cp = ifname + strlen(ifname);
986 while (--cp >= ifname && *cp == '\n')
987 *cp = '\0';
988 return dev_get_by_name(&init_net, ifname);
989 }
990 return NULL;
991 }
992
993 /**
994 * fcoe_netdev_to_module_owner() - finds out the nic drive moddule of the netdev
995 * @netdev: the target netdev
996 *
997 * Returns: ptr to the struct module, NULL for failure
998 */
999 static struct module *
1000 fcoe_netdev_to_module_owner(const struct net_device *netdev)
1001 {
1002 struct device *dev;
1003
1004 if (!netdev)
1005 return NULL;
1006
1007 dev = netdev->dev.parent;
1008 if (!dev)
1009 return NULL;
1010
1011 if (!dev->driver)
1012 return NULL;
1013
1014 return dev->driver->owner;
1015 }
1016
/**
 * fcoe_ethdrv_get() - Hold the Ethernet driver
 * @netdev: the target netdev
 *
 * Holds the Ethernet driver module by try_module_get() for
 * the corresponding netdev.
 *
 * Returns: the try_module_get() result (non-zero on success, 0 if the
 * module is going away), or -ENODEV when no owning module was found
 */
static int fcoe_ethdrv_get(const struct net_device *netdev)
{
	struct module *owner;

	owner = fcoe_netdev_to_module_owner(netdev);
	if (owner) {
		printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
		       module_name(owner), netdev->name);
		return try_module_get(owner);
	}
	return -ENODEV;
}
1038
/**
 * fcoe_ethdrv_put() - Release the Ethernet driver
 * @netdev: the target netdev
 *
 * Releases the Ethernet driver module by module_put() for
 * the corresponding netdev.
 *
 * Returns: 0 for success, -ENODEV when no owning module was found
 */
static int fcoe_ethdrv_put(const struct net_device *netdev)
{
	struct module *owner;

	owner = fcoe_netdev_to_module_owner(netdev);
	if (owner) {
		printk(KERN_DEBUG "fcoe:release driver module %s for %s\n",
		       module_name(owner), netdev->name);
		module_put(owner);
		return 0;
	}
	return -ENODEV;
}
1061
/**
 * fcoe_destroy() - handles the destroy from sysfs
 * @buffer: expected to be an eth if name
 * @kp: associated kernel param
 *
 * Tears down the fcoe instance on the named netdev via the transport
 * layer and drops the hold on the NIC driver module.
 *
 * Returns: 0 for success, negative errno otherwise
 */
static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
{
	int rc;
	struct net_device *netdev;

	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev) {
		rc = -ENODEV;
		goto out_nodev;
	}
	/* look for existing lport */
	if (!fcoe_hostlist_lookup(netdev)) {
		rc = -ENODEV;
		goto out_putdev;
	}
	/* pass to transport */
	rc = fcoe_transport_release(netdev);
	if (rc) {
		printk(KERN_ERR "fcoe: fcoe_transport_release(%s) failed\n",
		       netdev->name);
		rc = -EIO;
		goto out_putdev;
	}
	/* drop the module hold taken in fcoe_create() */
	fcoe_ethdrv_put(netdev);
	rc = 0;
out_putdev:
	dev_put(netdev);
out_nodev:
	return rc;
}
1099
/**
 * fcoe_create() - Handles the create call from sysfs
 * @buffer: expected to be an eth if name
 * @kp: associated kernel param
 *
 * Attaches a new fcoe instance to the named netdev via the transport
 * layer, holding a reference on the NIC driver module while attached.
 *
 * Returns: 0 for success, negative errno otherwise
 */
static int fcoe_create(const char *buffer, struct kernel_param *kp)
{
	int rc;
	struct net_device *netdev;

	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev) {
		rc = -ENODEV;
		goto out_nodev;
	}
	/* look for existing lport */
	if (fcoe_hostlist_lookup(netdev)) {
		rc = -EEXIST;
		goto out_putdev;
	}
	/* hold the NIC driver while we are attached to it */
	fcoe_ethdrv_get(netdev);

	/* pass to transport */
	rc = fcoe_transport_attach(netdev);
	if (rc) {
		printk(KERN_ERR "fcoe: fcoe_transport_attach(%s) failed\n",
		       netdev->name);
		fcoe_ethdrv_put(netdev);
		rc = -EIO;
		goto out_putdev;
	}
	rc = 0;
out_putdev:
	dev_put(netdev);
out_nodev:
	return rc;
}
1139
/*
 * Writable module parameters: writing an interface name to the "create"
 * or "destroy" parameter (root only, S_IWUSR) creates or tears down an
 * FCoE instance on that net device via fcoe_create()/fcoe_destroy().
 */
module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(destroy, "string");
MODULE_PARM_DESC(destroy, "Destroy fcoe port");
1146
1147 /**
1148 * fcoe_link_ok() - Check if link is ok for the fc_lport
1149 * @lp: ptr to the fc_lport
1150 *
1151 * Any permanently-disqualifying conditions have been previously checked.
1152 * This also updates the speed setting, which may change with link for 100/1000.
1153 *
1154 * This function should probably be checking for PAUSE support at some point
1155 * in the future. Currently Per-priority-pause is not determinable using
1156 * ethtool, so we shouldn't be restrictive until that problem is resolved.
1157 *
1158 * Returns: 0 if link is OK for use by FCoE.
1159 *
1160 */
1161 int fcoe_link_ok(struct fc_lport *lp)
1162 {
1163 struct fcoe_softc *fc = lport_priv(lp);
1164 struct net_device *dev = fc->real_dev;
1165 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1166 int rc = 0;
1167
1168 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
1169 dev = fc->phys_dev;
1170 if (dev->ethtool_ops->get_settings) {
1171 dev->ethtool_ops->get_settings(dev, &ecmd);
1172 lp->link_supported_speeds &=
1173 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1174 if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1175 SUPPORTED_1000baseT_Full))
1176 lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1177 if (ecmd.supported & SUPPORTED_10000baseT_Full)
1178 lp->link_supported_speeds |=
1179 FC_PORTSPEED_10GBIT;
1180 if (ecmd.speed == SPEED_1000)
1181 lp->link_speed = FC_PORTSPEED_1GBIT;
1182 if (ecmd.speed == SPEED_10000)
1183 lp->link_speed = FC_PORTSPEED_10GBIT;
1184 }
1185 } else
1186 rc = -1;
1187
1188 return rc;
1189 }
1190 EXPORT_SYMBOL_GPL(fcoe_link_ok);
1191
/**
 * fcoe_percpu_clean() - Clear the pending skbs for an lport
 * @lp: the fc_lport whose queued receive frames should be dropped
 *
 * Walks every possible CPU's fcoe_rx_list and frees any queued skb whose
 * fcoe_rcv_info points back at @lp, so the per-CPU Rx threads never
 * process frames for a port that is being torn down.
 */
void fcoe_percpu_clean(struct fc_lport *lp)
{
	struct fcoe_percpu_s *pp;
	struct fcoe_rcv_info *fr;
	struct sk_buff_head *list;
	struct sk_buff *skb, *next;
	struct sk_buff *head;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		pp = &per_cpu(fcoe_percpu, cpu);
		spin_lock_bh(&pp->fcoe_rx_list.lock);
		list = &pp->fcoe_rx_list;
		head = list->next;
		/* open-coded walk: the queue head itself terminates the scan */
		for (skb = head; skb != (struct sk_buff *)list;
		     skb = next) {
			next = skb->next;
			fr = fcoe_dev_from_skb(skb);
			if (fr->fr_dev == lp) {
				/* list lock already held: unlocked unlink */
				__skb_unlink(skb, list);
				kfree_skb(skb);
			}
		}
		spin_unlock_bh(&pp->fcoe_rx_list.lock);
	}
}
EXPORT_SYMBOL_GPL(fcoe_percpu_clean);
1223
/**
 * fcoe_clean_pending_queue() - Dequeue a skb and free it
 * @lp: the corresponding fc_lport
 *
 * Drains the lport's fcoe_pending_queue, freeing one skb per iteration.
 * The lock is dropped around each kfree_skb() — presumably to bound lock
 * hold time and let concurrent producers make progress; confirm against
 * the enqueue path before changing.
 *
 * Returns: none
 */
void fcoe_clean_pending_queue(struct fc_lport *lp)
{
	struct fcoe_softc *fc = lport_priv(lp);
	struct sk_buff *skb;

	spin_lock_bh(&fc->fcoe_pending_queue.lock);
	while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		kfree_skb(skb);
		spin_lock_bh(&fc->fcoe_pending_queue.lock);
	}
	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
}
EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
1244
/**
 * libfc_host_alloc() - Allocate a Scsi_Host with room for the fc_lport
 * @sht: ptr to the scsi host template
 * @priv_size: size of private data after fc_lport
 *
 * The fc_lport lives in the Scsi_Host private area, followed by
 * @priv_size bytes for the caller's own data.
 *
 * Returns: ptr to Scsi_Host
 * TODO: to libfc?
 */
static inline struct Scsi_Host *
libfc_host_alloc(struct scsi_host_template *sht, int priv_size)
{
	return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size);
}
1258
/**
 * fcoe_host_alloc() - Allocate a Scsi_Host with room for the fcoe_softc
 * @sht: ptr to the scsi host template
 * @priv_size: size of private data after fc_lport
 *
 * Like libfc_host_alloc() but also reserves space for the fcoe_softc,
 * so the layout is Scsi_Host -> fc_lport -> fcoe_softc -> caller data.
 *
 * Returns: ptr to Scsi_Host
 */
struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *sht, int priv_size)
{
	return libfc_host_alloc(sht, sizeof(struct fcoe_softc) + priv_size);
}
EXPORT_SYMBOL_GPL(fcoe_host_alloc);
1271
/**
 * fcoe_reset() - Resets the fcoe
 * @shost: shost the reset is from
 *
 * Thin wrapper that resolves the lport from the Scsi_Host private data
 * and delegates to the libfc lport reset.
 *
 * Returns: always 0
 */
int fcoe_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);
	fc_lport_reset(lport);
	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_reset);
1285
1286 /**
1287 * fcoe_wwn_from_mac() - Converts 48-bit IEEE MAC address to 64-bit FC WWN.
1288 * @mac: mac address
1289 * @scheme: check port
1290 * @port: port indicator for converting
1291 *
1292 * Returns: u64 fc world wide name
1293 */
1294 u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
1295 unsigned int scheme, unsigned int port)
1296 {
1297 u64 wwn;
1298 u64 host_mac;
1299
1300 /* The MAC is in NO, so flip only the low 48 bits */
1301 host_mac = ((u64) mac[0] << 40) |
1302 ((u64) mac[1] << 32) |
1303 ((u64) mac[2] << 24) |
1304 ((u64) mac[3] << 16) |
1305 ((u64) mac[4] << 8) |
1306 (u64) mac[5];
1307
1308 WARN_ON(host_mac >= (1ULL << 48));
1309 wwn = host_mac | ((u64) scheme << 60);
1310 switch (scheme) {
1311 case 1:
1312 WARN_ON(port != 0);
1313 break;
1314 case 2:
1315 WARN_ON(port >= 0xfff);
1316 wwn |= (u64) port << 48;
1317 break;
1318 default:
1319 WARN_ON(1);
1320 break;
1321 }
1322
1323 return wwn;
1324 }
1325 EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
1326
1327 /**
1328 * fcoe_hostlist_lookup_softc() - find the corresponding lport by a given device
1329 * @device: this is currently ptr to net_device
1330 *
1331 * Returns: NULL or the located fcoe_softc
1332 */
1333 static struct fcoe_softc *
1334 fcoe_hostlist_lookup_softc(const struct net_device *dev)
1335 {
1336 struct fcoe_softc *fc;
1337
1338 read_lock(&fcoe_hostlist_lock);
1339 list_for_each_entry(fc, &fcoe_hostlist, list) {
1340 if (fc->real_dev == dev) {
1341 read_unlock(&fcoe_hostlist_lock);
1342 return fc;
1343 }
1344 }
1345 read_unlock(&fcoe_hostlist_lock);
1346 return NULL;
1347 }
1348
1349 /**
1350 * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
1351 * @netdev: ptr to net_device
1352 *
1353 * Returns: 0 for success
1354 */
1355 struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
1356 {
1357 struct fcoe_softc *fc;
1358
1359 fc = fcoe_hostlist_lookup_softc(netdev);
1360
1361 return (fc) ? fc->lp : NULL;
1362 }
1363 EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup);
1364
/**
 * fcoe_hostlist_add() - Add a lport to lports list
 * @lp: ptr to the fc_lport to be added
 *
 * Returns: 0 for success
 */
int fcoe_hostlist_add(const struct fc_lport *lp)
{
	struct fcoe_softc *fc;

	/* NOTE(review): the existence check runs outside the write lock,
	 * so two concurrent adds of the same netdev could both see NULL —
	 * confirm callers serialize this path. */
	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
	if (!fc) {
		fc = lport_priv(lp);
		write_lock_bh(&fcoe_hostlist_lock);
		list_add_tail(&fc->list, &fcoe_hostlist);
		write_unlock_bh(&fcoe_hostlist_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_hostlist_add);
1385
/**
 * fcoe_hostlist_remove() - remove a lport from lports list
 * @lp: ptr to the fc_lport to be removed
 *
 * Returns: 0 for success
 */
int fcoe_hostlist_remove(const struct fc_lport *lp)
{
	struct fcoe_softc *fc;

	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
	/* removing a port that was never added is a driver bug */
	BUG_ON(!fc);
	write_lock_bh(&fcoe_hostlist_lock);
	list_del(&fc->list);
	write_unlock_bh(&fcoe_hostlist_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_hostlist_remove);
1405
/**
 * fcoe_libfc_config() - sets up libfc related properties for lport
 * @lp: ptr to the fc_lport
 * @tt: libfc function template
 *
 * Copies the LLD-provided function template into the lport, then runs
 * each libfc subsystem initializer.
 *
 * Returns : 0 for success, -ENOMEM if fc_fcp_init() fails
 */
int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt)
{
	/* Set the function pointers set by the LLDD */
	memcpy(&lp->tt, tt, sizeof(*tt));
	if (fc_fcp_init(lp))
		return -ENOMEM;
	fc_exch_init(lp);
	fc_elsct_init(lp);
	fc_lport_init(lp);
	fc_rport_init(lp);
	fc_disc_init(lp);

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_libfc_config);
1428
/**
 * fcoe_init() - fcoe module loading initialization
 *
 * Initialization routine
 * 1. Will create fc transport software structure
 * 2. initialize the link list of port information structure
 *
 * Returns 0 on success, negative on failure
 */
static int __init fcoe_init(void)
{
	unsigned int cpu;
	int rc = 0;
	struct fcoe_percpu_s *p;

	INIT_LIST_HEAD(&fcoe_hostlist);
	rwlock_init(&fcoe_hostlist_lock);

	/* initialize every per-CPU Rx queue before any thread can run */
	for_each_possible_cpu(cpu) {
		p = &per_cpu(fcoe_percpu, cpu);
		skb_queue_head_init(&p->fcoe_rx_list);
	}

	/* spawn one Rx thread per currently-online CPU */
	for_each_online_cpu(cpu)
		fcoe_percpu_thread_create(cpu);

	/* Initialize per CPU interrupt thread */
	rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
	if (rc)
		goto out_free;

	/* Setup link change notification */
	fcoe_dev_setup();

	setup_timer(&fcoe_timer, fcoe_watchdog, 0);

	mod_timer(&fcoe_timer, jiffies + (10 * HZ));

	/* initialize the fcoe transport */
	/* NOTE(review): return values of fcoe_transport_init() and
	 * fcoe_sw_init() are ignored — confirm they cannot fail here. */
	fcoe_transport_init();

	fcoe_sw_init();

	return 0;

out_free:
	/* NOTE(review): a CPU coming online between the create loop above
	 * and this point has no notifier registered yet — confirm this
	 * teardown cannot race CPU hotplug. */
	for_each_online_cpu(cpu) {
		fcoe_percpu_thread_destroy(cpu);
	}

	return rc;
}
module_init(fcoe_init);
1482
/**
 * fcoe_exit() - fcoe module unloading cleanup
 *
 * Returns: none
 */
static void __exit fcoe_exit(void)
{
	unsigned int cpu;
	struct fcoe_softc *fc, *tmp;

	/* stop reacting to netdev link events first */
	fcoe_dev_cleanup();

	/* Stop the timer */
	del_timer_sync(&fcoe_timer);

	/* releases the associated fcoe transport for each lport */
	/* NOTE(review): fcoe_hostlist is walked without fcoe_hostlist_lock —
	 * presumably safe because no new ports can appear during module
	 * exit; confirm. */
	list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
		fcoe_transport_release(fc->real_dev);

	unregister_hotcpu_notifier(&fcoe_cpu_notifier);

	/* tear down the per-CPU Rx threads created in fcoe_init() */
	for_each_online_cpu(cpu) {
		fcoe_percpu_thread_destroy(cpu);
	}

	/* remove sw transport */
	fcoe_sw_exit();

	/* detach the transport */
	fcoe_transport_exit();
}
module_exit(fcoe_exit);
This page took 0.062193 seconds and 5 git commands to generate.