1 /* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18 #include <linux/crc32c.h>
19 #include <linux/highmem.h>
20 #include <linux/if_vlan.h>
21 #include <net/ip.h>
22 #include <net/ipv6.h>
23 #include <net/dsfield.h>
24 #include "main.h"
25 #include "sysfs.h"
26 #include "debugfs.h"
27 #include "routing.h"
28 #include "send.h"
29 #include "originator.h"
30 #include "soft-interface.h"
31 #include "icmp_socket.h"
32 #include "translation-table.h"
33 #include "hard-interface.h"
34 #include "gateway_client.h"
35 #include "bridge_loop_avoidance.h"
36 #include "distributed-arp-table.h"
37 #include "multicast.h"
38 #include "gateway_common.h"
39 #include "hash.h"
40 #include "bat_algo.h"
41 #include "network-coding.h"
42 #include "fragmentation.h"
43
44
45 /* List manipulations on hardif_list have to be rtnl_lock()'ed,
46 * list traversals just rcu-locked
47 */
48 struct list_head batadv_hardif_list;
49 static int (*batadv_rx_handler[256])(struct sk_buff *,
50 struct batadv_hard_iface *);
51 char batadv_routing_algo[20] = "BATMAN_IV";
52 static struct hlist_head batadv_algo_list;
53
54 unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
55
56 struct workqueue_struct *batadv_event_workqueue;
57
58 static void batadv_recv_handler_init(void);
59
60 static int __init batadv_init(void)
61 {
62 INIT_LIST_HEAD(&batadv_hardif_list);
63 INIT_HLIST_HEAD(&batadv_algo_list);
64
65 batadv_recv_handler_init();
66
67 batadv_iv_init();
68 batadv_nc_init();
69
70 batadv_event_workqueue = create_singlethread_workqueue("bat_events");
71
72 if (!batadv_event_workqueue)
73 return -ENOMEM;
74
75 batadv_socket_init();
76 batadv_debugfs_init();
77
78 register_netdevice_notifier(&batadv_hard_if_notifier);
79 rtnl_link_register(&batadv_link_ops);
80
81 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
82 BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
83
84 return 0;
85 }
86
87 static void __exit batadv_exit(void)
88 {
89 batadv_debugfs_destroy();
90 rtnl_link_unregister(&batadv_link_ops);
91 unregister_netdevice_notifier(&batadv_hard_if_notifier);
92 batadv_hardif_remove_interfaces();
93
94 flush_workqueue(batadv_event_workqueue);
95 destroy_workqueue(batadv_event_workqueue);
96 batadv_event_workqueue = NULL;
97
98 rcu_barrier();
99 }
100
101 int batadv_mesh_init(struct net_device *soft_iface)
102 {
103 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
104 int ret;
105
106 spin_lock_init(&bat_priv->forw_bat_list_lock);
107 spin_lock_init(&bat_priv->forw_bcast_list_lock);
108 spin_lock_init(&bat_priv->tt.changes_list_lock);
109 spin_lock_init(&bat_priv->tt.req_list_lock);
110 spin_lock_init(&bat_priv->tt.roam_list_lock);
111 spin_lock_init(&bat_priv->tt.last_changeset_lock);
112 spin_lock_init(&bat_priv->tt.commit_lock);
113 spin_lock_init(&bat_priv->gw.list_lock);
114 #ifdef CONFIG_BATMAN_ADV_MCAST
115 spin_lock_init(&bat_priv->mcast.want_lists_lock);
116 #endif
117 spin_lock_init(&bat_priv->tvlv.container_list_lock);
118 spin_lock_init(&bat_priv->tvlv.handler_list_lock);
119 spin_lock_init(&bat_priv->softif_vlan_list_lock);
120
121 INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
122 INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
123 INIT_HLIST_HEAD(&bat_priv->gw.list);
124 #ifdef CONFIG_BATMAN_ADV_MCAST
125 INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
126 INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv4_list);
127 INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
128 #endif
129 INIT_LIST_HEAD(&bat_priv->tt.changes_list);
130 INIT_LIST_HEAD(&bat_priv->tt.req_list);
131 INIT_LIST_HEAD(&bat_priv->tt.roam_list);
132 #ifdef CONFIG_BATMAN_ADV_MCAST
133 INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
134 #endif
135 INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
136 INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
137 INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);
138
139 ret = batadv_originator_init(bat_priv);
140 if (ret < 0)
141 goto err;
142
143 ret = batadv_tt_init(bat_priv);
144 if (ret < 0)
145 goto err;
146
147 ret = batadv_bla_init(bat_priv);
148 if (ret < 0)
149 goto err;
150
151 ret = batadv_dat_init(bat_priv);
152 if (ret < 0)
153 goto err;
154
155 ret = batadv_nc_mesh_init(bat_priv);
156 if (ret < 0)
157 goto err;
158
159 batadv_gw_init(bat_priv);
160 batadv_mcast_init(bat_priv);
161
162 atomic_set(&bat_priv->gw.reselect, 0);
163 atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
164
165 return 0;
166
167 err:
168 batadv_mesh_free(soft_iface);
169 return ret;
170 }
171
172 void batadv_mesh_free(struct net_device *soft_iface)
173 {
174 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
175
176 atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
177
178 batadv_purge_outstanding_packets(bat_priv, NULL);
179
180 batadv_gw_node_purge(bat_priv);
181 batadv_nc_mesh_free(bat_priv);
182 batadv_dat_free(bat_priv);
183 batadv_bla_free(bat_priv);
184
185 batadv_mcast_free(bat_priv);
186
187 /* Free the TT and the originator tables only after having terminated
188 * all the other dependent components which may use these structures for
189 * their purposes.
190 */
191 batadv_tt_free(bat_priv);
192
193 /* Since the originator table clean up routine is accessing the TT
194 * tables as well, it has to be invoked after the TT tables have been
195 * freed and marked as empty. This ensures that no cleanup RCU callbacks
196 * accessing the TT data are scheduled for later execution.
197 */
198 batadv_originator_free(bat_priv);
199
200 batadv_gw_free(bat_priv);
201
202 free_percpu(bat_priv->bat_counters);
203 bat_priv->bat_counters = NULL;
204
205 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
206 }
207
208 /**
209 * batadv_is_my_mac - check if the given mac address belongs to any of the real
210 * interfaces in the current mesh
211 * @bat_priv: the bat priv with all the soft interface information
212 * @addr: the address to check
213 */
214 int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
215 {
216 const struct batadv_hard_iface *hard_iface;
217
218 rcu_read_lock();
219 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
220 if (hard_iface->if_status != BATADV_IF_ACTIVE)
221 continue;
222
223 if (hard_iface->soft_iface != bat_priv->soft_iface)
224 continue;
225
226 if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
227 rcu_read_unlock();
228 return 1;
229 }
230 }
231 rcu_read_unlock();
232 return 0;
233 }
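/* Caller sketch (hedged; the real call sites live in routing.c, not in this
 * file): the receive path uses this helper to decide whether a frame is
 * addressed to, or was sent by, this node, e.g.:
 *
 *	if (batadv_is_my_mac(bat_priv, ethhdr->h_dest))
 *		return handle_locally(skb);	// hypothetical helper
 */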
234
235 /**
236 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
237 * function that requires the primary interface
238 * @seq: debugfs table seq_file struct
239 *
240 * Returns primary interface if found or NULL otherwise.
241 */
242 struct batadv_hard_iface *
243 batadv_seq_print_text_primary_if_get(struct seq_file *seq)
244 {
245 struct net_device *net_dev = (struct net_device *)seq->private;
246 struct batadv_priv *bat_priv = netdev_priv(net_dev);
247 struct batadv_hard_iface *primary_if;
248
249 primary_if = batadv_primary_if_get_selected(bat_priv);
250
251 if (!primary_if) {
252 seq_printf(seq,
253 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
254 net_dev->name);
255 goto out;
256 }
257
258 if (primary_if->if_status == BATADV_IF_ACTIVE)
259 goto out;
260
261 seq_printf(seq,
262 "BATMAN mesh %s disabled - primary interface not active\n",
263 net_dev->name);
264 batadv_hardif_free_ref(primary_if);
265 primary_if = NULL;
266
267 out:
268 return primary_if;
269 }
270
271 /**
272 * batadv_max_header_len - calculate maximum encapsulation overhead for a
273 * payload packet
274 *
275 * Return the maximum encapsulation overhead in bytes.
276 */
277 int batadv_max_header_len(void)
278 {
279 int header_len = 0;
280
281 header_len = max_t(int, header_len,
282 sizeof(struct batadv_unicast_packet));
283 header_len = max_t(int, header_len,
284 sizeof(struct batadv_unicast_4addr_packet));
285 header_len = max_t(int, header_len,
286 sizeof(struct batadv_bcast_packet));
287
288 #ifdef CONFIG_BATMAN_ADV_NC
289 header_len = max_t(int, header_len,
290 sizeof(struct batadv_coded_packet));
291 #endif
292
293 return header_len + ETH_HLEN;
294 }
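/* Worked example (derived from the packet sizes asserted in
 * batadv_recv_handler_init() below): with network coding compiled in, the
 * largest header is the coded packet (46 bytes), so this returns
 * 46 + ETH_HLEN = 46 + 14 = 60 bytes; without CONFIG_BATMAN_ADV_NC the
 * 4-address unicast packet wins with 18 + 14 = 32 bytes. Callers outside
 * this file use the value to reserve enough headroom for the encapsulation.
 */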
295
296 /**
297 * batadv_skb_set_priority - sets skb priority according to packet content
298 * @skb: the packet to be sent
299 * @offset: offset to the packet content
300 *
301 * This function sets a value between 256 and 263 (802.1d priority), which
302 * can be interpreted by cfg80211 or other drivers.
303 */
304 void batadv_skb_set_priority(struct sk_buff *skb, int offset)
305 {
306 struct iphdr ip_hdr_tmp, *ip_hdr;
307 struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
308 struct ethhdr ethhdr_tmp, *ethhdr;
309 struct vlan_ethhdr *vhdr, vhdr_tmp;
310 u32 prio;
311
312 /* already set, do nothing */
313 if (skb->priority >= 256 && skb->priority <= 263)
314 return;
315
316 ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
317 if (!ethhdr)
318 return;
319
320 switch (ethhdr->h_proto) {
321 case htons(ETH_P_8021Q):
322 vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
323 sizeof(*vhdr), &vhdr_tmp);
324 if (!vhdr)
325 return;
326 prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
327 prio = prio >> VLAN_PRIO_SHIFT;
328 break;
329 case htons(ETH_P_IP):
330 ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
331 sizeof(*ip_hdr), &ip_hdr_tmp);
332 if (!ip_hdr)
333 return;
334 prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
335 break;
336 case htons(ETH_P_IPV6):
337 ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
338 sizeof(*ip6_hdr), &ip6_hdr_tmp);
339 if (!ip6_hdr)
340 return;
341 prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
342 break;
343 default:
344 return;
345 }
346
347 skb->priority = prio + 256;
348 }
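/* Worked example (illustrative): an IPv4 packet marked with DSCP EF carries
 * the ToS byte 0xb8, so (0xb8 & 0xfc) >> 5 = 5 and skb->priority becomes
 * 256 + 5 = 261. For 802.1Q frames the PCP bits of the TCI are used instead,
 * yielding the same 256..263 range checked at the top of this function.
 */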
349
350 static int batadv_recv_unhandled_packet(struct sk_buff *skb,
351 struct batadv_hard_iface *recv_if)
352 {
353 return NET_RX_DROP;
354 }
355
356 /* incoming packets with the batman ethertype received on any active hard
357 * interface
358 */
359 int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
360 struct packet_type *ptype,
361 struct net_device *orig_dev)
362 {
363 struct batadv_priv *bat_priv;
364 struct batadv_ogm_packet *batadv_ogm_packet;
365 struct batadv_hard_iface *hard_iface;
366 uint8_t idx;
367 int ret;
368
369 hard_iface = container_of(ptype, struct batadv_hard_iface,
370 batman_adv_ptype);
371 skb = skb_share_check(skb, GFP_ATOMIC);
372
373 /* skb was released by skb_share_check() */
374 if (!skb)
375 goto err_out;
376
377 /* packet should hold at least type and version */
378 if (unlikely(!pskb_may_pull(skb, 2)))
379 goto err_free;
380
381 /* expect a valid ethernet header here. */
382 if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
383 goto err_free;
384
385 if (!hard_iface->soft_iface)
386 goto err_free;
387
388 bat_priv = netdev_priv(hard_iface->soft_iface);
389
390 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
391 goto err_free;
392
393 /* discard frames on inactive interfaces */
394 if (hard_iface->if_status != BATADV_IF_ACTIVE)
395 goto err_free;
396
397 batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
398
399 if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
400 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
401 "Drop packet: incompatible batman version (%i)\n",
402 batadv_ogm_packet->version);
403 goto err_free;
404 }
405
406 /* all receive handlers return whether they received or reused
407 * the supplied skb. if not, we have to free the skb.
408 */
409 idx = batadv_ogm_packet->packet_type;
410 ret = (*batadv_rx_handler[idx])(skb, hard_iface);
411
412 if (ret == NET_RX_DROP)
413 kfree_skb(skb);
414
415 /* return NET_RX_SUCCESS in any case as we
416 * most probably dropped the packet for
417 * routing-logical reasons.
418 */
419 return NET_RX_SUCCESS;
420
421 err_free:
422 kfree_skb(skb);
423 err_out:
424 return NET_RX_DROP;
425 }
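/* Registration sketch (hedged; the actual code lives in hard-interface.c):
 * each hard interface hooks this receive function into the stack for the
 * batman-adv ethertype, roughly:
 *
 *	hard_iface->batman_adv_ptype.type = htons(ETH_P_BATMAN);
 *	hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
 *	hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
 *	dev_add_pack(&hard_iface->batman_adv_ptype);
 */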
426
427 static void batadv_recv_handler_init(void)
428 {
429 int i;
430
431 for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
432 batadv_rx_handler[i] = batadv_recv_unhandled_packet;
433
434 for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
435 batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;
436
437 /* compile time checks for sizes */
438 BUILD_BUG_ON(sizeof(struct batadv_bla_claim_dst) != 6);
439 BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
440 BUILD_BUG_ON(sizeof(struct batadv_icmp_header) != 20);
441 BUILD_BUG_ON(sizeof(struct batadv_icmp_packet) != 20);
442 BUILD_BUG_ON(sizeof(struct batadv_icmp_packet_rr) != 116);
443 BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) != 10);
444 BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) != 18);
445 BUILD_BUG_ON(sizeof(struct batadv_frag_packet) != 20);
446 BUILD_BUG_ON(sizeof(struct batadv_bcast_packet) != 14);
447 BUILD_BUG_ON(sizeof(struct batadv_coded_packet) != 46);
448 BUILD_BUG_ON(sizeof(struct batadv_unicast_tvlv_packet) != 20);
449 BUILD_BUG_ON(sizeof(struct batadv_tvlv_hdr) != 4);
450 BUILD_BUG_ON(sizeof(struct batadv_tvlv_gateway_data) != 8);
451 BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
452 BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
453 BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);
454
455 /* broadcast packet */
456 batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
457
458 /* unicast packets ... */
459 /* unicast with 4 addresses packet */
460 batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
461 /* unicast packet */
462 batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
463 /* unicast tvlv packet */
464 batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
465 /* batman icmp packet */
466 batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
467 /* Fragmented packets */
468 batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
469 }
470
471 int
472 batadv_recv_handler_register(uint8_t packet_type,
473 int (*recv_handler)(struct sk_buff *,
474 struct batadv_hard_iface *))
475 {
476 int (*curr)(struct sk_buff *,
477 struct batadv_hard_iface *);
478 curr = batadv_rx_handler[packet_type];
479
480 if ((curr != batadv_recv_unhandled_packet) &&
481 (curr != batadv_recv_unhandled_unicast_packet))
482 return -EBUSY;
483
484 batadv_rx_handler[packet_type] = recv_handler;
485 return 0;
486 }
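/* Usage sketch (hedged): a routing algorithm claims its OGM packet type via
 * this API, e.g. B.A.T.M.A.N. IV registers something along the lines of
 *
 *	batadv_recv_handler_register(BATADV_IV_OGM, batadv_iv_ogm_receive);
 *
 * from bat_iv_ogm.c; -EBUSY is returned when the slot is already taken.
 */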
487
488 void batadv_recv_handler_unregister(uint8_t packet_type)
489 {
490 batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
491 }
492
493 static struct batadv_algo_ops *batadv_algo_get(char *name)
494 {
495 struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
496
497 hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
498 if (strcmp(bat_algo_ops_tmp->name, name) != 0)
499 continue;
500
501 bat_algo_ops = bat_algo_ops_tmp;
502 break;
503 }
504
505 return bat_algo_ops;
506 }
507
508 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
509 {
510 struct batadv_algo_ops *bat_algo_ops_tmp;
511 int ret;
512
513 bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
514 if (bat_algo_ops_tmp) {
515 pr_info("Trying to register already registered routing algorithm: %s\n",
516 bat_algo_ops->name);
517 ret = -EEXIST;
518 goto out;
519 }
520
521 /* all algorithms must implement all ops (for now) */
522 if (!bat_algo_ops->bat_iface_enable ||
523 !bat_algo_ops->bat_iface_disable ||
524 !bat_algo_ops->bat_iface_update_mac ||
525 !bat_algo_ops->bat_primary_iface_set ||
526 !bat_algo_ops->bat_ogm_schedule ||
527 !bat_algo_ops->bat_ogm_emit ||
528 !bat_algo_ops->bat_neigh_cmp ||
529 !bat_algo_ops->bat_neigh_is_equiv_or_better) {
530 pr_info("Routing algo '%s' does not implement required ops\n",
531 bat_algo_ops->name);
532 ret = -EINVAL;
533 goto out;
534 }
535
536 INIT_HLIST_NODE(&bat_algo_ops->list);
537 hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
538 ret = 0;
539
540 out:
541 return ret;
542 }
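/* Registration sketch (hedged, symbol name assumed): bat_iv_ogm.c fills a
 * struct batadv_algo_ops with name "BATMAN_IV" and the callbacks checked
 * above, then calls
 *
 *	ret = batadv_algo_register(&batadv_batman_iv);
 *
 * from batadv_iv_init(), which batadv_init() above invokes at module load.
 */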
543
544 int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
545 {
546 struct batadv_algo_ops *bat_algo_ops;
547 int ret = -EINVAL;
548
549 bat_algo_ops = batadv_algo_get(name);
550 if (!bat_algo_ops)
551 goto out;
552
553 bat_priv->bat_algo_ops = bat_algo_ops;
554 ret = 0;
555
556 out:
557 return ret;
558 }
559
560 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
561 {
562 struct batadv_algo_ops *bat_algo_ops;
563
564 seq_puts(seq, "Available routing algorithms:\n");
565
566 hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
567 seq_printf(seq, "%s\n", bat_algo_ops->name);
568 }
569
570 return 0;
571 }
572
573 /**
574 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
575 * the header
576 * @skb: skb pointing to fragmented socket buffers
577 * @payload_ptr: Pointer to position inside the head buffer of the skb
578 * marking the start of the data to be CRC'ed
579 *
580 * payload_ptr must always point to an address in the skb head buffer and not to
581 * a fragment.
582 */
583 __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
584 {
585 u32 crc = 0;
586 unsigned int from;
587 unsigned int to = skb->len;
588 struct skb_seq_state st;
589 const u8 *data;
590 unsigned int len;
591 unsigned int consumed = 0;
592
593 from = (unsigned int)(payload_ptr - skb->data);
594
595 skb_prepare_seq_read(skb, from, to, &st);
596 while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
597 crc = crc32c(crc, data, len);
598 consumed += len;
599 }
600
601 return htonl(crc);
602 }
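/* Usage note (hedged): callers pass a pointer just behind their own header,
 * e.g. the network coding code derives a packet id roughly as
 *
 *	packet_id = batadv_skb_crc32(skb, skb_network_header(skb));
 *
 * Since the result is already converted with htonl(), it can be stored in a
 * packet field without further byte-order conversion.
 */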
603
604 /**
605 * batadv_tvlv_handler_free_ref - decrement the tvlv handler refcounter and
606 * possibly free it
607 * @tvlv_handler: the tvlv handler to free
608 */
609 static void
610 batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
611 {
612 if (atomic_dec_and_test(&tvlv_handler->refcount))
613 kfree_rcu(tvlv_handler, rcu);
614 }
615
616 /**
617 * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
618 * based on the provided type and version (both need to match)
619 * @bat_priv: the bat priv with all the soft interface information
620 * @type: tvlv handler type to look for
621 * @version: tvlv handler version to look for
622 *
623 * Returns tvlv handler if found or NULL otherwise.
624 */
625 static struct batadv_tvlv_handler
626 *batadv_tvlv_handler_get(struct batadv_priv *bat_priv,
627 uint8_t type, uint8_t version)
628 {
629 struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
630
631 rcu_read_lock();
632 hlist_for_each_entry_rcu(tvlv_handler_tmp,
633 &bat_priv->tvlv.handler_list, list) {
634 if (tvlv_handler_tmp->type != type)
635 continue;
636
637 if (tvlv_handler_tmp->version != version)
638 continue;
639
640 if (!atomic_inc_not_zero(&tvlv_handler_tmp->refcount))
641 continue;
642
643 tvlv_handler = tvlv_handler_tmp;
644 break;
645 }
646 rcu_read_unlock();
647
648 return tvlv_handler;
649 }
650
651 /**
652 * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
653 * possibly free it
654 * @tvlv: the tvlv container to free
655 */
656 static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
657 {
658 if (atomic_dec_and_test(&tvlv->refcount))
659 kfree(tvlv);
660 }
661
662 /**
663 * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
664 * list based on the provided type and version (both need to match)
665 * @bat_priv: the bat priv with all the soft interface information
666 * @type: tvlv container type to look for
667 * @version: tvlv container version to look for
668 *
669 * Has to be called with the appropriate locks being acquired
670 * (tvlv.container_list_lock).
671 *
672 * Returns tvlv container if found or NULL otherwise.
673 */
674 static struct batadv_tvlv_container
675 *batadv_tvlv_container_get(struct batadv_priv *bat_priv,
676 uint8_t type, uint8_t version)
677 {
678 struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
679
680 hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
681 if (tvlv_tmp->tvlv_hdr.type != type)
682 continue;
683
684 if (tvlv_tmp->tvlv_hdr.version != version)
685 continue;
686
687 if (!atomic_inc_not_zero(&tvlv_tmp->refcount))
688 continue;
689
690 tvlv = tvlv_tmp;
691 break;
692 }
693
694 return tvlv;
695 }
696
697 /**
698 * batadv_tvlv_container_list_size - calculate the size of the tvlv container
699 * list entries
700 * @bat_priv: the bat priv with all the soft interface information
701 *
702 * Has to be called with the appropriate locks being acquired
703 * (tvlv.container_list_lock).
704 *
705 * Returns size of all currently registered tvlv containers in bytes.
706 */
707 static uint16_t batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
708 {
709 struct batadv_tvlv_container *tvlv;
710 uint16_t tvlv_len = 0;
711
712 hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
713 tvlv_len += sizeof(struct batadv_tvlv_hdr);
714 tvlv_len += ntohs(tvlv->tvlv_hdr.len);
715 }
716
717 return tvlv_len;
718 }
719
720 /**
721 * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
722 * list
723 * @tvlv: the to be removed tvlv container
724 *
725 * Has to be called with the appropriate locks being acquired
726 * (tvlv.container_list_lock).
727 */
728 static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
729 {
730 if (!tvlv)
731 return;
732
733 hlist_del(&tvlv->list);
734
735 /* first call to decrement the counter, second call to free */
736 batadv_tvlv_container_free_ref(tvlv);
737 batadv_tvlv_container_free_ref(tvlv);
738 }
739
740 /**
741 * batadv_tvlv_container_unregister - unregister tvlv container based on the
742 * provided type and version (both need to match)
743 * @bat_priv: the bat priv with all the soft interface information
744 * @type: tvlv container type to unregister
745 * @version: tvlv container version to unregister
746 */
747 void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
748 uint8_t type, uint8_t version)
749 {
750 struct batadv_tvlv_container *tvlv;
751
752 spin_lock_bh(&bat_priv->tvlv.container_list_lock);
753 tvlv = batadv_tvlv_container_get(bat_priv, type, version);
754 batadv_tvlv_container_remove(tvlv);
755 spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
756 }
757
758 /**
759 * batadv_tvlv_container_register - register tvlv type, version and content
760 * to be propagated with each (primary interface) OGM
761 * @bat_priv: the bat priv with all the soft interface information
762 * @type: tvlv container type
763 * @version: tvlv container version
764 * @tvlv_value: tvlv container content
765 * @tvlv_value_len: tvlv container content length
766 *
767 * If a container of the same type and version was already registered the new
768 * content is going to replace the old one.
769 */
770 void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
771 uint8_t type, uint8_t version,
772 void *tvlv_value, uint16_t tvlv_value_len)
773 {
774 struct batadv_tvlv_container *tvlv_old, *tvlv_new;
775
776 if (!tvlv_value)
777 tvlv_value_len = 0;
778
779 tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
780 if (!tvlv_new)
781 return;
782
783 tvlv_new->tvlv_hdr.version = version;
784 tvlv_new->tvlv_hdr.type = type;
785 tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);
786
787 memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
788 INIT_HLIST_NODE(&tvlv_new->list);
789 atomic_set(&tvlv_new->refcount, 1);
790
791 spin_lock_bh(&bat_priv->tvlv.container_list_lock);
792 tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
793 batadv_tvlv_container_remove(tvlv_old);
794 hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
795 spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
796 }
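/* Usage sketch (hedged): the gateway code announces its bandwidth this way,
 * roughly:
 *
 *	struct batadv_tvlv_gateway_data gw;
 *
 *	gw.bandwidth_down = htonl(down);
 *	gw.bandwidth_up = htonl(up);
 *	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_GW, 1,
 *				       &gw, sizeof(gw));
 *
 * Registering the same type/version again simply swaps in the new content.
 */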
797
798 /**
799 * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
800 * requested packet size
801 * @packet_buff: packet buffer
802 * @packet_buff_len: packet buffer size
803 * @min_packet_len: requested packet minimum size
804 * @additional_packet_len: requested additional packet size on top of minimum
805 * size
806 *
807 * Returns true if the packet buffer could be changed to the requested size,
808 * false otherwise.
809 */
810 static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
811 int *packet_buff_len,
812 int min_packet_len,
813 int additional_packet_len)
814 {
815 unsigned char *new_buff;
816
817 new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
818
819 /* keep old buffer if kmalloc should fail */
820 if (new_buff) {
821 memcpy(new_buff, *packet_buff, min_packet_len);
822 kfree(*packet_buff);
823 *packet_buff = new_buff;
824 *packet_buff_len = min_packet_len + additional_packet_len;
825 return true;
826 }
827
828 return false;
829 }
830
831 /**
832 * batadv_tvlv_container_ogm_append - append tvlv container content to given
833 * OGM packet buffer
834 * @bat_priv: the bat priv with all the soft interface information
835 * @packet_buff: ogm packet buffer
836 * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
837 * content
838 * @packet_min_len: ogm header size to be preserved for the OGM itself
839 *
840 * The ogm packet might be enlarged or shrunk depending on the current size
841 * and the size of the to-be-appended tvlv containers.
842 *
843 * Returns size of all appended tvlv containers in bytes.
844 */
845 uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
846 unsigned char **packet_buff,
847 int *packet_buff_len,
848 int packet_min_len)
849 {
850 struct batadv_tvlv_container *tvlv;
851 struct batadv_tvlv_hdr *tvlv_hdr;
852 uint16_t tvlv_value_len;
853 void *tvlv_value;
854 bool ret;
855
856 spin_lock_bh(&bat_priv->tvlv.container_list_lock);
857 tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);
858
859 ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
860 packet_min_len, tvlv_value_len);
861
862 if (!ret)
863 goto end;
864
865 if (!tvlv_value_len)
866 goto end;
867
868 tvlv_value = (*packet_buff) + packet_min_len;
869
870 hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
871 tvlv_hdr = tvlv_value;
872 tvlv_hdr->type = tvlv->tvlv_hdr.type;
873 tvlv_hdr->version = tvlv->tvlv_hdr.version;
874 tvlv_hdr->len = tvlv->tvlv_hdr.len;
875 tvlv_value = tvlv_hdr + 1;
876 memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
877 tvlv_value = (uint8_t *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
878 }
879
880 end:
881 spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
882 return tvlv_value_len;
883 }
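/* Resulting buffer layout (derived from the copy loop above):
 *
 *	*packet_buff:
 *	+------------------+----------+---------+----------+---------+--
 *	|    OGM header    | tvlv_hdr |  value  | tvlv_hdr |  value  | ...
 *	| (packet_min_len) | (4 byte) | (len A) | (4 byte) | (len B) |
 *	+------------------+----------+---------+----------+---------+--
 *
 * The returned tvlv_value_len covers everything behind the OGM header.
 */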
884
885 /**
886 * batadv_tvlv_call_handler - invoke the given tvlv handler on one tvlv
887 * content buffer
888 * @bat_priv: the bat priv with all the soft interface information
889 * @tvlv_handler: tvlv callback function handling the tvlv content
890 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
891 * @orig_node: orig node emitting the ogm packet
892 * @src: source mac address of the unicast packet
893 * @dst: destination mac address of the unicast packet
894 * @tvlv_value: tvlv content
895 * @tvlv_value_len: tvlv content length
896 *
897 * Returns NET_RX_SUCCESS if no matching handler was found, otherwise the
898 * return value of the handler callback.
899 */
900 static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
901 struct batadv_tvlv_handler *tvlv_handler,
902 bool ogm_source,
903 struct batadv_orig_node *orig_node,
904 uint8_t *src, uint8_t *dst,
905 void *tvlv_value, uint16_t tvlv_value_len)
906 {
907 if (!tvlv_handler)
908 return NET_RX_SUCCESS;
909
910 if (ogm_source) {
911 if (!tvlv_handler->ogm_handler)
912 return NET_RX_SUCCESS;
913
914 if (!orig_node)
915 return NET_RX_SUCCESS;
916
917 tvlv_handler->ogm_handler(bat_priv, orig_node,
918 BATADV_NO_FLAGS,
919 tvlv_value, tvlv_value_len);
920 tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
921 } else {
922 if (!src)
923 return NET_RX_SUCCESS;
924
925 if (!dst)
926 return NET_RX_SUCCESS;
927
928 if (!tvlv_handler->unicast_handler)
929 return NET_RX_SUCCESS;
930
931 return tvlv_handler->unicast_handler(bat_priv, src,
932 dst, tvlv_value,
933 tvlv_value_len);
934 }
935
936 return NET_RX_SUCCESS;
937 }
938
939 /**
940 * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
941 * appropriate handlers
942 * @bat_priv: the bat priv with all the soft interface information
943 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
944 * @orig_node: orig node emitting the ogm packet
945 * @src: source mac address of the unicast packet
946 * @dst: destination mac address of the unicast packet
947 * @tvlv_value: tvlv content
948 * @tvlv_value_len: tvlv content length
949 *
950 * Returns NET_RX_SUCCESS when processing an OGM, otherwise the OR'ed return
951 * values of all called handler callbacks.
952 */
953 int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
954 bool ogm_source,
955 struct batadv_orig_node *orig_node,
956 uint8_t *src, uint8_t *dst,
957 void *tvlv_value, uint16_t tvlv_value_len)
958 {
959 struct batadv_tvlv_handler *tvlv_handler;
960 struct batadv_tvlv_hdr *tvlv_hdr;
961 uint16_t tvlv_value_cont_len;
962 uint8_t cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
963 int ret = NET_RX_SUCCESS;
964
965 while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
966 tvlv_hdr = tvlv_value;
967 tvlv_value_cont_len = ntohs(tvlv_hdr->len);
968 tvlv_value = tvlv_hdr + 1;
969 tvlv_value_len -= sizeof(*tvlv_hdr);
970
971 if (tvlv_value_cont_len > tvlv_value_len)
972 break;
973
974 tvlv_handler = batadv_tvlv_handler_get(bat_priv,
975 tvlv_hdr->type,
976 tvlv_hdr->version);
977
978 ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
979 ogm_source, orig_node,
980 src, dst, tvlv_value,
981 tvlv_value_cont_len);
982 if (tvlv_handler)
983 batadv_tvlv_handler_free_ref(tvlv_handler);
984 tvlv_value = (uint8_t *)tvlv_value + tvlv_value_cont_len;
985 tvlv_value_len -= tvlv_value_cont_len;
986 }
987
988 if (!ogm_source)
989 return ret;
990
991 rcu_read_lock();
992 hlist_for_each_entry_rcu(tvlv_handler,
993 &bat_priv->tvlv.handler_list, list) {
994 if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
995 !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
996 tvlv_handler->ogm_handler(bat_priv, orig_node,
997 cifnotfound, NULL, 0);
998
999 tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
1000 }
1001 rcu_read_unlock();
1002
1003 return NET_RX_SUCCESS;
1004 }
1005
1006 /**
1007 * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
1008 * handlers
1009 * @bat_priv: the bat priv with all the soft interface information
1010 * @batadv_ogm_packet: ogm packet containing the tvlv containers
1011 * @orig_node: orig node emitting the ogm packet
1012 */
1013 void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
1014 struct batadv_ogm_packet *batadv_ogm_packet,
1015 struct batadv_orig_node *orig_node)
1016 {
1017 void *tvlv_value;
1018 uint16_t tvlv_value_len;
1019
1020 if (!batadv_ogm_packet)
1021 return;
1022
1023 tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
1024 if (!tvlv_value_len)
1025 return;
1026
1027 tvlv_value = batadv_ogm_packet + 1;
1028
1029 batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
1030 tvlv_value, tvlv_value_len);
1031 }
1032
1033 /**
1034 * batadv_tvlv_handler_register - register tvlv handler based on the provided
1035 * type and version (both need to match) for ogm tvlv payload and/or unicast
1036 * payload
1037 * @bat_priv: the bat priv with all the soft interface information
1038 * @optr: ogm tvlv handler callback function. This function receives the orig
1039 * node, flags and the tvlv content as argument to process.
1040 * @uptr: unicast tvlv handler callback function. This function receives the
1041 * source & destination of the unicast packet as well as the tvlv content
1042 * to process.
1043 * @type: tvlv handler type to be registered
1044 * @version: tvlv handler version to be registered
1045 * @flags: flags to enable or disable TVLV API behavior
1046 */
1047 void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
1048 void (*optr)(struct batadv_priv *bat_priv,
1049 struct batadv_orig_node *orig,
1050 uint8_t flags,
1051 void *tvlv_value,
1052 uint16_t tvlv_value_len),
1053 int (*uptr)(struct batadv_priv *bat_priv,
1054 uint8_t *src, uint8_t *dst,
1055 void *tvlv_value,
1056 uint16_t tvlv_value_len),
1057 uint8_t type, uint8_t version, uint8_t flags)
1058 {
1059 struct batadv_tvlv_handler *tvlv_handler;
1060
1061 tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
1062 if (tvlv_handler) {
1063 batadv_tvlv_handler_free_ref(tvlv_handler);
1064 return;
1065 }
1066
1067 tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
1068 if (!tvlv_handler)
1069 return;
1070
1071 tvlv_handler->ogm_handler = optr;
1072 tvlv_handler->unicast_handler = uptr;
1073 tvlv_handler->type = type;
1074 tvlv_handler->version = version;
1075 tvlv_handler->flags = flags;
1076 atomic_set(&tvlv_handler->refcount, 1);
1077 INIT_HLIST_NODE(&tvlv_handler->list);
1078
1079 spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
1080 hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
1081 spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
1082 }
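/* Usage sketch (handler names below are hypothetical placeholders):
 *
 *	batadv_tvlv_handler_register(bat_priv,
 *				     my_feature_ogm_handler,
 *				     my_feature_unicast_handler,
 *				     MY_FEATURE_TVLV_TYPE, 1, BATADV_NO_FLAGS);
 *
 * Passing BATADV_TVLV_HANDLER_OGM_CIFNOTFND as flags additionally invokes the
 * ogm handler (with that flag set) when an OGM arrives without a matching
 * container, see batadv_tvlv_containers_process() above.
 */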
1083
1084 /**
1085 * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
1086 * provided type and version (both need to match)
1087 * @bat_priv: the bat priv with all the soft interface information
1088 * @type: tvlv handler type to be unregistered
1089 * @version: tvlv handler version to be unregistered
1090 */
1091 void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
1092 uint8_t type, uint8_t version)
1093 {
1094 struct batadv_tvlv_handler *tvlv_handler;
1095
1096 tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
1097 if (!tvlv_handler)
1098 return;
1099
1100 batadv_tvlv_handler_free_ref(tvlv_handler);
1101 spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
1102 hlist_del_rcu(&tvlv_handler->list);
1103 spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
1104 batadv_tvlv_handler_free_ref(tvlv_handler);
1105 }
1106
1107 /**
1108 * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
1109 * specified host
1110 * @bat_priv: the bat priv with all the soft interface information
1111 * @src: source mac address of the unicast packet
1112 * @dst: destination mac address of the unicast packet
1113 * @type: tvlv type
1114 * @version: tvlv version
1115 * @tvlv_value: tvlv content
1116 * @tvlv_value_len: tvlv content length
1117 */
1118 void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
1119 uint8_t *dst, uint8_t type, uint8_t version,
1120 void *tvlv_value, uint16_t tvlv_value_len)
1121 {
1122 struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
1123 struct batadv_tvlv_hdr *tvlv_hdr;
1124 struct batadv_orig_node *orig_node;
1125 struct sk_buff *skb = NULL;
1126 unsigned char *tvlv_buff;
1127 unsigned int tvlv_len;
1128 ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
1129 bool ret = false;
1130
1131 orig_node = batadv_orig_hash_find(bat_priv, dst);
1132 if (!orig_node)
1133 goto out;
1134
1135 tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;
1136
1137 skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
1138 if (!skb)
1139 goto out;
1140
1141 skb->priority = TC_PRIO_CONTROL;
1142 skb_reserve(skb, ETH_HLEN);
1143 tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
1144 unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
1145 unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
1146 unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
1147 unicast_tvlv_packet->ttl = BATADV_TTL;
1148 unicast_tvlv_packet->reserved = 0;
1149 unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
1150 unicast_tvlv_packet->align = 0;
1151 ether_addr_copy(unicast_tvlv_packet->src, src);
1152 ether_addr_copy(unicast_tvlv_packet->dst, dst);
1153
1154 tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
1155 tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
1156 tvlv_hdr->version = version;
1157 tvlv_hdr->type = type;
1158 tvlv_hdr->len = htons(tvlv_value_len);
1159 tvlv_buff += sizeof(*tvlv_hdr);
1160 memcpy(tvlv_buff, tvlv_value, tvlv_value_len);
1161
1162 if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
1163 ret = true;
1164
1165 out:
1166 if (skb && !ret)
1167 kfree_skb(skb);
1168 if (orig_node)
1169 batadv_orig_node_free_ref(orig_node);
1170 }
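/* Usage sketch (hedged, buffer names are placeholders): directed control data
 * such as translation table requests travels this way, e.g.
 *
 *	batadv_tvlv_unicast_send(bat_priv, primary_addr, orig_node->orig,
 *				 BATADV_TVLV_TT, 1, req_data, req_len);
 *
 * The exact callers live in translation-table.c and other feature files.
 */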
1171
1172 /**
1173 * batadv_get_vid - extract the VLAN identifier from skb if any
1174 * @skb: the buffer containing the packet
1175 * @header_len: length of the batman header preceding the ethernet header
1176 *
1177 * If the packet embedded in the skb is vlan tagged this function returns the
1178 * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
1179 */
1180 unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
1181 {
1182 struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
1183 struct vlan_ethhdr *vhdr;
1184 unsigned short vid;
1185
1186 if (ethhdr->h_proto != htons(ETH_P_8021Q))
1187 return BATADV_NO_FLAGS;
1188
1189 if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
1190 return BATADV_NO_FLAGS;
1191
1192 vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
1193 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
1194 vid |= BATADV_VLAN_HAS_TAG;
1195
1196 return vid;
1197 }
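/* Worked example (illustrative): for a frame tagged with VID 100 this returns
 * 100 | BATADV_VLAN_HAS_TAG, while an untagged frame yields BATADV_NO_FLAGS,
 * so callers can tell "VLAN 0" apart from "no VLAN tag at all". The PCP bits
 * of the TCI are discarded by the VLAN_VID_MASK above.
 */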
1198
1199 /**
1200 * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
1201 * @bat_priv: the bat priv with all the soft interface information
1202 * @vid: the VLAN identifier for which the AP isolation attribute has to be
1203 * looked up
1204 *
1205 * Returns true if AP isolation is on for the VLAN identified by vid, false
1206 * otherwise
1207 */
1208 bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
1209 {
1210 bool ap_isolation_enabled = false;
1211 struct batadv_softif_vlan *vlan;
1212
1213 /* if the AP isolation is requested on a VLAN, then check for its
1214 * setting in the proper VLAN private data structure
1215 */
1216 vlan = batadv_softif_vlan_get(bat_priv, vid);
1217 if (vlan) {
1218 ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
1219 batadv_softif_vlan_free_ref(vlan);
1220 }
1221
1222 return ap_isolation_enabled;
1223 }
1224
1225 static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
1226 {
1227 struct batadv_algo_ops *bat_algo_ops;
1228 char *algo_name = (char *)val;
1229 size_t name_len = strlen(algo_name);
1230
1231 if (name_len > 0 && algo_name[name_len - 1] == '\n')
1232 algo_name[name_len - 1] = '\0';
1233
1234 bat_algo_ops = batadv_algo_get(algo_name);
1235 if (!bat_algo_ops) {
1236 pr_err("Routing algorithm '%s' is not supported\n", algo_name);
1237 return -EINVAL;
1238 }
1239
1240 return param_set_copystring(algo_name, kp);
1241 }
1242
1243 static const struct kernel_param_ops batadv_param_ops_ra = {
1244 .set = batadv_param_set_ra,
1245 .get = param_get_string,
1246 };
1247
1248 static struct kparam_string batadv_param_string_ra = {
1249 .maxlen = sizeof(batadv_routing_algo),
1250 .string = batadv_routing_algo,
1251 };
1252
1253 module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
1254 0644);
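/* Usage note: the default routing algorithm can be selected at load time, e.g.
 *
 *	modprobe batman_adv routing_algo=BATMAN_IV
 *
 * and read back from /sys/module/batman_adv/parameters/routing_algo;
 * batadv_param_set_ra() above rejects names no registered algorithm matches.
 */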
1255 module_init(batadv_init);
1256 module_exit(batadv_exit);
1257
1258 MODULE_LICENSE("GPL");
1259
1260 MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
1261 MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
1262 MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
1263 MODULE_VERSION(BATADV_SOURCE_VERSION);