/*
 * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "types.h"
#include "vis.h"
#include "aggregation.h"

/* apply hop penalty for a normal link */
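/* e.g. assuming main.h defines TQ_MAX_VALUE as 255 and TQ_HOP_PENALTY as 10,
 * a perfect incoming tq of 255 is damped to (255 * 245) / 255 = 245 on
 * every hop */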
static uint8_t hop_penalty(const uint8_t tq)
{
	return (tq * (TQ_MAX_VALUE - TQ_HOP_PENALTY)) / (TQ_MAX_VALUE);
}

/* when do we schedule our own packet to be sent */
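/* the own OGM is jittered around orig_interval: e.g. with the default
 * interval of 1000 ms and a JITTER of 20 ms (values assumed from main.h),
 * the next own OGM is scheduled uniformly within [980 ms, 1020 ms)
 * from now */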
static unsigned long own_send_time(struct bat_priv *bat_priv)
{
	return jiffies +
		(((atomic_read(&bat_priv->orig_interval) - JITTER +
		   (random32() % (2 * JITTER))) * HZ) / 1000);
}

/* when do we schedule a forwarded packet to be sent */
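/* forwarded packets are delayed by no more than JITTER/2 ms; the small
 * random delay gives the aggregation code a chance to merge several OGMs
 * into one frame before it hits the wire */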
static unsigned long forward_send_time(struct bat_priv *bat_priv)
{
	return jiffies + (((random32() % (JITTER/2)) * HZ) / 1000);
}

/* send out an already prepared packet to the given address via the
 * specified batman interface */
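/* note: the skb is consumed in any case: it is handed over to
 * dev_queue_xmit() on success and freed via kfree_skb() on error, so the
 * caller must not touch it afterwards */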
int send_skb_packet(struct sk_buff *skb,
		    struct batman_if *batman_if,
		    uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (batman_if->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!batman_if->net_dev))
		goto send_skb_err;

	if (!(batman_if->net_dev->flags & IFF_UP)) {
		printk(KERN_WARNING
		       "batman-adv: Interface %s "
		       "is not up - can't send packet via that interface!\n",
		       batman_if->dev);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_push(skb, sizeof(struct ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = batman_if->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */

	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* sends a raw packet. */
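/* the caller's buffer is copied into a freshly allocated skb. The
 * skb_put()/skb_pull() pair leaves an ethernet header's worth of headroom
 * in front of the payload, which my_skb_push() in send_skb_packet() later
 * reclaims without having to reallocate */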
void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
		     struct batman_if *batman_if, uint8_t *dst_addr)
{
	struct sk_buff *skb;
	char *data;

	skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
	if (!skb)
		return;
	data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
	memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);
	/* pull back to the batman "network header" */
	skb_pull(skb, sizeof(struct ethhdr));
	send_skb_packet(skb, batman_if, dst_addr);
}

/* Send a packet to a given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct batman_if *batman_if)
{
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_packet *batman_packet;

	if (batman_if->if_status != IF_ACTIVE)
		return;

	packet_num = 0;
	buff_pos = 0;
	batman_packet = (struct batman_packet *)(forw_packet->packet_buff);

	/* adjust all flags and log packets */
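	/* the buffer may hold an aggregate: a train of batman_packets, each
	 * immediately followed by its own HNA announcements
	 * (num_hna * ETH_ALEN bytes), so walk it packet by packet */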
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_packet->num_hna)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == batman_if))
			batman_packet->flags |= DIRECTLINK;
		else
			batman_packet->flags &= ~DIRECTLINK;

		fwd_str = (packet_num > 0 ? "Forwarding" :
			   (forw_packet->own ? "Sending own" : "Forwarding"));
		bat_dbg(DBG_BATMAN,
			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
			" IDF %s) on interface %s [%s]\n",
			fwd_str, (packet_num > 0 ? "aggregated " : ""),
			batman_packet->orig, ntohs(batman_packet->seqno),
			batman_packet->tq, batman_packet->ttl,
			(batman_packet->flags & DIRECTLINK ? "on" : "off"),
			batman_if->dev, batman_if->addr_str);

		buff_pos += sizeof(struct batman_packet) +
			(batman_packet->num_hna * ETH_ALEN);
		packet_num++;
		batman_packet = (struct batman_packet *)
			(forw_packet->packet_buff + buff_pos);
	}

	send_raw_packet(forw_packet->packet_buff,
			forw_packet->packet_len,
			batman_if, broadcastAddr);
}

/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
	struct batman_if *batman_if;
	struct batman_packet *batman_packet =
		(struct batman_packet *)(forw_packet->packet_buff);
	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		printk(KERN_ERR "batman-adv: Error - can't forward packet: "
		       "incoming iface not specified\n");
		return;
	}

	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
		return;

	/* multihomed peer assumed:
	 * non-primary OGMs are only broadcast on their interface */
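	/* two cases end up here: a direct link packet with a ttl of 1
	 * (effectively a one-hop link check that must not travel beyond the
	 * probed neighbor) and an own OGM of a non-primary interface, which
	 * stays on its interface as only the primary address is propagated
	 * mesh-wide */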
	if ((directlink && (batman_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {

		/* FIXME: what about aggregated packets ? */
		bat_dbg(DBG_BATMAN,
			"%s packet (originator %pM, seqno %d, TTL %d) "
			"on interface %s [%s]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_packet->orig, ntohs(batman_packet->seqno),
			batman_packet->ttl, forw_packet->if_incoming->dev,
			forw_packet->if_incoming->addr_str);

		send_raw_packet(forw_packet->packet_buff,
				forw_packet->packet_len,
				forw_packet->if_incoming,
				broadcastAddr);
		return;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list)
		send_packet_to_if(forw_packet, batman_if);
	rcu_read_unlock();
}

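/* grow or shrink the OGM buffer of the given interface so it can carry the
 * current local HNA table; the old buffer is kept if the allocation fails */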
static void rebuild_batman_packet(struct batman_if *batman_if)
{
	int new_len;
	unsigned char *new_buff;
	struct batman_packet *batman_packet;

	new_len = sizeof(struct batman_packet) + (num_hna * ETH_ALEN);
	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, batman_if->packet_buff,
		       sizeof(struct batman_packet));
		batman_packet = (struct batman_packet *)new_buff;

		batman_packet->num_hna = hna_local_fill_buffer(
				new_buff + sizeof(struct batman_packet),
				new_len - sizeof(struct batman_packet));

		kfree(batman_if->packet_buff);
		batman_if->packet_buff = new_buff;
		batman_if->packet_len = new_len;
	}
}

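/* prepare this interface's own OGM and queue it for the next originator
 * interval */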
void schedule_own_packet(struct batman_if *batman_if)
{
	/* FIXME: each batman_if will be attached to a softif */
	struct bat_priv *bat_priv = netdev_priv(soft_device);
	unsigned long send_time;
	struct batman_packet *batman_packet;
	int vis_server;

	if ((batman_if->if_status == IF_NOT_IN_USE) ||
	    (batman_if->if_status == IF_TO_BE_REMOVED))
		return;

	vis_server = atomic_read(&bat_priv->vis_mode);

	/**
	 * the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (batman_if->if_status == IF_TO_BE_ACTIVATED)
		batman_if->if_status = IF_ACTIVE;

	/* if local hna has changed and interface is a primary interface */
	if ((atomic_read(&hna_local_changed)) &&
	    (batman_if == bat_priv->primary_if))
		rebuild_batman_packet(batman_if);

	/**
	 * NOTE: packet_buff might just have been re-allocated in
	 * rebuild_batman_packet()
	 */
	batman_packet = (struct batman_packet *)batman_if->packet_buff;

	/* change sequence number to network order */
	batman_packet->seqno = htons((uint16_t)atomic_read(&batman_if->seqno));

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_packet->flags |= VIS_SERVER;
	else
		batman_packet->flags &= ~VIS_SERVER;

	/* could be read by receive_bat_packet() */
	atomic_inc(&batman_if->seqno);

	slide_own_bcast_window(batman_if);
	send_time = own_send_time(bat_priv);
	add_bat_packet_to_list(bat_priv,
			       batman_if->packet_buff,
			       batman_if->packet_len,
			       batman_if, 1, send_time);
}

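/* queue an OGM received from another node for rebroadcast, after adjusting
 * its ttl, tq and direct link flag */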
void schedule_forward_packet(struct orig_node *orig_node,
			     struct ethhdr *ethhdr,
			     struct batman_packet *batman_packet,
			     uint8_t directlink, int hna_buff_len,
			     struct batman_if *if_incoming)
{
	/* FIXME: each batman_if will be attached to a softif */
	struct bat_priv *bat_priv = netdev_priv(soft_device);
	unsigned char in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;

	if (batman_packet->ttl <= 1) {
		bat_dbg(DBG_BATMAN, "ttl exceeded\n");
		return;
	}

	in_tq = batman_packet->tq;
	in_ttl = batman_packet->ttl;

	batman_packet->ttl--;
	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast the tq of our best ranking neighbor to ensure that the
	 * forwarded packet carries our best known tq towards the originator */
	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
			batman_packet->tq = orig_node->router->tq_avg;

			if (orig_node->router->last_ttl)
				batman_packet->ttl =
					orig_node->router->last_ttl - 1;
		}

		tq_avg = orig_node->router->tq_avg;
	}

	/* apply hop penalty */
	batman_packet->tq = hop_penalty(batman_packet->tq);

	bat_dbg(DBG_BATMAN, "Forwarding packet: tq_orig: %i, tq_avg: %i, "
		"tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
		batman_packet->ttl);

	batman_packet->seqno = htons(batman_packet->seqno);

	if (directlink)
		batman_packet->flags |= DIRECTLINK;
	else
		batman_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time(bat_priv);
	add_bat_packet_to_list(bat_priv,
			       (unsigned char *)batman_packet,
			       sizeof(struct batman_packet) + hna_buff_len,
			       if_incoming, 0, send_time);
}

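/* free a forw_packet and whichever payload it carries (an skb for
 * broadcasts, a packet_buff for OGMs); kfree(NULL) is a no-op, so the
 * buffer can be freed unconditionally */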
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	kfree(forw_packet->packet_buff);
	kfree(forw_packet);
}

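/* enqueue a prepared broadcast packet and arm its delayed work; send_time
 * is a relative delay in jiffies as expected by queue_delayed_work() */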
static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	unsigned long flags;
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_irqsave(&forw_bcast_list_lock, flags);
	hlist_add_head(&forw_packet->list, &forw_bcast_list);
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
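/* bcast_queue_left acts as a counting semaphore for free queue slots:
 * atomic_dec_not_zero() claims a slot and fails once the queue is full */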
/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct sk_buff *skb)
{
	struct forw_packet *forw_packet;

	if (!atomic_dec_not_zero(&bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, "bcast packet queue full\n");
		goto out;
	}

	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb)
		goto packet_free;

	skb_reset_mac_header(skb);

	forw_packet->skb = skb;
	forw_packet->packet_buff = NULL;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(forw_packet, 1);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bcast_queue_left);
out:
	return NETDEV_TX_BUSY;
}

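/* delayed work callback: rebroadcast a queued broadcast packet on all
 * interfaces and re-arm itself until the packet has been sent three times */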
void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batman_if *batman_if;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	unsigned long flags;
	struct sk_buff *skb1;

	spin_lock_irqsave(&forw_bcast_list_lock, flags);
	hlist_del(&forw_packet->list);
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		/* send a copy of the saved skb */
		skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, batman_if, broadcastAddr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send and we are not shutting
	 * down */
	if ((forw_packet->num_packets < 3) &&
	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
		_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
	else {
		forw_packet_free(forw_packet);
		atomic_inc(&bcast_queue_left);
	}
}

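/* delayed work callback: hand a queued OGM to send_packet() and, for our
 * own OGMs, schedule the next one to keep the queue ticking */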
void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	unsigned long flags;

	spin_lock_irqsave(&forw_bat_list_lock, flags);
	hlist_del(&forw_packet->list);
	spin_unlock_irqrestore(&forw_bat_list_lock, flags);

	send_packet(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queue's wake up time unless we are
	 * shutting down
	 */
	if ((forw_packet->own) &&
	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
		schedule_own_packet(forw_packet->if_incoming);

	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&batman_queue_left);

	forw_packet_free(forw_packet);
}

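/* cancel all queued packets, or only those belonging to the given
 * interface if batman_if is not NULL */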
void purge_outstanding_packets(struct batman_if *batman_if)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	unsigned long flags;

	if (batman_if)
		bat_dbg(DBG_BATMAN, "purge_outstanding_packets(): %s\n",
			batman_if->dev);
	else
		bat_dbg(DBG_BATMAN, "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_irqsave(&forw_bcast_list_lock, flags);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &forw_bcast_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((batman_if) &&
		    (forw_packet->if_incoming != batman_if))
			continue;

		spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list; drop the lock while
		 * cancel_delayed_work_sync() waits for it, otherwise we
		 * would deadlock
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_irqsave(&forw_bcast_list_lock, flags);
	}
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

	/* free batman packet list */
	spin_lock_irqsave(&forw_bat_list_lock, flags);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &forw_bat_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((batman_if) &&
		    (forw_packet->if_incoming != batman_if))
			continue;

		spin_unlock_irqrestore(&forw_bat_list_lock, flags);

		/**
		 * send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list; drop the lock while
		 * cancel_delayed_work_sync() waits for it, otherwise we
		 * would deadlock
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_irqsave(&forw_bat_list_lock, flags);
	}
	spin_unlock_irqrestore(&forw_bat_list_lock, flags);
}