/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* apply hop penalty for a normal link */
static uint8_t hop_penalty(const uint8_t tq, struct bat_priv *bat_priv)
{
        int hop_penalty = atomic_read(&bat_priv->hop_penalty);
        return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}

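/*
 * Worked example for hop_penalty(): with TQ_MAX_VALUE == 255 and a
 * hop_penalty of 10 (a typical setting), a perfect incoming tq of 255
 * shrinks to (255 * (255 - 10)) / 255 = 245 after one hop and to
 * (245 * 245) / 255 = 235 after two, i.e. path quality decays
 * multiplicatively per hop.
 */
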
/* when do we schedule our own packet to be sent */
static unsigned long own_send_time(struct bat_priv *bat_priv)
{
        /* randomize within [orig_interval - JITTER, orig_interval + JITTER) */
        return jiffies + msecs_to_jiffies(
                   atomic_read(&bat_priv->orig_interval) -
                   JITTER + (random32() % (2 * JITTER)));
}

/* when do we schedule a forwarded packet to be sent */
static unsigned long forward_send_time(void)
{
        return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}

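/*
 * Timing sketch: with the default orig_interval of 1000 ms and a
 * JITTER of 20 ms, own OGMs are scheduled 980 to 1019 ms ahead, while
 * forwarded OGMs leave after a random 0 to 9 ms delay.
 */
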
/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb,
                    struct batman_if *batman_if,
                    uint8_t *dst_addr)
{
        struct ethhdr *ethhdr;

        if (batman_if->if_status != IF_ACTIVE)
                goto send_skb_err;

        if (unlikely(!batman_if->net_dev))
                goto send_skb_err;

        if (!(batman_if->net_dev->flags & IFF_UP)) {
                pr_warning("Interface %s is not up - can't send packet via "
                           "that interface!\n", batman_if->net_dev->name);
                goto send_skb_err;
        }

        /* push to the ethernet header. */
        if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0)
                goto send_skb_err;

        skb_reset_mac_header(skb);

        ethhdr = (struct ethhdr *)skb_mac_header(skb);
        memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
        ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

        skb_set_network_header(skb, ETH_HLEN);
        skb->priority = TC_PRIO_CONTROL;
        skb->protocol = __constant_htons(ETH_P_BATMAN);

        skb->dev = batman_if->net_dev;

        /* dev_queue_xmit() returns a negative result on error. However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error. */

        return dev_queue_xmit(skb);
send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

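/*
 * Caller sketch (hypothetical; real callers live in this file and in
 * vis.c): the skb is consumed on both success and failure, so callers
 * that must keep a reference clone first:
 *
 *      skb1 = skb_clone(skb, GFP_ATOMIC);
 *      if (skb1)
 *              send_skb_packet(skb1, batman_if, broadcast_addr);
 */
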
/* Send a packet to a given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
                              struct batman_if *batman_if)
{
        struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
        char *fwd_str;
        uint8_t packet_num;
        int16_t buff_pos;
        struct batman_packet *batman_packet;
        struct sk_buff *skb;

        if (batman_if->if_status != IF_ACTIVE)
                return;

        packet_num = 0;
        buff_pos = 0;
        batman_packet = (struct batman_packet *)forw_packet->skb->data;

        /* adjust all flags and log packets */
        while (aggregated_packet(buff_pos,
                                 forw_packet->packet_len,
                                 batman_packet->num_hna)) {

                /* we might have aggregated direct link packets with an
                 * ordinary base packet */
                if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
                    (forw_packet->if_incoming == batman_if))
                        batman_packet->flags |= DIRECTLINK;
                else
                        batman_packet->flags &= ~DIRECTLINK;

                fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
                                                            "Sending own" :
                                                            "Forwarding"));
                bat_dbg(DBG_BATMAN, bat_priv,
                        "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
                        " IDF %s) on interface %s [%pM]\n",
                        fwd_str, (packet_num > 0 ? "aggregated " : ""),
                        batman_packet->orig, ntohl(batman_packet->seqno),
                        batman_packet->tq, batman_packet->ttl,
                        (batman_packet->flags & DIRECTLINK ?
                         "on" : "off"),
                        batman_if->net_dev->name, batman_if->net_dev->dev_addr);

                buff_pos += sizeof(struct batman_packet) +
                        (batman_packet->num_hna * ETH_ALEN);
                packet_num++;
                batman_packet = (struct batman_packet *)
                        (forw_packet->skb->data + buff_pos);
        }

        /* create clone because function is called more than once */
        skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
        if (skb)
                send_skb_packet(skb, batman_if, broadcast_addr);
}

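/*
 * Aggregated OGM buffer layout walked above (sketch):
 *
 *      | batman_packet #0 | HNA entries #0 | batman_packet #1 | ...
 *
 * Each HNA entry is ETH_ALEN bytes, so buff_pos advances by
 * sizeof(struct batman_packet) + num_hna * ETH_ALEN per packet, and
 * bit n of direct_link_flags refers to packet n.
 */
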
/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
        struct batman_if *batman_if;
        struct net_device *soft_iface;
        struct bat_priv *bat_priv;
        struct batman_packet *batman_packet =
                (struct batman_packet *)(forw_packet->skb->data);
        unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

        if (!forw_packet->if_incoming) {
                pr_err("Error - can't forward packet: incoming iface not "
                       "specified\n");
                return;
        }

        soft_iface = forw_packet->if_incoming->soft_iface;
        bat_priv = netdev_priv(soft_iface);

        if (forw_packet->if_incoming->if_status != IF_ACTIVE)
                return;

        /* multihomed peer assumed */
        /* non-primary OGMs are only broadcasted on their interface */
        if ((directlink && (batman_packet->ttl == 1)) ||
            (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {

                /* FIXME: what about aggregated packets ? */
                bat_dbg(DBG_BATMAN, bat_priv,
                        "%s packet (originator %pM, seqno %d, TTL %d) "
                        "on interface %s [%pM]\n",
                        (forw_packet->own ? "Sending own" : "Forwarding"),
                        batman_packet->orig, ntohl(batman_packet->seqno),
                        batman_packet->ttl,
                        forw_packet->if_incoming->net_dev->name,
                        forw_packet->if_incoming->net_dev->dev_addr);

                /* skb is only used once and then forw_packet is free'd */
                send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
                                broadcast_addr);
                forw_packet->skb = NULL;

                return;
        }

        /* broadcast on every interface */
        rcu_read_lock();
        list_for_each_entry_rcu(batman_if, &hardif_list, list) {
                if (batman_if->soft_iface != soft_iface)
                        continue;

                send_packet_to_if(forw_packet, batman_if);
        }
        rcu_read_unlock();
}

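/*
 * Dispatch summary for send_packet() (sketch): an OGM flagged
 * DIRECTLINK with ttl == 1, or one of our own OGMs on a non-primary
 * interface, is sent exactly once on its incoming interface; every
 * other OGM is rebroadcast via send_packet_to_if() on each hard
 * interface bound to the same soft interface.
 */
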
static void rebuild_batman_packet(struct bat_priv *bat_priv,
                                  struct batman_if *batman_if)
{
        int new_len;
        unsigned char *new_buff;
        struct batman_packet *batman_packet;

        new_len = sizeof(struct batman_packet) +
                        (bat_priv->num_local_hna * ETH_ALEN);
        new_buff = kmalloc(new_len, GFP_ATOMIC);

        /* keep old buffer if kmalloc should fail */
        if (new_buff) {
                memcpy(new_buff, batman_if->packet_buff,
                       sizeof(struct batman_packet));
                batman_packet = (struct batman_packet *)new_buff;

                batman_packet->num_hna = hna_local_fill_buffer(bat_priv,
                                new_buff + sizeof(struct batman_packet),
                                new_len - sizeof(struct batman_packet));

                kfree(batman_if->packet_buff);
                batman_if->packet_buff = new_buff;
                batman_if->packet_len = new_len;
        }
}

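/*
 * Resulting packet_buff layout (sketch): the OGM header is copied
 * verbatim and hna_local_fill_buffer() appends one ETH_ALEN (6 byte)
 * MAC address per locally announced host, e.g. with two local HNA
 * entries:
 *
 *      | struct batman_packet | MAC #0 (6 bytes) | MAC #1 (6 bytes) |
 */
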
void schedule_own_packet(struct batman_if *batman_if)
{
        struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
        unsigned long send_time;
        struct batman_packet *batman_packet;
        int vis_server;

        if ((batman_if->if_status == IF_NOT_IN_USE) ||
            (batman_if->if_status == IF_TO_BE_REMOVED))
                return;

        vis_server = atomic_read(&bat_priv->vis_mode);

        /**
         * the interface gets activated here to avoid a race between
         * hardif_activate_interface(), where the originator mac is set,
         * and outdated packets (especially with uninitialized mac
         * addresses) still sitting in the packet queue
         */
        if (batman_if->if_status == IF_TO_BE_ACTIVATED)
                batman_if->if_status = IF_ACTIVE;

        /* if local hna has changed and interface is a primary interface */
        if ((atomic_read(&bat_priv->hna_local_changed)) &&
            (batman_if == bat_priv->primary_if))
                rebuild_batman_packet(bat_priv, batman_if);

        /**
         * NOTE: packet_buff might just have been re-allocated in
         * rebuild_batman_packet()
         */
        batman_packet = (struct batman_packet *)batman_if->packet_buff;

        /* change sequence number to network order */
        batman_packet->seqno =
                htonl((uint32_t)atomic_read(&batman_if->seqno));

        if (vis_server == VIS_TYPE_SERVER_SYNC)
                batman_packet->flags |= VIS_SERVER;
        else
                batman_packet->flags &= ~VIS_SERVER;

        if ((batman_if == bat_priv->primary_if) &&
            (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
                batman_packet->gw_flags =
                        (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
        else
                batman_packet->gw_flags = 0;

        atomic_inc(&batman_if->seqno);

        slide_own_bcast_window(batman_if);
        send_time = own_send_time(bat_priv);
        add_bat_packet_to_list(bat_priv,
                               batman_if->packet_buff,
                               batman_if->packet_len,
                               batman_if, 1, send_time);
}

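/*
 * Scheduling cycle (sketch): schedule_own_packet() hands the OGM to
 * add_bat_packet_to_list() with the jittered send_time; when the
 * delayed work fires, send_outstanding_bat_packet() transmits it and,
 * for own packets, calls schedule_own_packet() again. Each hard
 * interface thus keeps one self-rearming timer ticking roughly every
 * orig_interval milliseconds.
 */
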
void schedule_forward_packet(struct orig_node *orig_node,
                             struct ethhdr *ethhdr,
                             struct batman_packet *batman_packet,
                             uint8_t directlink, int hna_buff_len,
                             struct batman_if *if_incoming)
{
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        unsigned char in_tq, in_ttl, tq_avg = 0;
        unsigned long send_time;

        if (batman_packet->ttl <= 1) {
                bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
                return;
        }

        in_tq = batman_packet->tq;
        in_ttl = batman_packet->ttl;

        batman_packet->ttl--;
        memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

        /* rebroadcast the tq of our best ranking neighbor to ensure that
         * our best tq value is propagated */
        if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {

                /* rebroadcast ogm of best ranking neighbor as is */
                if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) {
                        batman_packet->tq = orig_node->router->tq_avg;

                        if (orig_node->router->last_ttl)
                                batman_packet->ttl =
                                        orig_node->router->last_ttl - 1;
                }

                tq_avg = orig_node->router->tq_avg;
        }

        /* apply hop penalty */
        batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);

        bat_dbg(DBG_BATMAN, bat_priv,
                "Forwarding packet: tq_orig: %i, tq_avg: %i, "
                "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
                in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
                batman_packet->ttl);

        /* change sequence number to network order */
        batman_packet->seqno = htonl(batman_packet->seqno);

        /* switch off the PRIMARIES_FIRST_HOP flag when forwarding */
        batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
        if (directlink)
                batman_packet->flags |= DIRECTLINK;
        else
                batman_packet->flags &= ~DIRECTLINK;

        send_time = forward_send_time();
        add_bat_packet_to_list(bat_priv,
                               (unsigned char *)batman_packet,
                               sizeof(struct batman_packet) + hna_buff_len,
                               if_incoming, 0, send_time);
}

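/*
 * Worked example, assuming TQ_MAX_VALUE == 255 and a hop_penalty of
 * 10: an OGM arrives with tq 200 and ttl 50, and the best ranking
 * neighbor has tq_avg 220 and last_ttl 49. If that neighbor did not
 * send this OGM, it is forwarded with tq = hop_penalty(220) = 211 and
 * ttl 48; otherwise it keeps its own values and goes out with
 * tq = hop_penalty(200) = 192 and ttl 49.
 */
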
static void forw_packet_free(struct forw_packet *forw_packet)
{
        if (forw_packet->skb)
                kfree_skb(forw_packet->skb);
        kfree(forw_packet);
}

static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
                                      struct forw_packet *forw_packet,
                                      unsigned long send_time)
{
        INIT_HLIST_NODE(&forw_packet->list);

        /* add new packet to packet list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* start timer for this packet */
        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          send_outstanding_bcast_packet);
        queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
                           send_time);
}

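/*
 * Note on units: despite its name, the send_time argument is a
 * relative delay in jiffies, as expected by queue_delayed_work().
 * The callers in this file pass 1 jiffy for the first transmission
 * and (5 * HZ) / 1000 jiffies (about 5 ms on HZ=1000 kernels) between
 * rebroadcasts.
 */
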
#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
{
        struct forw_packet *forw_packet;
        struct bcast_packet *bcast_packet;

        if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
                bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
                goto out;
        }

        if (!bat_priv->primary_if)
                goto out_and_inc;

        forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);

        if (!forw_packet)
                goto out_and_inc;

        skb = skb_copy(skb, GFP_ATOMIC);
        if (!skb)
                goto packet_free;

        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct bcast_packet *)skb->data;
        bcast_packet->ttl--;

        skb_reset_mac_header(skb);

        forw_packet->skb = skb;
        forw_packet->if_incoming = bat_priv->primary_if;

        /* how often did we send the bcast packet ? */
        forw_packet->num_packets = 0;

        _add_bcast_packet_to_list(bat_priv, forw_packet, 1);
        return NETDEV_TX_OK;

packet_free:
        kfree(forw_packet);
out_and_inc:
        atomic_inc(&bat_priv->bcast_queue_left);
out:
        return NETDEV_TX_BUSY;
}

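/*
 * Caller sketch (hypothetical; the real caller sits in the
 * soft-interface transmit path): the queue works on a private
 * skb_copy(), so the caller keeps ownership of its skb and frees it
 * itself:
 *
 *      add_bcast_packet_to_list(bat_priv, skb);
 *      kfree_skb(skb);
 */
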
static void send_outstanding_bcast_packet(struct work_struct *work)
{
        struct batman_if *batman_if;
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        struct sk_buff *skb1;
        struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
        struct bat_priv *bat_priv = netdev_priv(soft_iface);

        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
                goto out;

        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(batman_if, &hardif_list, list) {
                if (batman_if->soft_iface != soft_iface)
                        continue;

                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        send_skb_packet(skb1, batman_if, broadcast_addr);
        }
        rcu_read_unlock();

        forw_packet->num_packets++;

        /* if we still have some more bcasts to send */
        if (forw_packet->num_packets < 3) {
                _add_bcast_packet_to_list(bat_priv, forw_packet,
                                          ((5 * HZ) / 1000));
                return;
        }

out:
        forw_packet_free(forw_packet);
        atomic_inc(&bat_priv->bcast_queue_left);
}

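/*
 * Rebroadcast timeline (sketch): a queued broadcast first goes out
 * one jiffy after add_bcast_packet_to_list(), then twice more spaced
 * (5 * HZ) / 1000 jiffies apart, i.e. three copies per attached hard
 * interface, before the forw_packet is freed and its queue slot
 * returned via bcast_queue_left.
 */
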
void send_outstanding_bat_packet(struct work_struct *work)
{
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        struct bat_priv *bat_priv;

        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
                goto out;

        send_packet(forw_packet);

        /**
         * we have to have at least one packet in the queue
         * to determine the queue's wake up time unless we are
         * shutting down
         */
        if (forw_packet->own)
                schedule_own_packet(forw_packet->if_incoming);

out:
        /* don't count own packet */
        if (!forw_packet->own)
                atomic_inc(&bat_priv->batman_queue_left);

        forw_packet_free(forw_packet);
}

void purge_outstanding_packets(struct bat_priv *bat_priv,
                               struct batman_if *batman_if)
{
        struct forw_packet *forw_packet;
        struct hlist_node *tmp_node, *safe_tmp_node;

        if (batman_if)
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets(): %s\n",
                        batman_if->net_dev->name);
        else
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets()\n");

        /* free bcast list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bcast_list, list) {

                /**
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((batman_if) &&
                    (forw_packet->if_incoming != batman_if))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

                /**
                 * send_outstanding_bcast_packet() takes the same lock to
                 * delete the item from the list, so we must drop it before
                 * waiting in cancel_delayed_work_sync() to avoid a deadlock
                 */
                cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* free batman packet list */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bat_list, list) {

                /**
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((batman_if) &&
                    (forw_packet->if_incoming != batman_if))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bat_list_lock);

                /**
                 * send_outstanding_bat_packet() takes the same lock to
                 * delete the item from the list, so we must drop it before
                 * waiting in cancel_delayed_work_sync() to avoid a deadlock
                 */
                cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bat_list_lock);
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}