/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* apply hop penalty for a normal link */
static uint8_t hop_penalty(const uint8_t tq, struct bat_priv *bat_priv)
{
	int hop_penalty = atomic_read(&bat_priv->hop_penalty);

	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}
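/*
 * Worked example, assuming the TQ_MAX_VALUE of 255 and the default
 * hop_penalty of 10 used by batman-adv around this time: an incoming
 * tq of 255 becomes (255 * (255 - 10)) / 255 = 245, i.e. every
 * traversed hop costs roughly 4% of the remaining transmit quality.
 */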

/* when do we schedule our own packet to be sent */
static unsigned long own_send_time(struct bat_priv *bat_priv)
{
	return jiffies + msecs_to_jiffies(
		   atomic_read(&bat_priv->orig_interval) -
		   JITTER + (random32() % (2 * JITTER)));
}
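/*
 * Example, assuming the 20 ms JITTER constant from main.h and the
 * default orig_interval of 1000 ms: the next own OGM is scheduled
 * uniformly within [980, 1019] ms from now, which keeps neighboring
 * nodes from locking onto the same broadcast schedule.
 */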

/* when do we schedule a forwarded packet to be sent */
static unsigned long forward_send_time(void)
{
	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}
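/*
 * Forwarded OGMs only get a short random delay of up to JITTER/2 ms
 * (0..9 ms with the assumed 20 ms JITTER); this staggers rebroadcasts
 * from multiple forwarders while still giving add_bat_packet_to_list()
 * a window to aggregate several OGMs into one frame.
 */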

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb,
		    struct hard_iface *hard_iface,
		    uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warning("Interface %s is not up - can't send packet via "
			   "that interface!\n", hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */

	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Send a packet to a given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_packet *batman_packet;
	struct sk_buff *skb;

	if (hard_iface->if_status != IF_ACTIVE)
		return;

	packet_num = 0;
	buff_pos = 0;
	batman_packet = (struct batman_packet *)forw_packet->skb->data;

	/* adjust all flags and log packets */
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_packet->num_tt)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == hard_iface))
			batman_packet->flags |= DIRECTLINK;
		else
			batman_packet->flags &= ~DIRECTLINK;

		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
							    "Sending own" :
							    "Forwarding"));
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
			" IDF %s) on interface %s [%pM]\n",
			fwd_str, (packet_num > 0 ? "aggregated " : ""),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->tq, batman_packet->ttl,
			(batman_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			hard_iface->net_dev->name,
			hard_iface->net_dev->dev_addr);

		buff_pos += sizeof(struct batman_packet) +
			(batman_packet->num_tt * ETH_ALEN);
		packet_num++;
		batman_packet = (struct batman_packet *)
			(forw_packet->skb->data + buff_pos);
	}

	/* create clone because function is called more than once */
	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, hard_iface, broadcast_addr);
}
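/*
 * Layout of the aggregated buffer walked above, as the buff_pos
 * arithmetic implies: each embedded OGM is a fixed-size batman_packet
 * header immediately followed by num_tt translation-table entries of
 * ETH_ALEN bytes each:
 *
 *   | batman_packet | num_tt * ETH_ALEN | batman_packet | ... |
 */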

/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
	struct hard_iface *hard_iface;
	struct net_device *soft_iface;
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet =
		(struct batman_packet *)(forw_packet->skb->data);
	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		pr_err("Error - can't forward packet: incoming iface not "
		       "specified\n");
		return;
	}

	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
		return;

	/* multihomed peer assumed */
	/* non-primary OGMs are only broadcast on their interface */
	if ((directlink && (batman_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {

		/* FIXME: what about aggregated packets ? */
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s packet (originator %pM, seqno %d, TTL %d) "
			"on interface %s [%pM]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->ttl,
			forw_packet->if_incoming->net_dev->name,
			forw_packet->if_incoming->net_dev->dev_addr);

		/* skb is only used once and then forw_packet is freed */
		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
				broadcast_addr);
		forw_packet->skb = NULL;

		return;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		send_packet_to_if(forw_packet, hard_iface);
	}
	rcu_read_unlock();
}

static void rebuild_batman_packet(struct bat_priv *bat_priv,
				  struct hard_iface *hard_iface)
{
	int new_len;
	unsigned char *new_buff;
	struct batman_packet *batman_packet;

	new_len = sizeof(struct batman_packet) +
		(bat_priv->num_local_tt * ETH_ALEN);
	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep the old buffer if kmalloc fails */
	if (new_buff) {
		memcpy(new_buff, hard_iface->packet_buff,
		       sizeof(struct batman_packet));
		batman_packet = (struct batman_packet *)new_buff;

		batman_packet->num_tt = tt_local_fill_buffer(bat_priv,
				new_buff + sizeof(struct batman_packet),
				new_len - sizeof(struct batman_packet));

		kfree(hard_iface->packet_buff);
		hard_iface->packet_buff = new_buff;
		hard_iface->packet_len = new_len;
	}
}

void schedule_own_packet(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if;
	unsigned long send_time;
	struct batman_packet *batman_packet;
	int vis_server;

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	vis_server = atomic_read(&bat_priv->vis_mode);
	primary_if = primary_if_get_selected(bat_priv);

	/**
	 * the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	/* if the local tt has changed and this is the primary interface */
	if ((atomic_read(&bat_priv->tt_local_changed)) &&
	    (hard_iface == primary_if))
		rebuild_batman_packet(bat_priv, hard_iface);

	/**
	 * NOTE: packet_buff might just have been re-allocated in
	 * rebuild_batman_packet()
	 */
	batman_packet = (struct batman_packet *)hard_iface->packet_buff;

	/* change sequence number to network order */
	batman_packet->seqno =
		htonl((uint32_t)atomic_read(&hard_iface->seqno));

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_packet->flags |= VIS_SERVER;
	else
		batman_packet->flags &= ~VIS_SERVER;

	if ((hard_iface == primary_if) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
		batman_packet->gw_flags =
			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
	else
		batman_packet->gw_flags = 0;

	atomic_inc(&hard_iface->seqno);

	slide_own_bcast_window(hard_iface);
	send_time = own_send_time(bat_priv);
	add_bat_packet_to_list(bat_priv,
			       hard_iface->packet_buff,
			       hard_iface->packet_len,
			       hard_iface, 1, send_time);

	if (primary_if)
		hardif_free_ref(primary_if);
}
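/*
 * To summarize the own-OGM cycle above: stamp the current sequence
 * number in network order, mirror the vis and gateway settings into
 * the packet flags, increment the sequence counter, slide the local
 * broadcast window and queue the buffer with a jittered send time.
 * The actual transmission happens later in
 * send_outstanding_bat_packet(), which calls schedule_own_packet()
 * again and thereby keeps the cycle running.
 */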

void schedule_forward_packet(struct orig_node *orig_node,
			     struct ethhdr *ethhdr,
			     struct batman_packet *batman_packet,
			     uint8_t directlink, int tt_buff_len,
			     struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *router;
	unsigned char in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;

	if (batman_packet->ttl <= 1) {
		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
		return;
	}

	router = orig_node_get_router(orig_node);

	in_tq = batman_packet->tq;
	in_ttl = batman_packet->ttl;

	batman_packet->ttl--;
	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast the tq of our best ranking neighbor to ensure that
	 * the rebroadcast carries our best tq value */
	if (router && router->tq_avg != 0) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_eth(router->addr, ethhdr->h_source)) {
			batman_packet->tq = router->tq_avg;

			if (router->last_ttl)
				batman_packet->ttl = router->last_ttl - 1;
		}

		tq_avg = router->tq_avg;
	}

	if (router)
		neigh_node_free_ref(router);

	/* apply hop penalty */
	batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: tq_orig: %i, tq_avg: %i, "
		"tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
		batman_packet->ttl);

	batman_packet->seqno = htonl(batman_packet->seqno);

	/* switch off the primaries first hop flag when forwarding */
	batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
	if (directlink)
		batman_packet->flags |= DIRECTLINK;
	else
		batman_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time();
	add_bat_packet_to_list(bat_priv,
			       (unsigned char *)batman_packet,
			       sizeof(struct batman_packet) + tt_buff_len,
			       if_incoming, 0, send_time);
}
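/*
 * Numeric sketch of the rebroadcast rule above, assuming TQ_MAX_VALUE
 * of 255 and a hop_penalty of 10: if our best ranking neighbor towards
 * the originator holds a tq_avg of 200 and this OGM arrived via some
 * other neighbor, we rebroadcast tq = (200 * 245) / 255 = 192 instead
 * of penalizing the received value, so downstream nodes always rate
 * the route through our best next hop.
 */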

static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	kfree(forw_packet);
}

static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}
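/*
 * Note: the send_time argument is a delay in jiffies, as expected by
 * queue_delayed_work(), not an absolute timestamp; the callers below
 * pass 1 for "next tick" and ((5 * HZ) / 1000) for the 5 ms
 * rebroadcast gap.
 */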

/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)skb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(skb);

	forw_packet->skb = skb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, 1);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, hard_iface, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  ((5 * HZ) / 1000));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
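/*
 * Each broadcast therefore goes out three times per interface, spaced
 * (5 * HZ) / 1000 jiffies apart - about 5 ms, rounding down to "next
 * tick" on a HZ=100 kernel. The repeats trade a little airtime for a
 * much better delivery probability on lossy wireless links.
 */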

void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	send_packet(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queue's wake-up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		schedule_own_packet(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}

void purge_outstanding_packets(struct bat_priv *bat_priv,
			       struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}