/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "log.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given destination address using
 * the specified interface. The destination may be a neighbor's address or the
 * broadcast address.
 *
 * Regardless of the return value, the skb is consumed.
 *
 * Return: A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due
 * to congestion or traffic shaping.
 */
int batadv_send_skb_packet(struct sk_buff *skb,
                           struct batadv_hard_iface *hard_iface,
                           const u8 *dst_addr)
{
        struct batadv_priv *bat_priv;
        struct ethhdr *ethhdr;
        int ret;

        bat_priv = netdev_priv(hard_iface->soft_iface);

        if (hard_iface->if_status != BATADV_IF_ACTIVE)
                goto send_skb_err;

        if (unlikely(!hard_iface->net_dev))
                goto send_skb_err;

        if (!(hard_iface->net_dev->flags & IFF_UP)) {
                pr_warn("Interface %s is not up - can't send packet via that interface!\n",
                        hard_iface->net_dev->name);
                goto send_skb_err;
        }

        /* push to the ethernet header. */
        if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
                goto send_skb_err;

        skb_reset_mac_header(skb);

        ethhdr = eth_hdr(skb);
        ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
        ether_addr_copy(ethhdr->h_dest, dst_addr);
        ethhdr->h_proto = htons(ETH_P_BATMAN);

        skb_set_network_header(skb, ETH_HLEN);
        skb->protocol = htons(ETH_P_BATMAN);

        skb->dev = hard_iface->net_dev;

        /* Save a clone of the skb to use when decoding coded packets */
        batadv_nc_skb_store_for_decoding(bat_priv, skb);

        /* dev_queue_xmit() returns a negative result on error. However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error.
         */
        ret = dev_queue_xmit(skb);
        return net_xmit_eval(ret);
send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

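/**
 * batadv_send_broadcast_skb - send an already prepared packet as a broadcast
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the broadcast packet
 *
 * Regardless of the return value, the skb is consumed.
 *
 * Return: the result of batadv_send_skb_packet() with the broadcast address
 * as destination.
 */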
int batadv_send_broadcast_skb(struct sk_buff *skb,
                              struct batadv_hard_iface *hard_iface)
{
        return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}

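/**
 * batadv_send_unicast_skb - send an already prepared packet to a neighbor
 * @skb: the packet to send
 * @neigh: the neighbor to send the packet to
 *
 * Regardless of the return value, the skb is consumed.
 *
 * Return: the result of batadv_send_skb_packet() on the interface the
 * neighbor was detected on, with the neighbor's address as destination.
 */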
int batadv_send_unicast_skb(struct sk_buff *skb,
                            struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
        struct batadv_hardif_neigh_node *hardif_neigh;
#endif
        int ret;

        ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
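        /* remember when unicast traffic was last sent to this neighbor;
         * B.A.T.M.A.N. V uses this to decide whether extra ELP probing
         * traffic is needed for its throughput estimation
         */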
        hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

        if ((hardif_neigh) && (ret != NET_XMIT_DROP))
                hardif_neigh->bat_v.last_unicast_tx = jiffies;

        if (hardif_neigh)
                batadv_hardif_neigh_put(hardif_neigh);
#endif

        return ret;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of MAC header. If the packet originated from this
 * host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: negative errno code on a failure, -EINPROGRESS if the skb is
 * buffered for later transmit or the NET_XMIT status returned by the
 * lower routine if the packet has been passed down.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
                            struct batadv_orig_node *orig_node,
                            struct batadv_hard_iface *recv_if)
{
        struct batadv_priv *bat_priv = orig_node->bat_priv;
        struct batadv_neigh_node *neigh_node;
        int ret;

        /* batadv_find_router() increases the neigh_node's refcount if found. */
        neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
        if (!neigh_node) {
                ret = -EINVAL;
                goto free_skb;
        }

        /* Check if the skb is too large to send in one piece and fragment
         * it if needed.
         */
        if (atomic_read(&bat_priv->fragmentation) &&
            skb->len > neigh_node->if_incoming->net_dev->mtu) {
                /* Fragment and send packet. */
                ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
                /* skb was consumed */
                skb = NULL;

                goto put_neigh_node;
        }

        /* try to network code the packet, if it is received on an interface
         * (i.e. being forwarded). If the packet originates from this node or if
         * network coding fails, then send the packet as usual.
         */
        if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
                ret = -EINPROGRESS;
        else
                ret = batadv_send_unicast_skb(skb, neigh_node);

        /* skb was consumed */
        skb = NULL;

put_neigh_node:
        batadv_neigh_node_put(neigh_node);
free_skb:
        kfree_skb(skb);

        return ret;
}

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
                                  struct batadv_orig_node *orig_node)
{
        struct batadv_unicast_packet *unicast_packet;
        u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

        if (batadv_skb_head_push(skb, hdr_size) < 0)
                return false;

        unicast_packet = (struct batadv_unicast_packet *)skb->data;
        unicast_packet->version = BATADV_COMPAT_VERSION;
        /* batman packet type: unicast */
        unicast_packet->packet_type = BATADV_UNICAST;
        /* set unicast ttl */
        unicast_packet->ttl = BATADV_TTL;
        /* copy the destination for faster routing */
        ether_addr_copy(unicast_packet->dest, orig_node->orig);
        /* set the destination tt version number */
        unicast_packet->ttvn = ttvn;

        return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
                                            struct batadv_orig_node *orig_node)
{
        size_t uni_size = sizeof(struct batadv_unicast_packet);

        return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
                                           struct sk_buff *skb,
                                           struct batadv_orig_node *orig,
                                           int packet_subtype)
{
        struct batadv_hard_iface *primary_if;
        struct batadv_unicast_4addr_packet *uc_4addr_packet;
        bool ret = false;

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;

        /* Pull the header space and fill the unicast_packet substructure.
         * We can do that because the first member of the uc_4addr_packet
         * is of type struct unicast_packet
         */
        if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
                                               orig))
                goto out;

        uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
        uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
        ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
        uc_4addr_packet->subtype = packet_subtype;
        uc_4addr_packet->reserved = 0;

        ret = true;
out:
        if (primary_if)
                batadv_hardif_put(primary_if);
        return ret;
}

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
                            struct sk_buff *skb, int packet_type,
                            int packet_subtype,
                            struct batadv_orig_node *orig_node,
                            unsigned short vid)
{
        struct batadv_unicast_packet *unicast_packet;
        struct ethhdr *ethhdr;
        int ret = NET_XMIT_DROP;

        if (!orig_node)
                goto out;

        switch (packet_type) {
        case BATADV_UNICAST:
                if (!batadv_send_skb_prepare_unicast(skb, orig_node))
                        goto out;
                break;
        case BATADV_UNICAST_4ADDR:
                if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
                                                           orig_node,
                                                           packet_subtype))
                        goto out;
                break;
        default:
                /* this function supports UNICAST and UNICAST_4ADDR only. It
                 * should never be invoked with any other packet type
                 */
                goto out;
        }

        /* skb->data might have been reallocated by
         * batadv_send_skb_prepare_unicast{,_4addr}()
         */
        ethhdr = eth_hdr(skb);
        unicast_packet = (struct batadv_unicast_packet *)skb->data;

        /* inform the destination node that we are still missing a correct route
         * for this client. The destination will receive this packet and will
         * try to reroute it because the ttvn contained in the header is less
         * than the current one
         */
        if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
                unicast_packet->ttvn = unicast_packet->ttvn - 1;

        ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
        /* skb was consumed */
        skb = NULL;

out:
        kfree_skb(skb);
        return ret;
}

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the corresponding destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
                                   struct sk_buff *skb, int packet_type,
                                   int packet_subtype, u8 *dst_hint,
                                   unsigned short vid)
{
        struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
        struct batadv_orig_node *orig_node;
        u8 *src, *dst;
        int ret;

        src = ethhdr->h_source;
        dst = ethhdr->h_dest;

        /* if we got a hint, let's send the packet to this client (if any) */
        if (dst_hint) {
                src = NULL;
                dst = dst_hint;
        }
        orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

        ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
                                      packet_subtype, orig_node, vid);

        if (orig_node)
                batadv_orig_node_put(orig_node);

        return ret;
}

428 | * batadv_send_skb_via_gw - send an skb via gateway lookup | |
429 | * @bat_priv: the bat priv with all the soft interface information | |
430 | * @skb: payload to send | |
431 | * @vid: the vid to be used to search the translation table | |
432 | * | |
433 | * Look up the currently selected gateway. Wrap the given skb into a batman-adv | |
434 | * unicast header and send this frame to this gateway node. | |
435 | * | |
62fe710f | 436 | * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise. |
e300d314 LL |
437 | */ |
438 | int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb, | |
439 | unsigned short vid) | |
440 | { | |
441 | struct batadv_orig_node *orig_node; | |
f19dc777 | 442 | int ret; |
e300d314 LL |
443 | |
444 | orig_node = batadv_gw_get_selected_orig(bat_priv); | |
f19dc777 SE |
445 | ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR, |
446 | BATADV_P_DATA, orig_node, vid); | |
447 | ||
448 | if (orig_node) | |
449 | batadv_orig_node_put(orig_node); | |
450 | ||
451 | return ret; | |
e300d314 LL |
452 | } |
453 | ||
/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: The packet to free
 * @dropped: whether the packet is freed because it is dropped
 *
 * This frees a forwarding packet and releases any resources it might
 * have claimed.
 */
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
                             bool dropped)
{
        if (dropped)
                kfree_skb(forw_packet->skb);
        else
                consume_skb(forw_packet->skb);

        if (forw_packet->if_incoming)
                batadv_hardif_put(forw_packet->if_incoming);
        if (forw_packet->if_outgoing)
                batadv_hardif_put(forw_packet->if_outgoing);
        if (forw_packet->queue_left)
                atomic_inc(forw_packet->queue_left);
        kfree(forw_packet);
}

/**
 * batadv_forw_packet_alloc - allocate a forwarding packet
 * @if_incoming: The (optional) if_incoming to be grabbed
 * @if_outgoing: The (optional) if_outgoing to be grabbed
 * @queue_left: The (optional) queue counter to decrease
 * @bat_priv: The bat_priv for the mesh of this forw_packet
 * @skb: The raw packet this forwarding packet shall contain
 *
 * Allocates a forwarding packet and tries to get a reference to the
 * (optional) if_incoming, if_outgoing and queue_left. If queue_left
 * is NULL then bat_priv is optional, too.
 *
 * Return: An allocated forwarding packet on success, NULL otherwise.
 */
struct batadv_forw_packet *
batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
                         struct batadv_hard_iface *if_outgoing,
                         atomic_t *queue_left,
                         struct batadv_priv *bat_priv,
                         struct sk_buff *skb)
{
        struct batadv_forw_packet *forw_packet;
        const char *qname;

        if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
                qname = "unknown";

                if (queue_left == &bat_priv->bcast_queue_left)
                        qname = "bcast";

                if (queue_left == &bat_priv->batman_queue_left)
                        qname = "batman";

                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "%s queue is full\n", qname);

                return NULL;
        }

        forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
        if (!forw_packet)
                goto err;

        if (if_incoming)
                kref_get(&if_incoming->refcount);

        if (if_outgoing)
                kref_get(&if_outgoing->refcount);

        INIT_HLIST_NODE(&forw_packet->list);
        INIT_HLIST_NODE(&forw_packet->cleanup_list);
        forw_packet->skb = skb;
        forw_packet->queue_left = queue_left;
        forw_packet->if_incoming = if_incoming;
        forw_packet->if_outgoing = if_outgoing;
        forw_packet->num_packets = 0;

        return forw_packet;

err:
        if (queue_left)
                atomic_inc(queue_left);

        return NULL;
}

/**
 * batadv_forw_packet_was_stolen - check whether someone stole this packet
 * @forw_packet: the forwarding packet to check
 *
 * This function checks whether the given forwarding packet was claimed by
 * someone else for free().
 *
 * Return: True if someone stole it, false otherwise.
 */
static bool
batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
{
        return !hlist_unhashed(&forw_packet->cleanup_list);
}

/**
 * batadv_forw_packet_steal - claim a forw_packet for free()
 * @forw_packet: the forwarding packet to steal
 * @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock)
 *
 * This function tries to steal a specific forw_packet from global
 * visibility for the purpose of getting it for free(). That means
 * the caller is *not* allowed to requeue it afterwards.
 *
 * Return: True if stealing was successful. False if someone else stole it
 * before us.
 */
bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
                              spinlock_t *lock)
{
        /* did purging routine steal it earlier? */
        spin_lock_bh(lock);
        if (batadv_forw_packet_was_stolen(forw_packet)) {
                spin_unlock_bh(lock);
                return false;
        }

        hlist_del_init(&forw_packet->list);

        /* Just to spot misuse of this function */
        hlist_add_fake(&forw_packet->cleanup_list);

        spin_unlock_bh(lock);
        return true;
}

/**
 * batadv_forw_packet_list_steal - claim a list of forward packets for free()
 * @forw_list: the forward packets to be stolen
 * @cleanup_list: a backup pointer, to be able to dispose of the packets later
 * @hard_iface: the interface to steal forward packets from
 *
 * This function claims responsibility to free any forw_packet queued on the
 * given hard_iface. If hard_iface is NULL forwarding packets on all hard
 * interfaces will be claimed.
 *
 * The packets are moved from the forw_list to the cleanup_list, which allows
 * already running threads to notice the claiming.
 */
static void
batadv_forw_packet_list_steal(struct hlist_head *forw_list,
                              struct hlist_head *cleanup_list,
                              const struct batadv_hard_iface *hard_iface)
{
        struct batadv_forw_packet *forw_packet;
        struct hlist_node *safe_tmp_node;

        hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
                                  forw_list, list) {
                /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if (hard_iface &&
                    (forw_packet->if_incoming != hard_iface) &&
                    (forw_packet->if_outgoing != hard_iface))
                        continue;

                hlist_del(&forw_packet->list);
                hlist_add_head(&forw_packet->cleanup_list, cleanup_list);
        }
}

/**
 * batadv_forw_packet_list_free - free a list of forward packets
 * @head: a list of to be freed forw_packets
 *
 * This function cancels the scheduling of any packet in the provided list,
 * waits for any possibly running packet forwarding thread to finish and
 * finally, safely frees this forward packet.
 *
 * This function might sleep.
 */
static void batadv_forw_packet_list_free(struct hlist_head *head)
{
        struct batadv_forw_packet *forw_packet;
        struct hlist_node *safe_tmp_node;

        hlist_for_each_entry_safe(forw_packet, safe_tmp_node, head,
                                  cleanup_list) {
                cancel_delayed_work_sync(&forw_packet->delayed_work);

                hlist_del(&forw_packet->cleanup_list);
                batadv_forw_packet_free(forw_packet, true);
        }
}

/**
 * batadv_forw_packet_queue - try to queue a forwarding packet
 * @forw_packet: the forwarding packet to queue
 * @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock)
 * @head: the shelf to queue it on (e.g. forw_{bat,bcast}_list)
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue a forwarding packet. Requeuing
 * is prevented if the corresponding interface is shutting down
 * (e.g. if batadv_forw_packet_list_steal() was called for this
 * packet earlier).
 *
 * Calling batadv_forw_packet_queue() after a call to
 * batadv_forw_packet_steal() is forbidden!
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet,
                                     spinlock_t *lock, struct hlist_head *head,
                                     unsigned long send_time)
{
        spin_lock_bh(lock);

        /* did purging routine steal it from us? */
        if (batadv_forw_packet_was_stolen(forw_packet)) {
                /* If you got it for free() without trouble, then
                 * don't get back into the queue after stealing...
                 */
                WARN_ONCE(hlist_fake(&forw_packet->cleanup_list),
                          "Requeuing after batadv_forw_packet_steal() not allowed!\n");

                spin_unlock_bh(lock);
                return;
        }

        hlist_del_init(&forw_packet->list);
        hlist_add_head(&forw_packet->list, head);

        queue_delayed_work(batadv_event_workqueue,
                           &forw_packet->delayed_work,
                           send_time - jiffies);
        spin_unlock_bh(lock);
}

/**
 * batadv_forw_packet_bcast_queue - try to queue a broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue a broadcast packet.
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
static void
batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv,
                               struct batadv_forw_packet *forw_packet,
                               unsigned long send_time)
{
        batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bcast_list_lock,
                                 &bat_priv->forw_bcast_list, send_time);
}

/**
 * batadv_forw_packet_ogmv1_queue - try to queue an OGMv1 packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue an OGMv1 packet.
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
                                    struct batadv_forw_packet *forw_packet,
                                    unsigned long send_time)
{
        batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bat_list_lock,
                                 &bat_priv->forw_bat_list, send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 * @own_packet: true if it is a self-generated broadcast packet
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                    const struct sk_buff *skb,
                                    unsigned long delay,
                                    bool own_packet)
{
        struct batadv_hard_iface *primary_if;
        struct batadv_forw_packet *forw_packet;
        struct batadv_bcast_packet *bcast_packet;
        struct sk_buff *newskb;

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto err;

        newskb = skb_copy(skb, GFP_ATOMIC);
        if (!newskb) {
                batadv_hardif_put(primary_if);
                goto err;
        }

        forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
                                               &bat_priv->bcast_queue_left,
                                               bat_priv, newskb);
        batadv_hardif_put(primary_if);
        if (!forw_packet)
                goto err_packet_free;

        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct batadv_bcast_packet *)newskb->data;
        bcast_packet->ttl--;

        forw_packet->own = own_packet;

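        /* the queued delayed work performs the actual (re)broadcasts */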
        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          batadv_send_outstanding_bcast_packet);

        batadv_forw_packet_bcast_queue(bat_priv, forw_packet, jiffies + delay);
        return NETDEV_TX_OK;

err_packet_free:
        kfree_skb(newskb);
err:
        return NETDEV_TX_BUSY;
}

/**
 * batadv_forw_packet_bcasts_left - check if a retransmission is necessary
 * @forw_packet: the forwarding packet to check
 * @hard_iface: the interface to check on
 *
 * Checks whether a given packet has any (re)transmissions left on the provided
 * interface.
 *
 * hard_iface may be NULL: In that case the number of transmissions this skb had
 * so far is compared with the maximum amount of retransmissions independent of
 * any interface instead.
 *
 * Return: True if (re)transmissions are left, false otherwise.
 */
static bool
batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet,
                               struct batadv_hard_iface *hard_iface)
{
        unsigned int max;

        if (hard_iface)
                max = hard_iface->num_bcasts;
        else
                max = BATADV_NUM_BCASTS_MAX;

        return BATADV_SKB_CB(forw_packet->skb)->num_bcasts < max;
}

/**
 * batadv_forw_packet_bcasts_inc - increment retransmission counter of a packet
 * @forw_packet: the packet to increase the counter for
 */
static void
batadv_forw_packet_bcasts_inc(struct batadv_forw_packet *forw_packet)
{
        BATADV_SKB_CB(forw_packet->skb)->num_bcasts++;
}

/**
 * batadv_forw_packet_is_rebroadcast - check packet for previous transmissions
 * @forw_packet: the packet to check
 *
 * Return: True if this packet was transmitted before, false otherwise.
 */
bool batadv_forw_packet_is_rebroadcast(struct batadv_forw_packet *forw_packet)
{
        return BATADV_SKB_CB(forw_packet->skb)->num_bcasts > 0;
}

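/**
 * batadv_send_outstanding_bcast_packet - send a queued broadcast packet
 * @work: work queue item of the packet to send
 *
 * Transmits one round of the queued broadcast packet on every suitable hard
 * interface and requeues it (about 5ms later) as long as further rebroadcasts
 * are left; otherwise the forwarding packet is freed.
 */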
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
        struct batadv_hard_iface *hard_iface;
        struct batadv_hardif_neigh_node *neigh_node;
        struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct batadv_bcast_packet *bcast_packet;
        struct sk_buff *skb1;
        struct net_device *soft_iface;
        struct batadv_priv *bat_priv;
        unsigned long send_time = jiffies + msecs_to_jiffies(5);
        bool dropped = false;
        u8 *neigh_addr;
        u8 *orig_neigh;
        int ret = 0;

        delayed_work = to_delayed_work(work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        soft_iface = forw_packet->if_incoming->soft_iface;
        bat_priv = netdev_priv(soft_iface);

        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
                dropped = true;
                goto out;
        }

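        /* drop the broadcast if the distributed ARP table can handle the
         * request (e.g. an ARP request already answered via the DAT cache)
         */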
        if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet)) {
                dropped = true;
                goto out;
        }

        bcast_packet = (struct batadv_bcast_packet *)forw_packet->skb->data;

        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

                if (!batadv_forw_packet_bcasts_left(forw_packet, hard_iface))
                        continue;

                if (forw_packet->own) {
                        neigh_node = NULL;
                } else {
                        neigh_addr = eth_hdr(forw_packet->skb)->h_source;
                        neigh_node = batadv_hardif_neigh_get(hard_iface,
                                                             neigh_addr);
                }

                orig_neigh = neigh_node ? neigh_node->orig : NULL;

                ret = batadv_hardif_no_broadcast(hard_iface, bcast_packet->orig,
                                                 orig_neigh);

                if (ret) {
                        char *type;

                        switch (ret) {
                        case BATADV_HARDIF_BCAST_NORECIPIENT:
                                type = "no neighbor";
                                break;
                        case BATADV_HARDIF_BCAST_DUPFWD:
                                type = "single neighbor is source";
                                break;
                        case BATADV_HARDIF_BCAST_DUPORIG:
                                type = "single neighbor is originator";
                                break;
                        default:
                                type = "unknown";
                        }

                        batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "BCAST packet from orig %pM on %s suppressed: %s\n",
                                   bcast_packet->orig,
                                   hard_iface->net_dev->name, type);

                        if (neigh_node)
                                batadv_hardif_neigh_put(neigh_node);

                        continue;
                }

                if (neigh_node)
                        batadv_hardif_neigh_put(neigh_node);

                if (!kref_get_unless_zero(&hard_iface->refcount))
                        continue;

                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        batadv_send_broadcast_skb(skb1, hard_iface);

                batadv_hardif_put(hard_iface);
        }
        rcu_read_unlock();

        batadv_forw_packet_bcasts_inc(forw_packet);

        /* if we still have some more bcasts to send */
        if (batadv_forw_packet_bcasts_left(forw_packet, NULL)) {
                batadv_forw_packet_bcast_queue(bat_priv, forw_packet,
                                               send_time);
                return;
        }

out:
        /* do we get something for free()? */
        if (batadv_forw_packet_steal(forw_packet,
                                     &bat_priv->forw_bcast_list_lock))
                batadv_forw_packet_free(forw_packet, dropped);
}

/**
 * batadv_purge_outstanding_packets - stop/purge scheduled bcast/OGMv1 packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the hard interface to cancel and purge bcast/ogm packets on
 *
 * This method cancels and purges any broadcast and OGMv1 packet on the given
 * hard_iface. If hard_iface is NULL, broadcast and OGMv1 packets on all hard
 * interfaces will be canceled and purged.
 *
 * This function might sleep.
 */
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
                                 const struct batadv_hard_iface *hard_iface)
{
        struct hlist_head head = HLIST_HEAD_INIT;

        if (hard_iface)
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "%s(): %s\n",
                           __func__, hard_iface->net_dev->name);
        else
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "%s()\n", __func__);

        /* claim bcast list for free() */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        batadv_forw_packet_list_steal(&bat_priv->forw_bcast_list, &head,
                                      hard_iface);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* claim batman packet list for free() */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        batadv_forw_packet_list_steal(&bat_priv->forw_bat_list, &head,
                                      hard_iface);
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);

        /* then cancel or wait for packet workers to finish and free */
        batadv_forw_packet_list_free(&head);
}