// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author: Luis Carlos Cobo <[email protected]>
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);

static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
	/* Use last four bytes of hw addr as hash index */
	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}

static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};
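
/*
 * Both per-interface tables (mesh_paths and mpp_paths) use these
 * parameters; entries are keyed by the raw 6-byte destination address,
 * so a lookup is simply
 *
 *	rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);
 *
 * as done in mpath_lookup() below.
 */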

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	struct mesh_path *mpath = ptr;
	struct mesh_table *tbl = tblptr;

	mesh_path_free_rcu(tbl, mpath);
}

static struct mesh_table *mesh_table_alloc(void)
{
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	INIT_HLIST_HEAD(&newtbl->known_gates);
	INIT_HLIST_HEAD(&newtbl->walk_head);
	atomic_set(&newtbl->entries, 0);
	spin_lock_init(&newtbl->gates_lock);
	spin_lock_init(&newtbl->walk_lock);

	return newtbl;
}

static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
	kfree(tbl);
}

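/*
 * mesh_table_alloc() only sets up the list heads and locks; the embedded
 * rhashtable is initialised separately with rhashtable_init(&tbl->rhead,
 * &mesh_rht_params) in mesh_pathtbl_init() below.  mesh_table_free()
 * undoes both steps: rhashtable_free_and_destroy() calls
 * mesh_path_rht_free() for every remaining entry before the table itself
 * is freed.
 */
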
/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
		    MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

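/*
 * mesh_path_send_to_gates() below chains these calls: frames are moved
 * (copy == false) from the failed mpath to the first active gate, then
 * copied (copy == true) from each gate's queue to the next gate found.
 */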

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);

	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
}

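/*
 * Typical lookup pattern on the TX path (a minimal sketch; "dst" and
 * "next_hop" are illustrative locals, error handling trimmed):
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */
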
static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0;
	struct mesh_path *mpath;

	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (i++ == idx)
			break;
	}

	if (!mpath)
		return NULL;

	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
}

/**
 * mesh_path_add_gate - mark the given mpath as a mesh gate and add it to our list of known gates
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

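/*
 * Locking summary for the gate bookkeeping above: mpath->is_gate and
 * num_gates are only changed with mpath->state_lock held, membership of
 * tbl->known_gates is protected by tbl->gates_lock, and readers walk the
 * gate list under RCU (see mesh_path_send_to_gates()).
 */
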
static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	timer_setup(&new_mpath->timer, mesh_path_timer, 0);

	return new_mpath;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @sdata: local subif
 * @dst: destination address of the path (ETH_ALEN length)
 *
 * Returns: the new mesh path on success, or an ERR_PTR on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return ERR_PTR(-ENOMEM);

	tbl = sdata->u.mesh.mesh_paths;
	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
						  &new_mpath->rhash,
						  mesh_rht_params);
	if (!mpath)
		hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (mpath) {
		kfree(new_mpath);

		if (IS_ERR(mpath))
			return mpath;

		new_mpath = mpath;
	}

	sdata->u.mesh.mesh_paths_generation++;
	return new_mpath;
}

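/*
 * A minimal sketch of the lookup-then-add pattern used when a next hop
 * has to be resolved (roughly what the HWMP path resolution code does;
 * "target" is an illustrative destination address):
 *
 *	mpath = mesh_path_lookup(sdata, target);
 *	if (!mpath) {
 *		mpath = mesh_path_add(sdata, target);
 *		if (IS_ERR(mpath))
 *			return PTR_ERR(mpath);
 *	}
 *	if (!(mpath->flags & MESH_PATH_RESOLVING))
 *		mesh_queue_preq(mpath, PREQ_Q_F_START);
 */
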
int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct mesh_table *tbl;
	struct mesh_path *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return -ENOMEM;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	tbl = sdata->u.mesh.mpp_paths;

	spin_lock_bh(&tbl->walk_lock);
	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
					    &new_mpath->rhash,
					    mesh_rht_params);
	if (!ret)
		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (ret)
		kfree(new_mpath);

	sdata->u.mesh.mpp_paths_generation++;
	return ret;
}

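/*
 * Proxy entries are learned on receive: when a frame arrives with a
 * 6-address mesh header, the externally proxied source is recorded
 * against the mesh STA that proxies it, roughly (a sketch with
 * illustrative names; the real logic lives in the mesh RX path):
 *
 *	rcu_read_lock();
 *	mppath = mpp_path_lookup(sdata, proxied_addr);
 *	if (!mppath)
 *		mpp_path_add(sdata, proxied_addr, mpp_addr);
 *	rcu_read_unlock();
 */
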
/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_free_rcu(struct mesh_table *tbl,
			       struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
	mesh_gate_del(tbl, mpath);
	spin_unlock_bh(&mpath->state_lock);
	del_timer_sync(&mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
	kfree_rcu(mpath, rcu);
}

static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	hlist_del_rcu(&mpath->walk_list);
	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
	mesh_path_free_rcu(tbl, mpath);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta)
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (ether_addr_equal(mpath->mpp, proxy))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	table_flush_by_iface(sdata->u.mesh.mesh_paths);
	table_flush_by_iface(sdata->u.mesh.mpp_paths);
}

/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
static int table_path_del(struct mesh_table *tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_path *mpath;

	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
	if (!mpath) {
		spin_unlock_bh(&tbl->walk_lock);
		return -ENXIO;
	}

	__mesh_path_del(tbl, mpath);
	spin_unlock_bh(&tbl->walk_lock);
	return 0;
}


/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
	sdata->u.mesh.mesh_paths_generation++;
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mesh_path *gate;
	bool copy = false;

	tbl = sdata->u.mesh.mesh_paths;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
			mesh_path_move_to_queue(gate, from_mpath, copy);
			from_mpath = gate;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->dst, gate->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
		mesh_path_tx_pending(gate);
	}
	rcu_read_unlock();

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

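/*
 * A sketch of how a caller can fall back to the known gates once path
 * discovery for an mpath has failed: hand the queued frames to the gates
 * if any are known, otherwise drop them (the actual decision logic lives
 * in the HWMP code and may differ):
 *
 *	if (mesh_gate_num(sdata) > 0 &&
 *	    mesh_path_send_to_gates(mpath) == 0)
 *		return;
 *	mesh_path_flush_pending(mpath);
 */
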
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @sdata: network subif the frame was to be sent through
 * @skb: frame to discard
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function; it takes the lock itself.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
	/* init it at a low value - 0 start is tricky */
	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
	mesh_path_tx_pending(mpath);
}

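/*
 * Minimal sketch of how a fixed path is installed, roughly what the
 * cfg80211 mpath handlers do ("dst" and "next_hop_addr" are illustrative,
 * locking and error handling trimmed):
 *
 *	sta = sta_info_get(sdata, next_hop_addr);
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (!mpath)
 *		mpath = mesh_path_add(sdata, dst);
 *	if (!IS_ERR(mpath))
 *		mesh_path_fix_nexthop(mpath, sta);
 */
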
int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc();
	if (!tbl_path)
		return -ENOMEM;

	tbl_mpp = mesh_table_alloc();
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}

	rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
	rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);

	sdata->u.mesh.mesh_paths = tbl_path;
	sdata->u.mesh.mpp_paths = tbl_mpp;

	return 0;

free_path:
	mesh_table_free(tbl_path);
	return ret;
}

static
void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
			  struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
}

void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_free(sdata->u.mesh.mesh_paths);
	mesh_table_free(sdata->u.mesh.mpp_paths);
}
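
/*
 * Table lifecycle (roughly): mesh_pathtbl_init() is called when the mesh
 * interface is brought up, mesh_path_expire() runs periodically from the
 * mesh housekeeping work to drop stale entries, and
 * mesh_pathtbl_unregister() frees both tables when the interface is torn
 * down.
 */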