/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <[email protected]>
 * Copyright 2007	Johannes Berg <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "tkip.h"
#include "wme.h"

static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
					   struct tid_ampdu_rx *tid_agg_rx,
					   struct sk_buff *skb,
					   struct ieee80211_rx_status *status,
					   u16 mpdu_seq_num,
					   int bar_req);
/*
 * monitor mode reception
 *
 * This function cleans up the SKB, i.e. it removes all the stuff
 * only useful for monitoring.
 */
static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
					   struct sk_buff *skb,
					   int rtap_len)
{
	skb_pull(skb, rtap_len);

	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
		if (likely(skb->len > FCS_LEN))
			skb_trim(skb, skb->len - FCS_LEN);
		else {
			/* driver bug */
			WARN_ON(1);
			dev_kfree_skb(skb);
			skb = NULL;
		}
	}

	return skb;
}

static inline int should_drop_frame(struct ieee80211_rx_status *status,
				    struct sk_buff *skb,
				    int present_fcs_len,
				    int radiotap_len)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		return 1;
	if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len))
		return 1;
	if (ieee80211_is_ctl(hdr->frame_control) &&
	    !ieee80211_is_pspoll(hdr->frame_control) &&
	    !ieee80211_is_back_req(hdr->frame_control))
		return 1;
	return 0;
}

static int
ieee80211_rx_radiotap_len(struct ieee80211_local *local,
			  struct ieee80211_rx_status *status)
{
	int len;

	/* always present fields */
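	/*
	 * 9 bytes of always-reserved data: flags (1) + rate (1) +
	 * channel frequency and flags (2 + 2) + antenna (1) + RX flags (2)
	 */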
	len = sizeof(struct ieee80211_radiotap_header) + 9;

	if (status->flag & RX_FLAG_TSFT)
		len += 8;
	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
		len += 1;
	if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
		len += 1;

	if (len & 1) /* padding for RX_FLAGS if necessary */
		len++;

	/* make sure radiotap starts at a naturally aligned address */
	if (len % 8)
		len = roundup(len, 8);

	return len;
}

/*
 * ieee80211_add_rx_radiotap_header - add radiotap header
 *
 * add a radiotap header containing all the fields which the hardware provided.
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rx_status *status,
				 struct ieee80211_rate *rate,
				 int rtap_len)
{
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;

	rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
	memset(rthdr, 0, rtap_len);

	/* radiotap header, set always present flags */
	rthdr->it_present =
		cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
			    (1 << IEEE80211_RADIOTAP_CHANNEL) |
			    (1 << IEEE80211_RADIOTAP_ANTENNA) |
			    (1 << IEEE80211_RADIOTAP_RX_FLAGS));
	rthdr->it_len = cpu_to_le16(rtap_len);

	pos = (unsigned char *)(rthdr + 1);

	/* the order of the following fields is important */

	/* IEEE80211_RADIOTAP_TSFT */
	if (status->flag & RX_FLAG_TSFT) {
		*(__le64 *)pos = cpu_to_le64(status->mactime);
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
		pos += 8;
	}

	/* IEEE80211_RADIOTAP_FLAGS */
	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
		*pos |= IEEE80211_RADIOTAP_F_FCS;
	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
	if (status->flag & RX_FLAG_SHORTPRE)
		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
	pos++;

	/* IEEE80211_RADIOTAP_RATE */
	if (status->flag & RX_FLAG_HT) {
		/*
		 * TODO: add following information into radiotap header once
		 * suitable fields are defined for it:
		 * - MCS index (status->rate_idx)
		 * - HT40 (status->flag & RX_FLAG_40MHZ)
		 * - short-GI (status->flag & RX_FLAG_SHORT_GI)
		 */
		*pos = 0;
	} else {
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		*pos = rate->bitrate / 5;
	}
	pos++;

	/* IEEE80211_RADIOTAP_CHANNEL */
	*(__le16 *)pos = cpu_to_le16(status->freq);
	pos += 2;
	if (status->band == IEEE80211_BAND_5GHZ)
		*(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
					     IEEE80211_CHAN_5GHZ);
	else if (rate->flags & IEEE80211_RATE_ERP_G)
		*(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
					     IEEE80211_CHAN_2GHZ);
	else
		*(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK |
					     IEEE80211_CHAN_2GHZ);
	pos += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
		*pos = status->signal;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
		pos++;
	}

	/* IEEE80211_RADIOTAP_DBM_ANTNOISE */
	if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
		*pos = status->noise;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
		pos++;
	}

	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */

	/* IEEE80211_RADIOTAP_ANTENNA */
	*pos = status->antenna;
	pos++;

	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */

	/* IEEE80211_RADIOTAP_RX_FLAGS */
	/* ensure 2 byte alignment for the 2 byte field as required */
	if ((pos - (unsigned char *)rthdr) & 1)
		pos++;
	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
		*(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADPLCP);
	pos += 2;
}

/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rx_status *status,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_sub_if_data *sdata;
	int needed_headroom = 0;
	struct sk_buff *skb, *skb2;
	struct net_device *prev_dev = NULL;
	int present_fcs_len = 0;
	int rtap_len = 0;

	/*
	 * First, we may need to make a copy of the skb because
	 * (1) we need to modify it for radiotap (if not present), and
	 * (2) the other RX handlers will modify the skb we got.
	 *
	 * We don't need to, of course, if we aren't going to return
	 * the SKB because it has a bad FCS/PLCP checksum.
	 */
	if (status->flag & RX_FLAG_RADIOTAP)
		rtap_len = ieee80211_get_radiotap_len(origskb->data);
	else
		/* room for the radiotap header based on driver features */
		needed_headroom = ieee80211_rx_radiotap_len(local, status);

	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
		present_fcs_len = FCS_LEN;

	if (!local->monitors) {
		if (should_drop_frame(status, origskb, present_fcs_len,
				      rtap_len)) {
			dev_kfree_skb(origskb);
			return NULL;
		}

		return remove_monitor_info(local, origskb, rtap_len);
	}

	if (should_drop_frame(status, origskb, present_fcs_len, rtap_len)) {
		/* only need to expand headroom if necessary */
		skb = origskb;
		origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
		 */
		skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);

		origskb = remove_monitor_info(local, origskb, rtap_len);

		if (!skb)
			return origskb;
	}

	/* if necessary, prepend radiotap information */
	if (!(status->flag & RX_FLAG_RADIOTAP))
		ieee80211_add_rx_radiotap_header(local, skb, status, rate,
						 needed_headroom);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!netif_running(sdata->dev))
			continue;

		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
			continue;

		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
			continue;

		if (prev_dev) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2) {
				skb2->dev = prev_dev;
				netif_rx(skb2);
			}
		}

		prev_dev = sdata->dev;
		sdata->dev->stats.rx_packets++;
		sdata->dev->stats.rx_bytes += skb->len;
	}

	if (prev_dev) {
		skb->dev = prev_dev;
		netif_rx(skb);
	} else
		dev_kfree_skb(skb);

	return origskb;
}


static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	int tid;

	/* does the frame have a qos control field? */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		/* frame has qos control */
		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
		if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
			rx->flags |= IEEE80211_RX_AMSDU;
		else
			rx->flags &= ~IEEE80211_RX_AMSDU;
	} else {
		/*
		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
		 *
		 * Sequence numbers for management frames, QoS data
		 * frames with a broadcast/multicast address in the
		 * Address 1 field, and all non-QoS data frames sent
		 * by QoS STAs are assigned using an additional single
		 * modulo-4096 counter, [...]
		 *
		 * We also use that counter for non-QoS STAs.
		 */
		tid = NUM_RX_DATA_QUEUES - 1;
	}

	rx->queue = tid;
	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
	 * For now, set skb->priority to 0 for other cases. */
	rx->skb->priority = (tid > 7) ? 0 : tid;
}

/**
 * DOC: Packet alignment
 *
 * Drivers always need to pass packets that are aligned to two-byte boundaries
 * to the stack.
 *
 * Additionally, drivers should, if possible, align the payload data in a way
 * that guarantees that the contained IP header is aligned to a four-byte
 * boundary. In the case of regular frames, this simply means aligning the
 * payload to a four-byte boundary (because either the IP header is directly
 * contained, or IV/RFC1042 headers that have a length divisible by four are
 * in front of it).
 *
 * With A-MSDU frames, however, the payload data address must yield two modulo
 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
 * push the IP header further back to a multiple of four again. Thankfully, the
 * specs were sane enough this time around to require padding each A-MSDU
 * subframe to a length that is a multiple of four.
 *
 * Padding like the kind Atheros hardware adds, in between the 802.11 header
 * and the payload, is not supported; in that case the driver is required to
 * move the 802.11 header so that it is directly in front of the payload.
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	int hdrlen;

#ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
	return;
#endif

	if (WARN_ONCE((unsigned long)rx->skb->data & 1,
		      "unaligned packet at 0x%p\n", rx->skb->data))
		return;

	if (!ieee80211_is_data_present(hdr->frame_control))
		return;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (rx->flags & IEEE80211_RX_AMSDU)
		hdrlen += ETH_HLEN;
	WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
		  "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
}


/* rx handlers */

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local = rx->local;
	struct sk_buff *skb = rx->skb;

	if (unlikely(local->hw_scanning))
		return ieee80211_scan_rx(rx->sdata, skb, rx->status);

	if (unlikely(local->sw_scanning)) {
		/* drop all the other packets during a software scan anyway */
		if (ieee80211_scan_rx(rx->sdata, skb, rx->status)
		    != RX_QUEUED)
			dev_kfree_skb(skb);
		return RX_QUEUED;
	}

	if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) {
		/* scanning finished during invoking of handlers */
		I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
		return RX_DROP_UNUSABLE;
	}

	return RX_CONTINUE;
}


static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(hdr);
}


static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(hdr);
}


/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
	struct ieee80211_mmie *mmie;

	if (skb->len < 24 + sizeof(*mmie) ||
	    !is_multicast_ether_addr(hdr->da))
		return -1;

	if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
		return -1; /* not a robust management frame */

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id != WLAN_EID_MMIE ||
	    mmie->length != sizeof(*mmie) - 2)
		return -1;

	return le16_to_cpu(mmie->key_id);
}


static ieee80211_rx_result
ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);

	if (ieee80211_is_data(hdr->frame_control)) {
		if (!ieee80211_has_a4(hdr->frame_control))
			return RX_DROP_MONITOR;
		if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0)
			return RX_DROP_MONITOR;
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */

	if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
		struct ieee80211_mgmt *mgmt;

		if (!ieee80211_is_mgmt(hdr->frame_control))
			return RX_DROP_MONITOR;

		if (ieee80211_is_action(hdr->frame_control)) {
			mgmt = (struct ieee80211_mgmt *)hdr;
			if (mgmt->u.action.category != PLINK_CATEGORY)
				return RX_DROP_MONITOR;
			return RX_CONTINUE;
		}

		if (ieee80211_is_probe_req(hdr->frame_control) ||
		    ieee80211_is_probe_resp(hdr->frame_control) ||
		    ieee80211_is_beacon(hdr->frame_control))
			return RX_CONTINUE;

		return RX_DROP_MONITOR;
	}

#define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))

	if (ieee80211_is_data(hdr->frame_control) &&
	    is_multicast_ether_addr(hdr->addr1) &&
	    mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata))
		return RX_DROP_MONITOR;
#undef msh_h_get

	return RX_CONTINUE;
}


static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;

	/* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
	if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
		if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
			     rx->sta->last_seq_ctrl[rx->queue] ==
			     hdr->seq_ctrl)) {
			if (rx->flags & IEEE80211_RX_RA_MATCH) {
				rx->local->dot11FrameDuplicateCount++;
				rx->sta->num_duplicates++;
			}
			return RX_DROP_MONITOR;
		} else
			rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
	}

	if (unlikely(rx->skb->len < 16)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
		return RX_DROP_MONITOR;
	}

	/* Drop disallowed frame classes based on STA auth/assoc state;
	 * IEEE 802.11, Chap 5.5.
	 *
	 * mac80211 filters only based on association state, i.e. it drops
	 * Class 3 frames from not associated stations. hostapd sends
	 * deauth/disassoc frames when needed. In addition, hostapd is
	 * responsible for filtering on both auth and assoc states.
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
		if ((!ieee80211_has_fromds(hdr->frame_control) &&
		     !ieee80211_has_tods(hdr->frame_control) &&
		     ieee80211_is_data(hdr->frame_control)) ||
		    !(rx->flags & IEEE80211_RX_RA_MATCH)) {
			/* Drop IBSS frames and frames for other hosts
			 * silently. */
			return RX_DROP_MONITOR;
		}

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}


static ieee80211_rx_result debug_noinline
ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	int keyidx;
	int hdrlen;
	ieee80211_rx_result result = RX_DROP_UNUSABLE;
	struct ieee80211_key *stakey = NULL;
	int mmie_keyidx = -1;

	/*
	 * Key selection 101
	 *
	 * There are four types of keys:
	 *  - GTK (group keys)
	 *  - IGTK (group keys for management frames)
	 *  - PTK (pairwise keys)
	 *  - STK (station-to-station pairwise keys)
	 *
	 * When selecting a key, we have to distinguish between multicast
	 * (including broadcast) and unicast frames, the latter can only
	 * use PTKs and STKs while the former always use GTKs and IGTKs.
	 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
	 * unicast frames can also use key indices like GTKs. Hence, if we
	 * don't have a PTK/STK we check the key index for a WEP key.
	 *
	 * Note that in a regular BSS, multicast frames are sent by the
	 * AP only, associated stations unicast the frame to the AP first
	 * which then multicasts it on their behalf.
	 *
	 * There is also a slight problem in IBSS mode: GTKs are negotiated
	 * with each station, that is something we don't currently handle.
	 * The spec seems to expect that one negotiates the same key with
	 * every station but there's no such requirement; VLANs could be
	 * possible.
	 */

	/*
	 * No point in finding a key and decrypting if the frame is neither
	 * addressed to us nor a multicast frame.
	 */
	if (!(rx->flags & IEEE80211_RX_RA_MATCH))
		return RX_CONTINUE;

	if (rx->sta)
		stakey = rcu_dereference(rx->sta->key);

	if (!ieee80211_has_protected(hdr->frame_control))
		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);

	if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
		rx->key = stakey;
		/* Skip decryption if the frame is not protected. */
		if (!ieee80211_has_protected(hdr->frame_control))
			return RX_CONTINUE;
	} else if (mmie_keyidx >= 0) {
		/* Broadcast/multicast robust management frame / BIP */
		if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
		    (rx->status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		if (mmie_keyidx < NUM_DEFAULT_KEYS ||
		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
		rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
	} else if (!ieee80211_has_protected(hdr->frame_control)) {
		/*
		 * The frame was not protected, so skip decryption. However, we
		 * need to set rx->key if there is a key that could have been
		 * used so that the frame may be dropped if encryption would
		 * have been expected.
		 */
		struct ieee80211_key *key = NULL;
		if (ieee80211_is_mgmt(hdr->frame_control) &&
		    is_multicast_ether_addr(hdr->addr1) &&
		    (key = rcu_dereference(rx->sdata->default_mgmt_key)))
			rx->key = key;
		else if ((key = rcu_dereference(rx->sdata->default_key)))
			rx->key = key;
		return RX_CONTINUE;
	} else {
		/*
		 * The device doesn't give us the IV so we won't be
		 * able to look up the key. That's ok though, we
		 * don't need to decrypt the frame, we just won't
		 * be able to keep statistics accurate.
		 * Except for key threshold notifications, should
		 * we somehow allow the driver to tell us which key
		 * the hardware used if this flag is set?
		 */
		if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
		    (rx->status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		hdrlen = ieee80211_hdrlen(hdr->frame_control);

		if (rx->skb->len < 8 + hdrlen)
			return RX_DROP_UNUSABLE; /* TODO: count this? */

		/*
		 * no need to call ieee80211_wep_get_keyidx,
		 * it verifies a bunch of things we've done already
		 */
		keyidx = rx->skb->data[hdrlen + 3] >> 6;

		rx->key = rcu_dereference(rx->sdata->keys[keyidx]);

		/*
		 * RSNA-protected unicast frames should always be sent with
		 * pairwise or station-to-station keys, but for WEP we allow
		 * using a key index as well.
		 */
		if (rx->key && rx->key->conf.alg != ALG_WEP &&
		    !is_multicast_ether_addr(hdr->addr1))
			rx->key = NULL;
	}

	if (rx->key) {
		rx->key->tx_rx_count++;
		/* TODO: add threshold stuff again */
	} else {
		return RX_DROP_MONITOR;
	}

	/* Check for weak IVs if possible */
	if (rx->sta && rx->key->conf.alg == ALG_WEP &&
	    ieee80211_is_data(hdr->frame_control) &&
	    (!(rx->status->flag & RX_FLAG_IV_STRIPPED) ||
	     !(rx->status->flag & RX_FLAG_DECRYPTED)) &&
	    ieee80211_wep_is_weak_iv(rx->skb, rx->key))
		rx->sta->wep_weak_iv_count++;

	switch (rx->key->conf.alg) {
	case ALG_WEP:
		result = ieee80211_crypto_wep_decrypt(rx);
		break;
	case ALG_TKIP:
		result = ieee80211_crypto_tkip_decrypt(rx);
		break;
	case ALG_CCMP:
		result = ieee80211_crypto_ccmp_decrypt(rx);
		break;
	case ALG_AES_CMAC:
		result = ieee80211_crypto_aes_cmac_decrypt(rx);
		break;
	}

	/* either the frame has been decrypted or will be dropped */
	rx->status->flag |= RX_FLAG_DECRYPTED;

	return result;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	local = rx->local;
	skb = rx->skb;
	hdr = (struct ieee80211_hdr *) skb->data;

	if (!local->pspolling)
		return RX_CONTINUE;

	if (!ieee80211_has_fromds(hdr->frame_control))
		/* this is not from AP */
		return RX_CONTINUE;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!ieee80211_has_moredata(hdr->frame_control)) {
		/* AP has no more frames buffered for us */
		local->pspolling = false;
		return RX_CONTINUE;
	}

	/* more data bit is set, let's request a new frame from the AP */
	ieee80211_send_pspoll(local, rx->sdata);

	return RX_CONTINUE;
}

static void ap_sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;

	atomic_inc(&sdata->bss->num_sta_ps);
	set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
	drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
	printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
	       sdata->dev->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}

static int ap_sta_ps_end(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	int sent, buffered;

	atomic_dec(&sdata->bss->num_sta_ps);

	clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
	drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta);

	if (!skb_queue_empty(&sta->ps_tx_buf))
		sta_info_clear_tim_bit(sta);

#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
	printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
	       sdata->dev->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */

	/* Send all buffered frames to the station */
	sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered);
	buffered = ieee80211_add_pending_skbs(local, &sta->ps_tx_buf);
	sent += buffered;
	local->total_ps_buffered -= buffered;

#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
	printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
	       "since STA not sleeping anymore\n", sdata->dev->name,
	       sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */

	return sent;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;

	if (!sta)
		return RX_CONTINUE;

	/* Update last_rx only for IBSS packets which are for the current
	 * BSSID to avoid keeping the current IBSS network alive in cases where
	 * other STAs are using a different BSSID. */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
			sta->last_rx = jiffies;
	} else
	if (!is_multicast_ether_addr(hdr->addr1) ||
	    rx->sdata->vif.type == NL80211_IFTYPE_STATION) {
		/* Update last_rx only for unicast frames in order to prevent
		 * the Probe Request frames (the only broadcast frames from a
		 * STA in infrastructure mode) from keeping a connection alive.
		 * Mesh beacons will update last_rx if they are found to
		 * match the current local configuration when processed.
		 */
		if (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
		    ieee80211_is_beacon(hdr->frame_control)) {
			rx->sdata->u.mgd.last_beacon = jiffies;
		} else
			sta->last_rx = jiffies;
	}

	if (!(rx->flags & IEEE80211_RX_RA_MATCH))
		return RX_CONTINUE;

	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
		ieee80211_sta_rx_notify(rx->sdata, hdr);

	sta->rx_fragments++;
	sta->rx_bytes += rx->skb->len;
	sta->last_signal = rx->status->signal;
	sta->last_qual = rx->status->qual;
	sta->last_noise = rx->status->noise;

	/*
	 * Change STA power saving mode only at the end of a frame
	 * exchange sequence.
	 */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
		if (test_sta_flags(sta, WLAN_STA_PS)) {
			/*
			 * Ignore doze->wake transitions that are
			 * indicated by non-data frames, the standard
			 * is unclear here, but for example going to
			 * PS mode and then scanning would cause a
			 * doze->wake transition for the probe request,
			 * and that is clearly undesirable.
			 */
			if (ieee80211_is_data(hdr->frame_control) &&
			    !ieee80211_has_pm(hdr->frame_control))
				rx->sent_ps_buffered += ap_sta_ps_end(sta);
		} else {
			if (ieee80211_has_pm(hdr->frame_control))
				ap_sta_ps_start(sta);
		}
	}

	/* Drop data::nullfunc frames silently, since they are used only to
	 * control station power saving mode. */
	if (ieee80211_is_nullfunc(hdr->frame_control)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
		/* Update counter and free packet here to avoid counting this
		 * as a dropped packet. */
		sta->rx_packets++;
		dev_kfree_skb(rx->skb);
		return RX_QUEUED;
	}

	return RX_CONTINUE;
} /* ieee80211_rx_h_sta_process */

static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
			 unsigned int frag, unsigned int seq, int rx_queue,
			 struct sk_buff **skb)
{
	struct ieee80211_fragment_entry *entry;
	int idx;

	idx = sdata->fragment_next;
	entry = &sdata->fragments[sdata->fragment_next++];
	if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
		sdata->fragment_next = 0;

	if (!skb_queue_empty(&entry->skb_list)) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
		struct ieee80211_hdr *hdr =
			(struct ieee80211_hdr *) entry->skb_list.next->data;
		printk(KERN_DEBUG "%s: RX reassembly removed oldest "
		       "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
		       "addr1=%pM addr2=%pM\n",
		       sdata->dev->name, idx,
		       jiffies - entry->first_frag_time, entry->seq,
		       entry->last_frag, hdr->addr1, hdr->addr2);
#endif
		__skb_queue_purge(&entry->skb_list);
	}

	__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
	*skb = NULL;
	entry->first_frag_time = jiffies;
	entry->seq = seq;
	entry->rx_queue = rx_queue;
	entry->last_frag = frag;
	entry->ccmp = 0;
	entry->extra_len = 0;

	return entry;
}

static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
			  unsigned int frag, unsigned int seq,
			  int rx_queue, struct ieee80211_hdr *hdr)
{
	struct ieee80211_fragment_entry *entry;
	int i, idx;

	idx = sdata->fragment_next;
	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
		struct ieee80211_hdr *f_hdr;

		idx--;
		if (idx < 0)
			idx = IEEE80211_FRAGMENT_MAX - 1;

		entry = &sdata->fragments[idx];
		if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
		    entry->rx_queue != rx_queue ||
		    entry->last_frag + 1 != frag)
			continue;

		f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;

		/*
		 * Check ftype and addresses are equal, else check next fragment
		 */
		if (((hdr->frame_control ^ f_hdr->frame_control) &
		     cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
		    compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
		    compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
			continue;

		if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
			__skb_queue_purge(&entry->skb_list);
			continue;
		}
		return entry;
	}

	return NULL;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr;
	u16 sc;
	__le16 fc;
	unsigned int frag, seq;
	struct ieee80211_fragment_entry *entry;
	struct sk_buff *skb;

	hdr = (struct ieee80211_hdr *)rx->skb->data;
	fc = hdr->frame_control;
	sc = le16_to_cpu(hdr->seq_ctrl);
	frag = sc & IEEE80211_SCTL_FRAG;

	if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
		   (rx->skb)->len < 24 ||
		   is_multicast_ether_addr(hdr->addr1))) {
		/* not fragmented */
		goto out;
	}
	I802_DEBUG_INC(rx->local->rx_handlers_fragments);

	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;

	if (frag == 0) {
		/* This is the first fragment of a new frame. */
		entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
						 rx->queue, &(rx->skb));
		if (rx->key && rx->key->conf.alg == ALG_CCMP &&
		    ieee80211_has_protected(fc)) {
			/* Store CCMP PN so that we can verify that the next
			 * fragment has a sequential PN value. */
			entry->ccmp = 1;
			memcpy(entry->last_pn,
			       rx->key->u.ccmp.rx_pn[rx->queue],
			       CCMP_PN_LEN);
		}
		return RX_QUEUED;
	}

	/* This is a fragment for a frame that should already be pending in
	 * the fragment cache. Add this fragment to the end of the pending
	 * entry.
	 */
	entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
	if (!entry) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
		return RX_DROP_MONITOR;
	}

	/* Verify that MPDUs within one MSDU have sequential PN values.
	 * (IEEE 802.11i, 8.3.3.4.5) */
	if (entry->ccmp) {
		int i;
		u8 pn[CCMP_PN_LEN], *rpn;
		if (!rx->key || rx->key->conf.alg != ALG_CCMP)
			return RX_DROP_UNUSABLE;
		memcpy(pn, entry->last_pn, CCMP_PN_LEN);
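		/*
		 * Compute the expected PN for this fragment: treat the
		 * stored CCMP PN as a big-endian byte array and add one,
		 * propagating the carry from the least significant byte.
		 */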
		for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
			pn[i]++;
			if (pn[i])
				break;
		}
		rpn = rx->key->u.ccmp.rx_pn[rx->queue];
		if (memcmp(pn, rpn, CCMP_PN_LEN))
			return RX_DROP_UNUSABLE;
		memcpy(entry->last_pn, pn, CCMP_PN_LEN);
	}

	skb_pull(rx->skb, ieee80211_hdrlen(fc));
	__skb_queue_tail(&entry->skb_list, rx->skb);
	entry->last_frag = frag;
	entry->extra_len += rx->skb->len;
	if (ieee80211_has_morefrags(fc)) {
		rx->skb = NULL;
		return RX_QUEUED;
	}

	rx->skb = __skb_dequeue(&entry->skb_list);
	if (skb_tailroom(rx->skb) < entry->extra_len) {
		I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
		if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
					      GFP_ATOMIC))) {
			I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
			__skb_queue_purge(&entry->skb_list);
			return RX_DROP_UNUSABLE;
		}
	}
	while ((skb = __skb_dequeue(&entry->skb_list))) {
		memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
		dev_kfree_skb(skb);
	}

	/* Complete frame has been reassembled - process it now */
	rx->flags |= IEEE80211_RX_FRAGMENTED;

 out:
	if (rx->sta)
		rx->sta->rx_packets++;
	if (is_multicast_ether_addr(hdr->addr1))
		rx->local->dot11MulticastReceivedFrameCount++;
	else
		ieee80211_led_rx(rx->local);
	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
	struct sk_buff *skb;
	int no_pending_pkts;
	__le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;

	if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
		   !(rx->flags & IEEE80211_RX_RA_MATCH)))
		return RX_CONTINUE;

	if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
	    (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
		return RX_DROP_UNUSABLE;

	skb = skb_dequeue(&rx->sta->tx_filtered);
	if (!skb) {
		skb = skb_dequeue(&rx->sta->ps_tx_buf);
		if (skb)
			rx->local->total_ps_buffered--;
	}
	no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) &&
		skb_queue_empty(&rx->sta->ps_tx_buf);

	if (skb) {
		struct ieee80211_hdr *hdr =
			(struct ieee80211_hdr *) skb->data;

		/*
		 * Tell TX path to send one frame even though the STA may
		 * still remain in PS mode after this frame exchange.
		 */
		set_sta_flags(rx->sta, WLAN_STA_PSPOLL);

#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
		printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n",
		       rx->sta->sta.addr, rx->sta->sta.aid,
		       skb_queue_len(&rx->sta->ps_tx_buf));
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */

		/* Use MoreData flag to indicate whether there are more
		 * buffered frames for this STA */
		if (no_pending_pkts)
			hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
		else
			hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);

		dev_queue_xmit(skb);

		if (no_pending_pkts)
			sta_info_clear_tim_bit(rx->sta);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
	} else if (!rx->sent_ps_buffered) {
		/*
		 * FIXME: This can be the result of a race condition between
		 *	  us expiring a frame and the station polling for it.
		 *	  Should we send it a null-func frame indicating we
		 *	  have nothing buffered for it?
		 */
		printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
		       "though there are no buffered frames for it\n",
		       rx->dev->name, rx->sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
	}

	/* Free PS Poll skb here instead of returning RX_DROP that would
	 * count as a dropped frame. */
	dev_kfree_skb(rx->skb);

	return RX_QUEUED;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
{
	u8 *data = rx->skb->data;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return RX_CONTINUE;

	/* remove the qos control field, update frame type and meta-data */
	memmove(data + IEEE80211_QOS_CTL_LEN, data,
		ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
	hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
	/* change frame type to non QOS */
	hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);

	return RX_CONTINUE;
}

static int
ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
{
	if (unlikely(!rx->sta ||
		     !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
		return -EACCES;

	return 0;
}

static int
ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
{
	/*
	 * Pass through unencrypted frames if the hardware has
	 * decrypted them already.
	 */
	if (rx->status->flag & RX_FLAG_DECRYPTED)
		return 0;

	/* Drop unencrypted frames if key is set. */
	if (unlikely(!ieee80211_has_protected(fc) &&
		     !ieee80211_is_nullfunc(fc) &&
		     ieee80211_is_data(fc) &&
		     (rx->key || rx->sdata->drop_unencrypted)))
		return -EACCES;
	if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
		if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
			     rx->key))
			return -EACCES;
		/* BIP does not use Protected field, so need to check MMIE */
		if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb)
			     && ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
			     rx->key))
			return -EACCES;
		/*
		 * When using MFP, Action frames are not allowed prior to
		 * having configured keys.
		 */
		if (unlikely(ieee80211_is_action(fc) && !rx->key &&
			     ieee80211_is_robust_mgmt_frame(
				     (struct ieee80211_hdr *) rx->skb->data)))
			return -EACCES;
	}

	return 0;
}

static int
__ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
{
	struct net_device *dev = rx->dev;
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type);
}

/*
 * requires that rx->skb is a frame with ethernet header
 */
static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
{
	static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
		= { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;

	/*
	 * Allow EAPOL frames to us/the PAE group address regardless
	 * of whether the frame was encrypted or not.
	 */
	if (ehdr->h_proto == htons(ETH_P_PAE) &&
	    (compare_ether_addr(ehdr->h_dest, rx->dev->dev_addr) == 0 ||
	     compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
		return true;

	if (ieee80211_802_1x_port_control(rx) ||
	    ieee80211_drop_unencrypted(rx, fc))
		return false;

	return true;
}

/*
 * requires that rx->skb is a frame with ethernet header
 */
static void
ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
{
	struct net_device *dev = rx->dev;
	struct ieee80211_local *local = rx->local;
	struct sk_buff *skb, *xmit_skb;
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
	struct sta_info *dsta;

	skb = rx->skb;
	xmit_skb = NULL;

	if ((sdata->vif.type == NL80211_IFTYPE_AP ||
	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
	    !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
	    (rx->flags & IEEE80211_RX_RA_MATCH)) {
		if (is_multicast_ether_addr(ehdr->h_dest)) {
			/*
			 * send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
			if (!xmit_skb && net_ratelimit())
				printk(KERN_DEBUG "%s: failed to clone "
				       "multicast frame\n", dev->name);
		} else {
			dsta = sta_info_get(local, skb->data);
			if (dsta && dsta->sdata->dev == dev) {
				/*
				 * The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
		}
	}

	if (skb) {
		int align __maybe_unused;

#if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
		/*
		 * 'align' will only take the values 0 or 2 here
		 * since all frames are required to be aligned
		 * to 2-byte boundaries when being passed to
		 * mac80211. That also explains the __skb_push()
		 * below.
		 */
		align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
		if (align) {
			if (WARN_ON(skb_headroom(skb) < 3)) {
				dev_kfree_skb(skb);
				skb = NULL;
			} else {
				u8 *data = skb->data;
				size_t len = skb->len;
				u8 *new = __skb_push(skb, align);
				memmove(new, data, len);
				__skb_trim(skb, len);
			}
		}
#endif

		if (skb) {
			/* deliver to local stack */
			skb->protocol = eth_type_trans(skb, dev);
			memset(skb->cb, 0, sizeof(skb->cb));
			netif_rx(skb);
		}
	}

	if (xmit_skb) {
		/* send to wireless media */
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		dev_queue_xmit(xmit_skb);
	}
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
{
	struct net_device *dev = rx->dev;
	struct ieee80211_local *local = rx->local;
	u16 ethertype;
	u8 *payload;
	struct sk_buff *skb = rx->skb, *frame = NULL;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	const struct ethhdr *eth;
	int remaining, err;
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];

	if (unlikely(!ieee80211_is_data(fc)))
		return RX_CONTINUE;

	if (unlikely(!ieee80211_is_data_present(fc)))
		return RX_DROP_MONITOR;

	if (!(rx->flags & IEEE80211_RX_AMSDU))
		return RX_CONTINUE;

	err = __ieee80211_data_to_8023(rx);
	if (unlikely(err))
		return RX_DROP_UNUSABLE;

	skb->dev = dev;

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* skip the wrapping header */
	eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
	if (!eth)
		return RX_DROP_UNUSABLE;

	while (skb != frame) {
		u8 padding;
		__be16 len = eth->h_proto;
		unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);

		remaining = skb->len;
		memcpy(dst, eth->h_dest, ETH_ALEN);
		memcpy(src, eth->h_source, ETH_ALEN);

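		/*
		 * A-MSDU subframes are padded out to a multiple of four
		 * bytes; this is the amount of padding that follows the
		 * current subframe.
		 */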
		padding = ((4 - subframe_len) & 0x3);
		/* the last MSDU has no padding */
		if (subframe_len > remaining)
			return RX_DROP_UNUSABLE;

		skb_pull(skb, sizeof(struct ethhdr));
		/* if last subframe reuse skb */
		if (remaining <= subframe_len + padding)
			frame = skb;
		else {
			/*
			 * Allocate and reserve two bytes more for payload
			 * alignment since sizeof(struct ethhdr) is 14.
			 */
			frame = dev_alloc_skb(
				ALIGN(local->hw.extra_tx_headroom, 4) +
				subframe_len + 2);

			if (frame == NULL)
				return RX_DROP_UNUSABLE;

			skb_reserve(frame,
				    ALIGN(local->hw.extra_tx_headroom, 4) +
				    sizeof(struct ethhdr) + 2);
			memcpy(skb_put(frame, ntohs(len)), skb->data,
			       ntohs(len));

			eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
							 padding);
			if (!eth) {
				dev_kfree_skb(frame);
				return RX_DROP_UNUSABLE;
			}
		}

		skb_reset_network_header(frame);
		frame->dev = dev;
		frame->priority = skb->priority;
		rx->skb = frame;

		payload = frame->data;
		ethertype = (payload[6] << 8) | payload[7];

		if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
			    ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
			   compare_ether_addr(payload,
					      bridge_tunnel_header) == 0)) {
			/* remove RFC1042 or Bridge-Tunnel
			 * encapsulation and replace EtherType */
			skb_pull(frame, 6);
			memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
			memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
		} else {
			memcpy(skb_push(frame, sizeof(__be16)),
			       &len, sizeof(__be16));
			memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
			memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
		}

		if (!ieee80211_frame_allowed(rx, fc)) {
			if (skb == frame) /* last frame */
				return RX_DROP_UNUSABLE;
			dev_kfree_skb(frame);
			continue;
		}

		ieee80211_deliver_skb(rx);
	}

	return RX_QUEUED;
}

1482 | #ifdef CONFIG_MAC80211_MESH | |
1483 | static ieee80211_rx_result | |
1484 | ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |
1485 | { | |
1486 | struct ieee80211_hdr *hdr; | |
1487 | struct ieee80211s_hdr *mesh_hdr; | |
1488 | unsigned int hdrlen; | |
1489 | struct sk_buff *skb = rx->skb, *fwd_skb; | |
1490 | ||
1491 | hdr = (struct ieee80211_hdr *) skb->data; | |
1492 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | |
1493 | mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); | |
1494 | ||
1495 | if (!ieee80211_is_data(hdr->frame_control)) | |
1496 | return RX_CONTINUE; | |
1497 | ||
1498 | if (!mesh_hdr->ttl) | |
1499 | /* illegal frame */ | |
1500 | return RX_DROP_MONITOR; | |
1501 | ||
1502 | if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6){ | |
1503 | struct ieee80211_sub_if_data *sdata; | |
1504 | struct mesh_path *mppath; | |
1505 | ||
1506 | sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); | |
1507 | rcu_read_lock(); | |
1508 | mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata); | |
1509 | if (!mppath) { | |
1510 | mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata); | |
1511 | } else { | |
1512 | spin_lock_bh(&mppath->state_lock); | |
1513 | mppath->exp_time = jiffies; | |
1514 | if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0) | |
1515 | memcpy(mppath->mpp, hdr->addr4, ETH_ALEN); | |
1516 | spin_unlock_bh(&mppath->state_lock); | |
1517 | } | |
1518 | rcu_read_unlock(); | |
1519 | } | |
1520 | ||
1521 | if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0) | |
1522 | return RX_CONTINUE; | |
1523 | ||
1524 | mesh_hdr->ttl--; | |
1525 | ||
1526 | if (rx->flags & IEEE80211_RX_RA_MATCH) { | |
1527 | if (!mesh_hdr->ttl) | |
1528 | IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh, | |
1529 | dropped_frames_ttl); | |
1530 | else { | |
1531 | struct ieee80211_hdr *fwd_hdr; | |
1532 | fwd_skb = skb_copy(skb, GFP_ATOMIC); | |
1533 | ||
1534 | 			if (!fwd_skb) { | |
1535 | 				if (net_ratelimit()) | |
1536 | 					printk(KERN_DEBUG "%s: failed to copy mesh frame\n", | |
1537 | 						rx->dev->name); | |
1538 | 			} else { | |
1539 | 				fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; | |
1540 | 				/* Save TA to addr1 to send TA a path error if a | |
1541 | 				 * suitable next hop is not found */ | |
1542 | 				memcpy(fwd_hdr->addr1, fwd_hdr->addr2, ETH_ALEN); | |
1543 | 				memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN); | |
1544 | 				fwd_skb->dev = rx->local->mdev; | |
1545 | 				fwd_skb->iif = rx->dev->ifindex; | |
1546 | 				dev_queue_xmit(fwd_skb); | |
1547 | 			} | |
1548 | } | |
1549 | } | |
1550 | ||
1551 | if (is_multicast_ether_addr(hdr->addr3) || | |
1552 | rx->dev->flags & IFF_PROMISC) | |
1553 | return RX_CONTINUE; | |
1554 | else | |
1555 | return RX_DROP_MONITOR; | |
1556 | } | |
1557 | #endif | |
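/*
 * Illustrative note (not from the original source): for proxied mesh
 * frames the address-extension header carries the external (proxied)
 * source address in eaddr2, while addr4 of the 802.11 header names the
 * mesh node that injected the frame; the mpp_path_* calls above keep
 * that external-address -> proxy mapping fresh so traffic towards the
 * external node can later be sent via the right proxy.
 */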
1558 | ||
1559 | static ieee80211_rx_result debug_noinline | |
1560 | ieee80211_rx_h_data(struct ieee80211_rx_data *rx) | |
1561 | { | |
1562 | struct net_device *dev = rx->dev; | |
1563 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; | |
1564 | __le16 fc = hdr->frame_control; | |
1565 | int err; | |
1566 | ||
1567 | if (unlikely(!ieee80211_is_data(hdr->frame_control))) | |
1568 | return RX_CONTINUE; | |
1569 | ||
1570 | if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) | |
1571 | return RX_DROP_MONITOR; | |
1572 | ||
1573 | err = __ieee80211_data_to_8023(rx); | |
1574 | if (unlikely(err)) | |
1575 | return RX_DROP_UNUSABLE; | |
1576 | ||
1577 | if (!ieee80211_frame_allowed(rx, fc)) | |
1578 | return RX_DROP_MONITOR; | |
1579 | ||
1580 | rx->skb->dev = dev; | |
1581 | ||
1582 | dev->stats.rx_packets++; | |
1583 | dev->stats.rx_bytes += rx->skb->len; | |
1584 | ||
1585 | ieee80211_deliver_skb(rx); | |
1586 | ||
1587 | return RX_QUEUED; | |
1588 | } | |
1589 | ||
1590 | static ieee80211_rx_result debug_noinline | |
1591 | ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) | |
1592 | { | |
1593 | struct ieee80211_local *local = rx->local; | |
1594 | struct ieee80211_hw *hw = &local->hw; | |
1595 | struct sk_buff *skb = rx->skb; | |
1596 | struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; | |
1597 | struct tid_ampdu_rx *tid_agg_rx; | |
1598 | u16 start_seq_num; | |
1599 | u16 tid; | |
1600 | ||
1601 | if (likely(!ieee80211_is_ctl(bar->frame_control))) | |
1602 | return RX_CONTINUE; | |
1603 | ||
1604 | if (ieee80211_is_back_req(bar->frame_control)) { | |
1605 | if (!rx->sta) | |
1606 | return RX_CONTINUE; | |
1607 | tid = le16_to_cpu(bar->control) >> 12; | |
1608 | if (rx->sta->ampdu_mlme.tid_state_rx[tid] | |
1609 | != HT_AGG_STATE_OPERATIONAL) | |
1610 | return RX_CONTINUE; | |
1611 | tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; | |
1612 | ||
1613 | start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; | |
1614 | ||
1615 | /* reset session timer */ | |
1616 | if (tid_agg_rx->timeout) | |
1617 | mod_timer(&tid_agg_rx->session_timer, | |
1618 | TU_TO_EXP_TIME(tid_agg_rx->timeout)); | |
1619 | ||
1620 | 		/* manage the reordering buffer according to the | |
1621 | 		 * requested sequence number */ | |
1622 | rcu_read_lock(); | |
1623 | ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, NULL, | |
1624 | start_seq_num, 1); | |
1625 | rcu_read_unlock(); | |
1626 | return RX_DROP_UNUSABLE; | |
1627 | } | |
1628 | ||
1629 | return RX_CONTINUE; | |
1630 | } | |
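/*
 * Illustrative note (not from the original source): the shifts above
 * follow the 802.11 BlockAckReq layout - the TID sits in bits 12-15 of
 * the BAR Control field, and the Starting Sequence Control field holds
 * the fragment number in bits 0-3 and the sequence number in bits 4-15.
 * For example:
 *
 *	le16_to_cpu(bar->control)       == 0x5004  ->  tid = 0x5
 *	le16_to_cpu(bar->start_seq_num) == 0x07b0  ->  start_seq_num = 0x07b
 */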
1631 | ||
1632 | static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, | |
1633 | struct ieee80211_mgmt *mgmt, | |
1634 | size_t len) | |
1635 | { | |
1636 | struct ieee80211_local *local = sdata->local; | |
1637 | struct sk_buff *skb; | |
1638 | struct ieee80211_mgmt *resp; | |
1639 | ||
1640 | if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) { | |
1641 | /* Not to own unicast address */ | |
1642 | return; | |
1643 | } | |
1644 | ||
1645 | if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 || | |
1646 | compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) { | |
1647 | /* Not from the current AP. */ | |
1648 | return; | |
1649 | } | |
1650 | ||
1651 | if (sdata->u.mgd.state == IEEE80211_STA_MLME_ASSOCIATE) { | |
1652 | /* Association in progress; ignore SA Query */ | |
1653 | return; | |
1654 | } | |
1655 | ||
1656 | if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { | |
1657 | /* Too short SA Query request frame */ | |
1658 | return; | |
1659 | } | |
1660 | ||
1661 | skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); | |
1662 | if (skb == NULL) | |
1663 | return; | |
1664 | ||
1665 | skb_reserve(skb, local->hw.extra_tx_headroom); | |
1666 | resp = (struct ieee80211_mgmt *) skb_put(skb, 24); | |
1667 | memset(resp, 0, 24); | |
1668 | memcpy(resp->da, mgmt->sa, ETH_ALEN); | |
1669 | memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN); | |
1670 | memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); | |
1671 | resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | | |
1672 | IEEE80211_STYPE_ACTION); | |
1673 | skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); | |
1674 | resp->u.action.category = WLAN_CATEGORY_SA_QUERY; | |
1675 | resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; | |
1676 | memcpy(resp->u.action.u.sa_query.trans_id, | |
1677 | mgmt->u.action.u.sa_query.trans_id, | |
1678 | WLAN_SA_QUERY_TR_ID_LEN); | |
1679 | ||
1680 | ieee80211_tx_skb(sdata, skb, 1); | |
1681 | } | |
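/*
 * Illustrative note (not from the original source): the response built
 * above is a minimal Action frame - a zeroed 24-byte management header
 * with DA/SA/BSSID filled in, followed by the SA Query category, the
 * "response" action code and the transaction ID echoed back unchanged
 * from the request:
 *
 *	frame length = 24 + 1 (category) + 1 (action)
 *		       + WLAN_SA_QUERY_TR_ID_LEN (transaction ID)
 *
 * Echoing the transaction ID is what lets the AP match this response
 * to its pending SA Query request.
 */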
1682 | ||
1683 | static ieee80211_rx_result debug_noinline | |
1684 | ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |
1685 | { | |
1686 | struct ieee80211_local *local = rx->local; | |
1687 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); | |
1688 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; | |
1689 | struct ieee80211_bss *bss; | |
1690 | int len = rx->skb->len; | |
1691 | ||
1692 | if (!ieee80211_is_action(mgmt->frame_control)) | |
1693 | return RX_CONTINUE; | |
1694 | ||
1695 | if (!rx->sta) | |
1696 | return RX_DROP_MONITOR; | |
1697 | ||
1698 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | |
1699 | return RX_DROP_MONITOR; | |
1700 | ||
1701 | if (ieee80211_drop_unencrypted(rx, mgmt->frame_control)) | |
1702 | return RX_DROP_MONITOR; | |
1703 | ||
1704 | /* all categories we currently handle have action_code */ | |
1705 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) | |
1706 | return RX_DROP_MONITOR; | |
1707 | ||
1708 | switch (mgmt->u.action.category) { | |
1709 | case WLAN_CATEGORY_BACK: | |
1710 | /* | |
1711 | * The aggregation code is not prepared to handle | |
1712 | * anything but STA/AP due to the BSSID handling; | |
1713 | * IBSS could work in the code but isn't supported | |
1714 | * by drivers or the standard. | |
1715 | */ | |
1716 | if (sdata->vif.type != NL80211_IFTYPE_STATION && | |
1717 | sdata->vif.type != NL80211_IFTYPE_AP_VLAN && | |
1718 | sdata->vif.type != NL80211_IFTYPE_AP) | |
1719 | return RX_DROP_MONITOR; | |
1720 | ||
1721 | switch (mgmt->u.action.u.addba_req.action_code) { | |
1722 | case WLAN_ACTION_ADDBA_REQ: | |
1723 | if (len < (IEEE80211_MIN_ACTION_SIZE + | |
1724 | sizeof(mgmt->u.action.u.addba_req))) | |
1725 | return RX_DROP_MONITOR; | |
1726 | ieee80211_process_addba_request(local, rx->sta, mgmt, len); | |
1727 | break; | |
1728 | case WLAN_ACTION_ADDBA_RESP: | |
1729 | if (len < (IEEE80211_MIN_ACTION_SIZE + | |
1730 | sizeof(mgmt->u.action.u.addba_resp))) | |
1731 | return RX_DROP_MONITOR; | |
1732 | ieee80211_process_addba_resp(local, rx->sta, mgmt, len); | |
1733 | break; | |
1734 | case WLAN_ACTION_DELBA: | |
1735 | if (len < (IEEE80211_MIN_ACTION_SIZE + | |
1736 | sizeof(mgmt->u.action.u.delba))) | |
1737 | return RX_DROP_MONITOR; | |
1738 | ieee80211_process_delba(sdata, rx->sta, mgmt, len); | |
1739 | break; | |
1740 | } | |
1741 | break; | |
1742 | case WLAN_CATEGORY_SPECTRUM_MGMT: | |
1743 | if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) | |
1744 | return RX_DROP_MONITOR; | |
1745 | ||
1746 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | |
1747 | return RX_DROP_MONITOR; | |
1748 | ||
1749 | switch (mgmt->u.action.u.measurement.action_code) { | |
1750 | case WLAN_ACTION_SPCT_MSR_REQ: | |
1751 | if (len < (IEEE80211_MIN_ACTION_SIZE + | |
1752 | sizeof(mgmt->u.action.u.measurement))) | |
1753 | return RX_DROP_MONITOR; | |
1754 | ieee80211_process_measurement_req(sdata, mgmt, len); | |
1755 | break; | |
1756 | case WLAN_ACTION_SPCT_CHL_SWITCH: | |
1757 | if (len < (IEEE80211_MIN_ACTION_SIZE + | |
1758 | sizeof(mgmt->u.action.u.chan_switch))) | |
1759 | return RX_DROP_MONITOR; | |
1760 | ||
1761 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | |
1762 | return RX_DROP_MONITOR; | |
1763 | ||
1764 | if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) | |
1765 | return RX_DROP_MONITOR; | |
1766 | ||
1767 | bss = ieee80211_rx_bss_get(local, sdata->u.mgd.bssid, | |
1768 | local->hw.conf.channel->center_freq, | |
1769 | sdata->u.mgd.ssid, | |
1770 | sdata->u.mgd.ssid_len); | |
1771 | if (!bss) | |
1772 | return RX_DROP_MONITOR; | |
1773 | ||
1774 | ieee80211_sta_process_chanswitch(sdata, | |
1775 | &mgmt->u.action.u.chan_switch.sw_elem, bss); | |
1776 | ieee80211_rx_bss_put(local, bss); | |
1777 | break; | |
1778 | } | |
1779 | break; | |
1780 | case WLAN_CATEGORY_SA_QUERY: | |
1781 | if (len < (IEEE80211_MIN_ACTION_SIZE + | |
1782 | sizeof(mgmt->u.action.u.sa_query))) | |
1783 | return RX_DROP_MONITOR; | |
1784 | switch (mgmt->u.action.u.sa_query.action) { | |
1785 | case WLAN_ACTION_SA_QUERY_REQUEST: | |
1786 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | |
1787 | return RX_DROP_MONITOR; | |
1788 | ieee80211_process_sa_query_req(sdata, mgmt, len); | |
1789 | break; | |
1790 | case WLAN_ACTION_SA_QUERY_RESPONSE: | |
1791 | /* | |
1792 | * SA Query response is currently only used in AP mode | |
1793 | * and it is processed in user space. | |
1794 | */ | |
1795 | return RX_CONTINUE; | |
1796 | } | |
1797 | break; | |
1798 | default: | |
1799 | return RX_CONTINUE; | |
1800 | } | |
1801 | ||
1802 | rx->sta->rx_packets++; | |
1803 | dev_kfree_skb(rx->skb); | |
1804 | return RX_QUEUED; | |
1805 | } | |
1806 | ||
1807 | static ieee80211_rx_result debug_noinline | |
1808 | ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) | |
1809 | { | |
1810 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); | |
1811 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; | |
1812 | ||
1813 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | |
1814 | return RX_DROP_MONITOR; | |
1815 | ||
1816 | if (ieee80211_drop_unencrypted(rx, mgmt->frame_control)) | |
1817 | return RX_DROP_MONITOR; | |
1818 | ||
1819 | if (ieee80211_vif_is_mesh(&sdata->vif)) | |
1820 | return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status); | |
1821 | ||
1822 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) | |
1823 | return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status); | |
1824 | ||
1825 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | |
1826 | return ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status); | |
1827 | ||
1828 | return RX_DROP_MONITOR; | |
1829 | } | |
1830 | ||
1831 | static void ieee80211_rx_michael_mic_report(struct net_device *dev, | |
1832 | struct ieee80211_hdr *hdr, | |
1833 | struct ieee80211_rx_data *rx) | |
1834 | { | |
1835 | int keyidx; | |
1836 | unsigned int hdrlen; | |
1837 | ||
1838 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | |
1839 | if (rx->skb->len >= hdrlen + 4) | |
1840 | keyidx = rx->skb->data[hdrlen + 3] >> 6; | |
1841 | else | |
1842 | keyidx = -1; | |
1843 | ||
1844 | if (!rx->sta) { | |
1845 | /* | |
1846 | * Some hardware seem to generate incorrect Michael MIC | |
1847 | * reports; ignore them to avoid triggering countermeasures. | |
1848 | */ | |
1849 | goto ignore; | |
1850 | } | |
1851 | ||
1852 | if (!ieee80211_has_protected(hdr->frame_control)) | |
1853 | goto ignore; | |
1854 | ||
1855 | if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) { | |
1856 | /* | |
1857 | * APs with pairwise keys should never receive Michael MIC | |
1858 | * errors for non-zero keyidx because these are reserved for | |
1859 | * group keys and only the AP is sending real multicast | |
1860 | * frames in the BSS. | |
1861 | */ | |
1862 | goto ignore; | |
1863 | } | |
1864 | ||
1865 | if (!ieee80211_is_data(hdr->frame_control) && | |
1866 | !ieee80211_is_auth(hdr->frame_control)) | |
1867 | goto ignore; | |
1868 | ||
1869 | mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL); | |
1870 | ignore: | |
1871 | dev_kfree_skb(rx->skb); | |
1872 | rx->skb = NULL; | |
1873 | } | |
1874 | ||
1875 | /* TODO: use IEEE80211_RX_FRAGMENTED */ | |
1876 | static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx) | |
1877 | { | |
1878 | struct ieee80211_sub_if_data *sdata; | |
1879 | struct ieee80211_local *local = rx->local; | |
1880 | struct ieee80211_rtap_hdr { | |
1881 | struct ieee80211_radiotap_header hdr; | |
1882 | u8 flags; | |
1883 | u8 rate; | |
1884 | __le16 chan_freq; | |
1885 | __le16 chan_flags; | |
1886 | } __attribute__ ((packed)) *rthdr; | |
1887 | struct sk_buff *skb = rx->skb, *skb2; | |
1888 | struct net_device *prev_dev = NULL; | |
1889 | struct ieee80211_rx_status *status = rx->status; | |
1890 | ||
1891 | if (rx->flags & IEEE80211_RX_CMNTR_REPORTED) | |
1892 | goto out_free_skb; | |
1893 | ||
1894 | if (skb_headroom(skb) < sizeof(*rthdr) && | |
1895 | pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) | |
1896 | goto out_free_skb; | |
1897 | ||
1898 | rthdr = (void *)skb_push(skb, sizeof(*rthdr)); | |
1899 | memset(rthdr, 0, sizeof(*rthdr)); | |
1900 | rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr)); | |
1901 | rthdr->hdr.it_present = | |
1902 | cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | | |
1903 | (1 << IEEE80211_RADIOTAP_RATE) | | |
1904 | (1 << IEEE80211_RADIOTAP_CHANNEL)); | |
1905 | ||
1906 | rthdr->rate = rx->rate->bitrate / 5; | |
1907 | rthdr->chan_freq = cpu_to_le16(status->freq); | |
1908 | ||
1909 | if (status->band == IEEE80211_BAND_5GHZ) | |
1910 | rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM | | |
1911 | IEEE80211_CHAN_5GHZ); | |
1912 | else | |
1913 | rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN | | |
1914 | IEEE80211_CHAN_2GHZ); | |
1915 | ||
1916 | skb_set_mac_header(skb, 0); | |
1917 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1918 | skb->pkt_type = PACKET_OTHERHOST; | |
1919 | skb->protocol = htons(ETH_P_802_2); | |
1920 | ||
1921 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | |
1922 | if (!netif_running(sdata->dev)) | |
1923 | continue; | |
1924 | ||
1925 | if (sdata->vif.type != NL80211_IFTYPE_MONITOR || | |
1926 | !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) | |
1927 | continue; | |
1928 | ||
1929 | if (prev_dev) { | |
1930 | skb2 = skb_clone(skb, GFP_ATOMIC); | |
1931 | if (skb2) { | |
1932 | skb2->dev = prev_dev; | |
1933 | netif_rx(skb2); | |
1934 | } | |
1935 | } | |
1936 | ||
1937 | prev_dev = sdata->dev; | |
1938 | sdata->dev->stats.rx_packets++; | |
1939 | sdata->dev->stats.rx_bytes += skb->len; | |
1940 | } | |
1941 | ||
1942 | if (prev_dev) { | |
1943 | skb->dev = prev_dev; | |
1944 | netif_rx(skb); | |
1945 | skb = NULL; | |
1946 | } else | |
1947 | goto out_free_skb; | |
1948 | ||
1949 | rx->flags |= IEEE80211_RX_CMNTR_REPORTED; | |
1950 | return; | |
1951 | ||
1952 | out_free_skb: | |
1953 | dev_kfree_skb(skb); | |
1954 | } | |
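/*
 * Illustrative note (not from the original source): the rtap header
 * built above is a minimal 14-byte radiotap blob,
 *
 *	8 (radiotap header) + 1 (flags) + 1 (rate) + 2 + 2 (channel),
 *
 * with FLAGS, RATE and CHANNEL announced in it_present.  Radiotap
 * expresses rates in 500 kb/s units while mac80211 bitrates are in
 * 100 kb/s units, hence the division by five: a 54 Mb/s frame has
 * bitrate == 540 and is reported as rthdr->rate == 108.
 */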
1955 | ||
1956 | ||
1957 | static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, | |
1958 | struct ieee80211_rx_data *rx, | |
1959 | struct sk_buff *skb) | |
1960 | { | |
1961 | ieee80211_rx_result res = RX_DROP_MONITOR; | |
1962 | ||
1963 | rx->skb = skb; | |
1964 | rx->sdata = sdata; | |
1965 | rx->dev = sdata->dev; | |
1966 | ||
1967 | #define CALL_RXH(rxh) \ | |
1968 | do { \ | |
1969 | res = rxh(rx); \ | |
1970 | if (res != RX_CONTINUE) \ | |
1971 | goto rxh_done; \ | |
1972 | } while (0); | |
1973 | ||
1974 | CALL_RXH(ieee80211_rx_h_passive_scan) | |
1975 | CALL_RXH(ieee80211_rx_h_check) | |
1976 | CALL_RXH(ieee80211_rx_h_decrypt) | |
1977 | CALL_RXH(ieee80211_rx_h_check_more_data) | |
1978 | CALL_RXH(ieee80211_rx_h_sta_process) | |
1979 | CALL_RXH(ieee80211_rx_h_defragment) | |
1980 | CALL_RXH(ieee80211_rx_h_ps_poll) | |
1981 | CALL_RXH(ieee80211_rx_h_michael_mic_verify) | |
1982 | /* must be after MMIC verify so header is counted in MPDU mic */ | |
1983 | CALL_RXH(ieee80211_rx_h_remove_qos_control) | |
1984 | CALL_RXH(ieee80211_rx_h_amsdu) | |
1985 | #ifdef CONFIG_MAC80211_MESH | |
1986 | if (ieee80211_vif_is_mesh(&sdata->vif)) | |
1987 | CALL_RXH(ieee80211_rx_h_mesh_fwding); | |
1988 | #endif | |
1989 | CALL_RXH(ieee80211_rx_h_data) | |
1990 | CALL_RXH(ieee80211_rx_h_ctrl) | |
1991 | CALL_RXH(ieee80211_rx_h_action) | |
1992 | CALL_RXH(ieee80211_rx_h_mgmt) | |
1993 | ||
1994 | #undef CALL_RXH | |
1995 | ||
1996 | rxh_done: | |
1997 | switch (res) { | |
1998 | case RX_DROP_MONITOR: | |
1999 | I802_DEBUG_INC(sdata->local->rx_handlers_drop); | |
2000 | if (rx->sta) | |
2001 | rx->sta->rx_dropped++; | |
2002 | /* fall through */ | |
2003 | case RX_CONTINUE: | |
2004 | ieee80211_rx_cooked_monitor(rx); | |
2005 | break; | |
2006 | case RX_DROP_UNUSABLE: | |
2007 | I802_DEBUG_INC(sdata->local->rx_handlers_drop); | |
2008 | if (rx->sta) | |
2009 | rx->sta->rx_dropped++; | |
2010 | dev_kfree_skb(rx->skb); | |
2011 | break; | |
2012 | case RX_QUEUED: | |
2013 | I802_DEBUG_INC(sdata->local->rx_handlers_queued); | |
2014 | break; | |
2015 | } | |
2016 | } | |
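/*
 * Sketch (hypothetical handler, for illustration only - it does not
 * exist in mac80211): every handler invoked through CALL_RXH() above
 * follows the same contract.  RX_CONTINUE lets the next handler run,
 * RX_QUEUED means the handler consumed or re-queued rx->skb, and the
 * RX_DROP_* results are dealt with by the rxh_done switch above:
 *
 *	static ieee80211_rx_result debug_noinline
 *	ieee80211_rx_h_example(struct ieee80211_rx_data *rx)
 *	{
 *		struct ieee80211_hdr *hdr = (void *) rx->skb->data;
 *
 *		if (!ieee80211_is_data(hdr->frame_control))
 *			return RX_CONTINUE;
 *		if (rx->skb->len < 24)
 *			return RX_DROP_UNUSABLE;
 *		return RX_CONTINUE;
 *	}
 */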
2017 | ||
2018 | /* main receive path */ | |
2019 | ||
2020 | static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |
2021 | struct ieee80211_rx_data *rx, | |
2022 | struct ieee80211_hdr *hdr) | |
2023 | { | |
2024 | u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, sdata->vif.type); | |
2025 | int multicast = is_multicast_ether_addr(hdr->addr1); | |
2026 | ||
2027 | switch (sdata->vif.type) { | |
2028 | case NL80211_IFTYPE_STATION: | |
2029 | if (!bssid) | |
2030 | return 0; | |
2031 | if (!ieee80211_bssid_match(bssid, sdata->u.mgd.bssid)) { | |
2032 | if (!(rx->flags & IEEE80211_RX_IN_SCAN)) | |
2033 | return 0; | |
2034 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | |
2035 | } else if (!multicast && | |
2036 | compare_ether_addr(sdata->dev->dev_addr, | |
2037 | hdr->addr1) != 0) { | |
2038 | if (!(sdata->dev->flags & IFF_PROMISC)) | |
2039 | return 0; | |
2040 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | |
2041 | } | |
2042 | break; | |
2043 | case NL80211_IFTYPE_ADHOC: | |
2044 | if (!bssid) | |
2045 | return 0; | |
2046 | 		if (ieee80211_is_beacon(hdr->frame_control)) { | |
2047 | 			return 1; | |
2048 | 		} else if (!ieee80211_bssid_match(bssid, | |
2049 | 						   sdata->u.ibss.bssid)) { | |
2050 | if (!(rx->flags & IEEE80211_RX_IN_SCAN)) | |
2051 | return 0; | |
2052 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | |
2053 | } else if (!multicast && | |
2054 | compare_ether_addr(sdata->dev->dev_addr, | |
2055 | hdr->addr1) != 0) { | |
2056 | if (!(sdata->dev->flags & IFF_PROMISC)) | |
2057 | return 0; | |
2058 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | |
2059 | } else if (!rx->sta) { | |
2060 | int rate_idx; | |
2061 | if (rx->status->flag & RX_FLAG_HT) | |
2062 | rate_idx = 0; /* TODO: HT rates */ | |
2063 | else | |
2064 | rate_idx = rx->status->rate_idx; | |
2065 | rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2, | |
2066 | BIT(rate_idx)); | |
2067 | } | |
2068 | break; | |
2069 | case NL80211_IFTYPE_MESH_POINT: | |
2070 | if (!multicast && | |
2071 | compare_ether_addr(sdata->dev->dev_addr, | |
2072 | hdr->addr1) != 0) { | |
2073 | if (!(sdata->dev->flags & IFF_PROMISC)) | |
2074 | return 0; | |
2075 | ||
2076 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | |
2077 | } | |
2078 | break; | |
2079 | case NL80211_IFTYPE_AP_VLAN: | |
2080 | case NL80211_IFTYPE_AP: | |
2081 | if (!bssid) { | |
2082 | if (compare_ether_addr(sdata->dev->dev_addr, | |
2083 | hdr->addr1)) | |
2084 | return 0; | |
2085 | } else if (!ieee80211_bssid_match(bssid, | |
2086 | sdata->dev->dev_addr)) { | |
2087 | if (!(rx->flags & IEEE80211_RX_IN_SCAN)) | |
2088 | return 0; | |
2089 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | |
2090 | } | |
2091 | break; | |
2092 | case NL80211_IFTYPE_WDS: | |
2093 | if (bssid || !ieee80211_is_data(hdr->frame_control)) | |
2094 | return 0; | |
2095 | if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) | |
2096 | return 0; | |
2097 | break; | |
2098 | case NL80211_IFTYPE_MONITOR: | |
2099 | /* take everything */ | |
2100 | break; | |
2101 | case NL80211_IFTYPE_UNSPECIFIED: | |
2102 | case __NL80211_IFTYPE_AFTER_LAST: | |
2103 | /* should never get here */ | |
2104 | WARN_ON(1); | |
2105 | break; | |
2106 | } | |
2107 | ||
2108 | return 1; | |
2109 | } | |
2110 | ||
2111 | /* | |
2112 |  * This is the actual Rx frames handler. As it belongs to the Rx path it | |
2113 |  * must be called with rcu_read_lock protection. | |
2114 | */ | |
2115 | static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |
2116 | struct sk_buff *skb, | |
2117 | struct ieee80211_rx_status *status, | |
2118 | struct ieee80211_rate *rate) | |
2119 | { | |
2120 | struct ieee80211_local *local = hw_to_local(hw); | |
2121 | struct ieee80211_sub_if_data *sdata; | |
2122 | struct ieee80211_hdr *hdr; | |
2123 | struct ieee80211_rx_data rx; | |
2124 | int prepares; | |
2125 | struct ieee80211_sub_if_data *prev = NULL; | |
2126 | struct sk_buff *skb_new; | |
2127 | ||
2128 | hdr = (struct ieee80211_hdr *)skb->data; | |
2129 | memset(&rx, 0, sizeof(rx)); | |
2130 | rx.skb = skb; | |
2131 | rx.local = local; | |
2132 | ||
2133 | rx.status = status; | |
2134 | rx.rate = rate; | |
2135 | ||
2136 | if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control)) | |
2137 | local->dot11ReceivedFragmentCount++; | |
2138 | ||
2139 | rx.sta = sta_info_get(local, hdr->addr2); | |
2140 | if (rx.sta) { | |
2141 | rx.sdata = rx.sta->sdata; | |
2142 | rx.dev = rx.sta->sdata->dev; | |
2143 | } | |
2144 | ||
2145 | if ((status->flag & RX_FLAG_MMIC_ERROR)) { | |
2146 | ieee80211_rx_michael_mic_report(local->mdev, hdr, &rx); | |
2147 | return; | |
2148 | } | |
2149 | ||
2150 | if (unlikely(local->sw_scanning || local->hw_scanning)) | |
2151 | rx.flags |= IEEE80211_RX_IN_SCAN; | |
2152 | ||
2153 | ieee80211_parse_qos(&rx); | |
2154 | ieee80211_verify_alignment(&rx); | |
2155 | ||
2156 | skb = rx.skb; | |
2157 | ||
2158 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | |
2159 | if (!netif_running(sdata->dev)) | |
2160 | continue; | |
2161 | ||
2162 | if (sdata->vif.type == NL80211_IFTYPE_MONITOR) | |
2163 | continue; | |
2164 | ||
2165 | rx.flags |= IEEE80211_RX_RA_MATCH; | |
2166 | prepares = prepare_for_handlers(sdata, &rx, hdr); | |
2167 | ||
2168 | if (!prepares) | |
2169 | continue; | |
2170 | ||
2171 | /* | |
2172 | * frame is destined for this interface, but if it's not | |
2173 | * also for the previous one we handle that after the | |
2174 | 		 * loop to avoid copying the SKB more often than necessary | |
2175 | */ | |
2176 | ||
2177 | if (!prev) { | |
2178 | prev = sdata; | |
2179 | continue; | |
2180 | } | |
2181 | ||
2182 | /* | |
2183 | * frame was destined for the previous interface | |
2184 | * so invoke RX handlers for it | |
2185 | */ | |
2186 | ||
2187 | skb_new = skb_copy(skb, GFP_ATOMIC); | |
2188 | if (!skb_new) { | |
2189 | if (net_ratelimit()) | |
2190 | printk(KERN_DEBUG "%s: failed to copy " | |
2191 | "multicast frame for %s\n", | |
2192 | wiphy_name(local->hw.wiphy), | |
2193 | prev->dev->name); | |
2194 | continue; | |
2195 | } | |
2196 | ieee80211_invoke_rx_handlers(prev, &rx, skb_new); | |
2197 | prev = sdata; | |
2198 | } | |
2199 | if (prev) | |
2200 | ieee80211_invoke_rx_handlers(prev, &rx, skb); | |
2201 | else | |
2202 | dev_kfree_skb(skb); | |
2203 | } | |
2204 | ||
2205 | #define SEQ_MODULO 0x1000 | |
2206 | #define SEQ_MASK 0xfff | |
2207 | ||
2208 | static inline int seq_less(u16 sq1, u16 sq2) | |
2209 | { | |
2210 | return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1); | |
2211 | } | |
2212 | ||
2213 | static inline u16 seq_inc(u16 sq) | |
2214 | { | |
2215 | return (sq + 1) & SEQ_MASK; | |
2216 | } | |
2217 | ||
2218 | static inline u16 seq_sub(u16 sq1, u16 sq2) | |
2219 | { | |
2220 | return (sq1 - sq2) & SEQ_MASK; | |
2221 | } | |
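/*
 * Worked examples (illustrative, not part of the original code): the
 * helpers above implement modulo-4096 sequence-number arithmetic, so
 * comparisons remain correct across the wrap from 0xfff to 0x000:
 *
 *	seq_inc(0xfff)         == 0x000
 *	seq_sub(0x002, 0xffe)  == 4      (0xffe -> 0xfff -> 0x000 -> 0x001 -> 0x002)
 *	seq_less(0xffe, 0x002) == true   (0xffe is "before" 0x002)
 *	seq_less(0x002, 0xffe) == false
 *
 * sq1 counts as "less" than sq2 when (sq1 - sq2) & SEQ_MASK exceeds
 * half of the sequence space (SEQ_MODULO >> 1).
 */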
2222 | ||
2223 | ||
2224 | static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw, | |
2225 | struct tid_ampdu_rx *tid_agg_rx, | |
2226 | int index) | |
2227 | { | |
2228 | struct ieee80211_supported_band *sband; | |
2229 | struct ieee80211_rate *rate; | |
2230 | struct ieee80211_rx_status status; | |
2231 | ||
2232 | if (!tid_agg_rx->reorder_buf[index]) | |
2233 | goto no_frame; | |
2234 | ||
2235 | /* release the reordered frames to stack */ | |
2236 | memcpy(&status, tid_agg_rx->reorder_buf[index]->cb, sizeof(status)); | |
2237 | sband = hw->wiphy->bands[status.band]; | |
2238 | if (status.flag & RX_FLAG_HT) | |
2239 | rate = sband->bitrates; /* TODO: HT rates */ | |
2240 | else | |
2241 | rate = &sband->bitrates[status.rate_idx]; | |
2242 | __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index], | |
2243 | &status, rate); | |
2244 | tid_agg_rx->stored_mpdu_num--; | |
2245 | tid_agg_rx->reorder_buf[index] = NULL; | |
2246 | ||
2247 | no_frame: | |
2248 | tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); | |
2249 | } | |
2250 | ||
2251 | ||
2252 | /* | |
2253 | * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If | |
2254 | * the skb was added to the buffer longer than this time ago, the earlier | |
2255 | * frames that have not yet been received are assumed to be lost and the skb | |
2256 | * can be released for processing. This may also release other skb's from the | |
2257 | * reorder buffer if there are no additional gaps between the frames. | |
2258 | */ | |
2259 | #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) | |
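/*
 * For reference: one second corresponds to HZ jiffies, so the timeout
 * above works out to roughly 100 ms regardless of the configured HZ.
 */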
2260 | ||
2261 | /* | |
2262 |  * As this function belongs to the Rx path it must be called with | |
2263 |  * rcu_read_lock protection. | |
2264 | */ | |
2265 | static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | |
2266 | struct tid_ampdu_rx *tid_agg_rx, | |
2267 | struct sk_buff *skb, | |
2268 | struct ieee80211_rx_status *rxstatus, | |
2269 | u16 mpdu_seq_num, | |
2270 | int bar_req) | |
2271 | { | |
2272 | u16 head_seq_num, buf_size; | |
2273 | int index; | |
2274 | ||
2275 | buf_size = tid_agg_rx->buf_size; | |
2276 | head_seq_num = tid_agg_rx->head_seq_num; | |
2277 | ||
2278 | /* frame with out of date sequence number */ | |
2279 | if (seq_less(mpdu_seq_num, head_seq_num)) { | |
2280 | dev_kfree_skb(skb); | |
2281 | return 1; | |
2282 | } | |
2283 | ||
2284 | 	/* if the frame sequence number exceeds our buffering window | |
2285 | 	 * size, or a Block Ack Request arrived, release stored frames */ | |
2286 | if ((!seq_less(mpdu_seq_num, head_seq_num + buf_size)) || (bar_req)) { | |
2287 | 		/* new head of the reordering buffer */ | |
2288 | if (bar_req) | |
2289 | head_seq_num = mpdu_seq_num; | |
2290 | else | |
2291 | head_seq_num = | |
2292 | seq_inc(seq_sub(mpdu_seq_num, buf_size)); | |
2293 | /* release stored frames up to new head to stack */ | |
2294 | while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) { | |
2295 | index = seq_sub(tid_agg_rx->head_seq_num, | |
2296 | tid_agg_rx->ssn) | |
2297 | % tid_agg_rx->buf_size; | |
2298 | ieee80211_release_reorder_frame(hw, tid_agg_rx, | |
2299 | index); | |
2300 | } | |
2301 | if (bar_req) | |
2302 | return 1; | |
2303 | } | |
2304 | ||
2305 | 	/* now the new frame is always within the reordering | |
2306 | 	 * buffer window */ | |
2307 | index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) | |
2308 | % tid_agg_rx->buf_size; | |
2309 | /* check if we already stored this frame */ | |
2310 | if (tid_agg_rx->reorder_buf[index]) { | |
2311 | dev_kfree_skb(skb); | |
2312 | return 1; | |
2313 | } | |
2314 | ||
2315 | 	/* if the arriving MPDU is in order and nothing else is stored, | |
2316 | 	 * release it immediately */ | |
2317 | if (mpdu_seq_num == tid_agg_rx->head_seq_num && | |
2318 | tid_agg_rx->stored_mpdu_num == 0) { | |
2319 | tid_agg_rx->head_seq_num = | |
2320 | seq_inc(tid_agg_rx->head_seq_num); | |
2321 | return 0; | |
2322 | } | |
2323 | ||
2324 | /* put the frame in the reordering buffer */ | |
2325 | tid_agg_rx->reorder_buf[index] = skb; | |
2326 | tid_agg_rx->reorder_time[index] = jiffies; | |
2327 | memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus, | |
2328 | sizeof(*rxstatus)); | |
2329 | tid_agg_rx->stored_mpdu_num++; | |
2330 | 	/* release buffered frames up to the next missing frame */ | |
2331 | index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) | |
2332 | % tid_agg_rx->buf_size; | |
2333 | if (!tid_agg_rx->reorder_buf[index] && | |
2334 | tid_agg_rx->stored_mpdu_num > 1) { | |
2335 | /* | |
2336 | * No buffers ready to be released, but check whether any | |
2337 | * frames in the reorder buffer have timed out. | |
2338 | */ | |
2339 | int j; | |
2340 | int skipped = 1; | |
2341 | for (j = (index + 1) % tid_agg_rx->buf_size; j != index; | |
2342 | j = (j + 1) % tid_agg_rx->buf_size) { | |
2343 | if (tid_agg_rx->reorder_buf[j] == NULL) { | |
2344 | skipped++; | |
2345 | continue; | |
2346 | } | |
2347 | if (!time_after(jiffies, tid_agg_rx->reorder_time[j] + | |
2348 | 					HT_RX_REORDER_BUF_TIMEOUT)) | |
2349 | break; | |
2350 | ||
2351 | #ifdef CONFIG_MAC80211_HT_DEBUG | |
2352 | if (net_ratelimit()) | |
2353 | printk(KERN_DEBUG "%s: release an RX reorder " | |
2354 | "frame due to timeout on earlier " | |
2355 | "frames\n", | |
2356 | wiphy_name(hw->wiphy)); | |
2357 | #endif | |
2358 | ieee80211_release_reorder_frame(hw, tid_agg_rx, j); | |
2359 | ||
2360 | /* | |
2361 | * Increment the head seq# also for the skipped slots. | |
2362 | */ | |
2363 | tid_agg_rx->head_seq_num = | |
2364 | (tid_agg_rx->head_seq_num + skipped) & | |
2365 | SEQ_MASK; | |
2366 | skipped = 0; | |
2367 | } | |
2368 | } else while (tid_agg_rx->reorder_buf[index]) { | |
2369 | ieee80211_release_reorder_frame(hw, tid_agg_rx, index); | |
2370 | index = seq_sub(tid_agg_rx->head_seq_num, | |
2371 | tid_agg_rx->ssn) % tid_agg_rx->buf_size; | |
2372 | } | |
2373 | return 1; | |
2374 | } | |
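/*
 * Worked example (illustrative, not part of the original code): with
 * ssn == 0xff0 and buf_size == 64, an MPDU carrying sequence number
 * 0x005 lands in slot
 *
 *	index = seq_sub(0x005, 0xff0) % 64 = 21
 *
 * i.e. the reorder buffer is used as a ring indexed by the frame's
 * wrapped distance from the session's starting sequence number, modulo
 * the negotiated buffer size.
 */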
2375 | ||
2376 | static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local, | |
2377 | struct sk_buff *skb, | |
2378 | struct ieee80211_rx_status *status) | |
2379 | { | |
2380 | struct ieee80211_hw *hw = &local->hw; | |
2381 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | |
2382 | struct sta_info *sta; | |
2383 | struct tid_ampdu_rx *tid_agg_rx; | |
2384 | u16 sc; | |
2385 | u16 mpdu_seq_num; | |
2386 | u8 ret = 0; | |
2387 | int tid; | |
2388 | ||
2389 | sta = sta_info_get(local, hdr->addr2); | |
2390 | if (!sta) | |
2391 | return ret; | |
2392 | ||
2393 | 	/* filter the QoS data rx stream according to | |
2394 | 	 * STA/TID and check whether this STA/TID has an aggregation session */ | |
2395 | if (!ieee80211_is_data_qos(hdr->frame_control)) | |
2396 | goto end_reorder; | |
2397 | ||
2398 | tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; | |
2399 | ||
2400 | if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) | |
2401 | goto end_reorder; | |
2402 | ||
2403 | tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; | |
2404 | ||
2405 | /* qos null data frames are excluded */ | |
2406 | if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) | |
2407 | goto end_reorder; | |
2408 | ||
2409 | 	/* new, possibly out-of-order A-MPDU frame - process it */ | |
2410 | ||
2411 | /* reset session timer */ | |
2412 | if (tid_agg_rx->timeout) | |
2413 | mod_timer(&tid_agg_rx->session_timer, | |
2414 | TU_TO_EXP_TIME(tid_agg_rx->timeout)); | |
2415 | ||
2416 | /* if this mpdu is fragmented - terminate rx aggregation session */ | |
2417 | sc = le16_to_cpu(hdr->seq_ctrl); | |
2418 | if (sc & IEEE80211_SCTL_FRAG) { | |
2419 | ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr, | |
2420 | tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); | |
2421 | ret = 1; | |
2422 | goto end_reorder; | |
2423 | } | |
2424 | ||
2425 | 	/* manage the reordering buffer according to the MPDU sequence number */ | |
2426 | mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; | |
2427 | ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, status, | |
2428 | mpdu_seq_num, 0); | |
2429 | end_reorder: | |
2430 | return ret; | |
2431 | } | |
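/*
 * Illustrative note (not from the original source): the TID extracted
 * above is the low four bits of the QoS Control field, so a QoS
 * Control value of 0x0006 selects TID 6.  Only QoS data frames carry
 * that field, which is why non-QoS data is filtered out beforehand.
 */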
2432 | ||
2433 | /* | |
2434 | * This is the receive path handler. It is called by a low level driver when an | |
2435 | * 802.11 MPDU is received from the hardware. | |
2436 | */ | |
2437 | void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, | |
2438 | struct ieee80211_rx_status *status) | |
2439 | { | |
2440 | struct ieee80211_local *local = hw_to_local(hw); | |
2441 | struct ieee80211_rate *rate = NULL; | |
2442 | struct ieee80211_supported_band *sband; | |
2443 | ||
2444 | if (status->band < 0 || | |
2445 | status->band >= IEEE80211_NUM_BANDS) { | |
2446 | WARN_ON(1); | |
2447 | return; | |
2448 | } | |
2449 | ||
2450 | sband = local->hw.wiphy->bands[status->band]; | |
2451 | if (!sband) { | |
2452 | WARN_ON(1); | |
2453 | return; | |
2454 | } | |
2455 | ||
2456 | if (status->flag & RX_FLAG_HT) { | |
2457 | /* rate_idx is MCS index */ | |
2458 | if (WARN_ON(status->rate_idx < 0 || | |
2459 | status->rate_idx >= 76)) | |
2460 | return; | |
2461 | /* HT rates are not in the table - use the highest legacy rate | |
2462 | * for now since other parts of mac80211 may not yet be fully | |
2463 | * MCS aware. */ | |
2464 | rate = &sband->bitrates[sband->n_bitrates - 1]; | |
2465 | } else { | |
2466 | if (WARN_ON(status->rate_idx < 0 || | |
2467 | status->rate_idx >= sband->n_bitrates)) | |
2468 | return; | |
2469 | rate = &sband->bitrates[status->rate_idx]; | |
2470 | } | |
2471 | ||
2472 | /* | |
2473 | * key references and virtual interfaces are protected using RCU | |
2474 | * and this requires that we are in a read-side RCU section during | |
2475 | * receive processing | |
2476 | */ | |
2477 | rcu_read_lock(); | |
2478 | ||
2479 | /* | |
2480 | * Frames with failed FCS/PLCP checksum are not returned, | |
2481 | * all other frames are returned without radiotap header | |
2482 | * if it was previously present. | |
2483 | * Also, frames with less than 16 bytes are dropped. | |
2484 | */ | |
2485 | skb = ieee80211_rx_monitor(local, skb, status, rate); | |
2486 | if (!skb) { | |
2487 | rcu_read_unlock(); | |
2488 | return; | |
2489 | } | |
2490 | ||
2491 | /* | |
2492 | * In theory, the block ack reordering should happen after duplicate | |
2493 | * removal (ieee80211_rx_h_check(), which is an RX handler). As such, | |
2494 | * the call to ieee80211_rx_reorder_ampdu() should really be moved to | |
2495 | * happen as a new RX handler between ieee80211_rx_h_check and | |
2496 | * ieee80211_rx_h_decrypt. This cleanup may eventually happen, but for | |
2497 | * the time being, the call can be here since RX reorder buf processing | |
2498 | * will implicitly skip duplicates. We could, in theory at least, | |
2499 | * process frames that ieee80211_rx_h_passive_scan would drop (e.g., | |
2500 | * frames from other than operational channel), but that should not | |
2501 | * happen in normal networks. | |
2502 | */ | |
2503 | if (!ieee80211_rx_reorder_ampdu(local, skb, status)) | |
2504 | __ieee80211_rx_handle_packet(hw, skb, status, rate); | |
2505 | ||
2506 | rcu_read_unlock(); | |
2507 | } | |
2508 | EXPORT_SYMBOL(__ieee80211_rx); | |
2509 | ||
2510 | /* This is a version of the rx handler that can be called from hard irq | |
2511 | * context. Post the skb on the queue and schedule the tasklet */ | |
2512 | void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb, | |
2513 | struct ieee80211_rx_status *status) | |
2514 | { | |
2515 | struct ieee80211_local *local = hw_to_local(hw); | |
2516 | ||
2517 | BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); | |
2518 | ||
2519 | skb->dev = local->mdev; | |
2520 | /* copy status into skb->cb for use by tasklet */ | |
2521 | memcpy(skb->cb, status, sizeof(*status)); | |
2522 | skb->pkt_type = IEEE80211_RX_MSG; | |
2523 | skb_queue_tail(&local->skb_queue, skb); | |
2524 | tasklet_schedule(&local->tasklet); | |
2525 | } | |
2526 | EXPORT_SYMBOL(ieee80211_rx_irqsafe); |
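/*
 * Usage sketch (hypothetical driver code, for illustration only; the
 * mydrv_* names and fields are made up): a low-level driver fills in
 * an ieee80211_rx_status for every received MPDU and hands the frame
 * to mac80211 through one of the two entry points above.  Only the
 * _irqsafe variant may be used from hard interrupt context, since it
 * merely queues the skb for the RX tasklet:
 *
 *	static void mydrv_handle_rx_irq(struct mydrv_priv *priv,
 *					struct sk_buff *skb)
 *	{
 *		struct ieee80211_rx_status status = {};
 *
 *		status.band = IEEE80211_BAND_2GHZ;
 *		status.freq = priv->cur_freq;
 *		status.rate_idx = mydrv_rate_index(priv, skb);
 *		status.signal = mydrv_rssi(priv, skb);
 *
 *		ieee80211_rx_irqsafe(priv->hw, skb, &status);
 *	}
 */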