/******************************************************************************

  Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************

  Few modifications for Realtek's Wi-Fi drivers by

  A special thanks goes to Realtek for their support!

******************************************************************************/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <linux/if_vlan.h>

#include "rtllib.h"
/*
 * 802.11 frame_control for data frames - 2 bytes
 *      ,--------------------------------------------------------------------.
 * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9  |  a |  b  |  c  |  d  |  e |
 *      |---|---|---|---|---|---|---|---|---|----|----|-----|-----|-----|----|
 * val  | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0  |  x |  x  |  x  |  x  |  x |
 *      |---|---|---|---|---|---|---|---|---|----|----|-----|-----|-----|----|
 * desc |  ver  | type  |  ^-subtype-^  |to |from|more|retry| pwr |more |wep |
 *      |       |       |  x=0 data     |DS | DS |frag|     | mgm |data |    |
 *      |       |       |  x=1 data+ack |   |    |    |     |     |     |    |
 *      '--------------------------------------------------------------------'
 *
 *          ,--------- 'ctrl' expands to >---'
 *          |
 *       ,--'---,------------------------------------------------------------.
 * Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4  |
 *       |------|------|---------|---------|---------|------|---------|------|
 * Desc. | ctrl | dura |  DA/RA  |   TA    |   SA    | Sequ |  Frame  |  fcs |
 *       |      | tion | (BSSID) |         |         | ence |  data   |      |
 *       `--------------------------------------------------|         |------'
 * Total: 28 non-data bytes                                  `----.----'
 *                                                                |
 *        .- 'Frame data' expands to <---------------------------'
 *        |
 *        V
 *       ,---------------------------------------------------.
 * Bytes |  1   |  1   |    1    |    3     |  2   |  0-2304 |
 *       |------|------|---------|----------|------|---------|
 * Desc. | SNAP | SNAP | Control |Eth Tunnel| Type |   IP    |
 *       | DSAP | SSAP |         |          |      | Packet  |
 *       | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |         |
 *       `-----------------------------------------|         |
 * Total: 8 non-data bytes                          `----.----'
 *                                                       |
 *        .- 'IP Packet' expands, if WEP enabled, to <---'
 *        |
 *        V
 *       ,-----------------------.
 * Bytes |  4  |   0-2296  |  4  |
 *       |-----|-----------|-----|
 * Desc. | IV  | Encrypted | ICV |
 *       |     | IP Packet |     |
 *       `-----------------------'
 * Total: 8 non-data bytes
 *
 *
 * 802.3 Ethernet Data Frame
 *
 *       ,-----------------------------------------.
 * Bytes |   6   |   6   |  2   |  Variable |   4  |
 *       |-------|-------|------|-----------|------|
 * Desc. | Dest. | Source| Type | IP Packet |  fcs |
 *       |  MAC  |  MAC  |      |           |      |
 *       `-----------------------------------------'
 * Total: 18 non-data bytes
 *
 * In the event that fragmentation is required, the incoming payload is split
 * into N parts of size ieee->fts.  The first fragment contains the SNAP
 * header and the remaining fragments carry only data.
 *
 * If encryption is enabled, each fragment's payload size is reduced by enough
 * space to add the prefix and postfix (IV and ICV, totalling 8 bytes in the
 * case of WEP).  So 1500 bytes of payload with ieee->fts set to 500 takes
 * 3 frames without encryption; with WEP it takes 4 frames, because the
 * payload of each frame is reduced to 492 bytes.  (A worked sketch of this
 * arithmetic follows this comment block.)
 *
 * |    ETHERNET HEADER          ,-<-- PAYLOAD
 * |                             |  14 bytes from skb->data
 * |    2 bytes for Type -->  ,T.|  (sizeof ethhdr)
 * |,-Dest.--. ,--Src.---.    | ||
 * |  6 bytes| |  6 bytes |   | ||
 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
 *     |     |  |          |  `T' <---- 2 bytes for Type
 *     |     |  '---SNAP--'  <-------- 6 bytes for SNAP
 *     `-IV--'  <-------------------- 4 bytes for IV (WEP)
 */
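
/* The fragmentation arithmetic described in the comment above, as a worked
 * sketch.  This helper is illustrative only and is not used by the driver;
 * its name and the hard-coded WEP overhead (4-byte IV prefix plus 4-byte ICV
 * postfix) are assumptions made for the example.  rtllib_xmit_inter() does
 * the real calculation from the per-cipher extra_*_prefix/postfix_len fields.
 */
static inline int rtllib_example_nr_frags(int payload_bytes, int frag_payload,
					  bool wep)
{
	int per_frag = frag_payload - (wep ? 4 + 4 : 0);

	/* e.g. a 1500-byte payload with a 500-byte per-fragment budget:
	 * open network: ceil(1500 / 500) = 3 fragments
	 * WEP:          ceil(1500 / 492) = 4 fragments
	 */
	return DIV_ROUND_UP(payload_bytes, per_frag);
}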
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
static int rtllib_put_snap(u8 *data, u16 h_proto)
{
	struct rtllib_snap_hdr *snap;
	u8 *oui;

	snap = (struct rtllib_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;

	if (h_proto == 0x8137 || h_proto == 0x80f3)
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	*(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
	return SNAP_SIZE + sizeof(u16);
}
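
/* Illustrative only: for an IPv4 payload (h_proto == ETH_P_IP, 0x0800) the
 * eight bytes that rtllib_put_snap() writes at the front of the first
 * fragment are the RFC 1042 LLC/SNAP encapsulation:
 *
 *   AA AA 03 00 00 00 08 00
 *   |  |  |  `---OUI---' `-- Type (0x0800)
 *   |  |  `- Control, 0x03 (UI)
 *   `--`--- DSAP/SSAP, 0xAA
 *
 * Only the bridge-tunnelled protocols 0x8137 (Novell IPX) and 0x80f3
 * (AppleTalk AARP) select the 802.1H OUI 00-00-F8 instead.
 */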
int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
			    int hdr_len)
{
	struct lib80211_crypt_data *crypt = NULL;
	int res;

	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
	if (!(crypt && crypt->ops)) {
		netdev_info(ieee->dev, "=========>%s(), crypt is null\n",
			    __func__);
		return -1;
	}
	/* To encrypt, frame format is:
	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes)
	 */

	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
	 * call both MSDU and MPDU encryption functions from here.
	 */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
	atomic_dec(&crypt->refcnt);

	if (res < 0) {
		netdev_info(ieee->dev, "%s: Encryption failed: len=%d.\n",
			    ieee->dev->name, frag->len);
		return -1;
	}
	return 0;
}
void rtllib_txb_free(struct rtllib_txb *txb)
static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
					   gfp_t gfp_mask)
{
	struct rtllib_txb *txb;
	int i;

	txb = kmalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags),
		      gfp_mask);
	if (!txb)
		return NULL;
	memset(txb, 0, sizeof(struct rtllib_txb));
	txb->nr_frags = nr_frags;
	txb->frag_size = cpu_to_le16(txb_size);
	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = dev_alloc_skb(txb_size);
		if (unlikely(!txb->fragments[i])) {
			i--;
			break;
		}
		memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
	}
	if (unlikely(i != nr_frags)) {
		while (i >= 0)
			dev_kfree_skb_any(txb->fragments[i--]);
		kfree(txb);
		return NULL;
	}
	return txb;
}
static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))

	print_hex_dump_bytes("rtllib_classify(): ", DUMP_PREFIX_NONE, skb->data,

	switch (ip->tos & 0xfc) {
static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
				    struct cb_desc *tcb_desc)
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
	struct tx_ts_record *pTxTs = NULL;
	struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data;

	if (rtllib_act_scanning(ieee, false))
	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
	if (!IsQoSDataFrame(skb->data))
	if (is_multicast_ether_addr(hdr->addr1))

	if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)

	if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)

	if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
	if (pHTInfo->bCurrentAMPDUEnable) {
		if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
			   skb->priority, TX_DIR, true)) {
			netdev_info(ieee->dev, "%s: can't get TS\n", __func__);

		if (pTxTs->TxAdmittedBARecord.bValid == false) {
			if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
			} else if (tcb_desc->bdhcp == 1) {
			} else if (!pTxTs->bDisable_AddBa) {
				TsStartAddBaProcess(ieee, pTxTs);
			goto FORCED_AGG_SETTING;
		} else if (pTxTs->bUsingBa == false) {
			if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum,
				    (pTxTs->TxCurSeq+1)%4096))
				pTxTs->bUsingBa = true;
				goto FORCED_AGG_SETTING;

	if (ieee->iw_mode == IW_MODE_INFRA) {
		tcb_desc->bAMPDUEnable = true;
		tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
		tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;

	switch (pHTInfo->ForcedAMPDUMode) {
	case HT_AGG_FORCE_ENABLE:
		tcb_desc->bAMPDUEnable = true;
		tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
		tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;

	case HT_AGG_FORCE_DISABLE:
		tcb_desc->bAMPDUEnable = false;
		tcb_desc->ampdu_density = 0;
		tcb_desc->ampdu_factor = 0;
static void rtllib_qurey_ShortPreambleMode(struct rtllib_device *ieee,
					   struct cb_desc *tcb_desc)
{
	tcb_desc->bUseShortPreamble = false;
	if (tcb_desc->data_rate == 2)
		return;
	else if (ieee->current_network.capability &
		 WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
}
static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
				      struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	tcb_desc->bUseShortGI = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (pHTInfo->bForcedShortGI) {
		tcb_desc->bUseShortGI = true;
		return;
	}

	if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz)
		tcb_desc->bUseShortGI = true;
	else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz)
		tcb_desc->bUseShortGI = true;
}
static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
				       struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	tcb_desc->bPacketBW = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
		return;

	if ((tcb_desc->data_rate & 0x80) == 0)
		return;
	if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
	    !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
		tcb_desc->bPacketBW = true;
}
static void rtllib_query_protectionmode(struct rtllib_device *ieee,
					struct cb_desc *tcb_desc,
	struct rt_hi_throughput *pHTInfo;

	tcb_desc->bRTSSTBC = false;
	tcb_desc->bRTSUseShortGI = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->bRTSBW = false;

	if (tcb_desc->bBroadcast || tcb_desc->bMulticast)

	if (is_broadcast_ether_addr(skb->data+16))

	if (ieee->mode < IEEE_N_24G) {
		if (skb->len > ieee->rts) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		} else if (ieee->current_network.buseprotection) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;

	pHTInfo = ieee->pHTInfo;

		if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->bRTSEnable = true;
		} else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS |
			   HT_IOT_ACT_PURE_N_MODE)) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;

		if (ieee->current_network.buseprotection) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;

		if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
			u8 HTOpMode = pHTInfo->CurrentOpMode;
			if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 ||
						      HTOpMode == 3)) ||
			    (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;

		if (skb->len > ieee->rts) {
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->bRTSEnable = true;

		if (tcb_desc->bAMPDUEnable) {
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->bRTSEnable = false;

	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
	if (ieee->iw_mode == IW_MODE_MASTER)

	tcb_desc->bRTSEnable = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->rts_rate = 0;
	tcb_desc->bRTSBW = false;
static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
				     struct cb_desc *tcb_desc)
{
	if (ieee->bTxDisableRateFallBack)
		tcb_desc->bTxDisableRateFallBack = true;

	if (ieee->bTxUseDriverAssingedRate)
		tcb_desc->bTxUseDriverAssingedRate = true;
	if (!tcb_desc->bTxDisableRateFallBack ||
	    !tcb_desc->bTxUseDriverAssingedRate) {
		if (ieee->iw_mode == IW_MODE_INFRA ||
		    ieee->iw_mode == IW_MODE_ADHOC)
			tcb_desc->RATRIndex = 0;
	}
}
static u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
			       u8 *dst)
{
	u16 seqnum = 0;

	if (is_multicast_ether_addr(dst))
		return 0;
	if (IsQoSDataFrame(skb->data)) {
		struct tx_ts_record *pTS = NULL;

		if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
			   skb->priority, TX_DIR, true))
			return 0;
		seqnum = pTS->TxCurSeq;
		pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096;
		return seqnum;
	}
	return 0;
}
static int wme_downgrade_ac(struct sk_buff *skb)
	switch (skb->priority) {
		skb->priority = 5; /* VO -> VI */
		skb->priority = 3; /* VI -> BE */
		skb->priority = 1; /* BE -> BK */
static u8 rtllib_current_rate(struct rtllib_device *ieee)
{
	if (ieee->mode & IEEE_MODE_MASK)
		return ieee->rate;

	if (ieee->HTCurrentOperaRate)
		return ieee->HTCurrentOperaRate;
	return ieee->rate & 0x7F;
}
static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
	struct rtllib_device *ieee = (struct rtllib_device *)
				     netdev_priv_rsl(dev);
	struct rtllib_txb *txb = NULL;
	struct rtllib_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
	int qos_actived = ieee->current_network.qos_data.active;
	struct lib80211_crypt_data *crypt = NULL;
	struct cb_desc *tcb_desc;
	u8 bIsMulticast = false;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it...
	 */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
	   IEEE_SOFTMAC_TX_QUEUE)) ||
	   ((!ieee->softmac_data_hard_start_xmit &&
	   (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		netdev_warn(ieee->dev, "No xmit handler.\n");

	if (likely(ieee->raw_tx == 0)) {
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			netdev_warn(ieee->dev, "skb too small (%d).\n",

		/* Save source and destination addresses */
		ether_addr_copy(dest, skb->data);
		ether_addr_copy(src, skb->data + ETH_ALEN);

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);
		if (ieee->iw_mode == IW_MODE_MONITOR) {
			txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
			if (unlikely(!txb)) {
				netdev_warn(ieee->dev,
					    "Could not allocate TXB\n");

			txb->payload_size = cpu_to_le16(skb->len);
			memcpy(skb_put(txb->fragments[0], skb->len), skb->data,

		if (skb->len > 282) {
			if (ether_type == ETH_P_IP) {
				const struct iphdr *ip = (struct iphdr *)
					((u8 *)skb->data+14);
				if (ip->protocol == IPPROTO_UDP) {
					udp = (struct udphdr *)((u8 *)ip +
					if (((((u8 *)udp)[1] == 68) &&
					     (((u8 *)udp)[3] == 67)) ||
					    ((((u8 *)udp)[1] == 67) &&
					     (((u8 *)udp)[3] == 68))) {
						ieee->LPSDelayCnt = 200;
			} else if (ether_type == ETH_P_ARP) {
				netdev_info(ieee->dev,
					    "=================>DHCP Protocol start tx ARP pkt!!\n");
					 ieee->current_network.tim.tim_count;

		skb->priority = rtllib_classify(skb, IsAmsdu);
		crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;
		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {

		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE -
			netdev_dbg(ieee->dev,
				   "TX: IEEE 802.11 EAPOL frame: %s\n",
				   eap_get_type(eap->type));
		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
			fc = RTLLIB_FTYPE_DATA;

			fc |= RTLLIB_STYPE_QOS_DATA;
			fc |= RTLLIB_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= RTLLIB_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			 * Addr3 = DA
			 */
			ether_addr_copy(header.addr1,
					ieee->current_network.bssid);
			ether_addr_copy(header.addr2, src);
				ether_addr_copy(header.addr3,
						ieee->current_network.bssid);
				ether_addr_copy(header.addr3, dest);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			 * Addr3 = BSSID
			 */
			ether_addr_copy(header.addr1, dest);
			ether_addr_copy(header.addr2, src);
			ether_addr_copy(header.addr3,
					ieee->current_network.bssid);

		bIsMulticast = is_multicast_ether_addr(header.addr1);

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented)
		 */
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;

			frag_size = ieee->fts;

			hdr_len = RTLLIB_3ADDR_LEN + 2;

			/* in case we are a client verify acm is not set for this ac */
			while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
				netdev_info(ieee->dev, "skb->priority = %x\n",
				if (wme_downgrade_ac(skb))
				netdev_info(ieee->dev, "converted skb->priority = %x\n",

			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);

			hdr_len = RTLLIB_3ADDR_LEN;
		/* Determine amount of payload per fragment.  Regardless of if
		 * this stack is providing the full 802.11 header, one will
		 * eventually be affixed to this fragment -- so we must account
		 * for it when determining the amount of payload space.
		 */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
			bytes_per_frag -= RTLLIB_FCS_LEN;

		/* Each fragment may need to have room for encrypting
		 * pre/postfix
		 */
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
				crypt->ops->extra_mpdu_postfix_len +
				crypt->ops->extra_msdu_prefix_len +
				crypt->ops->extra_msdu_postfix_len;

		/* Number of fragments is the total bytes_per_frag /
		 * payload_per_fragment
		 */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the
		 * reserve and full fragment bytes (bytes_per_frag doesn't
		 * include prefix, postfix, header, FCS, etc.)
		 */
		txb = rtllib_alloc_txb(nr_frags, frag_size +
				       ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			netdev_warn(ieee->dev, "Could not allocate TXB\n");

		txb->encrypted = encrypt;
		txb->payload_size = cpu_to_le16(bytes);

			txb->queue_index = UP2AC(skb->priority);
			txb->queue_index = WME_AC_BE;
		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (struct cb_desc *)(skb_frag->cb +

				skb_frag->priority = skb->priority;
				tcb_desc->queue_index = UP2AC(skb->priority);
				skb_frag->priority = WME_AC_BE;
				tcb_desc->queue_index = WME_AC_BE;

			skb_reserve(skb_frag, ieee->tx_headroom);

				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag,
					    crypt->ops->extra_mpdu_prefix_len +
					    crypt->ops->extra_msdu_prefix_len);
				tcb_desc->bHwSec = 0;

			frag_hdr = (struct rtllib_hdr_3addrqos *)
				    skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the
			 * MOREFRAGS bit to the frame control
			 */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | RTLLIB_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

				/* The last fragment has the remaining length */
				bytes = bytes_last_frag;

			if ((qos_actived) && (!bIsMulticast)) {
					cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
					cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl)<<4 | i);
					cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);

			/* Put a SNAP header on the first fragment */
				skb_put(skb_frag, SNAP_SIZE +
					sizeof(u16)), ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload
			 */
				rtllib_encrypt_fragment(ieee, skb_frag,
			if (ieee->config &
			   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
				skb_put(skb_frag, 4);
		if ((qos_actived) && (!bIsMulticast)) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;

			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;

		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
			netdev_warn(ieee->dev, "skb too small (%d).\n",

		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
			netdev_warn(ieee->dev, "Could not allocate TXB\n");

		txb->payload_size = cpu_to_le16(skb->len);
		memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
		struct cb_desc *tcb_desc = (struct cb_desc *)
				(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		tcb_desc->priority = skb->priority;

		if (ether_type == ETH_P_PAE) {
			if (ieee->pHTInfo->IOTAction &
			    HT_IOT_ACT_WA_IOT_Broadcom) {
				tcb_desc->data_rate =
					 MgntQuery_TxRateExcludeCCKRates(ieee);
				tcb_desc->bTxDisableRateFallBack = false;
				tcb_desc->data_rate = ieee->basic_rate;
				tcb_desc->bTxDisableRateFallBack = 1;

			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;

			if (is_multicast_ether_addr(header.addr1))
				tcb_desc->bMulticast = 1;
			if (is_broadcast_ether_addr(header.addr1))
				tcb_desc->bBroadcast = 1;
			rtllib_txrate_selectmode(ieee, tcb_desc);
			if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
				tcb_desc->data_rate = ieee->basic_rate;
				tcb_desc->data_rate = rtllib_current_rate(ieee);

				if (ieee->pHTInfo->IOTAction &
				    HT_IOT_ACT_WA_IOT_Broadcom) {
					tcb_desc->data_rate =
					   MgntQuery_TxRateExcludeCCKRates(ieee);
					tcb_desc->bTxDisableRateFallBack = false;
					tcb_desc->data_rate = MGN_1M;
					tcb_desc->bTxDisableRateFallBack = 1;

				tcb_desc->RATRIndex = 7;
				tcb_desc->bTxUseDriverAssingedRate = 1;

			rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
			rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
			rtllib_query_HTCapShortGI(ieee, tcb_desc);
			rtllib_query_BandwidthMode(ieee, tcb_desc);
			rtllib_query_protectionmode(ieee, tcb_desc,
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
			rtllib_softmac_xmit(txb, ieee);
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_bytes += le16_to_cpu(txb->payload_size);
			rtllib_txb_free(txb);

	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
	memset(skb->cb, 0, sizeof(skb->cb));
	return rtllib_xmit_inter(skb, dev);
}
EXPORT_SYMBOL(rtllib_xmit);
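
/* Illustrative only (hypothetical driver-side glue, not part of rtllib):
 * a driver built on this library hands its egress frames to rtllib_xmit(),
 * typically from its netdev transmit hook.  rtllib_xmit() consumes the skb
 * and returns 0 on success; on the failure path above it stops the queue and
 * returns nonzero without freeing the skb, so NETDEV_TX_BUSY is appropriate.
 */
static inline netdev_tx_t example_ndo_start_xmit(struct sk_buff *skb,
						 struct net_device *dev)
{
	return rtllib_xmit(skb, dev) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}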