/******************************************************************************

  Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA 02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************

  Few modifications for Realtek's Wi-Fi drivers by

  A special thanks goes to Realtek for their support!

******************************************************************************/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <linux/if_vlan.h>

#include "rtllib.h"
/*

802.11 frame_control for data frames - 2 bytes
       ,-----------------------------------------------------------------------------------------.
  bits | 0  |  1  |  2  |  3  |  4  |  5  |  6  |  7  |  8  |  9  |  a  |  b  |  c  |  d  |  e   |
       |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
  val  | 0  |  0  |  0  |  1  |  x  |  0  |  0  |  0  |  1  |  0  |  x  |  x  |  x  |  x  |  x   |
       |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
  desc |  ^-ver-^ |  ^type-^  | ^-----subtype-----^   | to  |from |more |retry| pwr |more | wep  |
       |          |           | x=0 data,x=1 data+ack | DS  | DS  |frag |     | mgm |data |      |
       '-----------------------------------------------------------------------------------------'

802.11 Data Frame

                                    ,--------- 'ctrl' expands to >-----------'
                                    |
        ,--'---,-------------------------------------------------------------.
  Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4  |
        |------|------|---------|---------|---------|------|---------|------|
  Desc. | ctrl | dura |  DA/RA  |   TA    |    SA   | Sequ |  Frame  |  fcs |
        |      | tion | (BSSID) |         |         | ence |  data   |      |
        `--------------------------------------------------|         |------'
  Total: 28 non-data bytes                                  `----.----'

                .- 'Frame data' expands to <---------------------------'

        ,---------------------------------------------------.
  Bytes |  1   |  1   |    1    |    3     |  2   |  0-2304 |
        |------|------|---------|----------|------|---------|
  Desc. | SNAP | SNAP | Control |Eth Tunnel| Type |   IP    |
        | DSAP | SSAP |         |          |      | Packet  |
        | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |         |
        `-----------------------------------------|         |
  Total: 8 non-data bytes                          `----.----'

          .- 'IP Packet' expands, if WEP enabled, to <--'

        ,-----------------------.
  Bytes |  4  |   0-2296  |  4  |
        |-----|-----------|-----|
  Desc. | IV  | Encrypted | ICV |
        |     | IP Packet |     |
        `-----------------------'
  Total: 8 non-data bytes


802.3 Ethernet Data Frame

        ,-----------------------------------------.
  Bytes |   6   |   6   |  2   |  Variable |   4  |
        |-------|-------|------|-----------|------|
  Desc. | Dest. | Source| Type | IP Packet |  fcs |
        |  MAC  |  MAC  |      |           |      |
        `-----------------------------------------'
  Total: 18 non-data bytes

In the event that fragmentation is required, the incoming payload is split into
N parts of size ieee->fts.  The first fragment contains the SNAP header and the
remaining packets are just data.

If encryption is enabled, each fragment payload size is reduced by enough space
to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP).
So if you have 1500 bytes of payload with ieee->fts set to 500, without
encryption it will take 3 frames.  With WEP it will take 4 frames, as the
payload of each frame is reduced to 492 bytes.

 * | ETHERNET HEADER            ,-<-- PAYLOAD
 * |                            |   14 bytes from skb->data
 * |  2 bytes for Type --> ,T.  |   (sizeof ethhdr)
 * |,-Dest.--. ,--Src.---.  | | |
 * | 6 bytes | | 6 bytes  |  | | |
 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
 * |         |           |  | `T' <---- 2 bytes for Type
 * |         |   '---SNAP--' <-------- 6 bytes for SNAP
 * `-IV--' <-------------------- 4 bytes for IV (WEP)
 */

static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
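
/* Write an 802.2 LLC/SNAP header (DSAP/SSAP 0xAA, UI control 0x03, OUI and
 * ethertype) at 'data' and return the number of bytes written,
 * SNAP_SIZE + sizeof(u16).  IPX (0x8137) and AppleTalk AARP (0x80f3) keep
 * the bridge-tunnel OUI 00:00:F8; everything else uses the RFC 1042 OUI.
 */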
inline int rtllib_put_snap(u8 *data, u16 h_proto)
{
	struct rtllib_snap_hdr *snap;
	u8 *oui;

	snap = (struct rtllib_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;

	if (h_proto == 0x8137 || h_proto == 0x80f3)
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	*(u16 *)(data + SNAP_SIZE) = htons(h_proto);

	return SNAP_SIZE + sizeof(u16);
}
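
/* Encrypt one fragment in place with the currently selected TX key.  Both
 * the MSDU and the MPDU encryption hooks are called here because host-based
 * 802.11 TX fragmentation is not supported.  Returns 0 on success, -1 when
 * no crypt ops are configured or encryption fails (the frame is then counted
 * in ieee_stats.tx_discards).
 */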
int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
			    int hdr_len)
{
	struct lib80211_crypt_data *crypt = NULL;
	int res = 0;

	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];

	if (!(crypt && crypt->ops)) {
		printk(KERN_INFO "=========>%s(), crypt is null\n", __func__);
		return -1;
	}
	/* To encrypt, frame format is:
	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */

	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
	 * call both MSDU and MPDU encryption functions from here. */
	atomic_inc(&crypt->refcnt);

	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
		       ieee->dev->name, frag->len);
		ieee->ieee_stats.tx_discards++;
		return -1;
	}

	return 0;
}
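
/* Free a TX block container previously allocated by rtllib_alloc_txb(). */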
void rtllib_txb_free(struct rtllib_txb *txb)
{
	if (unlikely(!txb))
		return;
	kfree(txb);
}
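
/* Allocate a TX block with nr_frags fragment skbs of txb_size bytes each.
 * On any allocation failure every fragment allocated so far is released and
 * NULL is returned.
 */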
static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
					   gfp_t gfp_mask)
{
	struct rtllib_txb *txb;
	int i;

	txb = kmalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags),
		      gfp_mask);
	if (!txb)
		return NULL;

	memset(txb, 0, sizeof(struct rtllib_txb));
	txb->nr_frags = nr_frags;
	txb->frag_size = cpu_to_le16(txb_size);

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = dev_alloc_skb(txb_size);
		if (unlikely(!txb->fragments[i])) {
			i--;
			break;
		}
		memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
	}
	if (unlikely(i != nr_frags)) {
		while (i >= 0)
			dev_kfree_skb_any(txb->fragments[i--]);
		kfree(txb);
		return NULL;
	}

	return txb;
}
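
/* Map the TOS/DSCP precedence bits of an outgoing IPv4 packet to an 802.1d
 * user priority (0-7); anything that is not IP is sent as best effort (0).
 */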
static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
{
	struct ethhdr *eth;
	struct iphdr *ip;

	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		return 0;

	RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA, skb->data, skb->len);
	ip = ip_hdr(skb);
	switch (ip->tos & 0xfc) {
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}
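
/* Decide whether this frame may go out as part of an A-MPDU aggregate and
 * fill the aggregation fields of the cb_desc.  Aggregation is skipped while
 * scanning, for non-HT/non-QoS/multicast frames, for DHCP frames, right
 * after association (CntAfterLink < 2) and when IOT or security constraints
 * forbid it; a forced A-MPDU mode in pHTInfo overrides the negotiated
 * settings.
 */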
static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
				    struct sk_buff *skb,
				    struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
	struct tx_ts_record *pTxTs = NULL;
	struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data;

	if (rtllib_act_scanning(ieee, false))
		return;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;
	if (!IsQoSDataFrame(skb->data))
		return;
	if (is_multicast_ether_addr(hdr->addr1))
		return;

	if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)
		return;

	if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)
		return;

	if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
		return;
	if (pHTInfo->bCurrentAMPDUEnable) {
		if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
			   skb->priority, TX_DIR, true)) {
			printk(KERN_INFO "%s: can't get TS\n", __func__);
			return;
		}
		if (pTxTs->TxAdmittedBARecord.bValid == false) {
			if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
						 KEY_TYPE_NA)) {
				;
			} else if (tcb_desc->bdhcp == 1) {
				;
			} else if (!pTxTs->bDisable_AddBa) {
				TsStartAddBaProcess(ieee, pTxTs);
			}
			goto FORCED_AGG_SETTING;
		} else if (pTxTs->bUsingBa == false) {
			if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum,
				    (pTxTs->TxCurSeq+1)%4096))
				pTxTs->bUsingBa = true;
			else
				goto FORCED_AGG_SETTING;
		}
		if (ieee->iw_mode == IW_MODE_INFRA) {
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
		}
	}
FORCED_AGG_SETTING:
	switch (pHTInfo->ForcedAMPDUMode) {
	case HT_AGG_AUTO:
		break;

	case HT_AGG_FORCE_ENABLE:
		tcb_desc->bAMPDUEnable = true;
		tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
		tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
		break;

	case HT_AGG_FORCE_DISABLE:
		tcb_desc->bAMPDUEnable = false;
		tcb_desc->ampdu_density = 0;
		tcb_desc->ampdu_factor = 0;
		break;
	}
}
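
/* Choose the PHY preamble: the 1M rate (data_rate == 2, in 500 kbps units)
 * always uses a long preamble, otherwise a short preamble is selected when
 * the current network advertises WLAN_CAPABILITY_SHORT_PREAMBLE.
 */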
static void rtllib_qurey_ShortPreambleMode(struct rtllib_device *ieee,
					   struct cb_desc *tcb_desc)
{
	tcb_desc->bUseShortPreamble = false;
	if (tcb_desc->data_rate == 2)
		return;
	else if (ieee->current_network.capability &
		 WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
}
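
/* Enable the short guard interval for HT transmissions when it is forced by
 * pHTInfo or supported for the current channel bandwidth (20/40 MHz).
 */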
static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
				      struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	tcb_desc->bUseShortGI = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (pHTInfo->bForcedShortGI) {
		tcb_desc->bUseShortGI = true;
		return;
	}

	if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz)
		tcb_desc->bUseShortGI = true;
	else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz)
		tcb_desc->bUseShortGI = true;
}
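
/* Mark the packet for 40 MHz transmission: only unicast HT (MCS) rates while
 * the link is operating at 40 MHz and no forced 20 MHz fallback is active.
 */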
static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
				       struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	tcb_desc->bPacketBW = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
		return;

	if ((tcb_desc->data_rate & 0x80) == 0)
		return;
	if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
	    !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
		tcb_desc->bPacketBW = true;
}
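
/* Work out RTS/CTS protection for this frame.  Broadcast and multicast
 * frames get none.  Legacy (pre-N) modes use the RTS length threshold and
 * the ERP use-protection flag; HT modes additionally honour IOT workarounds
 * (forced CTS-to-self or RTS), the HT operation mode and A-MPDU
 * aggregation.  RTS/CTS frames are always sent at 24 Mbps (MGN_24M).
 */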
static void rtllib_query_protectionmode(struct rtllib_device *ieee,
					struct cb_desc *tcb_desc,
					struct sk_buff *skb)
{
	tcb_desc->bRTSSTBC = false;
	tcb_desc->bRTSUseShortGI = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->bRTSBW = false;

	if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
		return;

	if (is_broadcast_ether_addr(skb->data+16))
		return;

	if (ieee->mode < IEEE_N_24G) {
		if (skb->len > ieee->rts) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		} else if (ieee->current_network.buseprotection) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		}
		return;
	} else {
		struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

		while (true) {
			if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			} else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS |
				   HT_IOT_ACT_PURE_N_MODE)) {
				tcb_desc->bRTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				break;
			}
			if (ieee->current_network.buseprotection) {
				tcb_desc->bRTSEnable = true;
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				break;
			}
			if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
				u8 HTOpMode = pHTInfo->CurrentOpMode;

				if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 ||
							      HTOpMode == 3)) ||
				    (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
					tcb_desc->rts_rate = MGN_24M;
					tcb_desc->bRTSEnable = true;
					break;
				}
			}
			if (skb->len > ieee->rts) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			}
			if (tcb_desc->bAMPDUEnable) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = false;
				break;
			}
			goto NO_PROTECTION;
		}
	}
	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
	if (ieee->iw_mode == IW_MODE_MASTER)
		goto NO_PROTECTION;
	return;
NO_PROTECTION:
	tcb_desc->bRTSEnable = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->rts_rate = 0;
	tcb_desc->bRTSBW = false;
}
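
/* Copy the device-wide rate policy (disable rate fallback, driver-assigned
 * rate) into the cb_desc and select the RATR table index used for rate
 * adaptation in infrastructure and ad-hoc modes.
 */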
static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
				     struct cb_desc *tcb_desc)
{
	if (ieee->bTxDisableRateFallBack)
		tcb_desc->bTxDisableRateFallBack = true;

	if (ieee->bTxUseDriverAssingedRate)
		tcb_desc->bTxUseDriverAssingedRate = true;
	if (!tcb_desc->bTxDisableRateFallBack ||
	    !tcb_desc->bTxUseDriverAssingedRate) {
		if (ieee->iw_mode == IW_MODE_INFRA ||
		    ieee->iw_mode == IW_MODE_ADHOC)
			tcb_desc->RATRIndex = 0;
	}
}
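
/* Return and advance the per-TS transmit sequence number for a unicast QoS
 * data frame addressed to 'dst'; multicast and non-QoS frames get 0.
 */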
u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
			u8 *dst)
{
	u16 seqnum = 0;

	if (is_multicast_ether_addr(dst))
		return 0;
	if (IsQoSDataFrame(skb->data)) {
		struct tx_ts_record *pTS = NULL;

		if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
			   skb->priority, TX_DIR, true))
			return 0;
		seqnum = pTS->TxCurSeq;
		pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096;
	}
	return seqnum;
}
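
/* Downgrade the WMM user priority of an skb by one access category
 * (VO -> VI -> BE -> BK).  Returns 0 if a downgrade was possible, -1 once
 * the lowest category has been reached, so callers can loop until they find
 * an AC that does not require admission control.
 */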
static int wme_downgrade_ac(struct sk_buff *skb)
{
	switch (skb->priority) {
	case 6:
	case 7:
		skb->priority = 5; /* VO -> VI */
		return 0;
	case 4:
	case 5:
		skb->priority = 3; /* VI -> BE */
		return 0;
	case 0:
	case 3:
		skb->priority = 1; /* BE -> BK */
		return 0;
	default:
		return -1;
	}
}
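
/* Core of the TX path: validate and classify the outgoing Ethernet frame,
 * build the 802.11 (QoS) header, fragment the payload according to
 * ieee->fts, prepend the SNAP header to the first fragment, assign sequence
 * numbers, run software encryption if needed and fill the per-fragment
 * cb_desc (rate, aggregation, bandwidth, protection).  The resulting txb is
 * handed to the softmac queue or to the driver's hard_start_xmit handler.
 * Returns 0 when the skb has been consumed, 1 if the queue had to be
 * stopped.
 */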
int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
{
	struct rtllib_device *ieee = (struct rtllib_device *)
				     netdev_priv_rsl(dev);
	struct rtllib_txb *txb = NULL;
	struct rtllib_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;
	struct lib80211_crypt_data *crypt = NULL;
	struct cb_desc *tcb_desc;
	u8 bIsMulticast = false;
	u8 IsAmsdu = false;
	bool bdhcp = false;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it... */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
	     IEEE_SOFTMAC_TX_QUEUE)) ||
	    ((!ieee->softmac_data_hard_start_xmit &&
	     (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}

	if (likely(ieee->raw_tx == 0)) {
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}
		/* Save source and destination addresses */
		memcpy(dest, skb->data, ETH_ALEN);
		memcpy(src, skb->data+ETH_ALEN, ETH_ALEN);

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		if (ieee->iw_mode == IW_MODE_MONITOR) {
			txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
			if (unlikely(!txb)) {
				printk(KERN_WARNING "%s: Could not allocate "
				       "TXB\n", ieee->dev->name);
				goto failed;
			}

			txb->encrypted = 0;
			txb->payload_size = cpu_to_le16(skb->len);
			memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
			       skb->len);
			goto success;
		}

		if (skb->len > 282) {
			if (ETH_P_IP == ether_type) {
				const struct iphdr *ip = (struct iphdr *)
					((u8 *)skb->data+14);
				if (IPPROTO_UDP == ip->protocol) {
					struct udphdr *udp;

					udp = (struct udphdr *)((u8 *)ip +
					      (ip->ihl << 2));
					if (((((u8 *)udp)[1] == 68) &&
					     (((u8 *)udp)[3] == 67)) ||
					    ((((u8 *)udp)[1] == 67) &&
					     (((u8 *)udp)[3] == 68))) {
						bdhcp = true;
						ieee->LPSDelayCnt = 200;
					}
				}
			} else if (ETH_P_ARP == ether_type) {
				printk(KERN_INFO "=================>DHCP "
				       "Protocol start tx ARP pkt!!\n");
				bdhcp = true;
				ieee->LPSDelayCnt =
					 ieee->current_network.tim.tim_count;
			}
		}

		skb->priority = rtllib_classify(skb, IsAmsdu);
		crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			  ieee->host_encrypt && crypt && crypt->ops;
		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE -
				sizeof(u16));
			RTLLIB_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
					 eap_get_type(eap->type));
		}

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
		else
			fc = RTLLIB_FTYPE_DATA;

		if (qos_actived)
			fc |= RTLLIB_STYPE_QOS_DATA;
		else
			fc |= RTLLIB_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= RTLLIB_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			 * Addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid,
			       ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			if (IsAmsdu)
				memcpy(&header.addr3,
				       ieee->current_network.bssid, ETH_ALEN);
			else
				memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			 * Addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid,
			       ETH_ALEN);
		}

		bIsMulticast = is_multicast_ether_addr(header.addr1);

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented) */
		if (bIsMulticast) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		} else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		if (qos_actived) {
			hdr_len = RTLLIB_3ADDR_LEN + 2;

			/* in case we are a client verify acm is not set for this ac */
			while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
				printk(KERN_INFO "skb->priority = %x\n", skb->priority);
				if (wme_downgrade_ac(skb))
					break;
				printk(KERN_INFO "converted skb->priority = %x\n",
				       skb->priority);
			}
			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
		} else {
			hdr_len = RTLLIB_3ADDR_LEN;
		}
		/* Determine amount of payload per fragment.  Regardless of if
		 * this stack is providing the full 802.11 header, one will
		 * eventually be affixed to this fragment -- so we must account
		 * for it when determining the amount of payload space. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
			bytes_per_frag -= RTLLIB_FCS_LEN;

		/* Each fragment may need to have room for encrypting
		 * pre/postfix */
		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
				crypt->ops->extra_mpdu_postfix_len +
				crypt->ops->extra_msdu_prefix_len +
				crypt->ops->extra_msdu_postfix_len;

		/* Number of fragments is the total bytes_per_frag /
		 * payload_per_fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the
		 * reserve and full fragment bytes (bytes_per_frag doesn't
		 * include prefix, postfix, header, FCS, etc.) */
		txb = rtllib_alloc_txb(nr_frags, frag_size +
				       ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = cpu_to_le16(bytes);

		if (qos_actived)
			txb->queue_index = UP2AC(skb->priority);
		else
			txb->queue_index = WME_AC_BE;

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (struct cb_desc *)(skb_frag->cb +
				    MAX_DEV_ADDR_SIZE);
			if (qos_actived) {
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index = UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BE;
				tcb_desc->queue_index = WME_AC_BE;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt) {
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag,
					    crypt->ops->extra_mpdu_prefix_len +
					    crypt->ops->extra_msdu_prefix_len);
			} else {
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct rtllib_hdr_3addrqos *)
				    skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the
			 * MOREFRAGS bit to the frame control */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | RTLLIB_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;
			} else {
				/* The last fragment has the remaining length */
				bytes = bytes_last_frag;
			}
			if ((qos_actived) && (!bIsMulticast)) {
				frag_hdr->seq_ctl =
					 cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
									 header.addr1));
				frag_hdr->seq_ctl =
					 cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl)<<4 | i);
			} else {
				frag_hdr->seq_ctl =
					 cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}
			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				rtllib_put_snap(
					skb_put(skb_frag, SNAP_SIZE +
					sizeof(u16)), ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload */
			if (encrypt)
				rtllib_encrypt_fragment(ieee, skb_frag,
							hdr_len);
			if (ieee->config &
			    (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		if ((qos_actived) && (!bIsMulticast)) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	} else {
		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
		if (!txb) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = cpu_to_le16(skb->len);
		memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
		       skb->len);
	}
 success:
	if (txb) {
		struct cb_desc *tcb_desc = (struct cb_desc *)
				(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		tcb_desc->priority = skb->priority;

		if (ether_type == ETH_P_PAE) {
			if (ieee->pHTInfo->IOTAction &
			    HT_IOT_ACT_WA_IOT_Broadcom) {
				tcb_desc->data_rate =
					 MgntQuery_TxRateExcludeCCKRates(ieee);
				tcb_desc->bTxDisableRateFallBack = false;
			} else {
				tcb_desc->data_rate = ieee->basic_rate;
				tcb_desc->bTxDisableRateFallBack = 1;
			}

			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;
		} else {
			if (is_multicast_ether_addr(header.addr1))
				tcb_desc->bMulticast = 1;
			if (is_broadcast_ether_addr(header.addr1))
				tcb_desc->bBroadcast = 1;
			rtllib_txrate_selectmode(ieee, tcb_desc);
			if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
				tcb_desc->data_rate = ieee->basic_rate;
			else
				tcb_desc->data_rate = CURRENT_RATE(ieee->mode,
					 ieee->rate, ieee->HTCurrentOperaRate);

			if (bdhcp == 1) {
				if (ieee->pHTInfo->IOTAction &
				    HT_IOT_ACT_WA_IOT_Broadcom) {
					tcb_desc->data_rate =
					   MgntQuery_TxRateExcludeCCKRates(ieee);
					tcb_desc->bTxDisableRateFallBack = false;
				} else {
					tcb_desc->data_rate = MGN_1M;
					tcb_desc->bTxDisableRateFallBack = 1;
				}

				tcb_desc->RATRIndex = 7;
				tcb_desc->bTxUseDriverAssingedRate = 1;
				tcb_desc->bdhcp = 1;
			}

			rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
			rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
						tcb_desc);
			rtllib_query_HTCapShortGI(ieee, tcb_desc);
			rtllib_query_BandwidthMode(ieee, tcb_desc);
			rtllib_query_protectionmode(ieee, tcb_desc,
						    txb->fragments[0]);
		}
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
			rtllib_softmac_xmit(txb, ieee);
		} else {
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += le16_to_cpu(txb->payload_size);
				return 0;
			}
			rtllib_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;
}
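
/* net_device transmit entry point: clear the skb control buffer and pass the
 * frame on to rtllib_xmit_inter().
 */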
int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
	memset(skb->cb, 0, sizeof(skb->cb));
	return rtllib_xmit_inter(skb, dev);
}
EXPORT_SYMBOL(rtllib_xmit);