Diffstat (limited to 'net/ieee80211/ieee80211_tx.c')
-rw-r--r--	net/ieee80211/ieee80211_tx.c	88
1 file changed, 70 insertions(+), 18 deletions(-)
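
Summary: this patch adds 802.11e/QoS awareness to the ieee80211 transmit path. A new ieee80211_classify() maps the class-selector TOS values 0x20..0xe0 to 802.11e TIDs 2, 1, 3, 4, 5, 6 and 7 (anything else gets TID 0, best effort). ieee80211_xmit() switches to struct ieee80211_hdr_3addrqos and adds the two qos_ctl bytes when the driver's is_qos_active() hook reports QoS as active, and it moves the Ethernet-header skb_pull() below the classification, which reads the Ethernet/IP headers from skb->data. ieee80211_tx_frame() grows hdr_len, total_len and encrypt_mpdu parameters so that software shared-key authentication frames can be encrypted in software. The standalone sketch below reproduces only the TOS-to-TID mapping for illustration; classify_tos() and the small test harness are hypothetical names that do not appear in the patch.

#include <stdio.h>

/* Mirror of the switch in ieee80211_classify(): a handful of TOS values
 * (tos & 0xfc) select an 802.11e TID; anything unrecognised falls back
 * to TID 0 (best effort). */
static int classify_tos(unsigned char tos)
{
	switch (tos & 0xfc) {
	case 0x20: return 2;
	case 0x40: return 1;
	case 0x60: return 3;
	case 0x80: return 4;
	case 0xa0: return 5;
	case 0xc0: return 6;
	case 0xe0: return 7;
	default:   return 0;
	}
}

int main(void)
{
	unsigned int prec;

	/* Show the TID chosen for each of the eight class-selector values. */
	for (prec = 0; prec < 8; prec++)
		printf("TOS 0x%02x -> TID %d\n",
		       prec << 5, classify_tos(prec << 5));
	return 0;
}

Compiled with any C compiler, this prints the eight mappings (for example, TOS 0x40 maps to TID 1).
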
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index 8b4332f53394..6a5de1b84459 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -220,13 +220,43 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
 	return txb;
 }
 
+static int ieee80211_classify(struct sk_buff *skb)
+{
+	struct ethhdr *eth;
+	struct iphdr *ip;
+
+	eth = (struct ethhdr *)skb->data;
+	if (eth->h_proto != __constant_htons(ETH_P_IP))
+		return 0;
+
+	ip = skb->nh.iph;
+	switch (ip->tos & 0xfc) {
+	case 0x20:
+		return 2;
+	case 0x40:
+		return 1;
+	case 0x60:
+		return 3;
+	case 0x80:
+		return 4;
+	case 0xa0:
+		return 5;
+	case 0xc0:
+		return 6;
+	case 0xe0:
+		return 7;
+	default:
+		return 0;
+	}
+}
+
 /* Incoming skb is converted to a txb which consists of
  * a block of 802.11 fragment packets (stored as skbs) */
 int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ieee80211_device *ieee = netdev_priv(dev);
 	struct ieee80211_txb *txb = NULL;
-	struct ieee80211_hdr_3addr *frag_hdr;
+	struct ieee80211_hdr_3addrqos *frag_hdr;
 	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
 	    rts_required;
 	unsigned long flags;
@@ -234,9 +264,10 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 	int ether_type, encrypt, host_encrypt, host_encrypt_msdu, host_build_iv;
 	int bytes, fc, hdr_len;
 	struct sk_buff *skb_frag;
-	struct ieee80211_hdr_3addr header = { /* Ensure zero initialized */
+	struct ieee80211_hdr_3addrqos header = {/* Ensure zero initialized */
 		.duration_id = 0,
-		.seq_ctl = 0
+		.seq_ctl = 0,
+		.qos_ctl = 0
 	};
 	u8 dest[ETH_ALEN], src[ETH_ALEN];
 	struct ieee80211_crypt_data *crypt;
@@ -282,12 +313,6 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 	memcpy(dest, skb->data, ETH_ALEN);
 	memcpy(src, skb->data + ETH_ALEN, ETH_ALEN);
 
-	/* Advance the SKB to the start of the payload */
-	skb_pull(skb, sizeof(struct ethhdr));
-
-	/* Determine total amount of storage required for TXB packets */
-	bytes = skb->len + SNAP_SIZE + sizeof(u16);
-
 	if (host_encrypt || host_build_iv)
 		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
 		    IEEE80211_FCTL_PROTECTED;
@@ -306,9 +331,23 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 		memcpy(header.addr2, src, ETH_ALEN);
 		memcpy(header.addr3, ieee->bssid, ETH_ALEN);
 	}
-	header.frame_ctl = cpu_to_le16(fc);
 	hdr_len = IEEE80211_3ADDR_LEN;
 
+	if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
+		fc |= IEEE80211_STYPE_QOS_DATA;
+		hdr_len += 2;
+
+		skb->priority = ieee80211_classify(skb);
+		header.qos_ctl |= skb->priority & IEEE80211_QCTL_TID;
+	}
+	header.frame_ctl = cpu_to_le16(fc);
+
+	/* Advance the SKB to the start of the payload */
+	skb_pull(skb, sizeof(struct ethhdr));
+
+	/* Determine total amount of storage required for TXB packets */
+	bytes = skb->len + SNAP_SIZE + sizeof(u16);
+
 	/* Encrypt msdu first on the whole data packet. */
 	if ((host_encrypt || host_encrypt_msdu) &&
 	    crypt && crypt->ops && crypt->ops->encrypt_msdu) {
@@ -402,7 +441,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (rts_required) {
 		skb_frag = txb->fragments[0];
 		frag_hdr =
-		    (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len);
+		    (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
 
 		/*
 		 * Set header frame_ctl to the RTS.
@@ -433,7 +472,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 			       crypt->ops->extra_mpdu_prefix_len);
 
 		frag_hdr =
-		    (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len);
+		    (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
 		memcpy(frag_hdr, &header, hdr_len);
 
 		/* If this is not the last fragment, then add the MOREFRAGS
@@ -516,7 +555,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 /* Incoming 802.11 strucure is converted to a TXB
  * a block of 802.11 fragment packets (stored as skbs) */
 int ieee80211_tx_frame(struct ieee80211_device *ieee,
-		       struct ieee80211_hdr *frame, int len)
+		       struct ieee80211_hdr *frame, int hdr_len, int total_len,
+		       int encrypt_mpdu)
 {
 	struct ieee80211_txb *txb = NULL;
 	unsigned long flags;
@@ -526,6 +566,9 @@ int ieee80211_tx_frame(struct ieee80211_device *ieee,
 
 	spin_lock_irqsave(&ieee->lock, flags);
 
+	if (encrypt_mpdu && !ieee->sec.encrypt)
+		encrypt_mpdu = 0;
+
 	/* If there is no driver handler to take the TXB, dont' bother
 	 * creating it... */
 	if (!ieee->hard_start_xmit) {
@@ -533,32 +576,41 @@ int ieee80211_tx_frame(struct ieee80211_device *ieee,
 		goto success;
 	}
 
-	if (unlikely(len < 24)) {
+	if (unlikely(total_len < 24)) {
 		printk(KERN_WARNING "%s: skb too small (%d).\n",
-		       ieee->dev->name, len);
+		       ieee->dev->name, total_len);
 		goto success;
 	}
 
+	if (encrypt_mpdu)
+		frame->frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+
 	/* When we allocate the TXB we allocate enough space for the reserve
 	 * and full fragment bytes (bytes_per_frag doesn't include prefix,
 	 * postfix, header, FCS, etc.) */
-	txb = ieee80211_alloc_txb(1, len, ieee->tx_headroom, GFP_ATOMIC);
+	txb = ieee80211_alloc_txb(1, total_len, ieee->tx_headroom, GFP_ATOMIC);
 	if (unlikely(!txb)) {
 		printk(KERN_WARNING "%s: Could not allocate TXB\n",
 		       ieee->dev->name);
 		goto failed;
 	}
 	txb->encrypted = 0;
-	txb->payload_size = len;
+	txb->payload_size = total_len;
 
 	skb_frag = txb->fragments[0];
 
-	memcpy(skb_put(skb_frag, len), frame, len);
+	memcpy(skb_put(skb_frag, total_len), frame, total_len);
 
 	if (ieee->config &
 	    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
 		skb_put(skb_frag, 4);
 
+	/* To avoid overcomplicating things, we do the corner-case frame
+	 * encryption in software. The only real situation where encryption is
+	 * needed here is during software-based shared key authentication. */
+	if (encrypt_mpdu)
+		ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
+
       success:
 	spin_unlock_irqrestore(&ieee->lock, flags);
 