Diffstat (limited to 'net/ieee80211/ieee80211_tx.c')

 net/ieee80211/ieee80211_tx.c (-rw-r--r--) | 321 ++++++++++++++++++++---------
 1 file changed, 237 insertions(+), 84 deletions(-)
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index eed07bbbe6b6..95ccbadbf55b 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -1,6 +1,6 @@
 /******************************************************************************
 
-  Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms of version 2 of the GNU General Public License as
@@ -128,7 +128,7 @@ payload of each frame is reduced to 492 bytes.
 static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
 static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
 
-static inline int ieee80211_put_snap(u8 * data, u16 h_proto)
+static inline int ieee80211_copy_snap(u8 * data, u16 h_proto)
 {
         struct ieee80211_snap_hdr *snap;
         u8 *oui;
@@ -157,31 +157,14 @@ static inline int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
         struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
         int res;
 
-#ifdef CONFIG_IEEE80211_CRYPT_TKIP
-        struct ieee80211_hdr *header;
-
-        if (ieee->tkip_countermeasures &&
-            crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
-                header = (struct ieee80211_hdr *)frag->data;
-                if (net_ratelimit()) {
-                        printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
-                               "TX packet to " MAC_FMT "\n",
-                               ieee->dev->name, MAC_ARG(header->addr1));
-                }
+        if (crypt == NULL)
                 return -1;
-        }
-#endif
+
         /* To encrypt, frame format is:
          * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
-
-        // PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption.
-        /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
-         * call both MSDU and MPDU encryption functions from here. */
         atomic_inc(&crypt->refcnt);
         res = 0;
-        if (crypt->ops->encrypt_msdu)
-                res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
-        if (res == 0 && crypt->ops->encrypt_mpdu)
+        if (crypt->ops && crypt->ops->encrypt_mpdu)
                 res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
 
         atomic_dec(&crypt->refcnt);
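
Note: after this hunk, ieee80211_encrypt_fragment() is strictly per-MPDU; whole-MSDU work (e.g. TKIP's Michael MIC) is done once in ieee80211_xmit() before fragmentation. The atomic refcount bracket is the usual pattern for pinning the crypto ops while a key change may be in flight. A minimal standalone sketch of that pattern (the function name is illustrative; the ops fields are the real ones):

        /* Sketch: pin crypt across the call so crypt->priv cannot be freed
         * by a concurrent key change while we are encrypting this MPDU. */
        static int encrypt_one_mpdu(struct ieee80211_crypt_data *crypt,
                                    struct sk_buff *frag, int hdr_len)
        {
                int res = 0;

                if (crypt == NULL)
                        return -1;
                atomic_inc(&crypt->refcnt);
                if (crypt->ops && crypt->ops->encrypt_mpdu)
                        res = crypt->ops->encrypt_mpdu(frag, hdr_len,
                                                       crypt->priv);
                atomic_dec(&crypt->refcnt);
                return res;
        }
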
@@ -207,7 +190,7 @@ void ieee80211_txb_free(struct ieee80211_txb *txb)
 }
 
 static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
-                                                 gfp_t gfp_mask)
+                                                 int headroom, gfp_t gfp_mask)
 {
         struct ieee80211_txb *txb;
         int i;
@@ -221,11 +204,13 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
         txb->frag_size = txb_size;
 
         for (i = 0; i < nr_frags; i++) {
-                txb->fragments[i] = dev_alloc_skb(txb_size);
+                txb->fragments[i] = __dev_alloc_skb(txb_size + headroom,
+                                                    gfp_mask);
                 if (unlikely(!txb->fragments[i])) {
                         i--;
                         break;
                 }
+                skb_reserve(txb->fragments[i], headroom);
         }
         if (unlikely(i != nr_frags)) {
                 while (i >= 0)
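
Note: the headroom change above is the standard sk_buff pattern: over-allocate by the driver's requested headroom, then skb_reserve() so the driver can later skb_push() its own hardware header without a reallocation or copy. A minimal sketch of the pattern in isolation (the function name is illustrative; __dev_alloc_skb() and skb_reserve() are the real APIs):

        /* Sketch: allocate one fragment buffer with driver headroom
         * reserved in front of it. */
        static struct sk_buff *alloc_frag(int frag_size, int headroom,
                                          gfp_t gfp)
        {
                struct sk_buff *skb = __dev_alloc_skb(frag_size + headroom,
                                                      gfp);

                if (!skb)
                        return NULL;
                /* Advance skb->data; skb_push() can now reclaim up to
                 * 'headroom' bytes in front of the 802.11 header for free. */
                skb_reserve(skb, headroom);
                return skb;
        }
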
@@ -236,25 +221,31 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
         return txb;
 }
 
-/* SKBs are added to the ieee->tx_queue. */
+/* Incoming skb is converted to a txb which consists of
+ * a block of 802.11 fragment packets (stored as skbs) */
 int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         struct ieee80211_device *ieee = netdev_priv(dev);
         struct ieee80211_txb *txb = NULL;
-        struct ieee80211_hdr *frag_hdr;
-        int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
+        struct ieee80211_hdr_3addr *frag_hdr;
+        int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
+            rts_required;
         unsigned long flags;
         struct net_device_stats *stats = &ieee->stats;
-        int ether_type, encrypt;
+        int ether_type, encrypt, host_encrypt, host_encrypt_msdu, host_build_iv;
         int bytes, fc, hdr_len;
         struct sk_buff *skb_frag;
-        struct ieee80211_hdr header = { /* Ensure zero initialized */
+        struct ieee80211_hdr_3addr header = { /* Ensure zero initialized */
                 .duration_id = 0,
                 .seq_ctl = 0
         };
         u8 dest[ETH_ALEN], src[ETH_ALEN];
-
         struct ieee80211_crypt_data *crypt;
+        int priority = skb->priority;
+        int snapped = 0;
+
+        if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority))
+                return NETDEV_TX_BUSY;
 
         spin_lock_irqsave(&ieee->lock, flags);
 
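
Note: the new is_queue_full hook lets the stack return NETDEV_TX_BUSY before any TXB is built, instead of discovering congestion after the fact in hard_start_xmit. A hypothetical driver-side callback (the txq bookkeeping is invented; only the hook's signature and meaning come from this patch):

        /* Hypothetical: nonzero means "no room for another TXB at this
         * priority", making ieee80211_xmit() bail out with NETDEV_TX_BUSY. */
        static int mydrv_is_queue_full(struct net_device *dev, int pri)
        {
                struct mydrv_priv *priv = netdev_priv(dev);

                return priv->txq[pri].used >= priv->txq[pri].depth;
        }
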
@@ -276,7 +267,11 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
         crypt = ieee->crypt[ieee->tx_keyidx];
 
         encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
-            ieee->host_encrypt && crypt && crypt->ops;
+            ieee->sec.encrypt;
+
+        host_encrypt = ieee->host_encrypt && encrypt && crypt;
+        host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;
+        host_build_iv = ieee->host_build_iv && encrypt && crypt;
 
         if (!encrypt && ieee->ieee802_1x &&
             ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
@@ -285,8 +280,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
         }
 
         /* Save source and destination addresses */
-        memcpy(&dest, skb->data, ETH_ALEN);
-        memcpy(&src, skb->data + ETH_ALEN, ETH_ALEN);
+        memcpy(dest, skb->data, ETH_ALEN);
+        memcpy(src, skb->data + ETH_ALEN, ETH_ALEN);
 
         /* Advance the SKB to the start of the payload */
         skb_pull(skb, sizeof(struct ethhdr));
@@ -294,7 +289,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
         /* Determine total amount of storage required for TXB packets */
         bytes = skb->len + SNAP_SIZE + sizeof(u16);
 
-        if (encrypt)
+        if (host_encrypt)
                 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
                     IEEE80211_FCTL_PROTECTED;
         else
@@ -302,70 +297,144 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 
         if (ieee->iw_mode == IW_MODE_INFRA) {
                 fc |= IEEE80211_FCTL_TODS;
-                /* To DS: Addr1 = BSSID, Addr2 = SA,
-                   Addr3 = DA */
-                memcpy(&header.addr1, ieee->bssid, ETH_ALEN);
-                memcpy(&header.addr2, &src, ETH_ALEN);
-                memcpy(&header.addr3, &dest, ETH_ALEN);
+                /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
+                memcpy(header.addr1, ieee->bssid, ETH_ALEN);
+                memcpy(header.addr2, src, ETH_ALEN);
+                memcpy(header.addr3, dest, ETH_ALEN);
         } else if (ieee->iw_mode == IW_MODE_ADHOC) {
-                /* not From/To DS: Addr1 = DA, Addr2 = SA,
-                   Addr3 = BSSID */
-                memcpy(&header.addr1, dest, ETH_ALEN);
-                memcpy(&header.addr2, src, ETH_ALEN);
-                memcpy(&header.addr3, ieee->bssid, ETH_ALEN);
+                /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
+                memcpy(header.addr1, dest, ETH_ALEN);
+                memcpy(header.addr2, src, ETH_ALEN);
+                memcpy(header.addr3, ieee->bssid, ETH_ALEN);
         }
         header.frame_ctl = cpu_to_le16(fc);
         hdr_len = IEEE80211_3ADDR_LEN;
 
-        /* Determine fragmentation size based on destination (multicast
-         * and broadcast are not fragmented) */
-        if (is_multicast_ether_addr(dest) || is_broadcast_ether_addr(dest))
-                frag_size = MAX_FRAG_THRESHOLD;
-        else
-                frag_size = ieee->fts;
+        /* Encrypt msdu first on the whole data packet. */
+        if ((host_encrypt || host_encrypt_msdu) &&
+            crypt && crypt->ops && crypt->ops->encrypt_msdu) {
+                int res = 0;
+                int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len +
+                    crypt->ops->extra_msdu_postfix_len;
+                struct sk_buff *skb_new = dev_alloc_skb(len);
+
+                if (unlikely(!skb_new))
+                        goto failed;
+
+                skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
+                memcpy(skb_put(skb_new, hdr_len), &header, hdr_len);
+                snapped = 1;
+                ieee80211_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
+                                    ether_type);
+                memcpy(skb_put(skb_new, skb->len), skb->data, skb->len);
+                res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
+                if (res < 0) {
+                        IEEE80211_ERROR("msdu encryption failed\n");
+                        dev_kfree_skb_any(skb_new);
+                        goto failed;
+                }
+                dev_kfree_skb_any(skb);
+                skb = skb_new;
+                bytes += crypt->ops->extra_msdu_prefix_len +
+                    crypt->ops->extra_msdu_postfix_len;
+                skb_pull(skb, hdr_len);
+        }
 
-        /* Determine amount of payload per fragment.  Regardless of if
-         * this stack is providing the full 802.11 header, one will
-         * eventually be affixed to this fragment -- so we must account for
-         * it when determining the amount of payload space. */
-        bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN;
-        if (ieee->config &
-            (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
-                bytes_per_frag -= IEEE80211_FCS_LEN;
-
-        /* Each fragment may need to have room for encryptiong pre/postfix */
-        if (encrypt)
-                bytes_per_frag -= crypt->ops->extra_prefix_len +
-                    crypt->ops->extra_postfix_len;
-
-        /* Number of fragments is the total bytes_per_frag /
-         * payload_per_fragment */
-        nr_frags = bytes / bytes_per_frag;
-        bytes_last_frag = bytes % bytes_per_frag;
-        if (bytes_last_frag)
+        if (host_encrypt || ieee->host_open_frag) {
+                /* Determine fragmentation size based on destination (multicast
+                 * and broadcast are not fragmented) */
+                if (is_multicast_ether_addr(dest) ||
+                    is_broadcast_ether_addr(dest))
+                        frag_size = MAX_FRAG_THRESHOLD;
+                else
+                        frag_size = ieee->fts;
+
+                /* Determine amount of payload per fragment.  Regardless of if
+                 * this stack is providing the full 802.11 header, one will
+                 * eventually be affixed to this fragment -- so we must account
+                 * for it when determining the amount of payload space. */
+                bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN;
+                if (ieee->config &
+                    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
+                        bytes_per_frag -= IEEE80211_FCS_LEN;
+
+                /* Each fragment may need to have room for encryption
+                 * pre/postfix */
+                if (host_encrypt)
+                        bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
+                            crypt->ops->extra_mpdu_postfix_len;
+
+                /* Number of fragments is the total
+                 * bytes_per_frag / payload_per_fragment */
+                nr_frags = bytes / bytes_per_frag;
+                bytes_last_frag = bytes % bytes_per_frag;
+                if (bytes_last_frag)
+                        nr_frags++;
+                else
+                        bytes_last_frag = bytes_per_frag;
+        } else {
+                nr_frags = 1;
+                bytes_per_frag = bytes_last_frag = bytes;
+                frag_size = bytes + IEEE80211_3ADDR_LEN;
+        }
+
+        rts_required = (frag_size > ieee->rts
+                        && ieee->config & CFG_IEEE80211_RTS);
+        if (rts_required)
                 nr_frags++;
-        else
-                bytes_last_frag = bytes_per_frag;
 
         /* When we allocate the TXB we allocate enough space for the reserve
          * and full fragment bytes (bytes_per_frag doesn't include prefix,
          * postfix, header, FCS, etc.) */
-        txb = ieee80211_alloc_txb(nr_frags, frag_size, GFP_ATOMIC);
+        txb = ieee80211_alloc_txb(nr_frags, frag_size,
+                                  ieee->tx_headroom, GFP_ATOMIC);
         if (unlikely(!txb)) {
                 printk(KERN_WARNING "%s: Could not allocate TXB\n",
                        ieee->dev->name);
                 goto failed;
         }
         txb->encrypted = encrypt;
-        txb->payload_size = bytes;
+        if (host_encrypt)
+                txb->payload_size = frag_size * (nr_frags - 1) +
+                    bytes_last_frag;
+        else
+                txb->payload_size = bytes;
+
+        if (rts_required) {
+                skb_frag = txb->fragments[0];
+                frag_hdr =
+                    (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len);
+
+                /*
+                 * Set header frame_ctl to the RTS.
+                 */
+                header.frame_ctl =
+                    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
+                memcpy(frag_hdr, &header, hdr_len);
 
-        for (i = 0; i < nr_frags; i++) {
+                /*
+                 * Restore header frame_ctl to the original data setting.
+                 */
+                header.frame_ctl = cpu_to_le16(fc);
+
+                if (ieee->config &
+                    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
+                        skb_put(skb_frag, 4);
+
+                txb->rts_included = 1;
+                i = 1;
+        } else
+                i = 0;
+
+        for (; i < nr_frags; i++) {
                 skb_frag = txb->fragments[i];
 
-                if (encrypt)
-                        skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
+                if (host_encrypt || host_build_iv)
+                        skb_reserve(skb_frag,
+                                    crypt->ops->extra_mpdu_prefix_len);
 
-                frag_hdr = (struct ieee80211_hdr *)skb_put(skb_frag, hdr_len);
+                frag_hdr =
+                    (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len);
                 memcpy(frag_hdr, &header, hdr_len);
 
                 /* If this is not the last fragment, then add the MOREFRAGS
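
Note: the per-fragment arithmetic above is easy to check in isolation: each MPDU loses the 24-byte 3-address header, optionally 4 bytes of FCS, and the per-MPDU crypto prefix/postfix, and only the remainder carries payload. A standalone sketch of the same accounting (userspace C; the overhead values are illustrative, not taken from the patch):

        #include <stdio.h>

        int main(void)
        {
                int bytes = 1508;      /* payload + SNAP, as in the patch */
                int frag_size = 1024;  /* ieee->fts */
                int hdr = 24, fcs = 4; /* 3-addr header, IEEE80211_FCS_LEN */
                int pre = 8, post = 4; /* e.g. a TKIP-like prefix/postfix */

                int bytes_per_frag = frag_size - hdr - fcs - pre - post;
                int nr_frags = bytes / bytes_per_frag;
                int bytes_last_frag = bytes % bytes_per_frag;

                if (bytes_last_frag)
                        nr_frags++;
                else
                        bytes_last_frag = bytes_per_frag;

                printf("%d fragments, %d payload bytes in the last\n",
                       nr_frags, bytes_last_frag);
                return 0;
        }
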
@@ -379,11 +448,10 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
                         bytes = bytes_last_frag;
                 }
 
-                /* Put a SNAP header on the first fragment */
-                if (i == 0) {
-                        ieee80211_put_snap(skb_put
-                                           (skb_frag, SNAP_SIZE + sizeof(u16)),
-                                           ether_type);
+                if (i == 0 && !snapped) {
+                        ieee80211_copy_snap(skb_put
+                                            (skb_frag, SNAP_SIZE + sizeof(u16)),
+                                            ether_type);
                         bytes -= SNAP_SIZE + sizeof(u16);
                 }
 
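
Note: the !snapped test keeps the SNAP header from being written twice, since the MSDU-encryption path earlier already prepended it before encrypting. For reference, the eight bytes emitted by ieee80211_copy_snap() are a plain 802.2 LLC/SNAP encapsulation; a simplified sketch (the real function selects the 802.1H OUI instead for AppleTalk/IPX ethertypes):

        /* Simplified sketch of the LLC/SNAP bytes placed before the
         * payload of the first fragment. */
        static int copy_snap_sketch(u8 *data, u16 h_proto)
        {
                data[0] = 0xaa;           /* DSAP */
                data[1] = 0xaa;           /* SSAP */
                data[2] = 0x03;           /* unnumbered information */
                data[3] = data[4] = data[5] = 0x00; /* RFC 1042 OUI */
                data[6] = h_proto >> 8;   /* ethertype, network byte order */
                data[7] = h_proto & 0xff;
                return 8;                 /* SNAP_SIZE + sizeof(u16) */
        }
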
@@ -394,8 +462,19 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 
                 /* Encryption routine will move the header forward in order
                  * to insert the IV between the header and the payload */
-                if (encrypt)
+                if (host_encrypt)
                         ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
+                else if (host_build_iv) {
+                        struct ieee80211_crypt_data *crypt;
+
+                        crypt = ieee->crypt[ieee->tx_keyidx];
+                        atomic_inc(&crypt->refcnt);
+                        if (crypt->ops->build_iv)
+                                crypt->ops->build_iv(skb_frag, hdr_len,
+                                                     crypt->priv);
+                        atomic_dec(&crypt->refcnt);
+                }
+
                 if (ieee->config &
                     (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
                         skb_put(skb_frag, 4);
@@ -407,11 +486,20 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
         dev_kfree_skb_any(skb);
 
         if (txb) {
-                if ((*ieee->hard_start_xmit) (txb, dev) == 0) {
+                int ret = (*ieee->hard_start_xmit) (txb, dev, priority);
+                if (ret == 0) {
                         stats->tx_packets++;
                         stats->tx_bytes += txb->payload_size;
                         return 0;
                 }
+
+                if (ret == NETDEV_TX_BUSY) {
+                        printk(KERN_ERR "%s: NETDEV_TX_BUSY returned; "
+                               "driver should report queue full via "
+                               "ieee_device->is_queue_full.\n",
+                               ieee->dev->name);
+                }
+
                 ieee80211_txb_free(txb);
         }
 
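
Note: with this hunk, hard_start_xmit gains the priority argument and is no longer supposed to return NETDEV_TX_BUSY; back-pressure belongs in is_queue_full, which is checked before the TXB is built. A hypothetical driver handler under that contract (mydrv_queue_frag and the priv layout are invented; txb->nr_frags and txb->fragments are the real fields):

        /* Hypothetical: consume the TXB; never return NETDEV_TX_BUSY. */
        static int mydrv_hard_start_xmit(struct ieee80211_txb *txb,
                                         struct net_device *dev, int pri)
        {
                struct mydrv_priv *priv = netdev_priv(dev);
                int i;

                for (i = 0; i < txb->nr_frags; i++)
                        mydrv_queue_frag(priv, pri, txb->fragments[i]);
                return 0;   /* accepted; caller bumps tx_packets/tx_bytes */
        }
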
@@ -422,7 +510,72 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
         netif_stop_queue(dev);
         stats->tx_errors++;
         return 1;
+}
+
+/* Incoming 802.11 structure is converted to a TXB
+ * (a block of 802.11 fragment packets stored as skbs) */
+int ieee80211_tx_frame(struct ieee80211_device *ieee,
+                       struct ieee80211_hdr *frame, int len)
+{
+        struct ieee80211_txb *txb = NULL;
+        unsigned long flags;
+        struct net_device_stats *stats = &ieee->stats;
+        struct sk_buff *skb_frag;
+        int priority = -1;
+
+        spin_lock_irqsave(&ieee->lock, flags);
 
+        /* If there is no driver handler to take the TXB, don't bother
+         * creating it... */
+        if (!ieee->hard_start_xmit) {
+                printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
+                goto success;
+        }
+
+        if (unlikely(len < 24)) {
+                printk(KERN_WARNING "%s: skb too small (%d).\n",
+                       ieee->dev->name, len);
+                goto success;
+        }
+
+        /* When we allocate the TXB we allocate enough space for the reserve
+         * and full fragment bytes (bytes_per_frag doesn't include prefix,
+         * postfix, header, FCS, etc.) */
+        txb = ieee80211_alloc_txb(1, len, ieee->tx_headroom, GFP_ATOMIC);
+        if (unlikely(!txb)) {
+                printk(KERN_WARNING "%s: Could not allocate TXB\n",
+                       ieee->dev->name);
+                goto failed;
+        }
+        txb->encrypted = 0;
+        txb->payload_size = len;
+
+        skb_frag = txb->fragments[0];
+
+        memcpy(skb_put(skb_frag, len), frame, len);
+
+        if (ieee->config &
+            (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
+                skb_put(skb_frag, 4);
+
+      success:
+        spin_unlock_irqrestore(&ieee->lock, flags);
+
+        if (txb) {
+                if ((*ieee->hard_start_xmit) (txb, ieee->dev, priority) == 0) {
+                        stats->tx_packets++;
+                        stats->tx_bytes += txb->payload_size;
+                        return 0;
+                }
+                ieee80211_txb_free(txb);
+        }
+        return 0;
+
+      failed:
+        spin_unlock_irqrestore(&ieee->lock, flags);
+        stats->tx_errors++;
+        return 1;
 }
 
+EXPORT_SYMBOL(ieee80211_tx_frame);
 EXPORT_SYMBOL(ieee80211_txb_free);
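
Note: ieee80211_tx_frame() gives drivers a path for pre-built 802.11 frames such as management frames. A hedged usage sketch (the helper and its call site are invented; a real deauth would also append a 2-byte reason code after the header, and len must be at least 24 or the frame is dropped):

        /* Sketch: push a bare deauthentication header via the new export. */
        static void send_deauth_sketch(struct ieee80211_device *ieee,
                                       const u8 *sta)
        {
                struct ieee80211_hdr_3addr hdr;

                memset(&hdr, 0, sizeof(hdr));
                hdr.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                            IEEE80211_STYPE_DEAUTH);
                memcpy(hdr.addr1, sta, ETH_ALEN);
                memcpy(hdr.addr2, ieee->dev->dev_addr, ETH_ALEN);
                memcpy(hdr.addr3, ieee->bssid, ETH_ALEN);

                ieee80211_tx_frame(ieee, (struct ieee80211_hdr *)&hdr,
                                   sizeof(hdr));
        }
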