Diffstat (limited to 'net/ieee80211/ieee80211_tx.c')
-rw-r--r--  net/ieee80211/ieee80211_tx.c  148
1 file changed, 83 insertions, 65 deletions
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index f505aa127e21..23a1f88de7cb 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -128,7 +128,7 @@ payload of each frame is reduced to 492 bytes.
 static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
 static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
 
-static inline int ieee80211_put_snap(u8 * data, u16 h_proto)
+static inline int ieee80211_copy_snap(u8 * data, u16 h_proto)
 {
 	struct ieee80211_snap_hdr *snap;
 	u8 *oui;
@@ -159,15 +159,9 @@ static inline int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
 
 	/* To encrypt, frame format is:
 	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
-
-	// PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption.
-	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
-	 * call both MSDU and MPDU encryption functions from here. */
 	atomic_inc(&crypt->refcnt);
 	res = 0;
-	if (crypt->ops->encrypt_msdu)
-		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
-	if (res == 0 && crypt->ops->encrypt_mpdu)
+	if (crypt->ops->encrypt_mpdu)
 		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
 
 	atomic_dec(&crypt->refcnt);
@@ -222,7 +216,7 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
 	return txb;
 }
 
-/* Incoming skb is converted to a txb which consist of
+/* Incoming skb is converted to a txb which consists of
  * a block of 802.11 fragment packets (stored as skbs) */
 int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -233,7 +227,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 	    rts_required;
 	unsigned long flags;
 	struct net_device_stats *stats = &ieee->stats;
-	int ether_type, encrypt, host_encrypt;
+	int ether_type, encrypt, host_encrypt, host_encrypt_msdu;
 	int bytes, fc, hdr_len;
 	struct sk_buff *skb_frag;
 	struct ieee80211_hdr_3addr header = {	/* Ensure zero initialized */
@@ -241,8 +235,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 		.seq_ctl = 0
 	};
 	u8 dest[ETH_ALEN], src[ETH_ALEN];
-
 	struct ieee80211_crypt_data *crypt;
+	int snapped = 0;
 
 	spin_lock_irqsave(&ieee->lock, flags);
 
@@ -266,6 +260,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 	encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
 	    ieee->sec.encrypt;
 	host_encrypt = ieee->host_encrypt && encrypt;
+	host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt;
 
 	if (!encrypt && ieee->ieee802_1x &&
 	    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
@@ -291,14 +286,12 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (ieee->iw_mode == IW_MODE_INFRA) {
 		fc |= IEEE80211_FCTL_TODS;
-		/* To DS: Addr1 = BSSID, Addr2 = SA,
-		   Addr3 = DA */
+		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
 		memcpy(header.addr1, ieee->bssid, ETH_ALEN);
 		memcpy(header.addr2, src, ETH_ALEN);
 		memcpy(header.addr3, dest, ETH_ALEN);
 	} else if (ieee->iw_mode == IW_MODE_ADHOC) {
-		/* not From/To DS: Addr1 = DA, Addr2 = SA,
-		   Addr3 = BSSID */
+		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
 		memcpy(header.addr1, dest, ETH_ALEN);
 		memcpy(header.addr2, src, ETH_ALEN);
 		memcpy(header.addr3, ieee->bssid, ETH_ALEN);
@@ -306,42 +299,75 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 	header.frame_ctl = cpu_to_le16(fc);
 	hdr_len = IEEE80211_3ADDR_LEN;
 
-	/* Determine fragmentation size based on destination (multicast
-	 * and broadcast are not fragmented) */
-	if (is_multicast_ether_addr(dest) || is_broadcast_ether_addr(dest))
-		frag_size = MAX_FRAG_THRESHOLD;
-	else
-		frag_size = ieee->fts;
+	/* Encrypt msdu first on the whole data packet. */
+	if ((host_encrypt || host_encrypt_msdu) &&
+	    crypt && crypt->ops && crypt->ops->encrypt_msdu) {
+		int res = 0;
+		int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len +
+		    crypt->ops->extra_msdu_postfix_len;
+		struct sk_buff *skb_new = dev_alloc_skb(len);
+		if (unlikely(!skb_new))
+			goto failed;
+		skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
+		memcpy(skb_put(skb_new, hdr_len), &header, hdr_len);
+		snapped = 1;
+		ieee80211_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
+				    ether_type);
+		memcpy(skb_put(skb_new, skb->len), skb->data, skb->len);
+		res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
+		if (res < 0) {
+			IEEE80211_ERROR("msdu encryption failed\n");
+			dev_kfree_skb_any(skb_new);
+			goto failed;
+		}
+		dev_kfree_skb_any(skb);
+		skb = skb_new;
+		bytes += crypt->ops->extra_msdu_prefix_len +
+		    crypt->ops->extra_msdu_postfix_len;
+		skb_pull(skb, hdr_len);
+	}
 
-	/* Determine amount of payload per fragment. Regardless of if
-	 * this stack is providing the full 802.11 header, one will
-	 * eventually be affixed to this fragment -- so we must account for
-	 * it when determining the amount of payload space. */
-	bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN;
-	if (ieee->config &
-	    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
-		bytes_per_frag -= IEEE80211_FCS_LEN;
+	if (host_encrypt || ieee->host_open_frag) {
+		/* Determine fragmentation size based on destination (multicast
+		 * and broadcast are not fragmented) */
+		if (is_multicast_ether_addr(dest))
+			frag_size = MAX_FRAG_THRESHOLD;
+		else
+			frag_size = ieee->fts;
+
+		/* Determine amount of payload per fragment. Regardless of if
+		 * this stack is providing the full 802.11 header, one will
+		 * eventually be affixed to this fragment -- so we must account
+		 * for it when determining the amount of payload space. */
+		bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN;
+		if (ieee->config &
+		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
+			bytes_per_frag -= IEEE80211_FCS_LEN;
 
-	/* Each fragment may need to have room for encryptiong pre/postfix */
-	if (host_encrypt)
-		bytes_per_frag -= crypt->ops->extra_prefix_len +
-		    crypt->ops->extra_postfix_len;
-
-	/* Number of fragments is the total bytes_per_frag /
-	 * payload_per_fragment */
-	nr_frags = bytes / bytes_per_frag;
-	bytes_last_frag = bytes % bytes_per_frag;
-	if (bytes_last_frag)
-		nr_frags++;
-	else
-		bytes_last_frag = bytes_per_frag;
+		/* Each fragment may need to have room for encryptiong
+		 * pre/postfix */
+		if (host_encrypt)
+			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
+			    crypt->ops->extra_mpdu_postfix_len;
+
+		/* Number of fragments is the total
+		 * bytes_per_frag / payload_per_fragment */
+		nr_frags = bytes / bytes_per_frag;
+		bytes_last_frag = bytes % bytes_per_frag;
+		if (bytes_last_frag)
+			nr_frags++;
+		else
+			bytes_last_frag = bytes_per_frag;
+	} else {
+		nr_frags = 1;
+		bytes_per_frag = bytes_last_frag = bytes;
+		frag_size = bytes + IEEE80211_3ADDR_LEN;
+	}
 
 	rts_required = (frag_size > ieee->rts
 			&& ieee->config & CFG_IEEE80211_RTS);
 	if (rts_required)
 		nr_frags++;
-	else
-		bytes_last_frag = bytes_per_frag;
 
 	/* When we allocate the TXB we allocate enough space for the reserve
 	 * and full fragment bytes (bytes_per_frag doesn't include prefix,
@@ -353,7 +379,11 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto failed;
 	}
 	txb->encrypted = encrypt;
-	txb->payload_size = bytes;
+	if (host_encrypt)
+		txb->payload_size = frag_size * (nr_frags - 1) +
+		    bytes_last_frag;
+	else
+		txb->payload_size = bytes;
 
 	if (rts_required) {
 		skb_frag = txb->fragments[0];
@@ -385,7 +415,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 		skb_frag = txb->fragments[i];
 
 		if (host_encrypt)
-			skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
+			skb_reserve(skb_frag,
+				    crypt->ops->extra_mpdu_prefix_len);
 
 		frag_hdr =
 		    (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len);
@@ -402,11 +433,10 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 			bytes = bytes_last_frag;
 		}
 
-		/* Put a SNAP header on the first fragment */
-		if (i == 0) {
-			ieee80211_put_snap(skb_put
-					   (skb_frag, SNAP_SIZE + sizeof(u16)),
-					   ether_type);
+		if (i == 0 && !snapped) {
+			ieee80211_copy_snap(skb_put
+					    (skb_frag, SNAP_SIZE + sizeof(u16)),
+					    ether_type);
 			bytes -= SNAP_SIZE + sizeof(u16);
 		}
 
@@ -420,19 +450,6 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (host_encrypt)
 			ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
 
-		/* ipw2200/2915 Hardware encryption doesn't support TKIP MIC */
-		if (!ieee->host_encrypt && encrypt &&
-		    (ieee->sec.level == SEC_LEVEL_2) &&
-		    crypt && crypt->ops && crypt->ops->encrypt_msdu) {
-			int res = 0;
-			res = crypt->ops->encrypt_msdu(skb_frag, hdr_len,
-						       crypt->priv);
-			if (res < 0) {
-				IEEE80211_ERROR("TKIP MIC encryption failed\n");
-				goto failed;
-			}
-		}
-
 		if (ieee->config &
 		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
 			skb_put(skb_frag, 4);
@@ -444,7 +461,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev_kfree_skb_any(skb);
 
 	if (txb) {
-		if ((*ieee->hard_start_xmit) (txb, dev) == 0) {
+		int ret = (*ieee->hard_start_xmit) (txb, dev);
+		if (ret == 0) {
 			stats->tx_packets++;
 			stats->tx_bytes += txb->payload_size;
 			return 0;