Diffstat (limited to 'net/mac80211/rx.c')
-rw-r--r--  net/mac80211/rx.c | 112
 1 file changed, 43 insertions(+), 69 deletions(-)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5e9d3bc6a2d9..a6701ed87f0d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -533,10 +533,11 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
 
 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
 					    struct tid_ampdu_rx *tid_agg_rx,
-					    int index,
-					    struct sk_buff_head *frames)
+					    int index)
 {
+	struct ieee80211_local *local = hw_to_local(hw);
 	struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
+	struct ieee80211_rx_status *status;
 
 	lockdep_assert_held(&tid_agg_rx->reorder_lock);
 
@@ -546,7 +547,9 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
 	/* release the frame from the reorder ring buffer */
 	tid_agg_rx->stored_mpdu_num--;
 	tid_agg_rx->reorder_buf[index] = NULL;
-	__skb_queue_tail(frames, skb);
+	status = IEEE80211_SKB_RXCB(skb);
+	status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
+	skb_queue_tail(&local->rx_skb_queue, skb);
 
 no_frame:
 	tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
@@ -554,8 +557,7 @@ no_frame:
 
 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
 					     struct tid_ampdu_rx *tid_agg_rx,
-					     u16 head_seq_num,
-					     struct sk_buff_head *frames)
+					     u16 head_seq_num)
 {
 	int index;
 
@@ -564,7 +566,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
 	while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
 		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
 							tid_agg_rx->buf_size;
-		ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
+		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
 	}
 }
 
@@ -580,8 +582,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
 
 static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
-					  struct tid_ampdu_rx *tid_agg_rx,
-					  struct sk_buff_head *frames)
+					  struct tid_ampdu_rx *tid_agg_rx)
 {
 	int index, j;
 
@@ -612,8 +613,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
 			wiphy_debug(hw->wiphy,
 				    "release an RX reorder frame due to timeout on earlier frames\n");
 #endif
-			ieee80211_release_reorder_frame(hw, tid_agg_rx,
-							j, frames);
+			ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
 
 			/*
 			 * Increment the head seq# also for the skipped slots.
@@ -623,31 +623,11 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
 			skipped = 0;
 		}
 	} else while (tid_agg_rx->reorder_buf[index]) {
-		ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
+		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
 		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
 			tid_agg_rx->buf_size;
 	}
 
-	/*
-	 * Disable the reorder release timer for now.
-	 *
-	 * The current implementation lacks a proper locking scheme
-	 * which would protect vital statistic and debug counters
-	 * from being updated by two different but concurrent BHs.
-	 *
-	 * More information about the topic is available from:
-	 * - thread: http://marc.info/?t=128635927000001
-	 *
-	 * What was wrong:
-	 * => http://marc.info/?l=linux-wireless&m=128636170811964
-	 *    "Basically the thing is that until your patch, the data
-	 *     in the struct didn't actually need locking because it
-	 *     was accessed by the RX path only which is not concurrent."
-	 *
-	 * List of what needs to be fixed:
-	 * => http://marc.info/?l=linux-wireless&m=128656352920957
-	 *
-
 	if (tid_agg_rx->stored_mpdu_num) {
 		j = index = seq_sub(tid_agg_rx->head_seq_num,
 				    tid_agg_rx->ssn) % tid_agg_rx->buf_size;
@@ -666,10 +646,6 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
 	} else {
 		del_timer(&tid_agg_rx->reorder_timer);
 	}
-	 */
-
-set_release_timer:
-	return;
 }
 
 /*
@@ -679,8 +655,7 @@ set_release_timer:
  */
 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 					     struct tid_ampdu_rx *tid_agg_rx,
-					     struct sk_buff *skb,
-					     struct sk_buff_head *frames)
+					     struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	u16 sc = le16_to_cpu(hdr->seq_ctrl);
@@ -707,8 +682,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
 		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
 		/* release stored frames up to new head to stack */
-		ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num,
-						 frames);
+		ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
 	}
 
 	/* Now the new frame is always in the range of the reordering buffer */
@@ -736,7 +710,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 	tid_agg_rx->reorder_buf[index] = skb;
 	tid_agg_rx->reorder_time[index] = jiffies;
 	tid_agg_rx->stored_mpdu_num++;
-	ieee80211_sta_reorder_release(hw, tid_agg_rx, frames);
+	ieee80211_sta_reorder_release(hw, tid_agg_rx);
 
  out:
 	spin_unlock(&tid_agg_rx->reorder_lock);
@@ -747,8 +721,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
  * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
  * true if the MPDU was buffered, false if it should be processed.
  */
-static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
-				       struct sk_buff_head *frames)
+static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
 {
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_local *local = rx->local;
@@ -803,11 +776,11 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
 	 * sure that we cannot get to it any more before doing
 	 * anything with it.
 	 */
-	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
+	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb))
 		return;
 
  dont_reorder:
-	__skb_queue_tail(frames, skb);
+	skb_queue_tail(&local->rx_skb_queue, skb);
 }
 
 static ieee80211_rx_result debug_noinline
@@ -1189,6 +1162,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 	 * exchange sequence.
 	 */
 	if (!ieee80211_has_morefrags(hdr->frame_control) &&
+	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
 	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
 	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
 		if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
@@ -1831,11 +1805,11 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 
 		fwd_skb = skb_copy(skb, GFP_ATOMIC);
 
-		if (!fwd_skb && net_ratelimit()) {
+		if (!fwd_skb && net_ratelimit())
 			printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
 					sdata->name);
+		if (!fwd_skb)
 			goto out;
-		}
 
 		fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
 		memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
@@ -1930,7 +1904,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
 }
 
 static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
+ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
 {
 	struct ieee80211_local *local = rx->local;
 	struct ieee80211_hw *hw = &local->hw;
@@ -1970,8 +1944,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
 
 		spin_lock(&tid_agg_rx->reorder_lock);
 		/* release stored frames up to start of BAR */
-		ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
-						 frames);
+		ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
 		spin_unlock(&tid_agg_rx->reorder_lock);
 
 		kfree_skb(skb);
@@ -2488,8 +2461,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
 	}
 }
 
-static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
-				  struct sk_buff_head *frames)
+static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
 {
 	ieee80211_rx_result res = RX_DROP_MONITOR;
 	struct sk_buff *skb;
@@ -2501,7 +2473,15 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
 			goto rxh_next; \
 	} while (0);
 
-	while ((skb = __skb_dequeue(frames))) {
+	spin_lock(&rx->local->rx_skb_queue.lock);
+	if (rx->local->running_rx_handler)
+		goto unlock;
+
+	rx->local->running_rx_handler = true;
+
+	while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
+		spin_unlock(&rx->local->rx_skb_queue.lock);
+
 		/*
 		 * all the other fields are valid across frames
 		 * that belong to an aMPDU since they are on the
@@ -2524,12 +2504,7 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
 		CALL_RXH(ieee80211_rx_h_mesh_fwding);
 #endif
 		CALL_RXH(ieee80211_rx_h_data)
-
-		/* special treatment -- needs the queue */
-		res = ieee80211_rx_h_ctrl(rx, frames);
-		if (res != RX_CONTINUE)
-			goto rxh_next;
-
+		CALL_RXH(ieee80211_rx_h_ctrl);
 		CALL_RXH(ieee80211_rx_h_mgmt_check)
 		CALL_RXH(ieee80211_rx_h_action)
 		CALL_RXH(ieee80211_rx_h_userspace_mgmt)
@@ -2538,18 +2513,20 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
 
  rxh_next:
 		ieee80211_rx_handlers_result(rx, res);
-
+		spin_lock(&rx->local->rx_skb_queue.lock);
 #undef CALL_RXH
 	}
+
+	rx->local->running_rx_handler = false;
+
+ unlock:
+	spin_unlock(&rx->local->rx_skb_queue.lock);
 }
 
 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
 {
-	struct sk_buff_head reorder_release;
 	ieee80211_rx_result res = RX_DROP_MONITOR;
 
-	__skb_queue_head_init(&reorder_release);
-
 #define CALL_RXH(rxh)			\
 	do {				\
 		res = rxh(rx);		\
@@ -2560,9 +2537,9 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
 	CALL_RXH(ieee80211_rx_h_passive_scan)
 	CALL_RXH(ieee80211_rx_h_check)
 
-	ieee80211_rx_reorder_ampdu(rx, &reorder_release);
+	ieee80211_rx_reorder_ampdu(rx);
 
-	ieee80211_rx_handlers(rx, &reorder_release);
+	ieee80211_rx_handlers(rx);
 	return;
 
  rxh_next:
@@ -2577,7 +2554,6 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
  */
 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 {
-	struct sk_buff_head frames;
 	struct ieee80211_rx_data rx = {
 		.sta = sta,
 		.sdata = sta->sdata,
@@ -2590,13 +2566,11 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 	if (!tid_agg_rx)
 		return;
 
-	__skb_queue_head_init(&frames);
-
 	spin_lock(&tid_agg_rx->reorder_lock);
-	ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx, &frames);
+	ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx);
 	spin_unlock(&tid_agg_rx->reorder_lock);
 
-	ieee80211_rx_handlers(&rx, &frames);
+	ieee80211_rx_handlers(&rx);
 }
 
 /* main receive path */
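
The patch replaces the per-call "frames" queue with a single local->rx_skb_queue that is drained by whichever context reaches ieee80211_rx_handlers() first; the running_rx_handler flag, checked and set under the queue's own lock, ensures only one CPU runs the RX handler chain at a time while concurrent callers simply leave their frames on the queue. A minimal, self-contained sketch of that single-runner drain pattern (hypothetical names, not the mac80211 code itself):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical per-device state mirroring local->rx_skb_queue and
 * local->running_rx_handler from the patch above. */
struct rx_serializer {
	struct sk_buff_head queue;	/* skb_queue_head_init() provides queue.lock */
	bool running;			/* protected by queue.lock */
};

/* Producers append from any context that owns the skb. */
static void rx_enqueue(struct rx_serializer *s, struct sk_buff *skb)
{
	skb_queue_tail(&s->queue, skb);	/* takes queue.lock internally */
}

/* Whichever caller gets here first drains the queue; concurrent callers
 * see s->running set and back off, so handle_one() never runs on two
 * CPUs at once. */
static void rx_drain(struct rx_serializer *s,
		     void (*handle_one)(struct sk_buff *skb))
{
	struct sk_buff *skb;

	spin_lock(&s->queue.lock);
	if (s->running)
		goto unlock;
	s->running = true;

	while ((skb = __skb_dequeue(&s->queue))) {
		/* drop the lock while the (possibly slow) handler runs;
		 * frames queued meanwhile are picked up on the next pass */
		spin_unlock(&s->queue.lock);
		handle_one(skb);
		spin_lock(&s->queue.lock);
	}

	s->running = false;
unlock:
	spin_unlock(&s->queue.lock);
}

This is presumably also why ieee80211_release_reorder_frame() now marks frames with IEEE80211_RX_DEFERRED_RELEASE and ieee80211_rx_h_sta_process() checks for it: a frame released from the reorder buffer well after it arrived should not be taken as fresh evidence about the sender's power-save state.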