-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c  158
1 file changed, 83 insertions, 75 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 7dfe4cde55e3..795065974d78 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -687,6 +687,74 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 }
 
 #ifdef CONFIG_INET
+
+static int
+iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
+		       netdev_features_t netdev_flags,
+		       struct sk_buff_head *mpdus_skb)
+{
+	struct sk_buff *tmp, *next;
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	char cb[sizeof(skb->cb)];
+	u16 i = 0;
+	unsigned int tcp_payload_len;
+	unsigned int mss = skb_shinfo(skb)->gso_size;
+	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
+	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
+
+	skb_shinfo(skb)->gso_size = num_subframes * mss;
+	memcpy(cb, skb->cb, sizeof(cb));
+
+	next = skb_gso_segment(skb, netdev_flags);
+	skb_shinfo(skb)->gso_size = mss;
+	if (WARN_ON_ONCE(IS_ERR(next)))
+		return -EINVAL;
+	else if (next)
+		consume_skb(skb);
+
+	while (next) {
+		tmp = next;
+		next = tmp->next;
+
+		memcpy(tmp->cb, cb, sizeof(tmp->cb));
+		/*
+		 * Compute the length of all the data added for the A-MSDU.
+		 * This will be used to compute the length to write in the TX
+		 * command. We have: SNAP + IP + TCP for n -1 subframes and
+		 * ETH header for n subframes.
+		 */
+		tcp_payload_len = skb_tail_pointer(tmp) -
+			skb_transport_header(tmp) -
+			tcp_hdrlen(tmp) + tmp->data_len;
+
+		if (ipv4)
+			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
+
+		if (tcp_payload_len > mss) {
+			skb_shinfo(tmp)->gso_size = mss;
+		} else {
+			if (ieee80211_is_data_qos(hdr->frame_control)) {
+				u8 *qc;
+
+				if (ipv4)
+					ip_send_check(ip_hdr(tmp));
+
+				qc = ieee80211_get_qos_ctl((void *)tmp->data);
+				*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+			}
+			skb_shinfo(tmp)->gso_size = 0;
+		}
+
+		tmp->prev = NULL;
+		tmp->next = NULL;
+
+		__skb_queue_tail(mpdus_skb, tmp);
+		i++;
+	}
+
+	return 0;
+}
+
 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 			  struct ieee80211_tx_info *info,
 			  struct ieee80211_sta *sta,
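The helper added above leans on the stack's own segmenter: skb_shinfo(skb)->gso_size is temporarily inflated to num_subframes * mss before skb_gso_segment(), so each resulting skb carries one A-MSDU's worth of TCP payload; the IP ID of segment i is advanced by i * num_subframes (leaving one ID per eventual subframe), and each segment keeps gso_size = mss while it still needs A-MSDU aggregation or gets gso_size = 0 (with the A-MSDU-present QoS bit cleared) when it is a plain short MSDU. The standalone sketch below is not part of the patch; it uses made-up example numbers and only mirrors that arithmetic.

/* Minimal userspace sketch (not driver code): models how inflating
 * gso_size to num_subframes * mss makes the segmenter emit chunks that
 * each hold num_subframes MSDUs, and how the IP ID of chunk i is spaced
 * by i * num_subframes, as done in iwl_mvm_tx_tso_segment() above.
 * All concrete numbers below are invented example values.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mss = 1460;		/* example TCP MSS */
	unsigned int num_subframes = 3;		/* example MSDUs per A-MSDU */
	unsigned int payload = 10000;		/* example total TCP payload */
	unsigned int ip_base_id = 100;		/* example IP ID of the big skb */
	unsigned int chunk = num_subframes * mss;	/* temporary gso_size */
	unsigned int off, i = 0;

	for (off = 0; off < payload; off += chunk, i++) {
		unsigned int len = payload - off < chunk ? payload - off : chunk;

		/* gso_size stays at mss while the chunk still needs A-MSDU
		 * aggregation, and is cleared to 0 for a final short chunk. */
		printf("segment %u: payload %u, ip_id %u, gso_size %u\n",
		       i, len, ip_base_id + i * num_subframes,
		       len > mss ? mss : 0);
	}
	return 0;
}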
@@ -695,14 +763,10 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	unsigned int mss = skb_shinfo(skb)->gso_size;
-	struct sk_buff *tmp, *next;
-	char cb[sizeof(skb->cb)];
 	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
-	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
-	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
-	u16 snap_ip_tcp, pad, i = 0;
+	u16 snap_ip_tcp, pad;
 	unsigned int dbg_max_amsdu_len;
-	netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
+	netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
 	u8 *qc, tid, txf;
 
 	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
@@ -712,16 +776,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 
 	if (!sta->max_amsdu_len ||
 	    !ieee80211_is_data_qos(hdr->frame_control) ||
-	    (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len)) {
-		num_subframes = 1;
-		pad = 0;
-		goto segment;
-	}
-
-	qc = ieee80211_get_qos_ctl(hdr);
-	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
-	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
-		return -EINVAL;
+	    (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len))
+		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
 
 	/*
 	 * Do not build AMSDU for IPv6 with extension headers.
@@ -730,22 +786,22 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 	if (skb->protocol == htons(ETH_P_IPV6) &&
 	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
 	    IPPROTO_TCP) {
-		num_subframes = 1;
-		pad = 0;
-		netdev_features &= ~NETIF_F_CSUM_MASK;
-		goto segment;
+		netdev_flags &= ~NETIF_F_CSUM_MASK;
+		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
 	}
 
+	qc = ieee80211_get_qos_ctl(hdr);
+	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
+	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+		return -EINVAL;
+
 	/*
 	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
 	 * during an BA session.
 	 */
 	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
-	    !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) {
-		num_subframes = 1;
-		pad = 0;
-		goto segment;
-	}
+	    !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed)
+		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
 
 	max_amsdu_len = sta->max_amsdu_len;
 
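The hunks above all follow the same shape: each former "num_subframes = 1; pad = 0; goto segment;" early exit becomes a direct "return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);", which is what allows the segmentation-only locals to move out of iwl_mvm_tx_tso() in the first hunk. As a toy illustration of that refactor shape only, unrelated to the driver's real logic, a shared goto tail turned into a helper that every early-exit path tail-calls might look like the following sketch.

/* Toy, self-contained example of the refactor pattern (hypothetical
 * names; nothing here is iwlwifi code): emit() stands in for the shared
 * tail that used to sit behind a "segment:"-style label.
 */
#include <stdio.h>

static int emit(int value, int scale)
{
	/* the shared tail, now a helper instead of a goto target */
	printf("emit %d\n", value * scale);
	return 0;
}

static int process(int value, int special)
{
	if (value < 0)
		return emit(value, 1);		/* was: scale = 1; goto out; */

	if (special)
		return emit(value, 2);		/* was: scale = 2; goto out; */

	/* main path computes its own scale before reaching the tail */
	return emit(value, value + 1);
}

int main(void)
{
	process(-3, 0);
	process(5, 1);
	process(5, 0);
	return 0;
}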
@@ -811,56 +867,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 	 * Trick the segmentation function to make it
 	 * create SKBs that can fit into one A-MSDU.
 	 */
-segment:
-	skb_shinfo(skb)->gso_size = num_subframes * mss;
-	memcpy(cb, skb->cb, sizeof(cb));
-
-	next = skb_gso_segment(skb, netdev_features);
-	skb_shinfo(skb)->gso_size = mss;
-	if (WARN_ON_ONCE(IS_ERR(next)))
-		return -EINVAL;
-	else if (next)
-		consume_skb(skb);
-
-	while (next) {
-		tmp = next;
-		next = tmp->next;
-
-		memcpy(tmp->cb, cb, sizeof(tmp->cb));
-		/*
-		 * Compute the length of all the data added for the A-MSDU.
-		 * This will be used to compute the length to write in the TX
-		 * command. We have: SNAP + IP + TCP for n -1 subframes and
-		 * ETH header for n subframes.
-		 */
-		tcp_payload_len = skb_tail_pointer(tmp) -
-			skb_transport_header(tmp) -
-			tcp_hdrlen(tmp) + tmp->data_len;
-
-		if (ipv4)
-			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
-
-		if (tcp_payload_len > mss) {
-			skb_shinfo(tmp)->gso_size = mss;
-		} else {
-			if (ieee80211_is_data_qos(hdr->frame_control)) {
-				qc = ieee80211_get_qos_ctl((void *)tmp->data);
-
-				if (ipv4)
-					ip_send_check(ip_hdr(tmp));
-				*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
-			}
-			skb_shinfo(tmp)->gso_size = 0;
-		}
-
-		tmp->prev = NULL;
-		tmp->next = NULL;
-
-		__skb_queue_tail(mpdus_skb, tmp);
-		i++;
-	}
-
-	return 0;
-}
+	return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags,
+				      mpdus_skb);
 }
 #else /* CONFIG_INET */
 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,