author     Johannes Berg <johannes@sipsolutions.net>   2008-06-19 19:22:30 -0400
committer  John W. Linville <linville@tuxdriver.com>   2008-06-26 16:50:02 -0400
commit     97b045d62bffae5a91a286b56ac51db0c4385687 (patch)
tree       471f4d88dcbd484bc263deb0b2c80e9236e32b31
parent     9965183a78ad5303b9154184a0f4056844e8baae (diff)
mac80211: add single function calling tx handlers
This modifies mac80211 so that a single function invokes the TX
handlers, rather than having the handler loop open-coded in multiple places.
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
-rw-r--r--  net/mac80211/tx.c  82
1 file changed, 40 insertions(+), 42 deletions(-)
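
As context for the diff below, here is a minimal, self-contained sketch of the pattern the patch consolidates: a NULL-terminated table of handler functions walked by one function, which stops as soon as a handler returns anything other than "continue". The names here (tx_result, tx_data, check_length, maybe_queue, invoke_handlers) are illustrative stand-ins, not the mac80211 API; only the control flow mirrors the new invoke_tx_handlers() in the patch.

```c
/*
 * Illustrative sketch only -- not mac80211 code.
 * Demonstrates the "table of handlers + single invoke function" pattern.
 */
#include <stdio.h>

enum tx_result { TX_CONTINUE, TX_DROP, TX_QUEUED };

struct tx_data { int len; };

typedef enum tx_result (*tx_handler)(struct tx_data *tx);

/* Example handler: drop frames with a nonsensical length. */
static enum tx_result check_length(struct tx_data *tx)
{
	return tx->len > 0 ? TX_CONTINUE : TX_DROP;
}

/* Example handler: pretend oversized frames get queued for later. */
static enum tx_result maybe_queue(struct tx_data *tx)
{
	return tx->len > 1500 ? TX_QUEUED : TX_CONTINUE;
}

/* NULL-terminated handler table, analogous to ieee80211_tx_handlers[]. */
static tx_handler handlers[] = { check_length, maybe_queue, NULL };

/*
 * Single entry point, analogous to invoke_tx_handlers():
 * returns 0 on success, non-zero if the frame was dropped or queued.
 */
static int invoke_handlers(struct tx_data *tx)
{
	tx_handler *h;
	enum tx_result res = TX_CONTINUE;

	for (h = handlers; *h != NULL; h++) {
		res = (*h)(tx);
		if (res != TX_CONTINUE)
			break;
	}
	return res == TX_CONTINUE ? 0 : -1;
}

int main(void)
{
	struct tx_data frame = { .len = 100 };

	printf("result: %d\n", invoke_handlers(&frame));
	return 0;
}
```

Centralizing the loop this way means the drop/queued accounting and cleanup live in one place instead of being duplicated at every call site, which is exactly what the patch does for the two former callers in tx.c.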
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index ce06e791bf43..7a14a39ebd78 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1083,13 +1083,46 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
 	return IEEE80211_TX_OK;
 }
 
+/*
+ * Invoke TX handlers, return 0 on success and non-zero if the
+ * frame was dropped or queued.
+ */
+static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
+{
+	struct ieee80211_local *local = tx->local;
+	struct sk_buff *skb = tx->skb;
+	ieee80211_tx_handler *handler;
+	ieee80211_tx_result res = TX_DROP;
+	int i;
+
+	for (handler = ieee80211_tx_handlers; *handler != NULL; handler++) {
+		res = (*handler)(tx);
+		if (res != TX_CONTINUE)
+			break;
+	}
+
+	if (unlikely(res == TX_DROP)) {
+		I802_DEBUG_INC(local->tx_handlers_drop);
+		dev_kfree_skb(skb);
+		for (i = 0; i < tx->num_extra_frag; i++)
+			if (tx->extra_frag[i])
+				dev_kfree_skb(tx->extra_frag[i]);
+		kfree(tx->extra_frag);
+		return -1;
+	} else if (unlikely(res == TX_QUEUED)) {
+		I802_DEBUG_INC(local->tx_handlers_queued);
+		return -1;
+	}
+
+	return 0;
+}
+
 static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
 {
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct sta_info *sta;
-	ieee80211_tx_handler *handler;
 	struct ieee80211_tx_data tx;
-	ieee80211_tx_result res = TX_DROP, res_prepare;
+	ieee80211_tx_result res_prepare;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	int ret, i;
 	u16 queue;
@@ -1118,26 +1151,8 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
 	tx.channel = local->hw.conf.channel;
 	info->band = tx.channel->band;
 
-	for (handler = ieee80211_tx_handlers; *handler != NULL;
-	     handler++) {
-		res = (*handler)(&tx);
-		if (res != TX_CONTINUE)
-			break;
-	}
-
-	if (WARN_ON(tx.skb != skb))
-		goto drop;
-
-	if (unlikely(res == TX_DROP)) {
-		I802_DEBUG_INC(local->tx_handlers_drop);
-		goto drop;
-	}
-
-	if (unlikely(res == TX_QUEUED)) {
-		I802_DEBUG_INC(local->tx_handlers_queued);
-		rcu_read_unlock();
-		return 0;
-	}
+	if (invoke_tx_handlers(&tx))
+		goto out;
 
 	if (tx.extra_frag) {
 		for (i = 0; i < tx.num_extra_frag; i++) {
@@ -1198,6 +1213,7 @@ retry:
 		store->last_frag_rate_ctrl_probe =
 			!!(tx.flags & IEEE80211_TX_PROBE_LAST_FRAG);
 	}
+ out:
 	rcu_read_unlock();
 	return 0;
 
@@ -1948,9 +1964,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct sk_buff *skb = NULL;
 	struct sta_info *sta;
-	ieee80211_tx_handler *handler;
 	struct ieee80211_tx_data tx;
-	ieee80211_tx_result res = TX_DROP;
 	struct net_device *bdev;
 	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_if_ap *bss = NULL;
@@ -2001,25 +2015,9 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
 	tx.channel = local->hw.conf.channel;
 	info->band = tx.channel->band;
 
-	for (handler = ieee80211_tx_handlers; *handler != NULL; handler++) {
-		res = (*handler)(&tx);
-		if (res == TX_DROP || res == TX_QUEUED)
-			break;
-	}
-
-	if (WARN_ON(tx.skb != skb))
-		res = TX_DROP;
-
-	if (res == TX_DROP) {
-		I802_DEBUG_INC(local->tx_handlers_drop);
-		dev_kfree_skb(skb);
-		skb = NULL;
-	} else if (res == TX_QUEUED) {
-		I802_DEBUG_INC(local->tx_handlers_queued);
+	if (invoke_tx_handlers(&tx))
 		skb = NULL;
-	}
-
- out:
+ out:
 	rcu_read_unlock();
 
 	return skb;