Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-4965.c')

 drivers/net/wireless/iwlwifi/iwl-4965.c | 396 ----------------------
 1 file changed, 0 insertions(+), 396 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 89aff4aae45f..e81120cc45f3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2946,378 +2946,6 @@ void iwl4965_set_rxon_chain(struct iwl4965_priv *priv)
 	IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
 }
 
-#ifdef CONFIG_IWL4965_HT
-#ifdef CONFIG_IWL4965_HT_AGG
-/*
- * Get the traffic load value for the given tid.
- */
-static u32 iwl4965_tl_get_load(struct iwl4965_priv *priv, u8 tid)
-{
-	u32 load = 0;
-	u32 current_time = jiffies_to_msecs(jiffies);
-	u32 time_diff;
-	s32 index;
-	unsigned long flags;
-	struct iwl4965_traffic_load *tid_ptr = NULL;
-
-	if (tid >= TID_MAX_LOAD_COUNT)
-		return 0;
-
-	tid_ptr = &(priv->lq_mngr.agg_ctrl.traffic_load[tid]);
-
-	current_time -= current_time % TID_ROUND_VALUE;
-
-	spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-	if (!(tid_ptr->queue_count))
-		goto out;
-
-	time_diff = TIME_WRAP_AROUND(tid_ptr->time_stamp, current_time);
-	index = time_diff / TID_QUEUE_CELL_SPACING;
-
-	if (index >= TID_QUEUE_MAX_SIZE) {
-		u32 oldest_time = current_time - TID_MAX_TIME_DIFF;
-
-		while (tid_ptr->queue_count &&
-		       (tid_ptr->time_stamp < oldest_time)) {
-			tid_ptr->total -= tid_ptr->packet_count[tid_ptr->head];
-			tid_ptr->packet_count[tid_ptr->head] = 0;
-			tid_ptr->time_stamp += TID_QUEUE_CELL_SPACING;
-			tid_ptr->queue_count--;
-			tid_ptr->head++;
-			if (tid_ptr->head >= TID_QUEUE_MAX_SIZE)
-				tid_ptr->head = 0;
-		}
-	}
-	load = tid_ptr->total;
-
- out:
-	spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-	return load;
-}
-
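The elapsed-time arithmetic above leans on TIME_WRAP_AROUND() to stay correct when the 32-bit millisecond counter wraps. The macro's definition is not part of this diff; a minimal user-space sketch of the wraparound-safe subtraction it would need to perform (elapsed_ms is a hypothetical stand-in, not driver API):

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "now - then" on a u32 millisecond clock.  Unsigned
 * subtraction is already modulo 2^32, so the result is the true elapsed
 * time as long as the samples are less than ~49.7 days apart. */
static uint32_t elapsed_ms(uint32_t then, uint32_t now)
{
	return now - then;
}

int main(void)
{
	/* A sample taken just before wraparound, read just after it. */
	printf("%u\n", (unsigned)elapsed_ms(0xFFFFFFF0u, 0x00000010u)); /* 32 */
	return 0;
}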
-/*
- * Increment the traffic load value for the given tid, and expire
- * any old values that are past the maximum time window.
- */
-static void iwl4965_tl_add_packet(struct iwl4965_priv *priv, u8 tid)
-{
-	u32 current_time = jiffies_to_msecs(jiffies);
-	u32 time_diff;
-	s32 index;
-	unsigned long flags;
-	struct iwl4965_traffic_load *tid_ptr = NULL;
-
-	if (tid >= TID_MAX_LOAD_COUNT)
-		return;
-
-	tid_ptr = &(priv->lq_mngr.agg_ctrl.traffic_load[tid]);
-
-	current_time -= current_time % TID_ROUND_VALUE;
-
-	spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-	if (!(tid_ptr->queue_count)) {
-		tid_ptr->total = 1;
-		tid_ptr->time_stamp = current_time;
-		tid_ptr->queue_count = 1;
-		tid_ptr->head = 0;
-		tid_ptr->packet_count[0] = 1;
-		goto out;
-	}
-
-	time_diff = TIME_WRAP_AROUND(tid_ptr->time_stamp, current_time);
-	index = time_diff / TID_QUEUE_CELL_SPACING;
-
-	if (index >= TID_QUEUE_MAX_SIZE) {
-		u32 oldest_time = current_time - TID_MAX_TIME_DIFF;
-
-		while (tid_ptr->queue_count &&
-		       (tid_ptr->time_stamp < oldest_time)) {
-			tid_ptr->total -= tid_ptr->packet_count[tid_ptr->head];
-			tid_ptr->packet_count[tid_ptr->head] = 0;
-			tid_ptr->time_stamp += TID_QUEUE_CELL_SPACING;
-			tid_ptr->queue_count--;
-			tid_ptr->head++;
-			if (tid_ptr->head >= TID_QUEUE_MAX_SIZE)
-				tid_ptr->head = 0;
-		}
-	}
-
-	index = (tid_ptr->head + index) % TID_QUEUE_MAX_SIZE;
-	tid_ptr->packet_count[index] = tid_ptr->packet_count[index] + 1;
-	tid_ptr->total = tid_ptr->total + 1;
-
-	if ((index + 1) > tid_ptr->queue_count)
-		tid_ptr->queue_count = index + 1;
- out:
-	spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-
-}
-
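Taken together, iwl4965_tl_add_packet() and iwl4965_tl_get_load() implement a sliding-window packet counter: a ring of fixed-width time cells whose per-cell counts sum to the current load, with stale cells expired as the window slides. A self-contained user-space sketch of the same structure; CELL_MS and NUM_CELLS are illustrative stand-ins for the driver's TID_QUEUE_CELL_SPACING and TID_QUEUE_MAX_SIZE:

#include <stdint.h>
#include <string.h>

#define CELL_MS   1000	/* width of one time cell (illustrative)  */
#define NUM_CELLS 8	/* window length = NUM_CELLS * CELL_MS ms */

struct traffic_load {
	uint32_t packet_count[NUM_CELLS];	/* ring of cell counts  */
	uint32_t total;				/* sum over live cells  */
	uint32_t time_stamp;			/* time of the head cell */
	uint32_t head;				/* index of oldest cell */
	uint32_t queue_count;			/* cells spanned so far */
};

/* Slide the window forward, dropping cells older than it can hold. */
static void expire_old_cells(struct traffic_load *tl, uint32_t now)
{
	uint32_t oldest = now - (NUM_CELLS - 1) * CELL_MS;

	while (tl->queue_count && tl->time_stamp < oldest) {
		tl->total -= tl->packet_count[tl->head];
		tl->packet_count[tl->head] = 0;
		tl->time_stamp += CELL_MS;
		tl->queue_count--;
		if (++tl->head >= NUM_CELLS)
			tl->head = 0;
	}
}

/* Account one packet at time 'now' (already rounded to CELL_MS). */
static void add_packet(struct traffic_load *tl, uint32_t now)
{
	uint32_t rel;

	if (tl->queue_count && (now - tl->time_stamp) / CELL_MS >= NUM_CELLS)
		expire_old_cells(tl, now);

	if (!tl->queue_count) {		/* empty, or fully expired */
		memset(tl, 0, sizeof(*tl));
		tl->time_stamp = now;
	}

	rel = (now - tl->time_stamp) / CELL_MS;
	tl->packet_count[(tl->head + rel) % NUM_CELLS]++;
	tl->total++;
	if (rel + 1 > tl->queue_count)
		tl->queue_count = rel + 1;
}

int main(void)
{
	struct traffic_load tl;

	memset(&tl, 0, sizeof(tl));
	add_packet(&tl, 5000);
	add_packet(&tl, 6000);
	add_packet(&tl, 14000);	/* slides the window forward */
	return (int)tl.total;	/* 1: only the newest cell survives */
}

One deliberate difference: the sketch recomputes the cell offset after the expiry pass, whereas the driver reuses the offset computed before sliding, folded back in with a modulo on the ring size.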
-#define MMAC_SCHED_MAX_NUMBER_OF_HT_BACK_FLOWS 7
-enum HT_STATUS {
-	BA_STATUS_FAILURE = 0,
-	BA_STATUS_INITIATOR_DELBA,
-	BA_STATUS_RECIPIENT_DELBA,
-	BA_STATUS_RENEW_ADDBA_REQUEST,
-	BA_STATUS_ACTIVE,
-};
-
-/**
- * iwl4964_tl_ba_avail - Find out if an unused aggregation queue is available
- */
-static u8 iwl4964_tl_ba_avail(struct iwl4965_priv *priv)
-{
-	int i;
-	struct iwl4965_lq_mngr *lq;
-	u8 count = 0;
-	u16 msk;
-
-	lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
-
-	/* Find out how many agg queues are in use */
-	for (i = 0; i < TID_MAX_LOAD_COUNT; i++) {
-		msk = 1 << i;
-		if ((lq->agg_ctrl.granted_ba & msk) ||
-		    (lq->agg_ctrl.wait_for_agg_status & msk))
-			count++;
-	}
-
-	if (count < MMAC_SCHED_MAX_NUMBER_OF_HT_BACK_FLOWS)
-		return 1;
-
-	return 0;
-}
-
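The availability check above is just a population count over the union of the granted and status-pending bitmasks. A hedged user-space equivalent using the GCC/Clang __builtin_popcount(); in kernel code hweight16() expresses the same thing, and the driver's loop avoids any popcount helper:

#include <stdint.h>
#include <stdio.h>

#define MAX_BA_FLOWS 7	/* mirrors MMAC_SCHED_MAX_NUMBER_OF_HT_BACK_FLOWS */

/* A TID counts as "in use" if its bit is set in either mask; this
 * assumes only per-TID bits are ever set in the two masks. */
static int ba_session_available(uint16_t granted_ba, uint16_t pending_ba)
{
	int in_use = __builtin_popcount((unsigned)(granted_ba | pending_ba));

	return in_use < MAX_BA_FLOWS;
}

int main(void)
{
	printf("%d\n", ba_session_available(0x0023, 0x0040)); /* 4 in use -> 1 */
	return 0;
}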
-static void iwl4965_ba_status(struct iwl4965_priv *priv,
-			      u8 tid, enum HT_STATUS status);
-
-static int iwl4965_perform_addba(struct iwl4965_priv *priv, u8 tid, u32 length,
-				 u32 ba_timeout)
-{
-	int rc;
-
-	rc = ieee80211_start_BA_session(priv->hw, priv->bssid, tid);
-	if (rc)
-		iwl4965_ba_status(priv, tid, BA_STATUS_FAILURE);
-
-	return rc;
-}
-
-static int iwl4965_perform_delba(struct iwl4965_priv *priv, u8 tid)
-{
-	int rc;
-
-	rc = ieee80211_stop_BA_session(priv->hw, priv->bssid, tid);
-	if (rc)
-		iwl4965_ba_status(priv, tid, BA_STATUS_FAILURE);
-
-	return rc;
-}
-
-static void iwl4965_turn_on_agg_for_tid(struct iwl4965_priv *priv,
-					struct iwl4965_lq_mngr *lq,
-					u8 auto_agg, u8 tid)
-{
-	u32 tid_msk = (1 << tid);
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-/*
-	if ((auto_agg) && (!lq->enable_counter)) {
-		lq->agg_ctrl.next_retry = 0;
-		lq->agg_ctrl.tid_retry = 0;
-		spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-		return;
-	}
-*/
-	if (!(lq->agg_ctrl.granted_ba & tid_msk) &&
-	    (lq->agg_ctrl.requested_ba & tid_msk)) {
-		u8 available_queues;
-		u32 load;
-
-		spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-		available_queues = iwl4964_tl_ba_avail(priv);
-		load = iwl4965_tl_get_load(priv, tid);
-
-		spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-		if (!available_queues) {
-			if (auto_agg)
-				lq->agg_ctrl.tid_retry |= tid_msk;
-			else {
-				lq->agg_ctrl.requested_ba &= ~tid_msk;
-				lq->agg_ctrl.wait_for_agg_status &= ~tid_msk;
-			}
-		} else if ((auto_agg) &&
-			   ((load <= lq->agg_ctrl.tid_traffic_load_threshold) ||
-			    ((lq->agg_ctrl.wait_for_agg_status & tid_msk))))
-			lq->agg_ctrl.tid_retry |= tid_msk;
-		else {
-			lq->agg_ctrl.wait_for_agg_status |= tid_msk;
-			spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-			iwl4965_perform_addba(priv, tid, 0x40,
-					      lq->agg_ctrl.ba_timeout);
-			spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-		}
-	}
-	spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-}
-
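Stripped of its lock juggling, iwl4965_turn_on_agg_for_tid() reduces to a three-way decision per TID. A distilled sketch of that branch structure; decide_agg() and the enum are illustrative names, not driver API:

#include <stdio.h>

enum agg_action {
	AGG_RETRY_LATER,	/* set the TID's retry bit, try again later */
	AGG_GIVE_UP,		/* drop the request entirely                */
	AGG_SEND_ADDBA,		/* mark status pending and send ADDBA       */
};

static enum agg_action decide_agg(int queue_available, int auto_agg,
				  unsigned int load, unsigned int threshold,
				  int status_pending)
{
	if (!queue_available)
		return auto_agg ? AGG_RETRY_LATER : AGG_GIVE_UP;
	if (auto_agg && (load <= threshold || status_pending))
		return AGG_RETRY_LATER;
	return AGG_SEND_ADDBA;
}

int main(void)
{
	/* Busy TID, queue free, nothing pending: aggregate. */
	printf("%d\n", decide_agg(1, 1, 500, 100, 0)); /* AGG_SEND_ADDBA */
	return 0;
}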
-static void iwl4965_turn_on_agg(struct iwl4965_priv *priv, u8 tid)
-{
-	struct iwl4965_lq_mngr *lq;
-	unsigned long flags;
-
-	lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
-
-	if ((tid < TID_MAX_LOAD_COUNT))
-		iwl4965_turn_on_agg_for_tid(priv, lq, lq->agg_ctrl.auto_agg,
-					    tid);
-	else if (tid == TID_ALL_SPECIFIED) {
-		if (lq->agg_ctrl.requested_ba) {
-			for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++)
-				iwl4965_turn_on_agg_for_tid(priv, lq,
-					lq->agg_ctrl.auto_agg, tid);
-		} else {
-			spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-			lq->agg_ctrl.tid_retry = 0;
-			lq->agg_ctrl.next_retry = 0;
-			spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-		}
-	}
-
-}
-
-void iwl4965_turn_off_agg(struct iwl4965_priv *priv, u8 tid)
-{
-	u32 tid_msk;
-	struct iwl4965_lq_mngr *lq;
-	unsigned long flags;
-
-	lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
-
-	if ((tid < TID_MAX_LOAD_COUNT)) {
-		tid_msk = 1 << tid;
-		spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-		lq->agg_ctrl.wait_for_agg_status |= tid_msk;
-		lq->agg_ctrl.requested_ba &= ~tid_msk;
-		spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-		iwl4965_perform_delba(priv, tid);
-	} else if (tid == TID_ALL_SPECIFIED) {
-		spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-		for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) {
-			tid_msk = 1 << tid;
-			lq->agg_ctrl.wait_for_agg_status |= tid_msk;
-			spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-			iwl4965_perform_delba(priv, tid);
-			spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-		}
-		lq->agg_ctrl.requested_ba = 0;
-		spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-	}
-}
-
-/**
- * iwl4965_ba_status - Update driver's link quality mgr with tid's HT status
- */
-static void iwl4965_ba_status(struct iwl4965_priv *priv,
-			      u8 tid, enum HT_STATUS status)
-{
-	struct iwl4965_lq_mngr *lq;
-	u32 tid_msk = (1 << tid);
-	unsigned long flags;
-
-	lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
-
-	if ((tid >= TID_MAX_LOAD_COUNT))
-		goto out;
-
-	spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-	switch (status) {
-	case BA_STATUS_ACTIVE:
-		if (!(lq->agg_ctrl.granted_ba & tid_msk))
-			lq->agg_ctrl.granted_ba |= tid_msk;
-		break;
-	default:
-		if ((lq->agg_ctrl.granted_ba & tid_msk))
-			lq->agg_ctrl.granted_ba &= ~tid_msk;
-		break;
-	}
-
-	lq->agg_ctrl.wait_for_agg_status &= ~tid_msk;
-	if (status != BA_STATUS_ACTIVE) {
-		if (lq->agg_ctrl.auto_agg) {
-			lq->agg_ctrl.tid_retry |= tid_msk;
-			lq->agg_ctrl.next_retry =
-				jiffies + msecs_to_jiffies(500);
-		} else
-			lq->agg_ctrl.requested_ba &= ~tid_msk;
-	}
-	spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
- out:
-	return;
-}
-
-static void iwl4965_bg_agg_work(struct work_struct *work)
-{
-	struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
-						 agg_work);
-
-	u32 tid;
-	u32 retry_tid;
-	u32 tid_msk;
-	unsigned long flags;
-	struct iwl4965_lq_mngr *lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
-
-	spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-	retry_tid = lq->agg_ctrl.tid_retry;
-	lq->agg_ctrl.tid_retry = 0;
-	spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-
-	if (retry_tid == TID_ALL_SPECIFIED)
-		iwl4965_turn_on_agg(priv, TID_ALL_SPECIFIED);
-	else {
-		for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) {
-			tid_msk = (1 << tid);
-			if (retry_tid & tid_msk)
-				iwl4965_turn_on_agg(priv, tid);
-		}
-	}
-
-	spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-	if (lq->agg_ctrl.tid_retry)
-		lq->agg_ctrl.next_retry = jiffies + msecs_to_jiffies(500);
-	spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-	return;
-}
-
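The handler above follows a common deferred-retry idiom: steal the entire pending bitmask under the lock, clear it, then process each bit with the lock dropped, since iwl4965_turn_on_agg() re-takes it internally. A user-space pthread sketch of the same pattern, with hypothetical names:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t retry_mask;	/* one bit per TID awaiting a retry */

static void retry_tid(unsigned int tid)
{
	printf("retrying ADDBA for tid %u\n", tid);
}

static void retry_work(void)
{
	uint32_t batch;
	unsigned int tid;

	/* Steal the whole batch under the lock, then process it unlocked:
	 * new requests queued while we run are kept for the next pass,
	 * and retry_tid() is free to sleep or take the lock itself. */
	pthread_mutex_lock(&lock);
	batch = retry_mask;
	retry_mask = 0;
	pthread_mutex_unlock(&lock);

	for (tid = 0; tid < 16; tid++)
		if (batch & (1u << tid))
			retry_tid(tid);
}

int main(void)
{
	retry_mask = (1u << 0) | (1u << 5);
	retry_work();
	return 0;
}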
-/* TODO: move this functionality to rate scaling */
-void iwl4965_tl_get_stats(struct iwl4965_priv *priv,
-			  struct ieee80211_hdr *hdr)
-{
-	__le16 *qc = ieee80211_get_qos_ctrl(hdr);
-
-	if (qc &&
-	    (priv->iw_mode != IEEE80211_IF_TYPE_IBSS)) {
-		u8 tid = 0;
-		tid = (u8) (le16_to_cpu(*qc) & 0xF);
-		if (tid < TID_MAX_LOAD_COUNT)
-			iwl4965_tl_add_packet(priv, tid);
-	}
-
-	if (priv->lq_mngr.agg_ctrl.next_retry &&
-	    (time_after(priv->lq_mngr.agg_ctrl.next_retry, jiffies))) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-		priv->lq_mngr.agg_ctrl.next_retry = 0;
-		spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-		schedule_work(&priv->agg_work);
-	}
-}
-
-#endif /* CONFIG_IWL4965_HT_AGG */
-#endif /* CONFIG_IWL4965_HT */
-
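The TID extraction in iwl4965_tl_get_stats() relies on the IEEE 802.11 QoS Control field layout, where the TID occupies the low four bits; the driver first converts from the little-endian on-air order with le16_to_cpu(). A minimal host-order sketch of the decode:

#include <stdint.h>
#include <stdio.h>

/* IEEE 802.11 QoS Control field: the TID lives in bits 0-3. */
static uint8_t qos_ctrl_tid(uint16_t qos_ctrl)
{
	return (uint8_t)(qos_ctrl & 0x000F);
}

int main(void)
{
	printf("%u\n", qos_ctrl_tid(0x0025));	/* prints 5 */
	return 0;
}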
 /**
  * sign_extend - Sign extend a value using specified bit as sign-bit
  *
@@ -4191,25 +3819,6 @@ static void iwl4965_rx_missed_beacon_notif(struct iwl4965_priv *priv,
 }
 
 #ifdef CONFIG_IWL4965_HT
-#ifdef CONFIG_IWL4965_HT_AGG
-
-/**
- * iwl4965_set_tx_status - Update driver's record of one Tx frame's status
- *
- * This will get sent to mac80211.
- */
-static void iwl4965_set_tx_status(struct iwl4965_priv *priv, int txq_id, int idx,
-				  u32 status, u32 retry_count, u32 rate)
-{
-	struct ieee80211_tx_status *tx_status =
-		&(priv->txq[txq_id].txb[idx].status);
-
-	tx_status->flags = status ? IEEE80211_TX_STATUS_ACK : 0;
-	tx_status->retry_count += retry_count;
-	tx_status->control.tx_rate = rate;
-}
-
-#endif /* CONFIG_IWL4965_HT_AGG */
 
 /**
  * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
@@ -4984,11 +4593,6 @@ void iwl4965_hw_setup_deferred_work(struct iwl4965_priv *priv)
 #ifdef CONFIG_IWL4965_SENSITIVITY
 	INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
 #endif
-#ifdef CONFIG_IWL4965_HT
-#ifdef CONFIG_IWL4965_HT_AGG
-	INIT_WORK(&priv->agg_work, iwl4965_bg_agg_work);
-#endif /* CONFIG_IWL4965_HT_AGG */
-#endif /* CONFIG_IWL4965_HT */
 	init_timer(&priv->statistics_periodic);
 	priv->statistics_periodic.data = (unsigned long)priv;
 	priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;