author		Ron Rindjunsky <ron.rindjunsky@intel.com>	2008-01-28 07:07:24 -0500
committer	John W. Linville <linville@tuxdriver.com>	2008-02-29 15:19:18 -0500
commit		fe01b477bbd23e69c3bdc6bce5be510ddad8297d (patch)
tree		353471f96adb7580b7940c3b7cf6df4f8f0044a2 /drivers
parent		8114fcf185c58b23dc9fcaf4944b59b4c1407b39 (diff)
iwlwifi: A-MPDU Tx conform flows to mac80211
This patch alters the current iwlwifi behavior to fit the flows introduced by
mac80211, mainly the queue handling and the start/stop callback flows.

Signed-off-by: Ron Rindjunsky <ron.rindjunsky@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
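For orientation, the sketch below models the per-TID aggregation state flow that this patch introduces (IWL_AGG_OFF, IWL_AGG_ON, IWL_EMPTYING_HW_QUEUE_ADDBA, IWL_EMPTYING_HW_QUEUE_DELBA, plus the tfds_in_queue counter). It is a minimal, self-contained user-space model only: the IWL_* state names and the deferred start/stop callbacks mirror the patch, while every type and helper name in the sketch is invented for illustration and is not driver or mac80211 code.

/*
 * Illustrative model of the per-TID A-MPDU Tx state flow added by this
 * patch.  Only the IWL_* state names and the tfds_in_queue idea come from
 * the patch; all types and helpers here are invented for the example.
 */
#include <stdio.h>

enum agg_state {
	AGG_OFF,		/* mirrors IWL_AGG_OFF */
	AGG_ON,			/* mirrors IWL_AGG_ON */
	AGG_EMPTYING_ADDBA,	/* mirrors IWL_EMPTYING_HW_QUEUE_ADDBA */
	AGG_EMPTYING_DELBA,	/* mirrors IWL_EMPTYING_HW_QUEUE_DELBA */
};

struct tid_model {
	enum agg_state state;
	int tfds_in_queue;	/* frames still pending on the HW queue */
};

/* mac80211 asked the driver to start aggregation for this TID. */
static void model_agg_start(struct tid_model *t)
{
	if (t->state != AGG_OFF)
		return;		/* the patch refuses with -ENXIO here */
	/* if frames are still queued, defer the "started" callback */
	t->state = t->tfds_in_queue ? AGG_EMPTYING_ADDBA : AGG_ON;
}

/* mac80211 asked the driver to stop aggregation for this TID. */
static void model_agg_stop(struct tid_model *t)
{
	/* a non-empty HW queue finishes the DELBA flow once it drains */
	t->state = t->tfds_in_queue ? AGG_EMPTYING_DELBA : AGG_OFF;
}

/* one frame was reclaimed from the HW queue (Tx status / block-ack path) */
static void model_frame_reclaimed(struct tid_model *t)
{
	if (t->tfds_in_queue > 0)
		t->tfds_in_queue--;
	if (t->tfds_in_queue)
		return;
	if (t->state == AGG_EMPTYING_ADDBA)
		t->state = AGG_ON;	/* ieee80211_start_tx_ba_cb_irqsafe() point */
	else if (t->state == AGG_EMPTYING_DELBA)
		t->state = AGG_OFF;	/* ieee80211_stop_tx_ba_cb_irqsafe() point */
}

int main(void)
{
	struct tid_model t = { AGG_OFF, 2 };

	model_agg_start(&t);		/* queue busy -> EMPTYING_ADDBA */
	model_frame_reclaimed(&t);
	model_frame_reclaimed(&t);	/* queue drained -> AGG_ON */
	printf("after start flow: state=%d\n", t.state);

	t.tfds_in_queue = 1;
	model_agg_stop(&t);		/* queue busy -> EMPTYING_DELBA */
	model_frame_reclaimed(&t);	/* queue drained -> AGG_OFF */
	printf("after stop flow:  state=%d\n", t.state);
	return 0;
}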
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-4965-commands.h	 24
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-4965.c			358
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-4965.h			 20
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl4965-base.c		121
4 files changed, 302 insertions(+), 221 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-commands.h b/drivers/net/wireless/iwlwifi/iwl-4965-commands.h
index f3470c896d9a..9edd8abcf570 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-commands.h
@@ -1300,6 +1300,25 @@ struct iwl4965_tx_resp {
 	__le32 status;	/* TX status (for aggregation status of 1st frame) */
 } __attribute__ ((packed));
 
+struct agg_tx_status {
+	__le16 status;
+	__le16 sequence;
+} __attribute__ ((packed));
+
+struct iwl4965_tx_resp_agg {
+	u8 frame_count;		/* 1 no aggregation, >1 aggregation */
+	u8 reserved1;
+	u8 failure_rts;
+	u8 failure_frame;
+	__le32 rate_n_flags;
+	__le16 wireless_media_time;
+	__le16 reserved3;
+	__le32 pa_power1;
+	__le32 pa_power2;
+	struct agg_tx_status status;	/* TX status (for aggregation status */
+					/* of 1st frame) */
+} __attribute__ ((packed));
+
 /*
  * REPLY_COMPRESSED_BA = 0xc5  (response only, not a command)
  *
@@ -1313,9 +1332,8 @@ struct iwl4965_compressed_ba_resp {
 	/* Index of recipient (BA-sending) station in uCode's station table */
 	u8 sta_id;
 	u8 tid;
-	__le16 ba_seq_ctl;
-	__le32 ba_bitmap0;
-	__le32 ba_bitmap1;
+	__le16 seq_ctl;
+	__le64 bitmap;
 	__le16 scd_flow;
 	__le16 scd_ssn;
 } __attribute__ ((packed));
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 3fc18dc311e9..b315a099d1a0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -79,6 +79,30 @@ const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT] = {
 	IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
 };
 
+#ifdef CONFIG_IWL4965_HT
+
+static const u16 default_tid_to_tx_fifo[] = {
+	IWL_TX_FIFO_AC1,
+	IWL_TX_FIFO_AC0,
+	IWL_TX_FIFO_AC0,
+	IWL_TX_FIFO_AC1,
+	IWL_TX_FIFO_AC2,
+	IWL_TX_FIFO_AC2,
+	IWL_TX_FIFO_AC3,
+	IWL_TX_FIFO_AC3,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_AC3
+};
+
+#endif /*CONFIG_IWL4965_HT */
+
 static int is_fat_channel(__le32 rxon_flags)
 {
 	return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
@@ -4185,6 +4209,7 @@ static void iwl4965_set_tx_status(struct iwl4965_priv *priv, int txq_id, int idx
 	tx_status->control.tx_rate = rate;
 }
 
+#endif/* CONFIG_IWL4965_HT_AGG */
 
 /**
  * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
@@ -4204,7 +4229,6 @@ static void iwl4965_sta_modify_enable_tid_tx(struct iwl4965_priv *priv,
 	iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
 }
 
-
 /**
  * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
  *
@@ -4218,10 +4242,11 @@ static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
 
 {
 	int i, sh, ack;
-	u16 ba_seq_ctl = le16_to_cpu(ba_resp->ba_seq_ctl);
-	u32 bitmap0, bitmap1;
-	u32 resp_bitmap0 = le32_to_cpu(ba_resp->ba_bitmap0);
-	u32 resp_bitmap1 = le32_to_cpu(ba_resp->ba_bitmap1);
+	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+	u64 bitmap;
+	int successes = 0;
+	struct ieee80211_tx_status *tx_status;
 
 	if (unlikely(!agg->wait_for_ba)) {
 		IWL_ERROR("Received BA when not expected\n");
@@ -4230,17 +4255,15 @@ static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
 
 	/* Mark that the expected block-ack response arrived */
 	agg->wait_for_ba = 0;
-	IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->ba_seq_ctl);
+	IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
 
 	/* Calculate shift to align block-ack bits with our Tx window bits */
-	sh = agg->start_idx - SEQ_TO_INDEX(ba_seq_ctl >> 4);
+	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4);
 	if (sh < 0) /* tbw something is wrong with indices */
 		sh += 0x100;
 
 	/* don't use 64-bit values for now */
-	bitmap0 = resp_bitmap0 >> sh;
-	bitmap1 = resp_bitmap1 >> sh;
-	bitmap0 |= (resp_bitmap1 & ((1 << sh) | ((1 << sh) - 1))) << (32 - sh);
+	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
 
 	if (agg->frame_count > (64 - sh)) {
 		IWL_DEBUG_TX_REPLY("more frames than bitmap size");
@@ -4249,23 +4272,106 @@ static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
 
 	/* check for success or failure according to the
 	 * transmitted bitmap and block-ack bitmap */
-	bitmap0 &= agg->bitmap0;
-	bitmap1 &= agg->bitmap1;
+	bitmap &= agg->bitmap;
 
 	/* For each frame attempted in aggregation,
 	 * update driver's record of tx frame's status. */
 	for (i = 0; i < agg->frame_count ; i++) {
-		int idx = (agg->start_idx + i) & 0xff;
-		ack = bitmap0 & (1 << i);
+		ack = bitmap & (1 << i);
+		successes += !!ack;
 		IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
-			ack? "ACK":"NACK", i, idx, agg->start_idx + i);
-		iwl4965_set_tx_status(priv, agg->txq_id, idx, ack, 0,
-				agg->rate_n_flags);
+			ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
+			agg->start_idx + i);
+	}
+
+	tx_status = &priv->txq[scd_flow].txb[agg->start_idx].status;
+	tx_status->flags = IEEE80211_TX_STATUS_ACK;
+	tx_status->retry_count++;
+#ifdef CONFIG_IWL4965_HT_AGG
+	tx_status->flags |= IEEE80211_TX_STATUS_AGG_STATS;
+	tx_status->successes = successes;
+	tx_status->frame_count = agg->frame_count;
+#endif /* CONFIG_IWL4965_HT_AGG */
+	tx_status->control.tx_rate = agg->rate_n_flags;
+
+	IWL_DEBUG_TX_REPLY("Bitmap %llx\n", bitmap);
+
+	return 0;
+}
+
+/**
+ * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
+ */
+static void iwl4965_tx_queue_stop_scheduler(struct iwl4965_priv *priv,
+					    u16 txq_id)
+{
+	/* Simply stop the queue, but don't change any configuration;
+	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+	iwl4965_write_prph(priv,
+		KDR_SCD_QUEUE_STATUS_BITS(txq_id),
+		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
+		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
 
+/**
+ * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID
+ */
+static int iwl4965_tx_queue_agg_disable(struct iwl4965_priv *priv, u16 txq_id,
+					u16 ssn_idx, u8 tx_fifo)
+{
+	if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
+		IWL_WARNING("queue number too small: %d, must be > %d\n",
+				txq_id, IWL_BACK_QUEUE_FIRST_ID);
+		return -EINVAL;
 	}
 
-	IWL_DEBUG_TX_REPLY("Bitmap %x%x\n", bitmap0, bitmap1);
+	iwl4965_tx_queue_stop_scheduler(priv, txq_id);
+
+	iwl4965_clear_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+	/* supposes that ssn_idx is valid (!= 0xFFF) */
+	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+	iwl4965_clear_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
+	iwl4965_txq_ctx_deactivate(priv, txq_id);
+	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
+
+	return 0;
+}
 
+int iwl4965_check_empty_hw_queue(struct iwl4965_priv *priv, int sta_id,
+					 u8 tid, int txq_id)
+{
+	struct iwl4965_queue *q = &priv->txq[txq_id].q;
+	u8 *addr = priv->stations[sta_id].sta.sta.addr;
+	struct iwl4965_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
+
+	switch (priv->stations[sta_id].tid[tid].agg.state) {
+	case IWL_EMPTYING_HW_QUEUE_DELBA:
+		/* We are reclaiming the last packet of the */
+		/* aggregated HW queue */
+		if (txq_id  == tid_data->agg.txq_id &&
+		    q->read_ptr == q->write_ptr) {
+			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+			int tx_fifo = default_tid_to_tx_fifo[tid];
+			IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
+			iwl4965_tx_queue_agg_disable(priv, txq_id,
+						     ssn, tx_fifo);
+			tid_data->agg.state = IWL_AGG_OFF;
+			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
+		}
+		break;
+	case IWL_EMPTYING_HW_QUEUE_ADDBA:
+		/* We are reclaiming the last packet of the queue */
+		if (tid_data->tfds_in_queue == 0) {
+			IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
+			tid_data->agg.state = IWL_AGG_ON;
+			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
+		}
+		break;
+	}
 	return 0;
 }
 
@@ -4293,48 +4399,43 @@ static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv,
 	int index;
 	struct iwl4965_tx_queue *txq = NULL;
 	struct iwl4965_ht_agg *agg;
+	DECLARE_MAC_BUF(mac);
 
 	/* "flow" corresponds to Tx queue */
-	u16 ba_resp_scd_flow = le16_to_cpu(ba_resp->scd_flow);
+	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
 
 	/* "ssn" is start of block-ack Tx window, corresponds to index
 	 * (in Tx queue's circular buffer) of first TFD/frame in window */
 	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
 
-	if (ba_resp_scd_flow >= ARRAY_SIZE(priv->txq)) {
+	if (scd_flow >= ARRAY_SIZE(priv->txq)) {
 		IWL_ERROR("BUG_ON scd_flow is bigger than number of queues");
 		return;
 	}
 
-	txq = &priv->txq[ba_resp_scd_flow];
+	txq = &priv->txq[scd_flow];
 	agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
 
 	/* Find index just before block-ack window */
 	index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
 
 	/* TODO: Need to get this copy more safely - now good for debug */
-/*
-	{
-	DECLARE_MAC_BUF(mac);
+
 	IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, "
 			   "sta_id = %d\n",
 			   agg->wait_for_ba,
 			   print_mac(mac, (u8*) &ba_resp->sta_addr_lo32),
 			   ba_resp->sta_id);
-	IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%X%X, scd_flow = "
+	IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
 			   "%d, scd_ssn = %d\n",
 			   ba_resp->tid,
-			   ba_resp->ba_seq_ctl,
-			   ba_resp->ba_bitmap1,
-			   ba_resp->ba_bitmap0,
+			   ba_resp->seq_ctl,
+			   ba_resp->bitmap,
 			   ba_resp->scd_flow,
 			   ba_resp->scd_ssn);
-	IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%X%X \n",
+	IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n",
 			   agg->start_idx,
-			   agg->bitmap1,
-			   agg->bitmap0);
-	}
-*/
+			   agg->bitmap);
 
 	/* Update driver's record of ACK vs. not for each frame in window */
 	iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
@@ -4342,23 +4443,17 @@ static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv,
 	/* Release all TFDs before the SSN, i.e. all TFDs in front of
 	 * block-ack window (we assume that they've been successfully
 	 * transmitted ... if not, it's too late anyway). */
-	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff))
-		iwl4965_tx_queue_reclaim(priv, ba_resp_scd_flow, index);
-
-}
-
-
-/**
- * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
- */
-static void iwl4965_tx_queue_stop_scheduler(struct iwl4965_priv *priv, u16 txq_id)
-{
-	/* Simply stop the queue, but don't change any configuration;
-	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
-	iwl4965_write_prph(priv,
-		KDR_SCD_QUEUE_STATUS_BITS(txq_id),
-		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
-		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+		int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
+		priv->stations[ba_resp->sta_id].
+			tid[ba_resp->tid].tfds_in_queue -= freed;
+		if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
+		    priv->mac80211_registered &&
+		    agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
+			ieee80211_wake_queue(priv->hw, scd_flow);
+		iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id,
+					     ba_resp->tid, scd_flow);
+	}
 }
 
 /**
@@ -4388,6 +4483,7 @@ static int iwl4965_tx_queue_set_q2ratid(struct iwl4965_priv *priv, u16 ra_tid,
 	return 0;
 }
 
+
 /**
  * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
  *
@@ -4455,48 +4551,6 @@ static int iwl4965_tx_queue_agg_enable(struct iwl4965_priv *priv, int txq_id,
 	return 0;
 }
 
-/**
- * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID
- */
-static int iwl4965_tx_queue_agg_disable(struct iwl4965_priv *priv, u16 txq_id,
-	u16 ssn_idx, u8 tx_fifo)
-{
-	unsigned long flags;
-	int rc;
-
-	if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
-		IWL_WARNING("queue number too small: %d, must be > %d\n",
-			txq_id, IWL_BACK_QUEUE_FIRST_ID);
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&priv->lock, flags);
-	rc = iwl4965_grab_nic_access(priv);
-	if (rc) {
-		spin_unlock_irqrestore(&priv->lock, flags);
-		return rc;
-	}
-
-	iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
-	iwl4965_clear_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
-	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
-	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
-	/* supposes that ssn_idx is valid (!= 0xFFF) */
-	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
-	iwl4965_clear_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
-	iwl4965_txq_ctx_deactivate(priv, txq_id);
-	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
-	iwl4965_release_nic_access(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	return 0;
-}
-
-#endif/* CONFIG_IWL4965_HT_AGG */
 #endif /* CONFIG_IWL4965_HT */
 
 /**
@@ -4730,28 +4784,6 @@ static void iwl4965_sta_modify_del_ba_tid(struct iwl4965_priv *priv,
 	iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
 }
 
-#ifdef CONFIG_IWL4965_HT_AGG
-
-static const u16 default_tid_to_tx_fifo[] = {
-	IWL_TX_FIFO_AC1,
-	IWL_TX_FIFO_AC0,
-	IWL_TX_FIFO_AC0,
-	IWL_TX_FIFO_AC1,
-	IWL_TX_FIFO_AC2,
-	IWL_TX_FIFO_AC2,
-	IWL_TX_FIFO_AC3,
-	IWL_TX_FIFO_AC3,
-	IWL_TX_FIFO_NONE,
-	IWL_TX_FIFO_NONE,
-	IWL_TX_FIFO_NONE,
-	IWL_TX_FIFO_NONE,
-	IWL_TX_FIFO_NONE,
-	IWL_TX_FIFO_NONE,
-	IWL_TX_FIFO_NONE,
-	IWL_TX_FIFO_NONE,
-	IWL_TX_FIFO_AC3
-};
-
 /*
  * Find first available (lowest unused) Tx Queue, mark it "active".
  * Called only when finding queue for aggregation.
@@ -4768,69 +4800,78 @@ static int iwl4965_txq_ctx_activate_free(struct iwl4965_priv *priv)
 	return -1;
 }
 
-int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, u8 *da, u16 tid,
-				u16 *start_seq_num)
+static int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, const u8 *da,
+				       u16 tid, u16 *start_seq_num)
 {
-
 	struct iwl4965_priv *priv = hw->priv;
 	int sta_id;
 	int tx_fifo;
 	int txq_id;
 	int ssn = -1;
+	int rc = 0;
 	unsigned long flags;
 	struct iwl4965_tid_data *tid_data;
 	DECLARE_MAC_BUF(mac);
 
-	/* Determine Tx DMA/FIFO channel for this Traffic ID */
 	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
 		tx_fifo = default_tid_to_tx_fifo[tid];
 	else
 		return -EINVAL;
 
-	IWL_WARNING("iwl-AGG iwl4965_mac_ht_tx_agg_start on da=%s"
-			" tid=%d\n", print_mac(mac, da), tid);
+	IWL_WARNING("%s on da = %s tid = %d\n",
+			__func__, print_mac(mac, da), tid);
 
-	/* Get index into station table */
 	sta_id = iwl4965_hw_find_station(priv, da);
 	if (sta_id == IWL_INVALID_STATION)
 		return -ENXIO;
 
-	/* Find available Tx queue for aggregation */
+	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
+		IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
+		return -ENXIO;
+	}
+
 	txq_id = iwl4965_txq_ctx_activate_free(priv);
 	if (txq_id == -1)
 		return -ENXIO;
 
 	spin_lock_irqsave(&priv->sta_lock, flags);
 	tid_data = &priv->stations[sta_id].tid[tid];
-
-	/* Get starting sequence number for 1st frame in block ack window.
-	 * We'll use least signif byte as 1st frame's index into Tx queue. */
 	ssn = SEQ_TO_SN(tid_data->seq_number);
 	tid_data->agg.txq_id = txq_id;
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
 	*start_seq_num = ssn;
-
-	/* Update driver's link quality manager */
-	iwl4965_ba_status(priv, tid, BA_STATUS_ACTIVE);
-
-	/* Set up and enable aggregation for selected Tx queue and FIFO */
-	return iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
+	rc = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
 						sta_id, tid, ssn);
-}
+	if (rc)
+		return rc;
 
+	rc = 0;
+	if (tid_data->tfds_in_queue == 0) {
+		printk(KERN_ERR "HW queue is empty\n");
+		tid_data->agg.state = IWL_AGG_ON;
+		ieee80211_start_tx_ba_cb_irqsafe(hw, da, tid);
+	} else {
+		IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
+				tid_data->tfds_in_queue);
+		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+	}
+	return rc;
+}
 
-int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, u8 *da, u16 tid)
+static int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, const u8 *da,
+				      u16 tid)
 {
 
 	struct iwl4965_priv *priv = hw->priv;
 	int tx_fifo_id, txq_id, sta_id, ssn = -1;
 	struct iwl4965_tid_data *tid_data;
-	int rc;
+	int rc, write_ptr, read_ptr;
+	unsigned long flags;
 	DECLARE_MAC_BUF(mac);
 
 	if (!da) {
-		IWL_ERROR("%s: da = NULL\n", __func__);
+		IWL_ERROR("da = NULL\n");
 		return -EINVAL;
 	}
 
@@ -4844,33 +4885,44 @@ int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, u8 *da, u16 tid)
 	if (sta_id == IWL_INVALID_STATION)
 		return -ENXIO;
 
+	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
+		IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
+
 	tid_data = &priv->stations[sta_id].tid[tid];
 	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
 	txq_id = tid_data->agg.txq_id;
+	write_ptr = priv->txq[txq_id].q.write_ptr;
+	read_ptr = priv->txq[txq_id].q.read_ptr;
+
+	/* The queue is not empty */
+	if (write_ptr != read_ptr) {
+		IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
+		priv->stations[sta_id].tid[tid].agg.state =
+				IWL_EMPTYING_HW_QUEUE_DELBA;
+		return 0;
+	}
+
+	IWL_DEBUG_HT("HW queue empty\n");;
+	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
 
+	spin_lock_irqsave(&priv->lock, flags);
+	rc = iwl4965_grab_nic_access(priv);
+	if (rc) {
+		spin_unlock_irqrestore(&priv->lock, flags);
+		return rc;
+	}
 	rc = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id);
-	/* FIXME: need more safe way to handle error condition */
+	iwl4965_release_nic_access(priv);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	if (rc)
 		return rc;
 
-	iwl4965_ba_status(priv, tid, BA_STATUS_INITIATOR_DELBA);
-	IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n",
-			print_mac(mac, da), tid);
+	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, da, tid);
 
-	return 0;
-}
-
-
-#endif /* CONFIG_IWL4965_HT_AGG */
-
-int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, const u8 *da, u16 tid,
-			     u16 *start_seq_num)
-{
-	return 0;
-}
+	IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n",
+			print_mac(mac, da), tid);
 
-int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, const u8 *da, u16 tid)
-{
 	return 0;
 }
 
@@ -4924,9 +4976,7 @@ void iwl4965_hw_rx_handler_setup(struct iwl4965_priv *priv)
 		iwl4965_rx_missed_beacon_notif;
 
 #ifdef CONFIG_IWL4965_HT
-#ifdef CONFIG_IWL4965_HT_AGG
 	priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
-#endif /* CONFIG_IWL4965_HT_AGG */
 #endif /* CONFIG_IWL4965_HT */
 }
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.h b/drivers/net/wireless/iwlwifi/iwl-4965.h
index 4992b8a06367..47c7f3ffe369 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.h
@@ -433,7 +433,6 @@ struct iwl4965_rx_queue {
 #define IWL_INVALID_VALUE    -1
 
 #ifdef CONFIG_IWL4965_HT
-#ifdef CONFIG_IWL4965_HT_AGG
 /**
  * struct iwl4965_ht_agg -- aggregation status while waiting for block-ack
  * @txq_id: Tx queue used for Tx attempt
@@ -453,19 +452,22 @@ struct iwl4965_ht_agg {
 	u16 frame_count;
 	u16 wait_for_ba;
 	u16 start_idx;
-	u32 bitmap0;
-	u32 bitmap1;
+	u64 bitmap;
 	u32 rate_n_flags;
+#define IWL_AGG_OFF 0
+#define IWL_AGG_ON 1
+#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
+#define IWL_EMPTYING_HW_QUEUE_DELBA 3
+	u8 state;
 };
-#endif /* CONFIG_IWL4965_HT_AGG */
+
 #endif /* CONFIG_IWL4965_HT */
 
 struct iwl4965_tid_data {
 	u16 seq_number;
+	u16 tfds_in_queue;
 #ifdef CONFIG_IWL4965_HT
-#ifdef CONFIG_IWL4965_HT_AGG
 	struct iwl4965_ht_agg agg;
-#endif /* CONFIG_IWL4965_HT_AGG */
 #endif /* CONFIG_IWL4965_HT */
 };
 
@@ -743,7 +745,7 @@ extern u8 iwl4965_hw_find_station(struct iwl4965_priv *priv, const u8 *bssid);
 
 extern int iwl4965_hw_channel_switch(struct iwl4965_priv *priv, u16 channel);
 extern int iwl4965_tx_queue_reclaim(struct iwl4965_priv *priv, int txq_id, int index);
-
+extern int iwl4965_queue_space(const struct iwl4965_queue *q);
 struct iwl4965_priv;
 
 /*
@@ -778,6 +780,8 @@ extern void iwl4965_set_ht_add_station(struct iwl4965_priv *priv, u8 index,
 extern int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
 				    enum ieee80211_ampdu_mlme_action action,
 				    const u8 *addr, u16 tid, u16 *ssn);
+extern int iwl4965_check_empty_hw_queue(struct iwl4965_priv *priv, int sta_id,
+					u8 tid, int txq_id);
 #ifdef CONFIG_IWL4965_HT_AGG
 extern void iwl4965_turn_off_agg(struct iwl4965_priv *priv, u8 tid);
 extern void iwl4965_tl_get_stats(struct iwl4965_priv *priv,
@@ -855,7 +859,7 @@ struct iwl4965_agg_control {
 	u32 ba_timeout;
 	struct iwl4965_traffic_load traffic_load[TID_MAX_LOAD_COUNT];
 };
-#endif /*CONFIG_IWL4965_HT_AGG */
+#endif /*CONFIG_IWL4965_HT_AGG */
 
 struct iwl4965_lq_mngr {
 #ifdef CONFIG_IWL4965_HT_AGG
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c
index 3f5114f5fe16..16cb990e06fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl4965-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c
@@ -205,7 +205,7 @@ static void iwl4965_print_hex_dump(int level, void *p, u32 len)
  * See more detailed info in iwl-4965-hw.h.
  ***************************************************/
 
-static int iwl4965_queue_space(const struct iwl4965_queue *q)
+int iwl4965_queue_space(const struct iwl4965_queue *q)
 {
 	int s = q->read_ptr - q->write_ptr;
 
@@ -2972,11 +2972,10 @@ static int iwl4965_tx_skb(struct iwl4965_priv *priv,
 				__constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
 		seq_number += 0x10;
 #ifdef CONFIG_IWL4965_HT
-#ifdef CONFIG_IWL4965_HT_AGG
 		/* aggregation is on for this <sta,tid> */
-		if (ctl->flags & IEEE80211_TXCTL_HT_MPDU_AGG)
+		if (ctl->flags & IEEE80211_TXCTL_AMPDU)
 			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
-#endif /* CONFIG_IWL4965_HT_AGG */
+		priv->stations[sta_id].tid[tid].tfds_in_queue++;
 #endif /* CONFIG_IWL4965_HT */
 	}
 
@@ -3528,10 +3527,10 @@ int iwl4965_tx_queue_reclaim(struct iwl4965_priv *priv, int txq_id, int index)
 		nfreed++;
 	}
 
-	if (iwl4965_queue_space(q) > q->low_mark && (txq_id >= 0) &&
+/*	if (iwl4965_queue_space(q) > q->low_mark && (txq_id >= 0) &&
 	    (txq_id != IWL_CMD_QUEUE_NUM) &&
 	    priv->mac80211_registered)
-		ieee80211_wake_queue(priv->hw, txq_id);
+		ieee80211_wake_queue(priv->hw, txq_id); */
 
 
 	return nfreed;
@@ -3550,7 +3549,6 @@ static int iwl4965_is_tx_success(u32 status)
  *
  ******************************************************************************/
 #ifdef CONFIG_IWL4965_HT
-#ifdef CONFIG_IWL4965_HT_AGG
 
 static inline int iwl4965_get_ra_sta_id(struct iwl4965_priv *priv,
 					struct ieee80211_hdr *hdr)
@@ -3585,11 +3583,11 @@ static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
  */
 static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv,
 				      struct iwl4965_ht_agg *agg,
-				      struct iwl4965_tx_resp *tx_resp,
+				      struct iwl4965_tx_resp_agg *tx_resp,
 				      u16 start_idx)
 {
-	u32 status;
-	__le32 *frame_status = &tx_resp->status;
+	u16 status;
+	struct agg_tx_status *frame_status = &tx_resp->status;
 	struct ieee80211_tx_status *tx_status = NULL;
 	struct ieee80211_hdr *hdr = NULL;
 	int i, sh;
@@ -3602,26 +3600,25 @@ static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv,
 	agg->frame_count = tx_resp->frame_count;
 	agg->start_idx = start_idx;
 	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
-	agg->bitmap0 = agg->bitmap1 = 0;
+	agg->bitmap = 0;
 
 	/* # frames attempted by Tx command */
 	if (agg->frame_count == 1) {
 		/* Only one frame was attempted; no block-ack will arrive */
-		struct iwl4965_tx_queue *txq ;
-		status = le32_to_cpu(frame_status[0]);
+		status = le16_to_cpu(frame_status[0].status);
+		seq  = le16_to_cpu(frame_status[0].sequence);
+		idx = SEQ_TO_INDEX(seq);
+		txq_id = SEQ_TO_QUEUE(seq);
 
-		txq_id = agg->txq_id;
-		txq = &priv->txq[txq_id];
 		/* FIXME: code repetition */
-		IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d \n",
-				   agg->frame_count, agg->start_idx);
+		IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
+				   agg->frame_count, agg->start_idx, idx);
 
-		tx_status = &(priv->txq[txq_id].txb[txq->q.read_ptr].status);
+		tx_status = &(priv->txq[txq_id].txb[idx].status);
 		tx_status->retry_count = tx_resp->failure_frame;
 		tx_status->queue_number = status & 0xff;
-		tx_status->queue_length = tx_resp->bt_kill_count;
-		tx_status->queue_length |= tx_resp->failure_rts;
-
+		tx_status->queue_length = tx_resp->failure_rts;
+		tx_status->control.flags &= ~IEEE80211_TXCTL_AMPDU;
 		tx_status->flags = iwl4965_is_tx_success(status)?
 			IEEE80211_TX_STATUS_ACK : 0;
 		tx_status->control.tx_rate =
@@ -3642,8 +3639,8 @@ static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv,
 		/* Construct bit-map of pending frames within Tx window */
 		for (i = 0; i < agg->frame_count; i++) {
 			u16 sc;
-			status = le32_to_cpu(frame_status[i]);
-			seq  = status >> 16;
+			status = le16_to_cpu(frame_status[i].status);
+			seq  = le16_to_cpu(frame_status[i].sequence);
 			idx = SEQ_TO_INDEX(seq);
 			txq_id = SEQ_TO_QUEUE(seq);
 
@@ -3687,13 +3684,12 @@ static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv,
 				   start, (u32)(bitmap & 0xFFFFFFFF));
 	}
 
-	agg->bitmap0 = bitmap & 0xFFFFFFFF;
-	agg->bitmap1 = bitmap >> 32;
+	agg->bitmap = bitmap;
 	agg->start_idx = start;
 	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
-	IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%x\n",
+	IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
 			   agg->frame_count, agg->start_idx,
-			   agg->bitmap0);
+			   agg->bitmap);
 
 	if (bitmap)
 		agg->wait_for_ba = 1;
@@ -3701,7 +3697,6 @@ static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv,
 	return 0;
 }
 #endif
-#endif
 
 /**
  * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
@@ -3718,9 +3713,9 @@ static void iwl4965_rx_reply_tx(struct iwl4965_priv *priv,
 	struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
 	u32  status = le32_to_cpu(tx_resp->status);
 #ifdef CONFIG_IWL4965_HT
-#ifdef CONFIG_IWL4965_HT_AGG
-	int tid, sta_id;
-#endif
+	int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION;
+	struct ieee80211_hdr *hdr;
+	__le16 *qc;
 #endif
 
 	if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
@@ -3732,44 +3727,51 @@ static void iwl4965_rx_reply_tx(struct iwl4965_priv *priv,
 	}
 
 #ifdef CONFIG_IWL4965_HT
-#ifdef CONFIG_IWL4965_HT_AGG
+	hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, index);
+	qc = ieee80211_get_qos_ctrl(hdr);
+
+	if (qc)
+		tid = le16_to_cpu(*qc) & 0xf;
+
+	sta_id = iwl4965_get_ra_sta_id(priv, hdr);
+	if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
+		IWL_ERROR("Station not known\n");
+		return;
+	}
+
 	if (txq->sched_retry) {
 		const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
-		struct ieee80211_hdr *hdr =
-			iwl4965_tx_queue_get_hdr(priv, txq_id, index);
 		struct iwl4965_ht_agg *agg = NULL;
-		__le16 *qc = ieee80211_get_qos_ctrl(hdr);
 
-		if (qc == NULL) {
-			IWL_ERROR("BUG_ON qc is null!!!!\n");
+		if (!qc)
 			return;
-		}
-
-		tid = le16_to_cpu(*qc) & 0xf;
-
-		sta_id = iwl4965_get_ra_sta_id(priv, hdr);
-		if (unlikely(sta_id == IWL_INVALID_STATION)) {
-			IWL_ERROR("Station not known for\n");
-			return;
-		}
 
 		agg = &priv->stations[sta_id].tid[tid].agg;
 
-		iwl4965_tx_status_reply_tx(priv, agg, tx_resp, index);
+		iwl4965_tx_status_reply_tx(priv, agg,
+				(struct iwl4965_tx_resp_agg *)tx_resp, index);
 
 		if ((tx_resp->frame_count == 1) &&
 		    !iwl4965_is_tx_success(status)) {
 			/* TODO: send BAR */
 		}
 
-		if ((txq->q.read_ptr != (scd_ssn & 0xff))) {
+		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
+			int freed;
 			index = iwl4965_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
 			IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
 					   "%d index %d\n", scd_ssn , index);
-			iwl4965_tx_queue_reclaim(priv, txq_id, index);
+			freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
+			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+
+			if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
+			    txq_id >= 0 && priv->mac80211_registered &&
+			    agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
+				ieee80211_wake_queue(priv->hw, txq_id);
+
+			iwl4965_check_empty_hw_queue(priv, sta_id, tid, txq_id);
 		}
 	} else {
-#endif /* CONFIG_IWL4965_HT_AGG */
 #endif /* CONFIG_IWL4965_HT */
 	tx_status = &(txq->txb[txq->q.read_ptr].status);
 
@@ -3790,12 +3792,21 @@ static void iwl4965_rx_reply_tx(struct iwl4965_priv *priv,
 			  tx_resp->failure_frame);
 
 	IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
-	if (index != -1)
-		iwl4965_tx_queue_reclaim(priv, txq_id, index);
+	if (index != -1) {
+		int freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
+#ifdef CONFIG_IWL4965_HT
+		if (tid != MAX_TID_COUNT)
+			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+		if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
+		    (txq_id >= 0) &&
+		    priv->mac80211_registered)
+			ieee80211_wake_queue(priv->hw, txq_id);
+		if (tid != MAX_TID_COUNT)
+			iwl4965_check_empty_hw_queue(priv, sta_id, tid, txq_id);
+#endif
+	}
 #ifdef CONFIG_IWL4965_HT
-#ifdef CONFIG_IWL4965_HT_AGG
 	}
-#endif /* CONFIG_IWL4965_HT_AGG */
 #endif /* CONFIG_IWL4965_HT */
 
 	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
@@ -9089,10 +9100,8 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
 	/* Default value; 4 EDCA QOS priorities */
 	hw->queues = 4;
 #ifdef CONFIG_IWL4965_HT
-#ifdef CONFIG_IWL4965_HT_AGG
 	/* Enhanced value; more queues, to support 11n aggregation */
 	hw->queues = 16;
-#endif /* CONFIG_IWL4965_HT_AGG */
 #endif /* CONFIG_IWL4965_HT */
 
 	spin_lock_init(&priv->lock);