aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/iwlwifi/iwl-4965.c
diff options
context:
space:
mode:
authorRon Rindjunsky <ron.rindjunsky@intel.com>2008-01-28 07:07:24 -0500
committerJohn W. Linville <linville@tuxdriver.com>2008-02-29 15:19:18 -0500
commitfe01b477bbd23e69c3bdc6bce5be510ddad8297d (patch)
tree353471f96adb7580b7940c3b7cf6df4f8f0044a2 /drivers/net/wireless/iwlwifi/iwl-4965.c
parent8114fcf185c58b23dc9fcaf4944b59b4c1407b39 (diff)
iwlwifi: A-MPDU Tx conform flows to mac80211
This patch alters the current iwlwifi behavior to fit the flows introduced by mac80211, mainly queue handling and start/stop callback flows. Signed-off-by: Ron Rindjunsky <ron.rindjunsky@intel.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-4965.c')
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c358
1 file changed, 204 insertions, 154 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 3fc18dc311e9..b315a099d1a0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -79,6 +79,30 @@ const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT] = {
79 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */ 79 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
80}; 80};
81 81
82#ifdef CONFIG_IWL4965_HT
83
84static const u16 default_tid_to_tx_fifo[] = {
85 IWL_TX_FIFO_AC1,
86 IWL_TX_FIFO_AC0,
87 IWL_TX_FIFO_AC0,
88 IWL_TX_FIFO_AC1,
89 IWL_TX_FIFO_AC2,
90 IWL_TX_FIFO_AC2,
91 IWL_TX_FIFO_AC3,
92 IWL_TX_FIFO_AC3,
93 IWL_TX_FIFO_NONE,
94 IWL_TX_FIFO_NONE,
95 IWL_TX_FIFO_NONE,
96 IWL_TX_FIFO_NONE,
97 IWL_TX_FIFO_NONE,
98 IWL_TX_FIFO_NONE,
99 IWL_TX_FIFO_NONE,
100 IWL_TX_FIFO_NONE,
101 IWL_TX_FIFO_AC3
102};
103
104#endif /*CONFIG_IWL4965_HT */
105
82static int is_fat_channel(__le32 rxon_flags) 106static int is_fat_channel(__le32 rxon_flags)
83{ 107{
84 return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) || 108 return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
@@ -4185,6 +4209,7 @@ static void iwl4965_set_tx_status(struct iwl4965_priv *priv, int txq_id, int idx
4185 tx_status->control.tx_rate = rate; 4209 tx_status->control.tx_rate = rate;
4186} 4210}
4187 4211
4212#endif/* CONFIG_IWL4965_HT_AGG */
4188 4213
4189/** 4214/**
4190 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table 4215 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
@@ -4204,7 +4229,6 @@ static void iwl4965_sta_modify_enable_tid_tx(struct iwl4965_priv *priv,
4204 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 4229 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4205} 4230}
4206 4231
4207
4208/** 4232/**
4209 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack 4233 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
4210 * 4234 *
@@ -4218,10 +4242,11 @@ static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
4218 4242
4219{ 4243{
4220 int i, sh, ack; 4244 int i, sh, ack;
4221 u16 ba_seq_ctl = le16_to_cpu(ba_resp->ba_seq_ctl); 4245 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
4222 u32 bitmap0, bitmap1; 4246 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4223 u32 resp_bitmap0 = le32_to_cpu(ba_resp->ba_bitmap0); 4247 u64 bitmap;
4224 u32 resp_bitmap1 = le32_to_cpu(ba_resp->ba_bitmap1); 4248 int successes = 0;
4249 struct ieee80211_tx_status *tx_status;
4225 4250
4226 if (unlikely(!agg->wait_for_ba)) { 4251 if (unlikely(!agg->wait_for_ba)) {
4227 IWL_ERROR("Received BA when not expected\n"); 4252 IWL_ERROR("Received BA when not expected\n");
@@ -4230,17 +4255,15 @@ static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
4230 4255
4231 /* Mark that the expected block-ack response arrived */ 4256 /* Mark that the expected block-ack response arrived */
4232 agg->wait_for_ba = 0; 4257 agg->wait_for_ba = 0;
4233 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->ba_seq_ctl); 4258 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
4234 4259
4235 /* Calculate shift to align block-ack bits with our Tx window bits */ 4260 /* Calculate shift to align block-ack bits with our Tx window bits */
4236 sh = agg->start_idx - SEQ_TO_INDEX(ba_seq_ctl >> 4); 4261 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4);
4237 if (sh < 0) /* tbw something is wrong with indices */ 4262 if (sh < 0) /* tbw something is wrong with indices */
4238 sh += 0x100; 4263 sh += 0x100;
4239 4264
4240 /* don't use 64-bit values for now */ 4265 /* don't use 64-bit values for now */
4241 bitmap0 = resp_bitmap0 >> sh; 4266 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
4242 bitmap1 = resp_bitmap1 >> sh;
4243 bitmap0 |= (resp_bitmap1 & ((1 << sh) | ((1 << sh) - 1))) << (32 - sh);
4244 4267
4245 if (agg->frame_count > (64 - sh)) { 4268 if (agg->frame_count > (64 - sh)) {
4246 IWL_DEBUG_TX_REPLY("more frames than bitmap size"); 4269 IWL_DEBUG_TX_REPLY("more frames than bitmap size");
@@ -4249,23 +4272,106 @@ static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
4249 4272
4250 /* check for success or failure according to the 4273 /* check for success or failure according to the
4251 * transmitted bitmap and block-ack bitmap */ 4274 * transmitted bitmap and block-ack bitmap */
4252 bitmap0 &= agg->bitmap0; 4275 bitmap &= agg->bitmap;
4253 bitmap1 &= agg->bitmap1;
4254 4276
4255 /* For each frame attempted in aggregation, 4277 /* For each frame attempted in aggregation,
4256 * update driver's record of tx frame's status. */ 4278 * update driver's record of tx frame's status. */
4257 for (i = 0; i < agg->frame_count ; i++) { 4279 for (i = 0; i < agg->frame_count ; i++) {
4258 int idx = (agg->start_idx + i) & 0xff; 4280 ack = bitmap & (1 << i);
4259 ack = bitmap0 & (1 << i); 4281 successes += !!ack;
4260 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", 4282 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
4261 ack? "ACK":"NACK", i, idx, agg->start_idx + i); 4283 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
4262 iwl4965_set_tx_status(priv, agg->txq_id, idx, ack, 0, 4284 agg->start_idx + i);
4263 agg->rate_n_flags); 4285 }
4286
4287 tx_status = &priv->txq[scd_flow].txb[agg->start_idx].status;
4288 tx_status->flags = IEEE80211_TX_STATUS_ACK;
4289 tx_status->retry_count++;
4290#ifdef CONFIG_IWL4965_HT_AGG
4291 tx_status->flags |= IEEE80211_TX_STATUS_AGG_STATS;
4292 tx_status->successes = successes;
4293 tx_status->frame_count = agg->frame_count;
4294#endif /* CONFIG_IWL4965_HT_AGG */
4295 tx_status->control.tx_rate = agg->rate_n_flags;
4296
4297 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", bitmap);
4298
4299 return 0;
4300}
4301
4302/**
4303 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
4304 */
4305static void iwl4965_tx_queue_stop_scheduler(struct iwl4965_priv *priv,
4306 u16 txq_id)
4307{
4308 /* Simply stop the queue, but don't change any configuration;
4309 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
4310 iwl4965_write_prph(priv,
4311 KDR_SCD_QUEUE_STATUS_BITS(txq_id),
4312 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4313 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4314}
4264 4315
4316/**
4317 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID
4318 */
4319static int iwl4965_tx_queue_agg_disable(struct iwl4965_priv *priv, u16 txq_id,
4320 u16 ssn_idx, u8 tx_fifo)
4321{
4322 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
4323 IWL_WARNING("queue number too small: %d, must be > %d\n",
4324 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4325 return -EINVAL;
4265 } 4326 }
4266 4327
4267 IWL_DEBUG_TX_REPLY("Bitmap %x%x\n", bitmap0, bitmap1); 4328 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4329
4330 iwl4965_clear_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
4331
4332 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4333 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
4334 /* supposes that ssn_idx is valid (!= 0xFFF) */
4335 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4336
4337 iwl4965_clear_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
4338 iwl4965_txq_ctx_deactivate(priv, txq_id);
4339 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
4340
4341 return 0;
4342}
4268 4343
4344int iwl4965_check_empty_hw_queue(struct iwl4965_priv *priv, int sta_id,
4345 u8 tid, int txq_id)
4346{
4347 struct iwl4965_queue *q = &priv->txq[txq_id].q;
4348 u8 *addr = priv->stations[sta_id].sta.sta.addr;
4349 struct iwl4965_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
4350
4351 switch (priv->stations[sta_id].tid[tid].agg.state) {
4352 case IWL_EMPTYING_HW_QUEUE_DELBA:
4353 /* We are reclaiming the last packet of the */
4354 /* aggregated HW queue */
4355 if (txq_id == tid_data->agg.txq_id &&
4356 q->read_ptr == q->write_ptr) {
4357 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
4358 int tx_fifo = default_tid_to_tx_fifo[tid];
4359 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
4360 iwl4965_tx_queue_agg_disable(priv, txq_id,
4361 ssn, tx_fifo);
4362 tid_data->agg.state = IWL_AGG_OFF;
4363 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4364 }
4365 break;
4366 case IWL_EMPTYING_HW_QUEUE_ADDBA:
4367 /* We are reclaiming the last packet of the queue */
4368 if (tid_data->tfds_in_queue == 0) {
4369 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
4370 tid_data->agg.state = IWL_AGG_ON;
4371 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4372 }
4373 break;
4374 }
4269 return 0; 4375 return 0;
4270} 4376}
4271 4377
@@ -4293,48 +4399,43 @@ static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv,
4293 int index; 4399 int index;
4294 struct iwl4965_tx_queue *txq = NULL; 4400 struct iwl4965_tx_queue *txq = NULL;
4295 struct iwl4965_ht_agg *agg; 4401 struct iwl4965_ht_agg *agg;
4402 DECLARE_MAC_BUF(mac);
4296 4403
4297 /* "flow" corresponds to Tx queue */ 4404 /* "flow" corresponds to Tx queue */
4298 u16 ba_resp_scd_flow = le16_to_cpu(ba_resp->scd_flow); 4405 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4299 4406
4300 /* "ssn" is start of block-ack Tx window, corresponds to index 4407 /* "ssn" is start of block-ack Tx window, corresponds to index
4301 * (in Tx queue's circular buffer) of first TFD/frame in window */ 4408 * (in Tx queue's circular buffer) of first TFD/frame in window */
4302 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); 4409 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
4303 4410
4304 if (ba_resp_scd_flow >= ARRAY_SIZE(priv->txq)) { 4411 if (scd_flow >= ARRAY_SIZE(priv->txq)) {
4305 IWL_ERROR("BUG_ON scd_flow is bigger than number of queues"); 4412 IWL_ERROR("BUG_ON scd_flow is bigger than number of queues");
4306 return; 4413 return;
4307 } 4414 }
4308 4415
4309 txq = &priv->txq[ba_resp_scd_flow]; 4416 txq = &priv->txq[scd_flow];
4310 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg; 4417 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
4311 4418
4312 /* Find index just before block-ack window */ 4419 /* Find index just before block-ack window */
4313 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); 4420 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
4314 4421
4315 /* TODO: Need to get this copy more safely - now good for debug */ 4422 /* TODO: Need to get this copy more safely - now good for debug */
4316/* 4423
4317 {
4318 DECLARE_MAC_BUF(mac);
4319 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, " 4424 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, "
4320 "sta_id = %d\n", 4425 "sta_id = %d\n",
4321 agg->wait_for_ba, 4426 agg->wait_for_ba,
4322 print_mac(mac, (u8*) &ba_resp->sta_addr_lo32), 4427 print_mac(mac, (u8*) &ba_resp->sta_addr_lo32),
4323 ba_resp->sta_id); 4428 ba_resp->sta_id);
4324 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%X%X, scd_flow = " 4429 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
4325 "%d, scd_ssn = %d\n", 4430 "%d, scd_ssn = %d\n",
4326 ba_resp->tid, 4431 ba_resp->tid,
4327 ba_resp->ba_seq_ctl, 4432 ba_resp->seq_ctl,
4328 ba_resp->ba_bitmap1, 4433 ba_resp->bitmap,
4329 ba_resp->ba_bitmap0,
4330 ba_resp->scd_flow, 4434 ba_resp->scd_flow,
4331 ba_resp->scd_ssn); 4435 ba_resp->scd_ssn);
4332 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%X%X \n", 4436 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n",
4333 agg->start_idx, 4437 agg->start_idx,
4334 agg->bitmap1, 4438 agg->bitmap);
4335 agg->bitmap0);
4336 }
4337*/
4338 4439
4339 /* Update driver's record of ACK vs. not for each frame in window */ 4440 /* Update driver's record of ACK vs. not for each frame in window */
4340 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp); 4441 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
@@ -4342,23 +4443,17 @@ static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv,
4342 /* Release all TFDs before the SSN, i.e. all TFDs in front of 4443 /* Release all TFDs before the SSN, i.e. all TFDs in front of
4343 * block-ack window (we assume that they've been successfully 4444 * block-ack window (we assume that they've been successfully
4344 * transmitted ... if not, it's too late anyway). */ 4445 * transmitted ... if not, it's too late anyway). */
4345 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) 4446 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
4346 iwl4965_tx_queue_reclaim(priv, ba_resp_scd_flow, index); 4447 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
4347 4448 priv->stations[ba_resp->sta_id].
4348} 4449 tid[ba_resp->tid].tfds_in_queue -= freed;
4349 4450 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
4350 4451 priv->mac80211_registered &&
4351/** 4452 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
4352 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration 4453 ieee80211_wake_queue(priv->hw, scd_flow);
4353 */ 4454 iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id,
4354static void iwl4965_tx_queue_stop_scheduler(struct iwl4965_priv *priv, u16 txq_id) 4455 ba_resp->tid, scd_flow);
4355{ 4456 }
4356 /* Simply stop the queue, but don't change any configuration;
4357 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
4358 iwl4965_write_prph(priv,
4359 KDR_SCD_QUEUE_STATUS_BITS(txq_id),
4360 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4361 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4362} 4457}
4363 4458
4364/** 4459/**
@@ -4388,6 +4483,7 @@ static int iwl4965_tx_queue_set_q2ratid(struct iwl4965_priv *priv, u16 ra_tid,
4388 return 0; 4483 return 0;
4389} 4484}
4390 4485
4486
4391/** 4487/**
4392 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue 4488 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
4393 * 4489 *
@@ -4455,48 +4551,6 @@ static int iwl4965_tx_queue_agg_enable(struct iwl4965_priv *priv, int txq_id,
4455 return 0; 4551 return 0;
4456} 4552}
4457 4553
4458/**
4459 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID
4460 */
4461static int iwl4965_tx_queue_agg_disable(struct iwl4965_priv *priv, u16 txq_id,
4462 u16 ssn_idx, u8 tx_fifo)
4463{
4464 unsigned long flags;
4465 int rc;
4466
4467 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
4468 IWL_WARNING("queue number too small: %d, must be > %d\n",
4469 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4470 return -EINVAL;
4471 }
4472
4473 spin_lock_irqsave(&priv->lock, flags);
4474 rc = iwl4965_grab_nic_access(priv);
4475 if (rc) {
4476 spin_unlock_irqrestore(&priv->lock, flags);
4477 return rc;
4478 }
4479
4480 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4481
4482 iwl4965_clear_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
4483
4484 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4485 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
4486 /* supposes that ssn_idx is valid (!= 0xFFF) */
4487 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4488
4489 iwl4965_clear_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
4490 iwl4965_txq_ctx_deactivate(priv, txq_id);
4491 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
4492
4493 iwl4965_release_nic_access(priv);
4494 spin_unlock_irqrestore(&priv->lock, flags);
4495
4496 return 0;
4497}
4498
4499#endif/* CONFIG_IWL4965_HT_AGG */
4500#endif /* CONFIG_IWL4965_HT */ 4554#endif /* CONFIG_IWL4965_HT */
4501 4555
4502/** 4556/**
@@ -4730,28 +4784,6 @@ static void iwl4965_sta_modify_del_ba_tid(struct iwl4965_priv *priv,
4730 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 4784 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4731} 4785}
4732 4786
4733#ifdef CONFIG_IWL4965_HT_AGG
4734
4735static const u16 default_tid_to_tx_fifo[] = {
4736 IWL_TX_FIFO_AC1,
4737 IWL_TX_FIFO_AC0,
4738 IWL_TX_FIFO_AC0,
4739 IWL_TX_FIFO_AC1,
4740 IWL_TX_FIFO_AC2,
4741 IWL_TX_FIFO_AC2,
4742 IWL_TX_FIFO_AC3,
4743 IWL_TX_FIFO_AC3,
4744 IWL_TX_FIFO_NONE,
4745 IWL_TX_FIFO_NONE,
4746 IWL_TX_FIFO_NONE,
4747 IWL_TX_FIFO_NONE,
4748 IWL_TX_FIFO_NONE,
4749 IWL_TX_FIFO_NONE,
4750 IWL_TX_FIFO_NONE,
4751 IWL_TX_FIFO_NONE,
4752 IWL_TX_FIFO_AC3
4753};
4754
4755/* 4787/*
4756 * Find first available (lowest unused) Tx Queue, mark it "active". 4788 * Find first available (lowest unused) Tx Queue, mark it "active".
4757 * Called only when finding queue for aggregation. 4789 * Called only when finding queue for aggregation.
@@ -4768,69 +4800,78 @@ static int iwl4965_txq_ctx_activate_free(struct iwl4965_priv *priv)
4768 return -1; 4800 return -1;
4769} 4801}
4770 4802
4771int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, u8 *da, u16 tid, 4803static int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, const u8 *da,
4772 u16 *start_seq_num) 4804 u16 tid, u16 *start_seq_num)
4773{ 4805{
4774
4775 struct iwl4965_priv *priv = hw->priv; 4806 struct iwl4965_priv *priv = hw->priv;
4776 int sta_id; 4807 int sta_id;
4777 int tx_fifo; 4808 int tx_fifo;
4778 int txq_id; 4809 int txq_id;
4779 int ssn = -1; 4810 int ssn = -1;
4811 int rc = 0;
4780 unsigned long flags; 4812 unsigned long flags;
4781 struct iwl4965_tid_data *tid_data; 4813 struct iwl4965_tid_data *tid_data;
4782 DECLARE_MAC_BUF(mac); 4814 DECLARE_MAC_BUF(mac);
4783 4815
4784 /* Determine Tx DMA/FIFO channel for this Traffic ID */
4785 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) 4816 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4786 tx_fifo = default_tid_to_tx_fifo[tid]; 4817 tx_fifo = default_tid_to_tx_fifo[tid];
4787 else 4818 else
4788 return -EINVAL; 4819 return -EINVAL;
4789 4820
4790 IWL_WARNING("iwl-AGG iwl4965_mac_ht_tx_agg_start on da=%s" 4821 IWL_WARNING("%s on da = %s tid = %d\n",
4791 " tid=%d\n", print_mac(mac, da), tid); 4822 __func__, print_mac(mac, da), tid);
4792 4823
4793 /* Get index into station table */
4794 sta_id = iwl4965_hw_find_station(priv, da); 4824 sta_id = iwl4965_hw_find_station(priv, da);
4795 if (sta_id == IWL_INVALID_STATION) 4825 if (sta_id == IWL_INVALID_STATION)
4796 return -ENXIO; 4826 return -ENXIO;
4797 4827
4798 /* Find available Tx queue for aggregation */ 4828 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
4829 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
4830 return -ENXIO;
4831 }
4832
4799 txq_id = iwl4965_txq_ctx_activate_free(priv); 4833 txq_id = iwl4965_txq_ctx_activate_free(priv);
4800 if (txq_id == -1) 4834 if (txq_id == -1)
4801 return -ENXIO; 4835 return -ENXIO;
4802 4836
4803 spin_lock_irqsave(&priv->sta_lock, flags); 4837 spin_lock_irqsave(&priv->sta_lock, flags);
4804 tid_data = &priv->stations[sta_id].tid[tid]; 4838 tid_data = &priv->stations[sta_id].tid[tid];
4805
4806 /* Get starting sequence number for 1st frame in block ack window.
4807 * We'll use least signif byte as 1st frame's index into Tx queue. */
4808 ssn = SEQ_TO_SN(tid_data->seq_number); 4839 ssn = SEQ_TO_SN(tid_data->seq_number);
4809 tid_data->agg.txq_id = txq_id; 4840 tid_data->agg.txq_id = txq_id;
4810 spin_unlock_irqrestore(&priv->sta_lock, flags); 4841 spin_unlock_irqrestore(&priv->sta_lock, flags);
4811 4842
4812 *start_seq_num = ssn; 4843 *start_seq_num = ssn;
4813 4844 rc = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
4814 /* Update driver's link quality manager */
4815 iwl4965_ba_status(priv, tid, BA_STATUS_ACTIVE);
4816
4817 /* Set up and enable aggregation for selected Tx queue and FIFO */
4818 return iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
4819 sta_id, tid, ssn); 4845 sta_id, tid, ssn);
4820} 4846 if (rc)
4847 return rc;
4821 4848
4849 rc = 0;
4850 if (tid_data->tfds_in_queue == 0) {
4851 printk(KERN_ERR "HW queue is empty\n");
4852 tid_data->agg.state = IWL_AGG_ON;
4853 ieee80211_start_tx_ba_cb_irqsafe(hw, da, tid);
4854 } else {
4855 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
4856 tid_data->tfds_in_queue);
4857 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
4858 }
4859 return rc;
4860}
4822 4861
4823int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, u8 *da, u16 tid) 4862static int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, const u8 *da,
4863 u16 tid)
4824{ 4864{
4825 4865
4826 struct iwl4965_priv *priv = hw->priv; 4866 struct iwl4965_priv *priv = hw->priv;
4827 int tx_fifo_id, txq_id, sta_id, ssn = -1; 4867 int tx_fifo_id, txq_id, sta_id, ssn = -1;
4828 struct iwl4965_tid_data *tid_data; 4868 struct iwl4965_tid_data *tid_data;
4829 int rc; 4869 int rc, write_ptr, read_ptr;
4870 unsigned long flags;
4830 DECLARE_MAC_BUF(mac); 4871 DECLARE_MAC_BUF(mac);
4831 4872
4832 if (!da) { 4873 if (!da) {
4833 IWL_ERROR("%s: da = NULL\n", __func__); 4874 IWL_ERROR("da = NULL\n");
4834 return -EINVAL; 4875 return -EINVAL;
4835 } 4876 }
4836 4877
@@ -4844,33 +4885,44 @@ int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, u8 *da, u16 tid)
4844 if (sta_id == IWL_INVALID_STATION) 4885 if (sta_id == IWL_INVALID_STATION)
4845 return -ENXIO; 4886 return -ENXIO;
4846 4887
4888 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
4889 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
4890
4847 tid_data = &priv->stations[sta_id].tid[tid]; 4891 tid_data = &priv->stations[sta_id].tid[tid];
4848 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; 4892 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
4849 txq_id = tid_data->agg.txq_id; 4893 txq_id = tid_data->agg.txq_id;
4894 write_ptr = priv->txq[txq_id].q.write_ptr;
4895 read_ptr = priv->txq[txq_id].q.read_ptr;
4896
4897 /* The queue is not empty */
4898 if (write_ptr != read_ptr) {
4899 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
4900 priv->stations[sta_id].tid[tid].agg.state =
4901 IWL_EMPTYING_HW_QUEUE_DELBA;
4902 return 0;
4903 }
4904
4905 IWL_DEBUG_HT("HW queue empty\n");;
4906 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
4850 4907
4908 spin_lock_irqsave(&priv->lock, flags);
4909 rc = iwl4965_grab_nic_access(priv);
4910 if (rc) {
4911 spin_unlock_irqrestore(&priv->lock, flags);
4912 return rc;
4913 }
4851 rc = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id); 4914 rc = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id);
4852 /* FIXME: need more safe way to handle error condition */ 4915 iwl4965_release_nic_access(priv);
4916 spin_unlock_irqrestore(&priv->lock, flags);
4917
4853 if (rc) 4918 if (rc)
4854 return rc; 4919 return rc;
4855 4920
4856 iwl4965_ba_status(priv, tid, BA_STATUS_INITIATOR_DELBA); 4921 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, da, tid);
4857 IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n",
4858 print_mac(mac, da), tid);
4859 4922
4860 return 0; 4923 IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n",
4861} 4924 print_mac(mac, da), tid);
4862
4863
4864#endif /* CONFIG_IWL4965_HT_AGG */
4865
4866int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, const u8 *da, u16 tid,
4867 u16 *start_seq_num)
4868{
4869 return 0;
4870}
4871 4925
4872int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, const u8 *da, u16 tid)
4873{
4874 return 0; 4926 return 0;
4875} 4927}
4876 4928
@@ -4924,9 +4976,7 @@ void iwl4965_hw_rx_handler_setup(struct iwl4965_priv *priv)
4924 iwl4965_rx_missed_beacon_notif; 4976 iwl4965_rx_missed_beacon_notif;
4925 4977
4926#ifdef CONFIG_IWL4965_HT 4978#ifdef CONFIG_IWL4965_HT
4927#ifdef CONFIG_IWL4965_HT_AGG
4928 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba; 4979 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
4929#endif /* CONFIG_IWL4965_HT_AGG */
4930#endif /* CONFIG_IWL4965_HT */ 4980#endif /* CONFIG_IWL4965_HT */
4931} 4981}
4932 4982