author      Emmanuel Grumbach <emmanuel.grumbach@intel.com>    2011-07-10 03:47:01 -0400
committer   Wey-Yi Guy <wey-yi.w.guy@intel.com>                2011-07-21 10:29:12 -0400
commit      48d42c426947d8ffba0caa3cf9c58be6903302e0 (patch)
tree        7ae7a2e9956329df0419c89fa7f63ad53fedcc81 /drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
parent      2e27799621f9b6dc69d9fac5e365cb867eac539c (diff)
iwlagn: SCD configuration for AMPDU moves to transport layer
All of the HW configuration for AMPDU is now done in the transport layer.
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
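
For context, a minimal sketch (not part of the patch) of how the agn layer might use the new transport entry points after this change: the wrapper functions below are hypothetical, while iwl_trans_txq_agg_setup() and iwl_trans_txq_agg_disable() are the functions this patch adds to iwl-trans-tx-pcie.c.

/* Hypothetical caller sketch: the agn AMPDU start/stop paths no longer
 * program the SCD (scheduler) registers directly and instead delegate
 * all of that to the transport layer. */
static void example_agg_tx_start(struct iwl_priv *priv, int sta_id, int tid,
                                 int frame_limit)
{
        /* Added by this patch: stops the queue, maps RA/TID to it, enables
         * aggregation, sets the window/frame limit and activates the queue. */
        iwl_trans_txq_agg_setup(priv, sta_id, tid, frame_limit);
}

static int example_agg_tx_stop(struct iwl_priv *priv, u16 txq_id,
                               u16 ssn_idx, u8 tx_fifo)
{
        /* Added by this patch: validates the queue range and undoes the
         * aggregation configuration for the queue. */
        return iwl_trans_txq_agg_disable(priv, txq_id, ssn_idx, tx_fifo);
}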
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c')
-rw-r--r--   drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c   224
1 file changed, 224 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
index f3b531b34475..9cecb1076280 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
@@ -35,9 +35,56 @@
 #include "iwl-dev.h"
 #include "iwl-core.h"
 #include "iwl-io.h"
+#include "iwl-sta.h"
 #include "iwl-helpers.h"
 #include "iwl-trans-int-pcie.h"
 
+/* TODO:this file should _not_ include the external API header file
+ * (iwl-trans.h). This is needed as a W/A until reclaim functions will move to
+ * the transport layer */
+#include "iwl-trans.h"
+
+/**
+ * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+                                       struct iwl_tx_queue *txq,
+                                       u16 byte_cnt)
+{
+        struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+        int write_ptr = txq->q.write_ptr;
+        int txq_id = txq->q.id;
+        u8 sec_ctl = 0;
+        u8 sta_id = 0;
+        u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+        __le16 bc_ent;
+
+        WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+        sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
+        sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
+
+        switch (sec_ctl & TX_CMD_SEC_MSK) {
+        case TX_CMD_SEC_CCM:
+                len += CCMP_MIC_LEN;
+                break;
+        case TX_CMD_SEC_TKIP:
+                len += TKIP_ICV_LEN;
+                break;
+        case TX_CMD_SEC_WEP:
+                len += WEP_IV_LEN + WEP_ICV_LEN;
+                break;
+        }
+
+        bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
+
+        scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+        if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+                scd_bc_tbl[txq_id].
+                        tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
+}
+
 /**
  * iwl_txq_update_write_ptr - Send new write index to hardware
  */
@@ -291,6 +338,183 @@ int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
         return 0;
 }
 
+/*TODO: this functions should NOT be exported from trans module - export it
+ * until the reclaim flow will be brought to the transport module too */
+void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+                                   struct iwl_tx_queue *txq)
+{
+        struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+        int txq_id = txq->q.id;
+        int read_ptr = txq->q.read_ptr;
+        u8 sta_id = 0;
+        __le16 bc_ent;
+
+        WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+        if (txq_id != priv->cmd_queue)
+                sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
+
+        bc_ent = cpu_to_le16(1 | (sta_id << 12));
+        scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+        if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+                scd_bc_tbl[txq_id].
+                        tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+}
+
+static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
+                                        u16 txq_id)
+{
+        u32 tbl_dw_addr;
+        u32 tbl_dw;
+        u16 scd_q2ratid;
+
+        scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+        tbl_dw_addr = priv->scd_base_addr +
+                        SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
+
+        tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
+
+        if (txq_id & 0x1)
+                tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+        else
+                tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+        iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
+
+        return 0;
+}
+
+static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
+{
+        /* Simply stop the queue, but don't change any configuration;
+         * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+        iwl_write_prph(priv,
+                SCD_QUEUE_STATUS_BITS(txq_id),
+                (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
+                (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
+                                int txq_id, u32 index)
+{
+        iwl_write_direct32(priv, HBUS_TARG_WRPTR,
+                        (index & 0xff) | (txq_id << 8));
+        iwl_write_prph(priv, SCD_QUEUE_RDPTR(txq_id), index);
+}
+
+void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
+                                        struct iwl_tx_queue *txq,
+                                        int tx_fifo_id, int scd_retry)
+{
+        int txq_id = txq->q.id;
+        int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
+
+        iwl_write_prph(priv, SCD_QUEUE_STATUS_BITS(txq_id),
+                        (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+                        (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
+                        (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
+                        SCD_QUEUE_STTS_REG_MSK);
+
+        txq->sched_retry = scd_retry;
+
+        IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
+                       active ? "Activate" : "Deactivate",
+                       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
+}
+
+void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
+                                                int frame_limit)
+{
+        int tx_fifo, txq_id, ssn_idx;
+        u16 ra_tid;
+        unsigned long flags;
+        struct iwl_tid_data *tid_data;
+
+        if (WARN_ON(sta_id == IWL_INVALID_STATION))
+                return;
+        if (WARN_ON(tid >= MAX_TID_COUNT))
+                return;
+
+        spin_lock_irqsave(&priv->sta_lock, flags);
+        tid_data = &priv->stations[sta_id].tid[tid];
+        ssn_idx = SEQ_TO_SN(tid_data->seq_number);
+        txq_id = tid_data->agg.txq_id;
+        tx_fifo = tid_data->agg.tx_fifo;
+        spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+        ra_tid = BUILD_RAxTID(sta_id, tid);
+
+        spin_lock_irqsave(&priv->lock, flags);
+
+        /* Stop this Tx queue before configuring it */
+        iwlagn_tx_queue_stop_scheduler(priv, txq_id);
+
+        /* Map receiver-address / traffic-ID to this queue */
+        iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
+
+        /* Set this queue as a chain-building queue */
+        iwl_set_bits_prph(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id));
+
+        /* enable aggregations for the queue */
+        iwl_set_bits_prph(priv, SCD_AGGR_SEL, (1<<txq_id));
+
+        /* Place first TFD at index corresponding to start sequence number.
+         * Assumes that ssn_idx is valid (!= 0xFFF) */
+        priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+        priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+        iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+        /* Set up Tx window size and frame limit for this queue */
+        iwl_write_targ_mem(priv, priv->scd_base_addr +
+                        SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
+                        sizeof(u32),
+                        ((frame_limit <<
+                        SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+                        SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+                        ((frame_limit <<
+                        SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+                        SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+
+        iwl_set_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
+
+        /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+        iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
+
+        spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
+                                  u16 ssn_idx, u8 tx_fifo)
+{
+        if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
+            (IWLAGN_FIRST_AMPDU_QUEUE +
+                priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+                IWL_ERR(priv,
+                        "queue number out of range: %d, must be %d to %d\n",
+                        txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+                        IWLAGN_FIRST_AMPDU_QUEUE +
+                        priv->cfg->base_params->num_of_ampdu_queues - 1);
+                return -EINVAL;
+        }
+
+        iwlagn_tx_queue_stop_scheduler(priv, txq_id);
+
+        iwl_clear_bits_prph(priv, SCD_AGGR_SEL, (1 << txq_id));
+
+        priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+        priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+        /* supposes that ssn_idx is valid (!= 0xFFF) */
+        iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+        iwl_clear_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
+        iwl_txq_ctx_deactivate(priv, txq_id);
+        iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
+
+        return 0;
+}
+
 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
 
 /**