aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/iwlwifi/iwl-5000.c
diff options
context:
space:
mode:
authorWey-Yi Guy <wey-yi.w.guy@intel.com>2010-03-16 20:41:22 -0400
committerReinette Chatre <reinette.chatre@intel.com>2010-03-25 14:17:51 -0400
commitb305a08058f794c8a99c5ee87827b92d6b8c24ae (patch)
treee277fa80991207e72b7a6ae37308ead6518afb04 /drivers/net/wireless/iwlwifi/iwl-5000.c
parent7dc77dba6a8bde512996824643da5669d73cbcdc (diff)
iwlwifi: move tx queue related code to separate file
Multiple iwlagn-based devices shared the same transmit queue functions. Move tx queue related code from iwl-5000.c to iwl-agn-tx.c file. Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com> Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-5000.c')
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c253
1 file changed, 12 insertions, 241 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 8aa382e370f6..7e278956ec37 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -358,34 +358,6 @@ restart:
358 queue_work(priv->workqueue, &priv->restart); 358 queue_work(priv->workqueue, &priv->restart);
359} 359}
360 360
361static void iwl5000_set_wr_ptrs(struct iwl_priv *priv,
362 int txq_id, u32 index)
363{
364 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
365 (index & 0xff) | (txq_id << 8));
366 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index);
367}
368
369static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
370 struct iwl_tx_queue *txq,
371 int tx_fifo_id, int scd_retry)
372{
373 int txq_id = txq->q.id;
374 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
375
376 iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
377 (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
378 (tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) |
379 (1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) |
380 IWL50_SCD_QUEUE_STTS_REG_MSK);
381
382 txq->sched_retry = scd_retry;
383
384 IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
385 active ? "Activate" : "Deactivate",
386 scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
387}
388
389int iwl5000_alive_notify(struct iwl_priv *priv) 361int iwl5000_alive_notify(struct iwl_priv *priv)
390{ 362{
391 u32 a; 363 u32 a;
@@ -448,7 +420,7 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
448 /* Activate all Tx DMA/FIFO channels */ 420 /* Activate all Tx DMA/FIFO channels */
449 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7)); 421 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
450 422
451 iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); 423 iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
452 424
453 /* make sure all queue are not stopped */ 425 /* make sure all queue are not stopped */
454 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); 426 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
@@ -468,7 +440,7 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
468 if (ac == IWL_TX_FIFO_UNUSED) 440 if (ac == IWL_TX_FIFO_UNUSED)
469 continue; 441 continue;
470 442
471 iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0); 443 iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
472 } 444 }
473 445
474 spin_unlock_irqrestore(&priv->lock, flags); 446 spin_unlock_irqrestore(&priv->lock, flags);
@@ -539,207 +511,6 @@ int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
539 return 0; 511 return 0;
540} 512}
541 513
542/**
543 * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
544 */
545void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
546 struct iwl_tx_queue *txq,
547 u16 byte_cnt)
548{
549 struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
550 int write_ptr = txq->q.write_ptr;
551 int txq_id = txq->q.id;
552 u8 sec_ctl = 0;
553 u8 sta_id = 0;
554 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
555 __le16 bc_ent;
556
557 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
558
559 if (txq_id != IWL_CMD_QUEUE_NUM) {
560 sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
561 sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
562
563 switch (sec_ctl & TX_CMD_SEC_MSK) {
564 case TX_CMD_SEC_CCM:
565 len += CCMP_MIC_LEN;
566 break;
567 case TX_CMD_SEC_TKIP:
568 len += TKIP_ICV_LEN;
569 break;
570 case TX_CMD_SEC_WEP:
571 len += WEP_IV_LEN + WEP_ICV_LEN;
572 break;
573 }
574 }
575
576 bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
577
578 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
579
580 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
581 scd_bc_tbl[txq_id].
582 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
583}
584
585void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
586 struct iwl_tx_queue *txq)
587{
588 struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
589 int txq_id = txq->q.id;
590 int read_ptr = txq->q.read_ptr;
591 u8 sta_id = 0;
592 __le16 bc_ent;
593
594 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
595
596 if (txq_id != IWL_CMD_QUEUE_NUM)
597 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
598
599 bc_ent = cpu_to_le16(1 | (sta_id << 12));
600 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
601
602 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
603 scd_bc_tbl[txq_id].
604 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
605}
606
607static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
608 u16 txq_id)
609{
610 u32 tbl_dw_addr;
611 u32 tbl_dw;
612 u16 scd_q2ratid;
613
614 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
615
616 tbl_dw_addr = priv->scd_base_addr +
617 IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
618
619 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
620
621 if (txq_id & 0x1)
622 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
623 else
624 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
625
626 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
627
628 return 0;
629}
630static void iwl5000_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
631{
632 /* Simply stop the queue, but don't change any configuration;
633 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
634 iwl_write_prph(priv,
635 IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
636 (0 << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
637 (1 << IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
638}
639
640int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
641 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
642{
643 unsigned long flags;
644 u16 ra_tid;
645
646 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
647 (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
648 <= txq_id)) {
649 IWL_WARN(priv,
650 "queue number out of range: %d, must be %d to %d\n",
651 txq_id, IWL50_FIRST_AMPDU_QUEUE,
652 IWL50_FIRST_AMPDU_QUEUE +
653 priv->cfg->num_of_ampdu_queues - 1);
654 return -EINVAL;
655 }
656
657 ra_tid = BUILD_RAxTID(sta_id, tid);
658
659 /* Modify device's station table to Tx this TID */
660 iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
661
662 spin_lock_irqsave(&priv->lock, flags);
663
664 /* Stop this Tx queue before configuring it */
665 iwl5000_tx_queue_stop_scheduler(priv, txq_id);
666
667 /* Map receiver-address / traffic-ID to this queue */
668 iwl5000_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
669
670 /* Set this queue as a chain-building queue */
671 iwl_set_bits_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, (1<<txq_id));
672
673 /* enable aggregations for the queue */
674 iwl_set_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1<<txq_id));
675
676 /* Place first TFD at index corresponding to start sequence number.
677 * Assumes that ssn_idx is valid (!= 0xFFF) */
678 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
679 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
680 iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);
681
682 /* Set up Tx window size and frame limit for this queue */
683 iwl_write_targ_mem(priv, priv->scd_base_addr +
684 IWL50_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
685 sizeof(u32),
686 ((SCD_WIN_SIZE <<
687 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
688 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
689 ((SCD_FRAME_LIMIT <<
690 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
691 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
692
693 iwl_set_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
694
695 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
696 iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
697
698 spin_unlock_irqrestore(&priv->lock, flags);
699
700 return 0;
701}
702
703int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
704 u16 ssn_idx, u8 tx_fifo)
705{
706 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
707 (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
708 <= txq_id)) {
709 IWL_ERR(priv,
710 "queue number out of range: %d, must be %d to %d\n",
711 txq_id, IWL50_FIRST_AMPDU_QUEUE,
712 IWL50_FIRST_AMPDU_QUEUE +
713 priv->cfg->num_of_ampdu_queues - 1);
714 return -EINVAL;
715 }
716
717 iwl5000_tx_queue_stop_scheduler(priv, txq_id);
718
719 iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id));
720
721 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
722 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
723 /* supposes that ssn_idx is valid (!= 0xFFF) */
724 iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);
725
726 iwl_clear_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
727 iwl_txq_ctx_deactivate(priv, txq_id);
728 iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
729
730 return 0;
731}
732
733/*
734 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
735 * must be called under priv->lock and mac access
736 */
737void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
738{
739 iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
740}
741
742
743static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp) 514static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
744{ 515{
745 return le32_to_cpup((__le32 *)&tx_resp->status + 516 return le32_to_cpup((__le32 *)&tx_resp->status +
@@ -1063,11 +834,11 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1063 834
1064struct iwl_lib_ops iwl5000_lib = { 835struct iwl_lib_ops iwl5000_lib = {
1065 .set_hw_params = iwl5000_hw_set_hw_params, 836 .set_hw_params = iwl5000_hw_set_hw_params,
1066 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 837 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
1067 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 838 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
1068 .txq_set_sched = iwl5000_txq_set_sched, 839 .txq_set_sched = iwlagn_txq_set_sched,
1069 .txq_agg_enable = iwl5000_txq_agg_enable, 840 .txq_agg_enable = iwlagn_txq_agg_enable,
1070 .txq_agg_disable = iwl5000_txq_agg_disable, 841 .txq_agg_disable = iwlagn_txq_agg_disable,
1071 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 842 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
1072 .txq_free_tfd = iwl_hw_txq_free_tfd, 843 .txq_free_tfd = iwl_hw_txq_free_tfd,
1073 .txq_init = iwl_hw_tx_queue_init, 844 .txq_init = iwl_hw_tx_queue_init,
@@ -1121,11 +892,11 @@ struct iwl_lib_ops iwl5000_lib = {
1121 892
1122static struct iwl_lib_ops iwl5150_lib = { 893static struct iwl_lib_ops iwl5150_lib = {
1123 .set_hw_params = iwl5000_hw_set_hw_params, 894 .set_hw_params = iwl5000_hw_set_hw_params,
1124 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 895 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
1125 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 896 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
1126 .txq_set_sched = iwl5000_txq_set_sched, 897 .txq_set_sched = iwlagn_txq_set_sched,
1127 .txq_agg_enable = iwl5000_txq_agg_enable, 898 .txq_agg_enable = iwlagn_txq_agg_enable,
1128 .txq_agg_disable = iwl5000_txq_agg_disable, 899 .txq_agg_disable = iwlagn_txq_agg_disable,
1129 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 900 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
1130 .txq_free_tfd = iwl_hw_txq_free_tfd, 901 .txq_free_tfd = iwl_hw_txq_free_tfd,
1131 .txq_init = iwl_hw_tx_queue_init, 902 .txq_init = iwl_hw_tx_queue_init,