author     Samuel Ortiz <samuel.ortiz@intel.com>        2009-01-19 18:30:26 -0500
committer  John W. Linville <linville@tuxdriver.com>    2009-01-29 16:00:56 -0500
commit     7aaa1d79e3a2d573ac469744506f17b1c9386840 (patch)
tree       b003f298588e7f6a9f97764387f1c62a17c18b9e /drivers
parent     4f3602c8a3cf8d31e8b08b82d7ea9b0c30f28965 (diff)
iwlwifi: Add TFD library operations
The TFD structures for 3945 and agn HWs are fundamentally different. We thus
need to define operations for attaching and freeing them. This will allow us
to share a fair amount of code (cmd and tx queue related) between both
drivers.

Signed-off-by: Samuel Ortiz <samuel.ortiz@intel.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
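
For illustration only (not part of the patch): the change boils down to an ops-table
indirection, where shared tx-path code calls TFD attach/free handlers through function
pointers and each hardware flavour plugs in its own implementation. The stand-alone C
sketch below shows that pattern; the struct and function names are simplified stand-ins
for the real iwl_lib_ops / iwl_tx_queue definitions, and the "hardware" backends just
print what they would do.

/*
 * Illustration only -- NOT from the patch. Minimal sketch of dispatching
 * TFD attach/free through an ops table, as the commit message describes.
 */
#include <stdint.h>
#include <stdio.h>

struct tx_queue { int write_ptr; };	/* stand-in for struct iwl_tx_queue */

struct lib_ops {
	/* mirrors the new txq_attach_buf_to_tfd / txq_free_tfd hooks */
	int  (*txq_attach_buf_to_tfd)(struct tx_queue *txq, uint64_t addr,
				      uint16_t len, uint8_t reset, uint8_t pad);
	void (*txq_free_tfd)(struct tx_queue *txq);
};

/* 3945-style backend (simplified): pad is carried in the TFD itself */
static int attach_3945(struct tx_queue *txq, uint64_t addr, uint16_t len,
			uint8_t reset, uint8_t pad)
{
	printf("3945: attach %u bytes at %#llx (reset=%u pad=%u)\n",
	       (unsigned)len, (unsigned long long)addr,
	       (unsigned)reset, (unsigned)pad);
	return 0;
}

static void free_3945(struct tx_queue *txq)
{
	printf("3945: free TFD at index %d\n", txq->write_ptr);
}

/* agn-style backend (simplified): padding is not stored in the TFD */
static int attach_agn(struct tx_queue *txq, uint64_t addr, uint16_t len,
			uint8_t reset, uint8_t pad)
{
	(void)pad;	/* unused by this flavour */
	printf("agn: attach %u bytes at %#llx (reset=%u)\n",
	       (unsigned)len, (unsigned long long)addr, (unsigned)reset);
	return 0;
}

static void free_agn(struct tx_queue *txq)
{
	printf("agn: free TFD at index %d\n", txq->write_ptr);
}

static const struct lib_ops ops_3945 = {
	.txq_attach_buf_to_tfd = attach_3945,
	.txq_free_tfd = free_3945,
};

static const struct lib_ops ops_agn = {
	.txq_attach_buf_to_tfd = attach_agn,
	.txq_free_tfd = free_agn,
};

/* Shared code: knows only the ops table, never the hardware flavour. */
static void tx_one(const struct lib_ops *lib, struct tx_queue *txq)
{
	lib->txq_attach_buf_to_tfd(txq, 0x1000, 128, 1, 0);
	lib->txq_free_tfd(txq);
}

int main(void)
{
	struct tx_queue txq = { 0 };

	tx_one(&ops_3945, &txq);	/* same shared path ...            */
	tx_one(&ops_agn, &txq);		/* ... different hardware backends */
	return 0;
}

With this shape, the common code in iwl-tx.c and iwl3945-base.c only ever goes through
priv->cfg->ops->lib, while iwl-3945.c and iwl-agn.c supply the TFD formats they
actually understand.
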
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c      |  28
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.h      |   9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c      |   2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c      |   2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c       | 120
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h      |  10
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c        | 134
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c  |  34
8 files changed, 176 insertions(+), 163 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index e6d4503cd213..18e0f0e3a74f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -318,7 +318,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
 		tx_info = &txq->txb[txq->q.read_ptr];
 		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
 		tx_info->skb[0] = NULL;
-		iwl3945_hw_txq_free_tfd(priv, txq);
+		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
 	}
 
 	if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
@@ -724,15 +724,21 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
 	iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
 }
 
-int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
-			dma_addr_t addr, u16 len)
+int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+				     struct iwl_tx_queue *txq,
+				     dma_addr_t addr, u16 len, u8 reset, u8 pad)
 {
 	int count;
-	u32 pad;
-	struct iwl3945_tfd *tfd = (struct iwl3945_tfd *)ptr;
+	struct iwl_queue *q;
+	struct iwl3945_tfd *tfd;
+
+	q = &txq->q;
+	tfd = &txq->tfds39[q->write_ptr];
+
+	if (reset)
+		memset(tfd, 0, sizeof(*tfd));
 
 	count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
-	pad = TFD_CTL_PAD_GET(le32_to_cpu(tfd->control_flags));
 
 	if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
 		IWL_ERR(priv, "Error can not send more than %d chunks\n",
@@ -756,7 +762,7 @@ int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
  *
  * Does NOT advance any indexes
  */
-int iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 {
 	struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)&txq->tfds39[0];
 	struct iwl3945_tfd *tfd = &tfd_tmp[txq->q.read_ptr];
@@ -767,14 +773,14 @@ int iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 	/* classify bd */
 	if (txq->q.id == IWL_CMD_QUEUE_NUM)
 		/* nothing to cleanup after for host commands */
-		return 0;
+		return;
 
 	/* sanity check */
 	counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
 	if (counter > NUM_TFD_CHUNKS) {
 		IWL_ERR(priv, "Too many chunks: %i\n", counter);
 		/* @todo issue fatal error, it is quite serious situation */
-		return 0;
+		return;
 	}
 
 	/* unmap chunks if any */
@@ -791,7 +797,7 @@ int iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 			}
 		}
 	}
-	return 0;
+	return ;
 }
 
 u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *addr)
@@ -2697,6 +2703,8 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
 }
 
 static struct iwl_lib_ops iwl3945_lib = {
+	.txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
+	.txq_free_tfd = iwl3945_hw_txq_free_tfd,
 	.load_ucode = iwl3945_load_bsm,
 	.apm_ops = {
 		.init = iwl3945_apm_init,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 97dfa7c5a3ce..54538df50d3e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -260,9 +260,12 @@ extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
 extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
 extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
 extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
-extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *tfd,
-					    dma_addr_t addr, u16 len);
-extern int iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+					    struct iwl_tx_queue *txq,
+					    dma_addr_t addr, u16 len,
+					    u8 reset, u8 pad);
+extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
+				    struct iwl_tx_queue *txq);
 extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
 extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
 				    struct iwl_tx_queue *txq);
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 6171ba533f2e..31315decc07d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2295,6 +2295,8 @@ static struct iwl_lib_ops iwl4965_lib = {
 	.txq_set_sched = iwl4965_txq_set_sched,
 	.txq_agg_enable = iwl4965_txq_agg_enable,
 	.txq_agg_disable = iwl4965_txq_agg_disable,
+	.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
+	.txq_free_tfd = iwl_hw_txq_free_tfd,
 	.rx_handler_setup = iwl4965_rx_handler_setup,
 	.setup_deferred_work = iwl4965_setup_deferred_work,
 	.cancel_deferred_work = iwl4965_cancel_deferred_work,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 429dcbeff162..a35af671f850 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1492,6 +1492,8 @@ static struct iwl_lib_ops iwl5000_lib = {
 	.txq_set_sched = iwl5000_txq_set_sched,
 	.txq_agg_enable = iwl5000_txq_agg_enable,
 	.txq_agg_disable = iwl5000_txq_agg_disable,
+	.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
+	.txq_free_tfd = iwl_hw_txq_free_tfd,
 	.rx_handler_setup = iwl5000_rx_handler_setup,
 	.setup_deferred_work = iwl5000_setup_deferred_work,
 	.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 7e0baf6deedf..4a15e42ad00a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -471,6 +471,126 @@ static int iwl_send_beacon_cmd(struct iwl_priv *priv)
 	return rc;
 }
 
+static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+	dma_addr_t addr = get_unaligned_le32(&tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		addr |=
+		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+	return addr;
+}
+
+static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+	return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+				  dma_addr_t addr, u16 len)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+	u16 hi_n_len = len << 4;
+
+	put_unaligned_le32(addr, &tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+	tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+	tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+	return tfd->num_tbs & 0x1f;
+}
+
+/**
+ * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @priv - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)&txq->tfds[0];
+	struct iwl_tfd *tfd;
+	struct pci_dev *dev = priv->pci_dev;
+	int index = txq->q.read_ptr;
+	int i;
+	int num_tbs;
+
+	tfd = &tfd_tmp[index];
+
+	/* Sanity check on number of chunks */
+	num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+	if (num_tbs >= IWL_NUM_OF_TBS) {
+		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
+		/* @todo issue fatal error, it is quite serious situation */
+		return;
+	}
+
+	/* Unmap tx_cmd */
+	if (num_tbs)
+		pci_unmap_single(dev,
+				pci_unmap_addr(&txq->cmd[index]->meta, mapping),
+				pci_unmap_len(&txq->cmd[index]->meta, len),
+				PCI_DMA_TODEVICE);
+
+	/* Unmap chunks, if any. */
+	for (i = 1; i < num_tbs; i++) {
+		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
+				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
+
+		if (txq->txb) {
+			dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
+			txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
+		}
+	}
+}
+
+int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+				 struct iwl_tx_queue *txq,
+				 dma_addr_t addr, u16 len,
+				 u8 reset, u8 pad)
+{
+	struct iwl_queue *q;
+	struct iwl_tfd *tfd;
+	u32 num_tbs;
+
+	q = &txq->q;
+	tfd = &txq->tfds[q->write_ptr];
+
+	if (reset)
+		memset(tfd, 0, sizeof(*tfd));
+
+	num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+	/* Each TFD can point to a maximum 20 Tx buffers */
+	if (num_tbs >= IWL_NUM_OF_TBS) {
+		IWL_ERR(priv, "Error can not send more than %d chunks\n",
+			IWL_NUM_OF_TBS);
+		return -EINVAL;
+	}
+
+	BUG_ON(addr & ~DMA_BIT_MASK(36));
+	if (unlikely(addr & ~IWL_TX_DMA_MASK))
+		IWL_ERR(priv, "Unaligned address = %llx\n",
+			(unsigned long long)addr);
+
+	iwl_tfd_set_tb(tfd, num_tbs, addr, len);
+
+	return 0;
+}
+
 /******************************************************************************
  *
  * Misc. internal state and helper functions
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 466130ff07a9..9abdfb4acbf4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -110,6 +110,12 @@ struct iwl_lib_ops {
 	void (*txq_inval_byte_cnt_tbl)(struct iwl_priv *priv,
 				       struct iwl_tx_queue *txq);
 	void (*txq_set_sched)(struct iwl_priv *priv, u32 mask);
+	int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
+				     struct iwl_tx_queue *txq,
+				     dma_addr_t addr,
+				     u16 len, u8 reset, u8 pad);
+	void (*txq_free_tfd)(struct iwl_priv *priv,
+			     struct iwl_tx_queue *txq);
 	/* aggregations */
 	int (*txq_agg_enable)(struct iwl_priv *priv, int txq_id, int tx_fifo,
 			      int sta_id, int tid, u16 ssn_idx);
@@ -252,6 +258,10 @@ void iwl_rx_statistics(struct iwl_priv *priv,
 * TX
 ******************************************************/
 int iwl_txq_ctx_reset(struct iwl_priv *priv);
+void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+				 struct iwl_tx_queue *txq,
+				 dma_addr_t addr, u16 len, u8 reset, u8 pad);
 int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
 void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
 int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 913c77a2fea2..487a1d652292 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -76,116 +76,6 @@ static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
 	memset(ptr, 0, sizeof(*ptr));
 }
 
-static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
-{
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
-	dma_addr_t addr = get_unaligned_le32(&tb->lo);
-	if (sizeof(dma_addr_t) > sizeof(u32))
-		addr |=
-		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
-
-	return addr;
-}
-
-static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
-	return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
-				  dma_addr_t addr, u16 len)
-{
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-	u16 hi_n_len = len << 4;
-
-	put_unaligned_le32(addr, &tb->lo);
-	if (sizeof(dma_addr_t) > sizeof(u32))
-		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
-
-	tb->hi_n_len = cpu_to_le16(hi_n_len);
-
-	tfd->num_tbs = idx + 1;
-}
-
-static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
-{
-	return tfd->num_tbs & 0x1f;
-}
-
-/**
- * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv - driver private data
- * @txq - tx queue
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within circular buffer)
- */
-static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
-	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)&txq->tfds[0];
-	struct iwl_tfd *tfd;
-	struct pci_dev *dev = priv->pci_dev;
-	int index = txq->q.read_ptr;
-	int i;
-	int num_tbs;
-
-	tfd = &tfd_tmp[index];
-
-	/* Sanity check on number of chunks */
-	num_tbs = iwl_tfd_get_num_tbs(tfd);
-
-	if (num_tbs >= IWL_NUM_OF_TBS) {
-		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
-		/* @todo issue fatal error, it is quite serious situation */
-		return;
-	}
-
-	/* Unmap tx_cmd */
-	if (num_tbs)
-		pci_unmap_single(dev,
-				pci_unmap_addr(&txq->cmd[index]->meta, mapping),
-				pci_unmap_len(&txq->cmd[index]->meta, len),
-				PCI_DMA_TODEVICE);
-
-	/* Unmap chunks, if any. */
-	for (i = 1; i < num_tbs; i++) {
-		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
-				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
-
-		if (txq->txb) {
-			dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
-			txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
-		}
-	}
-}
-
-static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
-					struct iwl_tfd *tfd,
-					dma_addr_t addr, u16 len)
-{
-
-	u32 num_tbs = iwl_tfd_get_num_tbs(tfd);
-
-	/* Each TFD can point to a maximum 20 Tx buffers */
-	if (num_tbs >= IWL_NUM_OF_TBS) {
-		IWL_ERR(priv, "Error can not send more than %d chunks\n",
-			IWL_NUM_OF_TBS);
-		return -EINVAL;
-	}
-
-	BUG_ON(addr & ~DMA_BIT_MASK(36));
-	if (unlikely(addr & ~IWL_TX_DMA_MASK))
-		IWL_ERR(priv, "Unaligned address = %llx\n",
-			(unsigned long long)addr);
-
-	iwl_tfd_set_tb(tfd, num_tbs, addr, len);
-
-	return 0;
-}
-
 /**
  * iwl_txq_update_write_ptr - Send new write index to hardware
  */
@@ -254,7 +144,7 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 	/* first, empty all BD's */
 	for (; q->write_ptr != q->read_ptr;
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
-		iwl_hw_txq_free_tfd(priv, txq);
+		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
 
 	len = sizeof(struct iwl_cmd) * q->n_window;
 
@@ -822,7 +712,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct iwl_tfd *tfd;
 	struct iwl_tx_queue *txq;
 	struct iwl_queue *q;
 	struct iwl_cmd *out_cmd;
@@ -913,10 +802,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	/* Set up first empty TFD within this queue's circular TFD buffer */
-	tfd = &txq->tfds[q->write_ptr];
-	memset(tfd, 0, sizeof(*tfd));
-
 	/* Set up driver data for this TFD */
 	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
 	txq->txb[q->write_ptr].skb[0] = skb;
@@ -970,7 +855,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
 	 * first entry */
 	txcmd_phys += offsetof(struct iwl_cmd, hdr);
-	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
+	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+						   txcmd_phys, len, 1, 0);
 
 	if (info->control.hw_key)
 		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
@@ -981,7 +867,9 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	if (len) {
 		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
 					   len, PCI_DMA_TODEVICE);
-		iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
+		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+							   phys_addr, len,
+							   0, 0);
 	}
 
 	/* Tell NIC about any 2-byte padding after MAC header */
@@ -1063,7 +951,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 {
 	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 	struct iwl_queue *q = &txq->q;
-	struct iwl_tfd *tfd;
 	struct iwl_cmd *out_cmd;
 	dma_addr_t phys_addr;
 	unsigned long flags;
@@ -1092,10 +979,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
 	spin_lock_irqsave(&priv->hcmd_lock, flags);
 
-	tfd = &txq->tfds[q->write_ptr];
-	memset(tfd, 0, sizeof(*tfd));
-
-
 	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
 	out_cmd = txq->cmd[idx];
 
@@ -1120,7 +1003,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	pci_unmap_len_set(&out_cmd->meta, len, len);
 	phys_addr += offsetof(struct iwl_cmd, hdr);
 
-	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
+	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+						   phys_addr, fix_size, 1, 0);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 	switch (out_cmd->hdr.cmd) {
@@ -1180,7 +1064,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
 			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
 
-		iwl_hw_txq_free_tfd(priv, txq);
+		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
 		nfreed++;
 	}
 	return nfreed;
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 66b7e22d7e84..4474a4c3ddcd 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -274,7 +274,7 @@ void iwl3945_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 	/* first, empty all BD's */
 	for (; q->write_ptr != q->read_ptr;
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
-		iwl3945_hw_txq_free_tfd(priv, txq);
+		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
 
 	len = sizeof(struct iwl_cmd) * q->n_window;
 	if (q->id == IWL_CMD_QUEUE_NUM)
@@ -453,12 +453,10 @@ static int iwl3945_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 {
 	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 	struct iwl_queue *q = &txq->q;
-	struct iwl3945_tfd *tfd;
 	struct iwl_cmd *out_cmd;
 	u32 idx;
 	u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
 	dma_addr_t phys_addr;
-	int pad;
 	int ret, len;
 	unsigned long flags;
 
@@ -481,9 +479,6 @@ static int iwl3945_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
 	spin_lock_irqsave(&priv->hcmd_lock, flags);
 
-	tfd = &txq->tfds39[q->write_ptr];
-	memset(tfd, 0, sizeof(*tfd));
-
 	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
 	out_cmd = txq->cmd[idx];
 
@@ -509,10 +504,9 @@ static int iwl3945_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	pci_unmap_len_set(&out_cmd->meta, len, len);
 	phys_addr += offsetof(struct iwl_cmd, hdr);
 
-	iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
-
-	pad = U32_PAD(cmd->len);
-	tfd->control_flags |= cpu_to_le32(TFD_CTL_PAD_SET(pad));
+	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+						   phys_addr, fix_size,
+						   1, U32_PAD(cmd->len));
 
 	IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
 		     "%d bytes at %d[%d]:%d\n",
@@ -2158,7 +2152,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct iwl3945_tfd *tfd;
 	struct iwl3945_tx_cmd *tx;
 	struct iwl_tx_queue *txq = NULL;
 	struct iwl_queue *q = NULL;
@@ -2243,9 +2236,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	/* Set up first empty TFD within this queue's circular TFD buffer */
-	tfd = &txq->tfds39[q->write_ptr];
-	memset(tfd, 0, sizeof(*tfd));
 	idx = get_cmd_index(q, q->write_ptr, 0);
 
 	/* Set up driver data for this TFD */
@@ -2304,7 +2294,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
 	 * first entry */
-	iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
+	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+						   txcmd_phys, len, 1, 0);
 
 	if (info->control.hw_key)
 		iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0);
@@ -2315,18 +2306,11 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	if (len) {
 		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
 					   len, PCI_DMA_TODEVICE);
-		iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
+		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+							   phys_addr, len,
+							   0, U32_PAD(len));
 	}
 
-	if (!len)
-		/* If there is no payload, then we use only one Tx buffer */
-		tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(1));
-	else
-		/* Else use 2 buffers.
-		 * Tell 3945 about any padding after MAC header */
-		tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(2) |
-			TFD_CTL_PAD_SET(U32_PAD(len)));
-
 	/* Total # bytes to be transmitted */
 	len = (u16)skb->len;
 	tx->len = cpu_to_le16(len);