about summary refs log tree commit diff stats
path: root/drivers/net/wireless/iwlwifi/iwl-tx.c
diff options
context:
space:
mode:
author: Winkler, Tomas <tomas.winkler@intel.com> 2008-12-18 21:37:33 -0500
committer: John W. Linville <linville@tuxdriver.com> 2009-01-29 15:59:17 -0500
commit15b1687cb4f45b87ddbe4dfc7759ff5bb69497d2 (patch)
treec4e3e70b0d9b7ba877e52cd0a5116c4411bbf8e1 /drivers/net/wireless/iwlwifi/iwl-tx.c
parent39aadf8c29ad959e823efca15381bea9d0770b1e (diff)
iwlwifi: replace IWL_ERROR with IWL_ERR
IWL_ERR doesn't use hidden priv pointer. Signed-off-by: Tomas Winkler <tomas.winkler@intel.com> Signed-off-by: Zhu Yi <yi.zhu@intel.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c35
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index e829e86181ec..77a573f2c6ee 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -138,7 +138,7 @@ static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
138 num_tbs = iwl_tfd_get_num_tbs(tfd); 138 num_tbs = iwl_tfd_get_num_tbs(tfd);
139 139
140 if (num_tbs >= IWL_NUM_OF_TBS) { 140 if (num_tbs >= IWL_NUM_OF_TBS) {
141 IWL_ERROR("Too many chunks: %i\n", num_tbs); 141 IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
142 /* @todo issue fatal error, it is quite serious situation */ 142 /* @todo issue fatal error, it is quite serious situation */
143 return; 143 return;
144 } 144 }
@@ -171,14 +171,14 @@ static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
171 171
172 /* Each TFD can point to a maximum 20 Tx buffers */ 172 /* Each TFD can point to a maximum 20 Tx buffers */
173 if (num_tbs >= IWL_NUM_OF_TBS) { 173 if (num_tbs >= IWL_NUM_OF_TBS) {
174 IWL_ERROR("Error can not send more than %d chunks\n", 174 IWL_ERR(priv, "Error can not send more than %d chunks\n",
175 IWL_NUM_OF_TBS); 175 IWL_NUM_OF_TBS);
176 return -EINVAL; 176 return -EINVAL;
177 } 177 }
178 178
179 BUG_ON(addr & ~DMA_BIT_MASK(36)); 179 BUG_ON(addr & ~DMA_BIT_MASK(36));
180 if (unlikely(addr & ~IWL_TX_DMA_MASK)) 180 if (unlikely(addr & ~IWL_TX_DMA_MASK))
181 IWL_ERROR("Unaligned address = %llx\n", 181 IWL_ERR(priv, "Unaligned address = %llx\n",
182 (unsigned long long)addr); 182 (unsigned long long)addr);
183 183
184 iwl_tfd_set_tb(tfd, num_tbs, addr, len); 184 iwl_tfd_set_tb(tfd, num_tbs, addr, len);
@@ -395,7 +395,7 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
395 txq->txb = kmalloc(sizeof(txq->txb[0]) * 395 txq->txb = kmalloc(sizeof(txq->txb[0]) *
396 TFD_QUEUE_SIZE_MAX, GFP_KERNEL); 396 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
397 if (!txq->txb) { 397 if (!txq->txb) {
398 IWL_ERROR("kmalloc for auxiliary BD " 398 IWL_ERR(priv, "kmalloc for auxiliary BD "
399 "structures failed\n"); 399 "structures failed\n");
400 goto error; 400 goto error;
401 } 401 }
@@ -409,7 +409,7 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
409 &txq->q.dma_addr); 409 &txq->q.dma_addr);
410 410
411 if (!txq->tfds) { 411 if (!txq->tfds) {
412 IWL_ERROR("pci_alloc_consistent(%zd) failed\n", 412 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n",
413 sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX); 413 sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX);
414 goto error; 414 goto error;
415 } 415 }
@@ -557,13 +557,13 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
557 ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls, 557 ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
558 priv->hw_params.scd_bc_tbls_size); 558 priv->hw_params.scd_bc_tbls_size);
559 if (ret) { 559 if (ret) {
560 IWL_ERROR("Scheduler BC Table allocation failed\n"); 560 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
561 goto error_bc_tbls; 561 goto error_bc_tbls;
562 } 562 }
563 /* Alloc keep-warm buffer */ 563 /* Alloc keep-warm buffer */
564 ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); 564 ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
565 if (ret) { 565 if (ret) {
566 IWL_ERROR("Keep Warm allocation failed\n"); 566 IWL_ERR(priv, "Keep Warm allocation failed\n");
567 goto error_kw; 567 goto error_kw;
568 } 568 }
569 spin_lock_irqsave(&priv->lock, flags); 569 spin_lock_irqsave(&priv->lock, flags);
@@ -589,7 +589,7 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
589 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, 589 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
590 txq_id); 590 txq_id);
591 if (ret) { 591 if (ret) {
592 IWL_ERROR("Tx %d queue init failed\n", txq_id); 592 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
593 goto error; 593 goto error;
594 } 594 }
595 } 595 }
@@ -850,7 +850,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
850 850
851 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == 851 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
852 IWL_INVALID_RATE) { 852 IWL_INVALID_RATE) {
853 IWL_ERROR("ERROR: No TX rate available.\n"); 853 IWL_ERR(priv, "ERROR: No TX rate available.\n");
854 goto drop_unlock; 854 goto drop_unlock;
855 } 855 }
856 856
@@ -1086,7 +1086,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1086 } 1086 }
1087 1087
1088 if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) { 1088 if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
1089 IWL_ERROR("No space for Tx\n"); 1089 IWL_ERR(priv, "No space for Tx\n");
1090 return -ENOSPC; 1090 return -ENOSPC;
1091 } 1091 }
1092 1092
@@ -1163,7 +1163,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1163 int nfreed = 0; 1163 int nfreed = 0;
1164 1164
1165 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { 1165 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
1166 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, " 1166 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
1167 "is out of range [0-%d] %d %d.\n", txq_id, 1167 "is out of range [0-%d] %d %d.\n", txq_id,
1168 index, q->n_bd, q->write_ptr, q->read_ptr); 1168 index, q->n_bd, q->write_ptr, q->read_ptr);
1169 return 0; 1169 return 0;
@@ -1203,7 +1203,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
1203 int nfreed = 0; 1203 int nfreed = 0;
1204 1204
1205 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) { 1205 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
1206 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, " 1206 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
1207 "is out of range [0-%d] %d %d.\n", txq_id, 1207 "is out of range [0-%d] %d %d.\n", txq_id,
1208 idx, q->n_bd, q->write_ptr, q->read_ptr); 1208 idx, q->n_bd, q->write_ptr, q->read_ptr);
1209 return; 1209 return;
@@ -1218,7 +1218,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
1218 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1218 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1219 1219
1220 if (nfreed++ > 0) { 1220 if (nfreed++ > 0) {
1221 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", idx, 1221 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
1222 q->write_ptr, q->read_ptr); 1222 q->write_ptr, q->read_ptr);
1223 queue_work(priv->workqueue, &priv->restart); 1223 queue_work(priv->workqueue, &priv->restart);
1224 } 1224 }
@@ -1314,7 +1314,7 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
1314 return -ENXIO; 1314 return -ENXIO;
1315 1315
1316 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { 1316 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
1317 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n"); 1317 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
1318 return -ENXIO; 1318 return -ENXIO;
1319 } 1319 }
1320 1320
@@ -1354,7 +1354,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1354 unsigned long flags; 1354 unsigned long flags;
1355 1355
1356 if (!ra) { 1356 if (!ra) {
1357 IWL_ERROR("ra = NULL\n"); 1357 IWL_ERR(priv, "ra = NULL\n");
1358 return -EINVAL; 1358 return -EINVAL;
1359 } 1359 }
1360 1360
@@ -1455,7 +1455,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1455 struct ieee80211_tx_info *info; 1455 struct ieee80211_tx_info *info;
1456 1456
1457 if (unlikely(!agg->wait_for_ba)) { 1457 if (unlikely(!agg->wait_for_ba)) {
1458 IWL_ERROR("Received BA when not expected\n"); 1458 IWL_ERR(priv, "Received BA when not expected\n");
1459 return -EINVAL; 1459 return -EINVAL;
1460 } 1460 }
1461 1461
@@ -1528,7 +1528,8 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1528 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); 1528 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1529 1529
1530 if (scd_flow >= priv->hw_params.max_txq_num) { 1530 if (scd_flow >= priv->hw_params.max_txq_num) {
1531 IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n"); 1531 IWL_ERR(priv,
1532 "BUG_ON scd_flow is bigger than number of queues\n");
1532 return; 1533 return;
1533 } 1534 }
1534 1535