path: root/drivers/net/wireless/iwlegacy/iwl-tx.c
Diffstat (limited to 'drivers/net/wireless/iwlegacy/iwl-tx.c')
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-tx.c	168
1 file changed, 84 insertions(+), 84 deletions(-)
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
index 1c27c60c753a..af6ac4fe2670 100644
--- a/drivers/net/wireless/iwlegacy/iwl-tx.c
+++ b/drivers/net/wireless/iwlegacy/iwl-tx.c
@@ -42,7 +42,7 @@
  * il_txq_update_write_ptr - Send new write index to hardware
  */
 void
-il_txq_update_write_ptr(struct il_priv *priv, struct il_tx_queue *txq)
+il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
 {
 	u32 reg = 0;
 	int txq_id = txq->q.id;
@@ -51,22 +51,22 @@ il_txq_update_write_ptr(struct il_priv *priv, struct il_tx_queue *txq)
 		return;
 
 	/* if we're trying to save power */
-	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+	if (test_bit(STATUS_POWER_PMI, &il->status)) {
 		/* wake up nic if it's powered down ...
 		 * uCode will wake up, and interrupt us again, so next
 		 * time we'll skip this part. */
-		reg = il_read32(priv, CSR_UCODE_DRV_GP1);
+		reg = il_read32(il, CSR_UCODE_DRV_GP1);
 
 		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-			IL_DEBUG_INFO(priv,
+			IL_DEBUG_INFO(il,
 				      "Tx queue %d requesting wakeup,"
 				      " GP1 = 0x%x\n", txq_id, reg);
-			il_set_bit(priv, CSR_GP_CNTRL,
+			il_set_bit(il, CSR_GP_CNTRL,
 				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 			return;
 		}
 
-		il_write_direct32(priv, HBUS_TARG_WRPTR,
+		il_write_direct32(il, HBUS_TARG_WRPTR,
 				  txq->q.write_ptr | (txq_id << 8));
 
 		/*
@@ -75,7 +75,7 @@ il_txq_update_write_ptr(struct il_priv *priv, struct il_tx_queue *txq)
 	 * trying to tx (during RFKILL, we're not trying to tx).
 	 */
 	} else
-		il_write32(priv, HBUS_TARG_WRPTR,
+		il_write32(il, HBUS_TARG_WRPTR,
 			   txq->q.write_ptr | (txq_id << 8));
 	txq->need_update = 0;
 }
@@ -84,16 +84,16 @@ EXPORT_SYMBOL(il_txq_update_write_ptr);
 /**
  * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
  */
-void il_tx_queue_unmap(struct il_priv *priv, int txq_id)
+void il_tx_queue_unmap(struct il_priv *il, int txq_id)
 {
-	struct il_tx_queue *txq = &priv->txq[txq_id];
+	struct il_tx_queue *txq = &il->txq[txq_id];
 	struct il_queue *q = &txq->q;
 
 	if (q->n_bd == 0)
 		return;
 
 	while (q->write_ptr != q->read_ptr) {
-		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+		il->cfg->ops->lib->txq_free_tfd(il, txq);
 		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
 }
@@ -107,13 +107,13 @@ EXPORT_SYMBOL(il_tx_queue_unmap);
  * Free all buffers.
  * 0-fill, but do not free "txq" descriptor structure.
  */
-void il_tx_queue_free(struct il_priv *priv, int txq_id)
+void il_tx_queue_free(struct il_priv *il, int txq_id)
 {
-	struct il_tx_queue *txq = &priv->txq[txq_id];
-	struct device *dev = &priv->pci_dev->dev;
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct device *dev = &il->pci_dev->dev;
 	int i;
 
-	il_tx_queue_unmap(priv, txq_id);
+	il_tx_queue_unmap(il, txq_id);
 
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
@@ -121,7 +121,7 @@ void il_tx_queue_free(struct il_priv *priv, int txq_id)
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd)
-		dma_free_coherent(dev, priv->hw_params.tfd_size *
+		dma_free_coherent(dev, il->hw_params.tfd_size *
 				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
 
 	/* De-alloc array of per-TFD driver data */
@@ -142,9 +142,9 @@ EXPORT_SYMBOL(il_tx_queue_free);
 /**
  * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
  */
-void il_cmd_queue_unmap(struct il_priv *priv)
+void il_cmd_queue_unmap(struct il_priv *il)
 {
-	struct il_tx_queue *txq = &priv->txq[priv->cmd_queue];
+	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
 	struct il_queue *q = &txq->q;
 	int i;
 
@@ -155,7 +155,7 @@ void il_cmd_queue_unmap(struct il_priv *priv)
 		i = il_get_cmd_index(q, q->read_ptr, 0);
 
 		if (txq->meta[i].flags & CMD_MAPPED) {
-			pci_unmap_single(priv->pci_dev,
+			pci_unmap_single(il->pci_dev,
 					 dma_unmap_addr(&txq->meta[i], mapping),
 					 dma_unmap_len(&txq->meta[i], len),
 					 PCI_DMA_BIDIRECTIONAL);
@@ -167,7 +167,7 @@ void il_cmd_queue_unmap(struct il_priv *priv)
 
 	i = q->n_window;
 	if (txq->meta[i].flags & CMD_MAPPED) {
-		pci_unmap_single(priv->pci_dev,
+		pci_unmap_single(il->pci_dev,
 				 dma_unmap_addr(&txq->meta[i], mapping),
 				 dma_unmap_len(&txq->meta[i], len),
 				 PCI_DMA_BIDIRECTIONAL);
@@ -184,13 +184,13 @@ EXPORT_SYMBOL(il_cmd_queue_unmap);
  * Free all buffers.
  * 0-fill, but do not free "txq" descriptor structure.
  */
-void il_cmd_queue_free(struct il_priv *priv)
+void il_cmd_queue_free(struct il_priv *il)
 {
-	struct il_tx_queue *txq = &priv->txq[priv->cmd_queue];
-	struct device *dev = &priv->pci_dev->dev;
+	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+	struct device *dev = &il->pci_dev->dev;
 	int i;
 
-	il_cmd_queue_unmap(priv);
+	il_cmd_queue_unmap(il);
 
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i <= TFD_CMD_SLOTS; i++)
@@ -198,7 +198,7 @@ void il_cmd_queue_free(struct il_priv *priv)
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd)
-		dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
+		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
 				  txq->tfds, txq->q.dma_addr);
 
 	/* deallocate arrays */
@@ -256,7 +256,7 @@ EXPORT_SYMBOL(il_queue_space);
 /**
  * il_queue_init - Initialize queue's high/low-water and read/write indexes
  */
-static int il_queue_init(struct il_priv *priv, struct il_queue *q,
+static int il_queue_init(struct il_priv *il, struct il_queue *q,
 			 int count, int slots_num, u32 id)
 {
 	q->n_bd = count;
@@ -287,19 +287,19 @@ static int il_queue_init(struct il_priv *priv, struct il_queue *q,
 /**
  * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
  */
-static int il_tx_queue_alloc(struct il_priv *priv,
+static int il_tx_queue_alloc(struct il_priv *il,
 			     struct il_tx_queue *txq, u32 id)
 {
-	struct device *dev = &priv->pci_dev->dev;
-	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
+	struct device *dev = &il->pci_dev->dev;
+	size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
 
-	/* Driver private data, only for Tx (not command) queues,
+	/* Driver ilate data, only for Tx (not command) queues,
 	 * not shared with device. */
-	if (id != priv->cmd_queue) {
+	if (id != il->cmd_queue) {
 		txq->txb = kzalloc(sizeof(txq->txb[0]) *
 				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
 		if (!txq->txb) {
-			IL_ERR(priv, "kmalloc for auxiliary BD "
+			IL_ERR(il, "kmalloc for auxiliary BD "
 			       "structures failed\n");
 			goto error;
 		}
@@ -312,7 +312,7 @@ static int il_tx_queue_alloc(struct il_priv *priv,
 	txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
 				       GFP_KERNEL);
 	if (!txq->tfds) {
-		IL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
+		IL_ERR(il, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
 		goto error;
 	}
 	txq->q.id = id;
@@ -329,7 +329,7 @@ static int il_tx_queue_alloc(struct il_priv *priv,
 /**
  * il_tx_queue_init - Allocate and initialize one tx/cmd queue
  */
-int il_tx_queue_init(struct il_priv *priv, struct il_tx_queue *txq,
+int il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq,
 		     int slots_num, u32 txq_id)
 {
 	int i, len;
@@ -344,7 +344,7 @@ int il_tx_queue_init(struct il_priv *priv, struct il_tx_queue *txq,
 	 * For normal Tx queues (all other queues), no super-size command
 	 * space is needed.
 	 */
-	if (txq_id == priv->cmd_queue)
+	if (txq_id == il->cmd_queue)
 		actual_slots++;
 
 	txq->meta = kzalloc(sizeof(struct il_cmd_meta) * actual_slots,
@@ -367,7 +367,7 @@ int il_tx_queue_init(struct il_priv *priv, struct il_tx_queue *txq,
 	}
 
 	/* Alloc driver data array and TFD circular buffer */
-	ret = il_tx_queue_alloc(priv, txq, txq_id);
+	ret = il_tx_queue_alloc(il, txq, txq_id);
 	if (ret)
 		goto err;
 
@@ -386,11 +386,11 @@ int il_tx_queue_init(struct il_priv *priv, struct il_tx_queue *txq,
 	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
 
 	/* Initialize queue's high/low-water marks, and head/tail indexes */
-	il_queue_init(priv, &txq->q,
+	il_queue_init(il, &txq->q,
 		      TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
 
 	/* Tell device where to find queue */
-	priv->cfg->ops->lib->txq_init(priv, txq);
+	il->cfg->ops->lib->txq_init(il, txq);
 
 	return 0;
 err:
@@ -404,12 +404,12 @@ out_free_arrays:
 }
 EXPORT_SYMBOL(il_tx_queue_init);
 
-void il_tx_queue_reset(struct il_priv *priv, struct il_tx_queue *txq,
+void il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq,
 		       int slots_num, u32 txq_id)
 {
 	int actual_slots = slots_num;
 
-	if (txq_id == priv->cmd_queue)
+	if (txq_id == il->cmd_queue)
 		actual_slots++;
 
 	memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
@@ -417,11 +417,11 @@ void il_tx_queue_reset(struct il_priv *priv, struct il_tx_queue *txq,
 	txq->need_update = 0;
 
 	/* Initialize queue's high/low-water marks, and head/tail indexes */
-	il_queue_init(priv, &txq->q,
+	il_queue_init(il, &txq->q,
 		      TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
 
 	/* Tell device where to find queue */
-	priv->cfg->ops->lib->txq_init(priv, txq);
+	il->cfg->ops->lib->txq_init(il, txq);
 }
 EXPORT_SYMBOL(il_tx_queue_reset);
 
@@ -429,16 +429,16 @@ EXPORT_SYMBOL(il_tx_queue_reset);
 
 /**
  * il_enqueue_hcmd - enqueue a uCode command
- * @priv: device private data point
+ * @il: device ilate data point
  * @cmd: a point to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation is
 * failed. On success, it turns the index (> 0) of command in the
 * command queue.
 */
-int il_enqueue_hcmd(struct il_priv *priv, struct il_host_cmd *cmd)
+int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
 {
-	struct il_tx_queue *txq = &priv->txq[priv->cmd_queue];
+	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
 	struct il_queue *q = &txq->q;
 	struct il_device_cmd *out_cmd;
 	struct il_cmd_meta *out_meta;
@@ -448,7 +448,7 @@ int il_enqueue_hcmd(struct il_priv *priv, struct il_host_cmd *cmd)
 	u32 idx;
 	u16 fix_size;
 
-	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
+	cmd->len = il->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
 	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
 
 	/* If any of the command structures end up being larger than
@@ -460,19 +460,19 @@ int il_enqueue_hcmd(struct il_priv *priv, struct il_host_cmd *cmd)
 	       !(cmd->flags & CMD_SIZE_HUGE));
 	BUG_ON(fix_size > IL_MAX_CMD_SIZE);
 
-	if (il_is_rfkill(priv) || il_is_ctkill(priv)) {
-		IL_WARN(priv, "Not sending command - %s KILL\n",
-			il_is_rfkill(priv) ? "RF" : "CT");
+	if (il_is_rfkill(il) || il_is_ctkill(il)) {
+		IL_WARN(il, "Not sending command - %s KILL\n",
+			il_is_rfkill(il) ? "RF" : "CT");
 		return -EIO;
 	}
 
-	spin_lock_irqsave(&priv->hcmd_lock, flags);
+	spin_lock_irqsave(&il->hcmd_lock, flags);
 
 	if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-		spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+		spin_unlock_irqrestore(&il->hcmd_lock, flags);
 
-		IL_ERR(priv, "Restarting adapter due to command queue full\n");
-		queue_work(priv->workqueue, &priv->restart);
+		IL_ERR(il, "Restarting adapter due to command queue full\n");
+		queue_work(il->workqueue, &il->restart);
 		return -ENOSPC;
 	}
 
@@ -481,7 +481,7 @@ int il_enqueue_hcmd(struct il_priv *priv, struct il_host_cmd *cmd)
 	out_meta = &txq->meta[idx];
 
 	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
-		spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+		spin_unlock_irqrestore(&il->hcmd_lock, flags);
 		return -ENOSPC;
 	}
 
@@ -499,7 +499,7 @@ int il_enqueue_hcmd(struct il_priv *priv, struct il_host_cmd *cmd)
 	 * information */
 
 	out_cmd->hdr.flags = 0;
-	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
+	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) |
 			INDEX_TO_SEQ(q->write_ptr));
 	if (cmd->flags & CMD_SIZE_HUGE)
 		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
@@ -511,43 +511,43 @@ int il_enqueue_hcmd(struct il_priv *priv, struct il_host_cmd *cmd)
 	switch (out_cmd->hdr.cmd) {
 	case REPLY_TX_LINK_QUALITY_CMD:
 	case SENSITIVITY_CMD:
-		IL_DEBUG_HC_DUMP(priv,
+		IL_DEBUG_HC_DUMP(il,
 				 "Sending command %s (#%x), seq: 0x%04X, "
 				 "%d bytes at %d[%d]:%d\n",
 				 il_get_cmd_string(out_cmd->hdr.cmd),
 				 out_cmd->hdr.cmd,
 				 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
-				 q->write_ptr, idx, priv->cmd_queue);
+				 q->write_ptr, idx, il->cmd_queue);
 		break;
 	default:
-		IL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
+		IL_DEBUG_HC(il, "Sending command %s (#%x), seq: 0x%04X, "
 			    "%d bytes at %d[%d]:%d\n",
 			    il_get_cmd_string(out_cmd->hdr.cmd),
 			    out_cmd->hdr.cmd,
 			    le16_to_cpu(out_cmd->hdr.sequence), fix_size,
-			    q->write_ptr, idx, priv->cmd_queue);
+			    q->write_ptr, idx, il->cmd_queue);
 	}
 #endif
 	txq->need_update = 1;
 
-	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
+	if (il->cfg->ops->lib->txq_update_byte_cnt_tbl)
 		/* Set up entry in queue's byte count circular buffer */
-		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
+		il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, 0);
 
-	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
+	phys_addr = pci_map_single(il->pci_dev, &out_cmd->hdr,
 				   fix_size, PCI_DMA_BIDIRECTIONAL);
 	dma_unmap_addr_set(out_meta, mapping, phys_addr);
 	dma_unmap_len_set(out_meta, len, fix_size);
 
-	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+	il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq,
 						   phys_addr, fix_size, 1,
 						   U32_PAD(cmd->len));
 
 	/* Increment and update queue's write index */
 	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
-	il_txq_update_write_ptr(priv, txq);
+	il_txq_update_write_ptr(il, txq);
 
-	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+	spin_unlock_irqrestore(&il->hcmd_lock, flags);
 	return idx;
 }
 
@@ -558,15 +558,15 @@ int il_enqueue_hcmd(struct il_priv *priv, struct il_host_cmd *cmd)
 * need to be reclaimed. As result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
-static void il_hcmd_queue_reclaim(struct il_priv *priv, int txq_id,
+static void il_hcmd_queue_reclaim(struct il_priv *il, int txq_id,
 				  int idx, int cmd_idx)
 {
-	struct il_tx_queue *txq = &priv->txq[txq_id];
+	struct il_tx_queue *txq = &il->txq[txq_id];
 	struct il_queue *q = &txq->q;
 	int nfreed = 0;
 
 	if ((idx >= q->n_bd) || (il_queue_used(q, idx) == 0)) {
-		IL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
+		IL_ERR(il, "Read index for DMA queue txq id (%d), index %d, "
 		       "is out of range [0-%d] %d %d.\n", txq_id,
 		       idx, q->n_bd, q->write_ptr, q->read_ptr);
 		return;
@@ -576,9 +576,9 @@ static void il_hcmd_queue_reclaim(struct il_priv *priv, int txq_id,
 	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
 		if (nfreed++ > 0) {
-			IL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
+			IL_ERR(il, "HCMD skipped: index (%d) %d %d\n", idx,
 			       q->write_ptr, q->read_ptr);
-			queue_work(priv->workqueue, &priv->restart);
+			queue_work(il->workqueue, &il->restart);
 		}
 
 	}
@@ -593,7 +593,7 @@ static void il_hcmd_queue_reclaim(struct il_priv *priv, int txq_id,
 * if the callback returns 1
 */
 void
-il_tx_cmd_complete(struct il_priv *priv, struct il_rx_mem_buffer *rxb)
+il_tx_cmd_complete(struct il_priv *il, struct il_rx_mem_buffer *rxb)
 {
 	struct il_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -603,18 +603,18 @@ il_tx_cmd_complete(struct il_priv *priv, struct il_rx_mem_buffer *rxb)
 	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
 	struct il_device_cmd *cmd;
 	struct il_cmd_meta *meta;
-	struct il_tx_queue *txq = &priv->txq[priv->cmd_queue];
+	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
 	unsigned long flags;
 
 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then there a command routing bug has been introduced
 	 * in the queue management code. */
-	if (WARN(txq_id != priv->cmd_queue,
+	if (WARN(txq_id != il->cmd_queue,
 		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
-		 txq_id, priv->cmd_queue, sequence,
-		 priv->txq[priv->cmd_queue].q.read_ptr,
-		 priv->txq[priv->cmd_queue].q.write_ptr)) {
-		il_print_hex_error(priv, pkt, 32);
+		 txq_id, il->cmd_queue, sequence,
+		 il->txq[il->cmd_queue].q.read_ptr,
+		 il->txq[il->cmd_queue].q.write_ptr)) {
+		il_print_hex_error(il, pkt, 32);
 		return;
 	}
 
@@ -624,7 +624,7 @@ il_tx_cmd_complete(struct il_priv *priv, struct il_rx_mem_buffer *rxb)
 
 	txq->time_stamp = jiffies;
 
-	pci_unmap_single(priv->pci_dev,
+	pci_unmap_single(il->pci_dev,
 			 dma_unmap_addr(meta, mapping),
 			 dma_unmap_len(meta, len),
 			 PCI_DMA_BIDIRECTIONAL);
@@ -634,22 +634,22 @@ il_tx_cmd_complete(struct il_priv *priv, struct il_rx_mem_buffer *rxb)
 		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
 		rxb->page = NULL;
 	} else if (meta->callback)
-		meta->callback(priv, cmd, pkt);
+		meta->callback(il, cmd, pkt);
 
-	spin_lock_irqsave(&priv->hcmd_lock, flags);
+	spin_lock_irqsave(&il->hcmd_lock, flags);
 
-	il_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
+	il_hcmd_queue_reclaim(il, txq_id, index, cmd_index);
 
 	if (!(meta->flags & CMD_ASYNC)) {
-		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-		IL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
+		clear_bit(STATUS_HCMD_ACTIVE, &il->status);
+		IL_DEBUG_INFO(il, "Clearing HCMD_ACTIVE for command %s\n",
 			      il_get_cmd_string(cmd->hdr.cmd));
-		wake_up(&priv->wait_command_queue);
+		wake_up(&il->wait_command_queue);
 	}
 
 	/* Mark as unmapped */
 	meta->flags = 0;
 
-	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+	spin_unlock_irqrestore(&il->hcmd_lock, flags);
 }
 EXPORT_SYMBOL(il_tx_cmd_complete);
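
For context, a minimal sketch (not part of this commit) of how a chip-specific setup path might drive the renamed API above. TXQ_COUNT and the error-unwind shape are illustrative assumptions; the slot counts follow the convention in il_tx_queue_init(), which itself reserves the extra "huge" command slot when txq_id == il->cmd_queue.

/* Hedged sketch, assuming a hypothetical TXQ_COUNT bound; the real
 * driver derives the queue count from its hw_params. */
static int il_sketch_txq_setup(struct il_priv *il)
{
	int txq_id, ret;

	for (txq_id = 0; txq_id < TXQ_COUNT; txq_id++) {
		/* Command queue uses TFD_CMD_SLOTS; data queues use
		 * TFD_TX_CMD_SLOTS, matching the allocators above. */
		int slots = (txq_id == il->cmd_queue) ?
				TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;

		ret = il_tx_queue_init(il, &il->txq[txq_id], slots, txq_id);
		if (ret)
			goto free;
	}
	return 0;

free:
	/* Unwind only the queues that were successfully initialized. */
	while (--txq_id >= 0)
		il_tx_queue_free(il, txq_id);
	return ret;
}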