author		Emmanuel Grumbach <emmanuel.grumbach@intel.com>		2011-08-26 02:11:06 -0400
committer	John W. Linville <linville@tuxdriver.com>		2011-08-29 15:30:27 -0400
commit		6d8f6eeb350696050a1f5cf8f9d0daabab68eaf5 (patch)
tree		69ec1c4cbe98e48240ba28b4bf9ce48fd2ae3c96
parent		790428b6552c698b2f295457b5dee686323cb732 (diff)
iwlagn: transport layer should receive iwl_trans
Change a lot of functions to have them receive iwl_trans and not iwl_priv.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
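The shape of the conversion is the same in every hunk below: a function that took the monolithic iwl_priv now takes iwl_trans, reads shared state through trans->shrd, reaches the device through bus(trans)->dev, and falls back to priv(trans) only where iwl_priv fields have not been migrated yet. The following is a minimal, self-contained sketch of that layering; the struct layouts and field names are illustrative stand-ins, not the driver's real definitions, and only the priv()/bus() accessor idiom is taken from the patch itself.

	#include <stdio.h>

	/* Illustrative stand-ins for the driver's layering, not the real structs. */
	struct iwl_shared { int cmd_queue; };     /* state visible to every layer */
	struct iwl_priv   { const char *name; };  /* upper-layer state being phased out */
	struct iwl_bus    { const char *dev_name; };

	struct iwl_trans {
		struct iwl_shared *shrd;
		struct iwl_priv *drv_priv;        /* escape hatch while the migration runs */
		struct iwl_bus *pcie_bus;
	};

	/* Accessors in the spirit of the priv()/bus() helpers the patch relies on. */
	static struct iwl_priv *priv(struct iwl_trans *trans) { return trans->drv_priv; }
	static struct iwl_bus *bus(struct iwl_trans *trans) { return trans->pcie_bus; }

	/* After the conversion: the transport never sees iwl_priv directly. */
	static void trans_send(struct iwl_trans *trans, int txq_id)
	{
		if (txq_id == trans->shrd->cmd_queue)   /* shared state via trans */
			printf("%s: host command on %s\n",
			       priv(trans)->name, bus(trans)->dev_name);
	}

	int main(void)
	{
		struct iwl_shared shrd = { .cmd_queue = 4 };
		struct iwl_priv p = { .name = "iwlagn" };
		struct iwl_bus b = { .dev_name = "0000:03:00.0" };
		struct iwl_trans trans = { .shrd = &shrd, .drv_priv = &p, .pcie_bus = &b };

		trans_send(&trans, 4);  /* prints: iwlagn: host command on 0000:03:00.0 */
		return 0;
	}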
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h	16
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c	186
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-trans.c	248
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-trans.h	40
4 files changed, 251 insertions, 239 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h b/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
index cb4b59dcfc3b..b2af467430a9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
@@ -140,28 +140,26 @@ irqreturn_t iwl_isr_ict(int irq, void *data);
 * TX / HCMD
 ******************************************************/
 void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
 				 struct iwl_tx_queue *txq,
 				 dma_addr_t addr, u16 len, u8 reset);
-int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
-		   int count, int slots_num, u32 id);
-int iwl_trans_pcie_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-int __must_check iwl_trans_pcie_send_cmd_pdu(struct iwl_priv *priv, u8 id,
+int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
+int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+int __must_check iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id,
 			u32 flags, u16 len, const void *data);
 void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 				       struct iwl_tx_queue *txq,
 				       u16 byte_cnt);
 int iwl_trans_pcie_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
 				   u16 ssn_idx, u8 tx_fifo);
-void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
-			   int txq_id, u32 index);
+void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
 void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
 				   struct iwl_tx_queue *txq,
 				   int tx_fifo_id, int scd_retry);
 void iwl_trans_pcie_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
 				  int frame_limit);
-void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 			 int index);
 void iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
 			  struct sk_buff_head *skbs);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
index ed497247f97e..1704eab8ddf2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
@@ -41,12 +41,11 @@
 /**
  * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
  */
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 				       struct iwl_tx_queue *txq,
 				       u16 byte_cnt)
 {
 	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
-	struct iwl_trans *trans = trans(priv);
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
 	int write_ptr = txq->q.write_ptr;
@@ -170,7 +169,7 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
 	return tfd->num_tbs & 0x1f;
 }

-static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
+static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
 			     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
 {
 	int i;
@@ -180,39 +179,39 @@ static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
 	num_tbs = iwl_tfd_get_num_tbs(tfd);

 	if (num_tbs >= IWL_NUM_OF_TBS) {
-		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
+		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
 		/* @todo issue fatal error, it is quite serious situation */
 		return;
 	}

 	/* Unmap tx_cmd */
 	if (num_tbs)
-		dma_unmap_single(priv->bus->dev,
+		dma_unmap_single(bus(trans)->dev,
 				dma_unmap_addr(meta, mapping),
 				dma_unmap_len(meta, len),
 				DMA_BIDIRECTIONAL);

 	/* Unmap chunks, if any. */
 	for (i = 1; i < num_tbs; i++)
-		dma_unmap_single(priv->bus->dev, iwl_tfd_tb_get_addr(tfd, i),
+		dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
 				iwl_tfd_tb_get_len(tfd, i), dma_dir);
 }

 /**
  * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv - driver private data
+ * @trans - transport private data
  * @txq - tx queue
  * @index - the index of the TFD to be freed
  *
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
-void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 			 int index)
 {
 	struct iwl_tfd *tfd_tmp = txq->tfds;

-	iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
+	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index],
 			 DMA_TO_DEVICE);

 	/* free SKB */
@@ -229,7 +228,7 @@ void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 	}
 }

-int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
 				 struct iwl_tx_queue *txq,
 				 dma_addr_t addr, u16 len,
 				 u8 reset)
@@ -249,7 +248,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,

 	/* Each TFD can point to a maximum 20 Tx buffers */
 	if (num_tbs >= IWL_NUM_OF_TBS) {
-		IWL_ERR(priv, "Error can not send more than %d chunks\n",
+		IWL_ERR(trans, "Error can not send more than %d chunks\n",
 			  IWL_NUM_OF_TBS);
 		return -EINVAL;
 	}
@@ -258,7 +257,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
 		return -EINVAL;

 	if (unlikely(addr & ~IWL_TX_DMA_MASK))
-		IWL_ERR(priv, "Unaligned address = %llx\n",
+		IWL_ERR(trans, "Unaligned address = %llx\n",
 			  (unsigned long long)addr);

 	iwl_tfd_set_tb(tfd, num_tbs, addr, len);
@@ -307,8 +306,7 @@ int iwl_queue_space(const struct iwl_queue *q)
 /**
  * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
  */
-int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
-		   int count, int slots_num, u32 id)
+int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
 {
 	q->n_bd = count;
 	q->n_window = slots_num;
@@ -337,23 +335,20 @@ int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
 	return 0;
 }

-static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
 					  struct iwl_tx_queue *txq)
 {
-	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
-	struct iwl_trans *trans = trans(priv);
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
 	int txq_id = txq->q.id;
 	int read_ptr = txq->q.read_ptr;
 	u8 sta_id = 0;
 	__le16 bc_ent;

-	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
-
 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

-	if (txq_id != priv->shrd->cmd_queue)
+	if (txq_id != trans->shrd->cmd_queue)
 		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

 	bc_ent = cpu_to_le16(1 | (sta_id << 12));
@@ -364,14 +359,13 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
 		tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
 }

-static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
+static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
 					u16 txq_id)
 {
 	u32 tbl_dw_addr;
 	u32 tbl_dw;
 	u16 scd_q2ratid;

-	struct iwl_trans *trans = trans(priv);
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);

@@ -380,34 +374,34 @@ static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
 	tbl_dw_addr = trans_pcie->scd_base_addr +
 			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

-	tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
+	tbl_dw = iwl_read_targ_mem(priv(trans), tbl_dw_addr);

 	if (txq_id & 0x1)
 		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
 	else
 		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

-	iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
+	iwl_write_targ_mem(priv(trans), tbl_dw_addr, tbl_dw);

 	return 0;
 }

-static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
+static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
 {
 	/* Simply stop the queue, but don't change any configuration;
 	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
-	iwl_write_prph(priv,
+	iwl_write_prph(priv(trans),
 		SCD_QUEUE_STATUS_BITS(txq_id),
 		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
 		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
 }

-void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
+void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
 			int txq_id, u32 index)
 {
-	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
+	iwl_write_direct32(priv(trans), HBUS_TARG_WRPTR,
 			(index & 0xff) | (txq_id << 8));
-	iwl_write_prph(priv, SCD_QUEUE_RDPTR(txq_id), index);
+	iwl_write_prph(priv(trans), SCD_QUEUE_RDPTR(txq_id), index);
 }

 void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
@@ -459,10 +453,10 @@ void iwl_trans_pcie_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
 	spin_lock_irqsave(&priv->shrd->lock, flags);

 	/* Stop this Tx queue before configuring it */
-	iwlagn_tx_queue_stop_scheduler(priv, txq_id);
+	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

 	/* Map receiver-address / traffic-ID to this queue */
-	iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
+	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

 	/* Set this queue as a chain-building queue */
 	iwl_set_bits_prph(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id));
@@ -474,7 +468,7 @@ void iwl_trans_pcie_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
 	 * Assumes that ssn_idx is valid (!= 0xFFF) */
 	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
 	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
-	iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
+	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);

 	/* Set up Tx window size and frame limit for this queue */
 	iwl_write_targ_mem(priv, trans_pcie->scd_base_addr +
@@ -501,6 +495,7 @@ void iwl_trans_pcie_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
 int iwl_trans_pcie_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
 				  u16 ssn_idx, u8 tx_fifo)
 {
+	struct iwl_trans *trans = trans(priv);
 	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
 	    (IWLAGN_FIRST_AMPDU_QUEUE +
 		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
@@ -512,14 +507,14 @@ int iwl_trans_pcie_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
 		return -EINVAL;
 	}

-	iwlagn_tx_queue_stop_scheduler(priv, txq_id);
+	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

 	iwl_clear_bits_prph(priv, SCD_AGGR_SEL, (1 << txq_id));

 	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
 	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
 	/* supposes that ssn_idx is valid (!= 0xFFF) */
-	iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
+	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);

 	iwl_clear_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
 	iwl_txq_ctx_deactivate(priv, txq_id);
@@ -539,8 +534,9 @@ int iwl_trans_pcie_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
  * failed. On success, it turns the index (> 0) of command in the
  * command queue.
  */
-static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
+	struct iwl_priv *priv = priv(trans);
 	struct iwl_tx_queue *txq = &priv->txq[priv->shrd->cmd_queue];
 	struct iwl_queue *q = &txq->q;
 	struct iwl_device_cmd *out_cmd;
@@ -559,14 +555,14 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	int trace_idx;
 #endif

-	if (test_bit(STATUS_FW_ERROR, &priv->shrd->status)) {
-		IWL_WARN(priv, "fw recovery, no hcmd send\n");
+	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
+		IWL_WARN(trans, "fw recovery, no hcmd send\n");
 		return -EIO;
 	}

 	if ((priv->ucode_owner == IWL_OWNERSHIP_TM) &&
 	    !(cmd->flags & CMD_ON_DEMAND)) {
-		IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
+		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
 		return -EIO;
 	}

@@ -599,9 +595,9 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
 		return -EINVAL;

-	if (iwl_is_rfkill(priv->shrd) || iwl_is_ctkill(priv->shrd)) {
-		IWL_WARN(priv, "Not sending command - %s KILL\n",
-			 iwl_is_rfkill(priv->shrd) ? "RF" : "CT");
+	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
+		IWL_WARN(trans, "Not sending command - %s KILL\n",
+			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
 		return -EIO;
 	}

@@ -610,10 +606,10 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
 		spin_unlock_irqrestore(&priv->hcmd_lock, flags);

-		IWL_ERR(priv, "No space in command queue\n");
+		IWL_ERR(trans, "No space in command queue\n");
 		is_ct_kill = iwl_check_for_ct_kill(priv);
 		if (!is_ct_kill) {
-			IWL_ERR(priv, "Restarting adapter due to queue full\n");
+			IWL_ERR(trans, "Restarting adapter queue is full\n");
 			iwlagn_fw_error(priv, false);
 		}
 		return -ENOSPC;
@@ -634,7 +630,7 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	out_cmd->hdr.cmd = cmd->id;
 	out_cmd->hdr.flags = 0;
 	out_cmd->hdr.sequence =
-		cpu_to_le16(QUEUE_TO_SEQ(priv->shrd->cmd_queue) |
+		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
 			    INDEX_TO_SEQ(q->write_ptr));

 	/* and copy the data that needs to be copied */
@@ -649,16 +645,16 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 		cmd_dest += cmd->len[i];
 	}

-	IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
+	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
 			"%d bytes at %d[%d]:%d\n",
 			get_cmd_string(out_cmd->hdr.cmd),
 			out_cmd->hdr.cmd,
 			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
-			q->write_ptr, idx, priv->shrd->cmd_queue);
+			q->write_ptr, idx, trans->shrd->cmd_queue);

-	phys_addr = dma_map_single(priv->bus->dev, &out_cmd->hdr, copy_size,
+	phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
 				DMA_BIDIRECTIONAL);
-	if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
+	if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
 		idx = -ENOMEM;
 		goto out;
 	}
@@ -666,7 +662,8 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	dma_unmap_addr_set(out_meta, mapping, phys_addr);
 	dma_unmap_len_set(out_meta, len, copy_size);

-	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
+	iwlagn_txq_attach_buf_to_tfd(trans, txq,
+					phys_addr, copy_size, 1);
 #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
 	trace_bufs[0] = &out_cmd->hdr;
 	trace_lens[0] = copy_size;
@@ -678,17 +675,18 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 			continue;
 		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
 			continue;
-		phys_addr = dma_map_single(priv->bus->dev, (void *)cmd->data[i],
+		phys_addr = dma_map_single(bus(trans)->dev,
+					   (void *)cmd->data[i],
 					   cmd->len[i], DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(priv->bus->dev, phys_addr)) {
-			iwlagn_unmap_tfd(priv, out_meta,
+		if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
+			iwlagn_unmap_tfd(trans, out_meta,
 					 &txq->tfds[q->write_ptr],
 					 DMA_BIDIRECTIONAL);
 			idx = -ENOMEM;
 			goto out;
 		}

-		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
+		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
 					     cmd->len[i], 0);
 #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
 		trace_bufs[trace_idx] = cmd->data[i];
@@ -768,17 +766,18 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	int cmd_index;
 	struct iwl_device_cmd *cmd;
 	struct iwl_cmd_meta *meta;
-	struct iwl_tx_queue *txq = &priv->txq[priv->shrd->cmd_queue];
+	struct iwl_trans *trans = trans(priv);
+	struct iwl_tx_queue *txq = &priv->txq[trans->shrd->cmd_queue];
 	unsigned long flags;

 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then there a command routing bug has been introduced
 	 * in the queue management code. */
-	if (WARN(txq_id != priv->shrd->cmd_queue,
+	if (WARN(txq_id != trans->shrd->cmd_queue,
 		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
-		  txq_id, priv->shrd->cmd_queue, sequence,
-		  priv->txq[priv->shrd->cmd_queue].q.read_ptr,
-		  priv->txq[priv->shrd->cmd_queue].q.write_ptr)) {
+		  txq_id, trans->shrd->cmd_queue, sequence,
+		  priv->txq[trans->shrd->cmd_queue].q.read_ptr,
+		  priv->txq[trans->shrd->cmd_queue].q.write_ptr)) {
 		iwl_print_hex_error(priv, pkt, 32);
 		return;
 	}
@@ -787,7 +786,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	cmd = txq->cmd[cmd_index];
 	meta = &txq->meta[cmd_index];

-	iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
+	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
+			 DMA_BIDIRECTIONAL);

 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {
@@ -801,8 +801,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	iwl_hcmd_queue_reclaim(priv, txq_id, index);

 	if (!(meta->flags & CMD_ASYNC)) {
-		clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
-		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
+		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
 			       get_cmd_string(cmd->hdr.cmd));
 		wake_up_interruptible(&priv->wait_command_queue);
 	}
@@ -920,7 +920,7 @@ static void iwl_generic_cmd_callback(struct iwl_priv *priv,
 #endif
 }

-static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
 	int ret;

@@ -932,77 +932,77 @@ static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	if (!cmd->callback)
 		cmd->callback = iwl_generic_cmd_callback;

-	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
 		return -EBUSY;

-	ret = iwl_enqueue_hcmd(priv, cmd);
+	ret = iwl_enqueue_hcmd(trans, cmd);
 	if (ret < 0) {
-		IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
 			  get_cmd_string(cmd->id), ret);
 		return ret;
 	}
 	return 0;
 }

-static int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
 	int cmd_idx;
 	int ret;

-	lockdep_assert_held(&priv->shrd->mutex);
+	lockdep_assert_held(&trans->shrd->mutex);

 	/* A synchronous command can not have a callback set. */
 	if (WARN_ON(cmd->callback))
 		return -EINVAL;

-	IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
+	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
 			get_cmd_string(cmd->id));

-	set_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
-	IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
+	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
 			get_cmd_string(cmd->id));

-	cmd_idx = iwl_enqueue_hcmd(priv, cmd);
+	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
 	if (cmd_idx < 0) {
 		ret = cmd_idx;
-		clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
-		IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
 			  get_cmd_string(cmd->id), ret);
 		return ret;
 	}

-	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
-			!test_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status),
+	ret = wait_event_interruptible_timeout(priv(trans)->wait_command_queue,
+			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
 			HOST_COMPLETE_TIMEOUT);
 	if (!ret) {
-		if (test_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status)) {
-			IWL_ERR(priv,
+		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
+			IWL_ERR(trans,
 				"Error sending %s: time out after %dms.\n",
 				get_cmd_string(cmd->id),
 				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

-			clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
-			IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command"
+			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
 				       "%s\n", get_cmd_string(cmd->id));
 			ret = -ETIMEDOUT;
 			goto cancel;
 		}
 	}

-	if (test_bit(STATUS_RF_KILL_HW, &priv->shrd->status)) {
-		IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
+	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
+		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
 			       get_cmd_string(cmd->id));
 		ret = -ECANCELED;
 		goto fail;
 	}
-	if (test_bit(STATUS_FW_ERROR, &priv->shrd->status)) {
-		IWL_ERR(priv, "Command %s failed: FW Error\n",
+	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
+		IWL_ERR(trans, "Command %s failed: FW Error\n",
 			       get_cmd_string(cmd->id));
 		ret = -EIO;
 		goto fail;
 	}
 	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
-		IWL_ERR(priv, "Error: Response NULL in '%s'\n",
+		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
 			  get_cmd_string(cmd->id));
 		ret = -EIO;
 		goto cancel;
@@ -1018,27 +1018,27 @@ cancel:
 		 * in later, it will possibly set an invalid
 		 * address (cmd->meta.source).
 		 */
-		priv->txq[priv->shrd->cmd_queue].meta[cmd_idx].flags &=
+		priv(trans)->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
 							~CMD_WANT_SKB;
 	}
 fail:
 	if (cmd->reply_page) {
-		iwl_free_pages(priv->shrd, cmd->reply_page);
+		iwl_free_pages(trans->shrd, cmd->reply_page);
 		cmd->reply_page = 0;
 	}

 	return ret;
 }

-int iwl_trans_pcie_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
 	if (cmd->flags & CMD_ASYNC)
-		return iwl_send_cmd_async(priv, cmd);
+		return iwl_send_cmd_async(trans, cmd);

-	return iwl_send_cmd_sync(priv, cmd);
+	return iwl_send_cmd_sync(trans, cmd);
 }

-int iwl_trans_pcie_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags,
+int iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
 				u16 len, const void *data)
 {
 	struct iwl_host_cmd cmd = {
@@ -1048,7 +1048,7 @@ int iwl_trans_pcie_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags,
 		.flags = flags,
 	};

-	return iwl_trans_pcie_send_cmd(priv, &cmd);
+	return iwl_trans_pcie_send_cmd(trans, &cmd);
 }

 /* Frees buffers until index _not_ inclusive */
@@ -1096,8 +1096,8 @@ void iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,

 		tx_info->skb = NULL;

-		iwlagn_txq_inval_byte_cnt_tbl(priv(trans), txq);
+		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

-		iwlagn_txq_free_tfd(priv(trans), txq, txq->q.read_ptr);
+		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr);
 	}
 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
index 89560089a348..b448e79c259b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
@@ -62,6 +62,8 @@
 *****************************************************************************/
 #include <linux/interrupt.h>
 #include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/gfp.h>

 #include "iwl-dev.h"
 #include "iwl-trans.h"
@@ -263,22 +265,22 @@ static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
 	rxq->rb_stts = NULL;
 }

-static int iwl_trans_rx_stop(struct iwl_priv *priv)
+static int iwl_trans_rx_stop(struct iwl_trans *trans)
 {

 	/* stop Rx DMA */
-	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-	return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
+	iwl_write_direct32(priv(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	return iwl_poll_direct_bit(priv(trans), FH_MEM_RSSR_RX_STATUS_REG,
 			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
 }

-static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
+static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
 				    struct iwl_dma_ptr *ptr, size_t size)
 {
 	if (WARN_ON(ptr->addr))
 		return -EINVAL;

-	ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
+	ptr->addr = dma_alloc_coherent(bus(trans)->dev, size,
 				       &ptr->dma, GFP_KERNEL);
 	if (!ptr->addr)
 		return -ENOMEM;
@@ -286,20 +288,21 @@ static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
 	return 0;
 }

-static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
+static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
 				 struct iwl_dma_ptr *ptr)
 {
 	if (unlikely(!ptr->addr))
 		return;

-	dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
+	dma_free_coherent(bus(trans)->dev, ptr->size, ptr->addr, ptr->dma);
 	memset(ptr, 0, sizeof(*ptr));
 }

-static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
-			       int slots_num, u32 txq_id)
+static int iwl_trans_txq_alloc(struct iwl_trans *trans,
+			       struct iwl_tx_queue *txq, int slots_num,
+			       u32 txq_id)
 {
-	size_t tfd_sz = hw_params(priv).tfd_size * TFD_QUEUE_SIZE_MAX;
+	size_t tfd_sz = hw_params(trans).tfd_size * TFD_QUEUE_SIZE_MAX;
 	int i;

 	if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
@@ -325,11 +328,11 @@ static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 	/* Alloc driver data array and TFD circular buffer */
 	/* Driver private data, only for Tx (not command) queues,
 	 * not shared with device. */
-	if (txq_id != priv->shrd->cmd_queue) {
+	if (txq_id != trans->shrd->cmd_queue) {
 		txq->txb = kzalloc(sizeof(txq->txb[0]) *
 				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
 		if (!txq->txb) {
-			IWL_ERR(priv, "kmalloc for auxiliary BD "
+			IWL_ERR(trans, "kmalloc for auxiliary BD "
 				"structures failed\n");
 			goto error;
 		}
@@ -339,10 +342,10 @@ static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,

 	/* Circular buffer of transmit frame descriptors (TFDs),
 	 * shared with device */
-	txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
-				       GFP_KERNEL);
+	txq->tfds = dma_alloc_coherent(bus(trans)->dev, tfd_sz,
+				       &txq->q.dma_addr, GFP_KERNEL);
 	if (!txq->tfds) {
-		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
+		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
 		goto error;
 	}
 	txq->q.id = txq_id;
@@ -365,7 +368,7 @@ error:

 }

-static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 			      int slots_num, u32 txq_id)
 {
 	int ret;
@@ -386,7 +389,7 @@ static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

 	/* Initialize queue's high/low-water marks, and head/tail indexes */
-	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
+	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
 			txq_id);
 	if (ret)
 		return ret;
@@ -395,7 +398,7 @@ static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 	 * given Tx queue, and enable the DMA channel used for that queue.
 	 * Circular buffer (TFD queue in DRAM) physical base address */
-	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
+	iwl_write_direct32(priv(trans), FH_MEM_CBBC_QUEUE(txq_id),
 			     txq->q.dma_addr >> 8);

 	return 0;
@@ -404,8 +407,9 @@ static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 /**
  * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
  */
-static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
+static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
 {
+	struct iwl_priv *priv = priv(trans);
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;

@@ -414,7 +418,7 @@ static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)

 	while (q->write_ptr != q->read_ptr) {
 		/* The read_ptr needs to bound by q->n_window */
-		iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
+		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr));
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
 }
@@ -427,15 +431,16 @@ static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
  * Free all buffers.
  * 0-fill, but do not free "txq" descriptor structure.
  */
-static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
+static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
 {
+	struct iwl_priv *priv = priv(trans);
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
-	struct device *dev = priv->bus->dev;
+	struct device *dev = bus(trans)->dev;
 	int i;
 	if (WARN_ON(!txq))
 		return;

-	iwl_tx_queue_unmap(priv, txq_id);
+	iwl_tx_queue_unmap(trans, txq_id);

 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i < txq->q.n_window; i++)
@@ -443,7 +448,7 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)

 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd) {
-		dma_free_coherent(dev, hw_params(priv).tfd_size *
+		dma_free_coherent(dev, hw_params(trans).tfd_size *
 				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
 		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
 	}
@@ -467,26 +472,26 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
  *
  * Destroy all TX DMA queues and structures
  */
-static void iwl_trans_pcie_tx_free(struct iwl_priv *priv)
+static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
 {
 	int txq_id;
-	struct iwl_trans *trans = trans(priv);
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_priv *priv = priv(trans);

 	/* Tx queues */
 	if (priv->txq) {
 		for (txq_id = 0;
-		     txq_id < hw_params(priv).max_txq_num; txq_id++)
-			iwl_tx_queue_free(priv, txq_id);
+		     txq_id < hw_params(trans).max_txq_num; txq_id++)
+			iwl_tx_queue_free(trans, txq_id);
 	}

 	kfree(priv->txq);
 	priv->txq = NULL;

-	iwlagn_free_dma_ptr(priv, &priv->kw);
+	iwlagn_free_dma_ptr(trans, &priv->kw);

-	iwlagn_free_dma_ptr(priv, &trans_pcie->scd_bc_tbls);
+	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
 }

 /**
@@ -496,11 +501,11 @@ static void iwl_trans_pcie_tx_free(struct iwl_priv *priv)
  * @param priv
  * @return error code
  */
-static int iwl_trans_tx_alloc(struct iwl_priv *priv)
+static int iwl_trans_tx_alloc(struct iwl_trans *trans)
 {
 	int ret;
 	int txq_id, slots_num;
-	struct iwl_trans *trans = trans(priv);
+	struct iwl_priv *priv = priv(trans);
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);

@@ -511,36 +516,36 @@ static int iwl_trans_tx_alloc(struct iwl_priv *priv)
 		goto error;
 	}

-	ret = iwlagn_alloc_dma_ptr(priv, &trans_pcie->scd_bc_tbls,
-				hw_params(priv).scd_bc_tbls_size);
+	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
+				hw_params(trans).scd_bc_tbls_size);
 	if (ret) {
-		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
+		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
 		goto error;
 	}

 	/* Alloc keep-warm buffer */
-	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
+	ret = iwlagn_alloc_dma_ptr(trans, &priv->kw, IWL_KW_SIZE);
 	if (ret) {
-		IWL_ERR(priv, "Keep Warm allocation failed\n");
+		IWL_ERR(trans, "Keep Warm allocation failed\n");
 		goto error;
 	}

 	priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
 			priv->cfg->base_params->num_of_queues, GFP_KERNEL);
 	if (!priv->txq) {
-		IWL_ERR(priv, "Not enough memory for txq\n");
+		IWL_ERR(trans, "Not enough memory for txq\n");
 		ret = ENOMEM;
 		goto error;
 	}

 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
-	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
-		slots_num = (txq_id == priv->shrd->cmd_queue) ?
+	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
+		slots_num = (txq_id == trans->shrd->cmd_queue) ?
 			TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-		ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
+		ret = iwl_trans_txq_alloc(trans, &priv->txq[txq_id], slots_num,
 				txq_id);
 		if (ret) {
-			IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
+			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
 			goto error;
 		}
 	}
@@ -548,25 +553,26 @@ static int iwl_trans_tx_alloc(struct iwl_priv *priv)
 	return 0;

 error:
-	iwl_trans_tx_free(trans(priv));
+	iwl_trans_tx_free(trans);

 	return ret;
 }
-static int iwl_tx_init(struct iwl_priv *priv)
+static int iwl_tx_init(struct iwl_trans *trans)
 {
 	int ret;
 	int txq_id, slots_num;
 	unsigned long flags;
 	bool alloc = false;
+	struct iwl_priv *priv = priv(trans);

 	if (!priv->txq) {
-		ret = iwl_trans_tx_alloc(priv);
+		ret = iwl_trans_tx_alloc(trans);
 		if (ret)
 			goto error;
 		alloc = true;
 	}

-	spin_lock_irqsave(&priv->shrd->lock, flags);
+	spin_lock_irqsave(&trans->shrd->lock, flags);

 	/* Turn off all Tx DMA fifos */
 	iwl_write_prph(priv, SCD_TXFACT, 0);
@@ -574,16 +580,16 @@ static int iwl_tx_init(struct iwl_priv *priv)
 	/* Tell NIC where to find the "keep warm" buffer */
 	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

-	spin_unlock_irqrestore(&priv->shrd->lock, flags);
+	spin_unlock_irqrestore(&trans->shrd->lock, flags);

 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
-	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
-		slots_num = (txq_id == priv->shrd->cmd_queue) ?
+	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
+		slots_num = (txq_id == trans->shrd->cmd_queue) ?
 			TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-		ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
+		ret = iwl_trans_txq_init(trans, &priv->txq[txq_id], slots_num,
 				txq_id);
 		if (ret) {
-			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
+			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
 			goto error;
 		}
 	}
@@ -592,7 +598,7 @@ static int iwl_tx_init(struct iwl_priv *priv)
 error:
 	/*Upon error, free only if we allocated something */
 	if (alloc)
-		iwl_trans_tx_free(trans(priv));
+		iwl_trans_tx_free(trans);
 	return ret;
 }

@@ -613,28 +619,29 @@ static void iwl_set_pwr_vmain(struct iwl_priv *priv)
 					~APMG_PS_CTRL_MSK_PWR_SRC);
 }

-static int iwl_nic_init(struct iwl_priv *priv)
+static int iwl_nic_init(struct iwl_trans *trans)
 {
 	unsigned long flags;
+	struct iwl_priv *priv = priv(trans);

 	/* nic_init */
-	spin_lock_irqsave(&priv->shrd->lock, flags);
+	spin_lock_irqsave(&trans->shrd->lock, flags);
 	iwl_apm_init(priv);

 	/* Set interrupt coalescing calibration timer to default (512 usecs) */
 	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

-	spin_unlock_irqrestore(&priv->shrd->lock, flags);
+	spin_unlock_irqrestore(&trans->shrd->lock, flags);

 	iwl_set_pwr_vmain(priv);

 	priv->cfg->lib->nic_config(priv);

 	/* Allocate the RX queue, or reset if it is already allocated */
-	iwl_rx_init(trans(priv));
+	iwl_rx_init(trans);

 	/* Allocate or reset and init all Tx and Command queues */
-	if (iwl_tx_init(priv))
+	if (iwl_tx_init(trans))
 		return -ENOMEM;

 	if (priv->cfg->base_params->shadow_reg_enable) {
@@ -643,7 +650,7 @@ static int iwl_nic_init(struct iwl_priv *priv)
 			   0x800FFFFF);
 	}

-	set_bit(STATUS_INIT, &priv->shrd->status);
+	set_bit(STATUS_INIT, &trans->shrd->status);

 	return 0;
 }
@@ -651,39 +658,39 @@ static int iwl_nic_init(struct iwl_priv *priv)
 #define HW_READY_TIMEOUT (50)

 /* Note: returns poll_bit return value, which is >= 0 if success */
-static int iwl_set_hw_ready(struct iwl_priv *priv)
+static int iwl_set_hw_ready(struct iwl_trans *trans)
 {
 	int ret;

-	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+	iwl_set_bit(priv(trans), CSR_HW_IF_CONFIG_REG,
 		CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

 	/* See if we got it */
-	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+	ret = iwl_poll_bit(priv(trans), CSR_HW_IF_CONFIG_REG,
 				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
 				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
 				HW_READY_TIMEOUT);

-	IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
+	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
 	return ret;
 }

 /* Note: returns standard 0/-ERROR code */
-static int iwl_trans_pcie_prepare_card_hw(struct iwl_priv *priv)
+static int iwl_trans_pcie_prepare_card_hw(struct iwl_trans *trans)
 {
 	int ret;

-	IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");
+	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

-	ret = iwl_set_hw_ready(priv);
+	ret = iwl_set_hw_ready(trans);
 	if (ret >= 0)
 		return 0;

 	/* If HW is not ready, prepare the conditions to check again */
-	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+	iwl_set_bit(priv(trans), CSR_HW_IF_CONFIG_REG,
 			CSR_HW_IF_CONFIG_REG_PREPARE);

-	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+	ret = iwl_poll_bit(priv(trans), CSR_HW_IF_CONFIG_REG,
 			~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
 			CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

@@ -691,42 +698,43 @@ static int iwl_trans_pcie_prepare_card_hw(struct iwl_priv *priv)
 		return ret;

 	/* HW should be ready by now, check again. */
-	ret = iwl_set_hw_ready(priv);
+	ret = iwl_set_hw_ready(trans);
 	if (ret >= 0)
 		return 0;
 	return ret;
 }

-static int iwl_trans_pcie_start_device(struct iwl_priv *priv)
+static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
 {
 	int ret;
+	struct iwl_priv *priv = priv(trans);

 	priv->ucode_owner = IWL_OWNERSHIP_DRIVER;

 	if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
-	     iwl_trans_pcie_prepare_card_hw(priv)) {
-		IWL_WARN(priv, "Exit HW not ready\n");
+	     iwl_trans_pcie_prepare_card_hw(trans)) {
+		IWL_WARN(trans, "Exit HW not ready\n");
 		return -EIO;
 	}

 	/* If platform's RF_KILL switch is NOT set to KILL */
 	if (iwl_read32(priv, CSR_GP_CNTRL) &
 			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
-		clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
+		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
 	else
-		set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
+		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

-	if (iwl_is_rfkill(priv->shrd)) {
+	if (iwl_is_rfkill(trans->shrd)) {
 		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
-		iwl_enable_interrupts(trans(priv));
+		iwl_enable_interrupts(trans);
 		return -ERFKILL;
 	}

 	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

-	ret = iwl_nic_init(priv);
+	ret = iwl_nic_init(trans);
 	if (ret) {
-		IWL_ERR(priv, "Unable to init nic\n");
+		IWL_ERR(trans, "Unable to init nic\n");
 		return ret;
 	}

@@ -737,7 +745,7 @@ static int iwl_trans_pcie_start_device(struct iwl_priv *priv)

 	/* clear (again), then enable host interrupts */
 	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-	iwl_enable_interrupts(trans(priv));
+	iwl_enable_interrupts(trans);

 	/* really make sure rfkill handshake bits are cleared */
 	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
@@ -750,9 +758,9 @@ static int iwl_trans_pcie_start_device(struct iwl_priv *priv)
  * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
  * must be called under priv->shrd->lock and mac access
  */
-static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
+static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
 {
-       iwl_write_prph(priv, SCD_TXFACT, mask);
+       iwl_write_prph(priv(trans), SCD_TXFACT, mask);
 }

 #define IWL_AC_UNSET -1
@@ -788,11 +796,11 @@ static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
        { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
        { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
 };
-static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
+static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
 {
        const struct queue_to_fifo_ac *queue_to_fifo;
        struct iwl_rxon_context *ctx;
-       struct iwl_trans *trans = trans(priv);
+       struct iwl_priv *priv = priv(trans);
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 a;
@@ -856,7 +864,7 @@ static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
                        IWL_MASK(0, hw_params(trans).max_txq_num));

        /* Activate all Tx DMA/FIFO channels */
-       iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));
+       iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

        /* map queues to FIFOs */
        if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
@@ -864,7 +872,7 @@ static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
        else
                queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

-       iwl_trans_set_wr_ptrs(priv, priv->shrd->cmd_queue, 0);
+       iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);

        /* make sure all queue are not stopped */
        memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
@@ -895,7 +903,7 @@ static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
                iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
        }

-       spin_unlock_irqrestore(&priv->shrd->lock, flags);
+       spin_unlock_irqrestore(&trans->shrd->lock, flags);

        /* Enable L1-Active */
        iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
@@ -905,50 +913,53 @@ static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
 /**
  * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
  */
-static int iwl_trans_tx_stop(struct iwl_priv *priv)
+static int iwl_trans_tx_stop(struct iwl_trans *trans)
 {
        int ch, txq_id;
        unsigned long flags;
+       struct iwl_priv *priv = priv(trans);

        /* Turn off all Tx DMA fifos */
-       spin_lock_irqsave(&priv->shrd->lock, flags);
+       spin_lock_irqsave(&trans->shrd->lock, flags);

-       iwl_trans_txq_set_sched(priv, 0);
+       iwl_trans_txq_set_sched(trans, 0);

        /* Stop each Tx DMA channel, and wait for it to be idle */
        for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
-               iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
-               if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
+               iwl_write_direct32(priv(trans),
+                                  FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+               if (iwl_poll_direct_bit(priv(trans), FH_TSSR_TX_STATUS_REG,
                        FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
                        1000))
-                       IWL_ERR(priv, "Failing on timeout while stopping"
+                       IWL_ERR(trans, "Failing on timeout while stopping"
                        " DMA channel %d [0x%08x]", ch,
-                       iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
+                       iwl_read_direct32(priv(trans),
+                                         FH_TSSR_TX_STATUS_REG));
        }
-       spin_unlock_irqrestore(&priv->shrd->lock, flags);
+       spin_unlock_irqrestore(&trans->shrd->lock, flags);

        if (!priv->txq) {
-               IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
+               IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
                return 0;
        }

        /* Unmap DMA from host system and free skb's */
-       for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++)
-               iwl_tx_queue_unmap(priv, txq_id);
+       for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
+               iwl_tx_queue_unmap(trans, txq_id);

        return 0;
 }

-static void iwl_trans_pcie_stop_device(struct iwl_priv *priv)
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 {
        /* stop and reset the on-board processor */
-       iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+       iwl_write32(priv(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

        /* tell the device to stop sending interrupts */
-       iwl_trans_disable_sync_irq(trans(priv));
+       iwl_trans_disable_sync_irq(trans);

        /* device going down, Stop using ICT table */
-       iwl_disable_ict(trans(priv));
+       iwl_disable_ict(trans);

        /*
         * If a HW restart happens during firmware loading,
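The drain loop in iwl_trans_tx_stop() above is a classic two-step FH shutdown: zero each channel's TX_CONFIG register, then poll the TSSR status register until that channel's idle bit asserts, logging (rather than aborting on) a 1 ms timeout. Reusing the poll helper sketched earlier, with placeholder offsets and masks standing in for the real FH_* macros:

    #include <linux/io.h>
    #include <linux/printk.h>

    /* Placeholders, not the driver's FH_* values. */
    #define CHNL_TX_CONFIG_SKETCH(ch)       (0x0d00 + (ch) * 0x20)
    #define TX_STATUS_SKETCH                0x0ea0
    #define CHNL_IDLE_SKETCH(ch)            (1u << (ch))
    #define NUM_CHANNELS_SKETCH             8

    static void tx_dma_drain_sketch(void __iomem *base)
    {
            int ch;

            for (ch = 0; ch < NUM_CHANNELS_SKETCH; ch++) {
                    /* 1) stop the channel */
                    writel(0, base + CHNL_TX_CONFIG_SKETCH(ch));
                    /* 2) wait up to 1 ms for its idle bit */
                    if (poll_bit_sketch(base, TX_STATUS_SKETCH,
                                        CHNL_IDLE_SKETCH(ch),
                                        CHNL_IDLE_SKETCH(ch), 1000) < 0)
                            pr_err("DMA channel %d did not go idle\n", ch);
            }
    }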
@@ -957,26 +968,28 @@ static void iwl_trans_pcie_stop_device(struct iwl_priv *priv)
         * restart. So don't process again if the device is
         * already dead.
         */
-       if (test_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status)) {
-               iwl_trans_tx_stop(priv);
-               iwl_trans_rx_stop(priv);
+       if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
+               iwl_trans_tx_stop(trans);
+               iwl_trans_rx_stop(trans);

                /* Power-down device's busmaster DMA clocks */
-               iwl_write_prph(priv, APMG_CLK_DIS_REG,
+               iwl_write_prph(priv(trans), APMG_CLK_DIS_REG,
                                APMG_CLK_VAL_DMA_CLK_RQT);
                udelay(5);
        }

        /* Make sure (redundant) we've released our request to stay awake */
-       iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+       iwl_clear_bit(priv(trans), CSR_GP_CNTRL,
+                      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /* Stop the device, and put it in low power state */
-       iwl_apm_stop(priv);
+       iwl_apm_stop(priv(trans));
 }

-static struct iwl_tx_cmd *iwl_trans_pcie_get_tx_cmd(struct iwl_priv *priv,
+static struct iwl_tx_cmd *iwl_trans_pcie_get_tx_cmd(struct iwl_trans *trans,
                                                int txq_id)
 {
+       struct iwl_priv *priv = priv(trans);
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *dev_cmd;
@@ -1072,9 +1085,10 @@ static int iwl_trans_pcie_tx(struct iwl_priv *priv, struct sk_buff *skb,
        }

        /* Attach buffers to TFD */
-       iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
+       iwlagn_txq_attach_buf_to_tfd(trans(priv), txq, txcmd_phys,
+                                    firstlen, 1);
        if (secondlen > 0)
-               iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
+               iwlagn_txq_attach_buf_to_tfd(trans(priv), txq, phys_addr,
                                             secondlen, 0);

        scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
@@ -1094,7 +1108,7 @@ static int iwl_trans_pcie_tx(struct iwl_priv *priv, struct sk_buff *skb,

        /* Set up entry for this TFD in Tx byte-count array */
        if (ampdu)
-               iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
+               iwl_trans_txq_update_byte_cnt_tbl(trans(priv), txq,
                                        le16_to_cpu(tx_cmd->len));

        dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
@@ -1127,10 +1141,10 @@ static int iwl_trans_pcie_tx(struct iwl_priv *priv, struct sk_buff *skb,
        return 0;
 }

-static void iwl_trans_pcie_kick_nic(struct iwl_priv *priv)
+static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
 {
        /* Remove all resets to allow NIC to operate */
-       iwl_write32(priv, CSR_RESET, 0);
+       iwl_write32(priv(trans), CSR_RESET, 0);
 }

 static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
@@ -1201,12 +1215,12 @@ static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
        tasklet_kill(&trans_pcie->irq_tasklet);
 }

-static void iwl_trans_pcie_free(struct iwl_priv *priv)
+static void iwl_trans_pcie_free(struct iwl_trans *trans)
 {
-       free_irq(priv->bus->irq, trans(priv));
-       iwl_free_isr_ict(trans(priv));
-       kfree(trans(priv));
-       trans(priv) = NULL;
+       free_irq(bus(trans)->irq, trans);
+       iwl_free_isr_ict(trans);
+       trans->shrd->trans = NULL;
+       kfree(trans);
 }

 #ifdef CONFIG_PM
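Two details in iwl_trans_pcie_free() are worth flagging. First, the IRQ is released via bus(trans)->irq, another shared-area accessor in the same family as priv()/trans(). Second, the teardown order changed: the shared area's back-pointer is cleared before kfree(), so there is no window in which trans->shrd->trans points at freed memory. A compressed sketch of the new order, using the simplified layouts from the earlier sketch:

    #include <linux/slab.h>

    /* Sketch: detach the back-pointer first, so no other user of the
     * shared area can observe a dangling transport pointer. */
    static void trans_free_sketch(struct iwl_trans *trans)
    {
            trans->shrd->trans = NULL;
            kfree(trans);
    }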
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 7fd0296f155e..da6cc59dfa2b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -109,18 +109,18 @@ struct iwl_trans_ops {

        struct iwl_trans *(*alloc)(struct iwl_shared *shrd);
        int (*request_irq)(struct iwl_trans *iwl_trans);
-       int (*start_device)(struct iwl_priv *priv);
-       int (*prepare_card_hw)(struct iwl_priv *priv);
-       void (*stop_device)(struct iwl_priv *priv);
-       void (*tx_start)(struct iwl_priv *priv);
-       void (*tx_free)(struct iwl_priv *priv);
+       int (*start_device)(struct iwl_trans *trans);
+       int (*prepare_card_hw)(struct iwl_trans *trans);
+       void (*stop_device)(struct iwl_trans *trans);
+       void (*tx_start)(struct iwl_trans *trans);
+       void (*tx_free)(struct iwl_trans *trans);
        void (*rx_free)(struct iwl_trans *trans);

-       int (*send_cmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+       int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

-       int (*send_cmd_pdu)(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
+       int (*send_cmd_pdu)(struct iwl_trans *trans, u8 id, u32 flags, u16 len,
                        const void *data);
-       struct iwl_tx_cmd * (*get_tx_cmd)(struct iwl_priv *priv, int txq_id);
+       struct iwl_tx_cmd * (*get_tx_cmd)(struct iwl_trans *trans, int txq_id);
        int (*tx)(struct iwl_priv *priv, struct sk_buff *skb,
                struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
                struct iwl_rxon_context *ctx);
@@ -132,10 +132,10 @@ struct iwl_trans_ops {
        void (*txq_agg_setup)(struct iwl_priv *priv, int sta_id, int tid,
                                int frame_limit);

-       void (*kick_nic)(struct iwl_priv *priv);
+       void (*kick_nic)(struct iwl_trans *trans);

        void (*disable_sync_irq)(struct iwl_trans *trans);
-       void (*free)(struct iwl_priv *priv);
+       void (*free)(struct iwl_trans *trans);

        int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
        int (*suspend)(struct iwl_trans *trans);
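This hunk is the core of the patch: nearly every hook in struct iwl_trans_ops now receives the struct iwl_trans * handle itself rather than struct iwl_priv *; only tx and txq_agg_setup still take priv at this point in the series. For readers new to the pattern, here is a self-contained sketch of such an ops table with invented my_* names (not the driver's API):

    #include <linux/printk.h>

    struct my_trans;

    /* Every hook takes the transport handle, never the upper layer. */
    struct my_trans_ops {
            int  (*start_device)(struct my_trans *trans);
            void (*stop_device)(struct my_trans *trans);
    };

    struct my_trans {
            const struct my_trans_ops *ops;
    };

    static int my_pcie_start_device(struct my_trans *trans)
    {
            pr_info("pcie backend: start\n");
            return 0;
    }

    static void my_pcie_stop_device(struct my_trans *trans)
    {
            pr_info("pcie backend: stop\n");
    }

    static const struct my_trans_ops my_pcie_ops = {
            .start_device   = my_pcie_start_device,
            .stop_device    = my_pcie_stop_device,
    };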
@@ -163,22 +163,22 @@ static inline int iwl_trans_request_irq(struct iwl_trans *trans)

 static inline int iwl_trans_start_device(struct iwl_trans *trans)
 {
-       return trans->ops->start_device(priv(trans));
+       return trans->ops->start_device(trans);
 }

 static inline int iwl_trans_prepare_card_hw(struct iwl_trans *trans)
 {
-       return trans->ops->prepare_card_hw(priv(trans));
+       return trans->ops->prepare_card_hw(trans);
 }

 static inline void iwl_trans_stop_device(struct iwl_trans *trans)
 {
-       trans->ops->stop_device(priv(trans));
+       trans->ops->stop_device(trans);
 }

 static inline void iwl_trans_tx_start(struct iwl_trans *trans)
 {
-       trans->ops->tx_start(priv(trans));
+       trans->ops->tx_start(trans);
 }

 static inline void iwl_trans_rx_free(struct iwl_trans *trans)
@@ -188,25 +188,25 @@ static inline void iwl_trans_rx_free(struct iwl_trans *trans)

 static inline void iwl_trans_tx_free(struct iwl_trans *trans)
 {
-       trans->ops->tx_free(priv(trans));
+       trans->ops->tx_free(trans);
 }

 static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
                                struct iwl_host_cmd *cmd)
 {
-       return trans->ops->send_cmd(priv(trans), cmd);
+       return trans->ops->send_cmd(trans, cmd);
 }

 static inline int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
                        u32 flags, u16 len, const void *data)
 {
-       return trans->ops->send_cmd_pdu(priv(trans), id, flags, len, data);
+       return trans->ops->send_cmd_pdu(trans, id, flags, len, data);
 }

 static inline struct iwl_tx_cmd *iwl_trans_get_tx_cmd(struct iwl_trans *trans,
                                        int txq_id)
 {
-       return trans->ops->get_tx_cmd(priv(trans), txq_id);
+       return trans->ops->get_tx_cmd(trans, txq_id);
 }

 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -238,7 +238,7 @@ static inline void iwl_trans_txq_agg_setup(struct iwl_trans *trans, int sta_id,

 static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
 {
-       trans->ops->kick_nic(priv(trans));
+       trans->ops->kick_nic(trans);
 }

 static inline void iwl_trans_disable_sync_irq(struct iwl_trans *trans)
@@ -248,7 +248,7 @@ static inline void iwl_trans_disable_sync_irq(struct iwl_trans *trans)

 static inline void iwl_trans_free(struct iwl_trans *trans)
 {
-       trans->ops->free(priv(trans));
+       trans->ops->free(trans);
 }

 static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
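With the ops table fixed up, every inline wrapper in this file collapses to the same one-liner: forward the handle, with no more priv(trans) translation at the call boundary. Continuing the hedged my_* sketch, the wrappers and a backend-agnostic call site might look like:

    static inline int my_trans_start_device(struct my_trans *trans)
    {
            return trans->ops->start_device(trans);
    }

    static inline void my_trans_stop_device(struct my_trans *trans)
    {
            trans->ops->stop_device(trans);
    }

    /* Example: instantiate the PCIe backend and drive it through the
     * wrappers; callers never know which backend sits behind ops. */
    static int my_demo(void)
    {
            struct my_trans pcie = { .ops = &my_pcie_ops };
            int ret;

            ret = my_trans_start_device(&pcie);
            if (ret)
                    return ret;
            my_trans_stop_device(&pcie);
            return 0;
    }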