path: root/drivers/net/wireless/iwlwifi/iwl-4965.c
author	Tomas Winkler <tomas.winkler@intel.com>	2008-03-25 19:33:37 -0400
committer	John W. Linville <linville@tuxdriver.com>	2008-03-27 16:03:17 -0400
commit	3395f6e9cf48469d7ee05703cad1502002741c16 (patch)
tree	ca07b726dde7d42e113e9105e3c10f7ae9b1cb3a /drivers/net/wireless/iwlwifi/iwl-4965.c
parent	ab53d8af6772b22d4d68b1bcd74f7a5dba693983 (diff)
iwlwifi: rename iwl-4965-io.h to iwl-io.h
This patch renames iwl-4965-io.h back to iwl-io.h. It also removes the 4965 prefix from all of the functions it supplies.

Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-4965.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-4965.c	215
1 file changed, 106 insertions(+), 109 deletions(-)
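The change is mechanical: the I/O helpers lose their 4965 prefix and move to iwl-io.h, and every call site is updated one for one. As a rough illustration of the calling convention these helpers follow throughout the hunks below, here is a minimal sketch; the function example_stop_rx_dma is hypothetical, and the prototypes are paraphrased from their use in this patch, not copied from iwl-io.h.

/*
 * Illustrative sketch only: shows the grab/release bracket around direct
 * register access that nearly every hunk below touches, using the renamed
 * helpers.  Prototypes are paraphrased from this patch, not from iwl-io.h.
 */
#include <linux/types.h>
#include <linux/spinlock.h>

struct iwl_priv;	/* driver state, defined elsewhere in iwlwifi */

int iwl_grab_nic_access(struct iwl_priv *priv);
void iwl_release_nic_access(struct iwl_priv *priv);
void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 val);

static int example_stop_rx_dma(struct iwl_priv *priv, spinlock_t *lock,
			       u32 config_reg)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(lock, flags);

	/* NIC access must be held around any direct register write */
	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(lock, flags);
		return rc;
	}

	iwl_write_direct32(priv, config_reg, 0);	/* e.g. stop Rx DMA */

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(lock, flags);
	return 0;
}

The same bracket recurs in nearly every hunk; only the helper names change.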
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 8b5cacb14618..e5f64d7fbfde 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -41,6 +41,7 @@
 #include "iwl-eeprom.h"
 #include "iwl-core.h"
 #include "iwl-4965.h"
+#include "iwl-io.h"
 #include "iwl-helpers.h"
 
 /* module parameters */
@@ -315,20 +316,20 @@ int iwl4965_hw_rxq_stop(struct iwl_priv *priv)
 	unsigned long flags;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	rc = iwl4965_grab_nic_access(priv);
+	rc = iwl_grab_nic_access(priv);
 	if (rc) {
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return rc;
 	}
 
 	/* stop Rx DMA */
-	iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-	rc = iwl4965_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
+	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	rc = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				(1 << 24), 1000);
 	if (rc < 0)
 		IWL_ERROR("Can't stop Rx DMA.\n");
 
-	iwl4965_release_nic_access(priv);
+	iwl_release_nic_access(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return 0;
@@ -372,7 +373,7 @@ static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
 	unsigned long flags;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	ret = iwl4965_grab_nic_access(priv);
+	ret = iwl_grab_nic_access(priv);
 	if (ret) {
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return ret;
@@ -385,15 +386,15 @@ static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
 				&val);
 
 	if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
-		iwl4965_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				~APMG_PS_CTRL_MSK_PWR_SRC);
 	} else
-		iwl4965_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				~APMG_PS_CTRL_MSK_PWR_SRC);
 
-	iwl4965_release_nic_access(priv);
+	iwl_release_nic_access(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return ret;
@@ -406,7 +407,7 @@ static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
 	unsigned int rb_size;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	rc = iwl4965_grab_nic_access(priv);
+	rc = iwl_grab_nic_access(priv);
 	if (rc) {
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return rc;
@@ -418,34 +419,34 @@ static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
 		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
 
 	/* Stop Rx DMA */
-	iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
 
 	/* Reset driver's Rx queue write index */
-	iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
 
 	/* Tell device where to find RBD circular buffer in DRAM */
-	iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   rxq->dma_addr >> 8);
 
 	/* Tell device where in DRAM to update its Rx status */
-	iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   (priv->hw_setting.shared_phys +
			    offsetof(struct iwl4965_shared, val0)) >> 4);
 
 	/* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
-	iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   /*0x10 << 4 | */
			   (RX_QUEUE_SIZE_LOG <<
			    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
 
 	/*
-	 * iwl4965_write32(priv,CSR_INT_COAL_REG,0);
+	 * iwl_write32(priv,CSR_INT_COAL_REG,0);
 	 */
 
-	iwl4965_release_nic_access(priv);
+	iwl_release_nic_access(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return 0;
@@ -458,13 +459,13 @@ static int iwl4965_kw_init(struct iwl_priv *priv)
 	int rc;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	rc = iwl4965_grab_nic_access(priv);
+	rc = iwl_grab_nic_access(priv);
 	if (rc)
 		goto out;
 
-	iwl4965_write_direct32(priv, IWL_FH_KW_MEM_ADDR_REG,
+	iwl_write_direct32(priv, IWL_FH_KW_MEM_ADDR_REG,
			     priv->kw.dma_addr >> 4);
-	iwl4965_release_nic_access(priv);
+	iwl_release_nic_access(priv);
 out:
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return rc;
@@ -524,7 +525,7 @@ static int iwl4965_txq_ctx_reset(struct iwl_priv *priv)
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	rc = iwl4965_grab_nic_access(priv);
+	rc = iwl_grab_nic_access(priv);
 	if (unlikely(rc)) {
 		IWL_ERROR("TX reset failed");
 		spin_unlock_irqrestore(&priv->lock, flags);
@@ -532,8 +533,8 @@ static int iwl4965_txq_ctx_reset(struct iwl_priv *priv)
 	}
 
 	/* Turn off all Tx DMA channels */
-	iwl4965_write_prph(priv, KDR_SCD_TXFACT, 0);
-	iwl4965_release_nic_access(priv);
+	iwl_write_prph(priv, KDR_SCD_TXFACT, 0);
+	iwl_release_nic_access(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	/* Tell 4965 where to find the keep-warm buffer */
@@ -580,11 +581,11 @@ int iwl4965_hw_nic_init(struct iwl_priv *priv)
 	/* nic_init */
 	spin_lock_irqsave(&priv->lock, flags);
 
-	iwl4965_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
 
-	iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-	rc = iwl4965_poll_bit(priv, CSR_GP_CNTRL,
+	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+	rc = iwl_poll_bit(priv, CSR_GP_CNTRL,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
 	if (rc < 0) {
@@ -593,26 +594,25 @@ int iwl4965_hw_nic_init(struct iwl_priv *priv)
 		return rc;
 	}
 
-	rc = iwl4965_grab_nic_access(priv);
+	rc = iwl_grab_nic_access(priv);
 	if (rc) {
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return rc;
 	}
 
-	iwl4965_read_prph(priv, APMG_CLK_CTRL_REG);
+	iwl_read_prph(priv, APMG_CLK_CTRL_REG);
 
-	iwl4965_write_prph(priv, APMG_CLK_CTRL_REG,
-		APMG_CLK_VAL_DMA_CLK_RQT |
-		APMG_CLK_VAL_BSM_CLK_RQT);
-	iwl4965_read_prph(priv, APMG_CLK_CTRL_REG);
+	iwl_write_prph(priv, APMG_CLK_CTRL_REG,
+		APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
+	iwl_read_prph(priv, APMG_CLK_CTRL_REG);
 
 	udelay(20);
 
-	iwl4965_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
+	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 
-	iwl4965_release_nic_access(priv);
-	iwl4965_write32(priv, CSR_INT_COALESCING, 512 / 32);
+	iwl_release_nic_access(priv);
+	iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	/* Determine HW type */
@@ -648,26 +648,24 @@ int iwl4965_hw_nic_init(struct iwl_priv *priv)
 
 	/* set CSR_HW_CONFIG_REG for uCode use */
 
-	iwl4965_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR49_HW_IF_CONFIG_REG_BIT_4965_R |
		    CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		    CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI);
 
-	rc = iwl4965_grab_nic_access(priv);
+	rc = iwl_grab_nic_access(priv);
 	if (rc < 0) {
 		spin_unlock_irqrestore(&priv->lock, flags);
 		IWL_DEBUG_INFO("Failed to init the card\n");
 		return rc;
 	}
 
-	iwl4965_read_prph(priv, APMG_PS_CTRL_REG);
-	iwl4965_set_bits_prph(priv, APMG_PS_CTRL_REG,
-				APMG_PS_CTRL_VAL_RESET_REQ);
+	iwl_read_prph(priv, APMG_PS_CTRL_REG);
+	iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
 	udelay(5);
-	iwl4965_clear_bits_prph(priv, APMG_PS_CTRL_REG,
-				APMG_PS_CTRL_VAL_RESET_REQ);
+	iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
 
-	iwl4965_release_nic_access(priv);
+	iwl_release_nic_access(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	iwl4965_hw_card_show_info(priv);
@@ -720,16 +718,16 @@ int iwl4965_hw_nic_stop_master(struct iwl_priv *priv)
 	spin_lock_irqsave(&priv->lock, flags);
 
 	/* set stop master bit */
-	iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
 
-	reg_val = iwl4965_read32(priv, CSR_GP_CNTRL);
+	reg_val = iwl_read32(priv, CSR_GP_CNTRL);
 
 	if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
 	    (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
 		IWL_DEBUG_INFO("Card in power save, master is already "
			       "stopped\n");
 	else {
-		rc = iwl4965_poll_bit(priv, CSR_RESET,
+		rc = iwl_poll_bit(priv, CSR_RESET,
			  CSR_RESET_REG_FLAG_MASTER_DISABLED,
			  CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
 		if (rc < 0) {
@@ -756,18 +754,17 @@ void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv)
 	/* Stop each Tx DMA channel, and wait for it to be idle */
 	for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
 		spin_lock_irqsave(&priv->lock, flags);
-		if (iwl4965_grab_nic_access(priv)) {
+		if (iwl_grab_nic_access(priv)) {
 			spin_unlock_irqrestore(&priv->lock, flags);
 			continue;
 		}
 
-		iwl4965_write_direct32(priv,
-				IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
-				0x0);
-		iwl4965_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG,
-				IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
-				(txq_id), 200);
-		iwl4965_release_nic_access(priv);
+		iwl_write_direct32(priv,
+				IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
+		iwl_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG,
+				IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
+				(txq_id), 200);
+		iwl_release_nic_access(priv);
 		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 
@@ -784,29 +781,29 @@ int iwl4965_hw_nic_reset(struct iwl_priv *priv)
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
 
 	udelay(10);
 
-	iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-	rc = iwl4965_poll_bit(priv, CSR_RESET,
+	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+	rc = iwl_poll_bit(priv, CSR_RESET,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);
 
 	udelay(10);
 
-	rc = iwl4965_grab_nic_access(priv);
+	rc = iwl_grab_nic_access(priv);
 	if (!rc) {
-		iwl4965_write_prph(priv, APMG_CLK_EN_REG,
+		iwl_write_prph(priv, APMG_CLK_EN_REG,
				   APMG_CLK_VAL_DMA_CLK_RQT |
				   APMG_CLK_VAL_BSM_CLK_RQT);
 
 		udelay(10);
 
-		iwl4965_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
+		iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
				APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 
-		iwl4965_release_nic_access(priv);
+		iwl_release_nic_access(priv);
 	}
 
 	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
@@ -872,7 +869,7 @@ void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
 	int ret = 0;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -1733,9 +1730,9 @@ static void iwl4965_bg_txpower_work(struct work_struct *work)
  */
 static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
 {
-	iwl4965_write_direct32(priv, HBUS_TARG_WRPTR,
+	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			     (index & 0xff) | (txq_id << 8));
-	iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(txq_id), index);
+	iwl_write_prph(priv, KDR_SCD_QUEUE_RDPTR(txq_id), index);
 }
 
 /**
@@ -1755,7 +1752,7 @@ static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
 	int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0;
 
 	/* Set up and activate */
-	iwl4965_write_prph(priv, KDR_SCD_QUEUE_STATUS_BITS(txq_id),
+	iwl_write_prph(priv, KDR_SCD_QUEUE_STATUS_BITS(txq_id),
			 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			 (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) |
@@ -1807,46 +1804,46 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
 		priv->chain_noise_data.delta_gain_code[i] =
 			CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
 #endif /* CONFIG_IWL4965_SENSITIVITY*/
-	ret = iwl4965_grab_nic_access(priv);
+	ret = iwl_grab_nic_access(priv);
 	if (ret) {
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return ret;
 	}
 
 	/* Clear 4965's internal Tx Scheduler data base */
-	priv->scd_base_addr = iwl4965_read_prph(priv, KDR_SCD_SRAM_BASE_ADDR);
+	priv->scd_base_addr = iwl_read_prph(priv, KDR_SCD_SRAM_BASE_ADDR);
 	a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET;
 	for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4)
-		iwl4965_write_targ_mem(priv, a, 0);
+		iwl_write_targ_mem(priv, a, 0);
 	for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4)
-		iwl4965_write_targ_mem(priv, a, 0);
+		iwl_write_targ_mem(priv, a, 0);
 	for (; a < sizeof(u16) * priv->hw_setting.max_txq_num; a += 4)
-		iwl4965_write_targ_mem(priv, a, 0);
+		iwl_write_targ_mem(priv, a, 0);
 
 	/* Tel 4965 where to find Tx byte count tables */
-	iwl4965_write_prph(priv, KDR_SCD_DRAM_BASE_ADDR,
+	iwl_write_prph(priv, KDR_SCD_DRAM_BASE_ADDR,
	       (priv->hw_setting.shared_phys +
		offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10);
 
 	/* Disable chain mode for all queues */
-	iwl4965_write_prph(priv, KDR_SCD_QUEUECHAIN_SEL, 0);
+	iwl_write_prph(priv, KDR_SCD_QUEUECHAIN_SEL, 0);
 
 	/* Initialize each Tx queue (including the command queue) */
 	for (i = 0; i < priv->hw_setting.max_txq_num; i++) {
 
 		/* TFD circular buffer read/write indexes */
-		iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(i), 0);
-		iwl4965_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
+		iwl_write_prph(priv, KDR_SCD_QUEUE_RDPTR(i), 0);
+		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
 
 		/* Max Tx Window size for Scheduler-ACK mode */
-		iwl4965_write_targ_mem(priv, priv->scd_base_addr +
+		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i),
				(SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
 
 		/* Frame limit */
-		iwl4965_write_targ_mem(priv, priv->scd_base_addr +
+		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				(SCD_FRAME_LIMIT <<
@@ -1854,11 +1851,11 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
 
 	}
-	iwl4965_write_prph(priv, KDR_SCD_INTERRUPT_MASK,
+	iwl_write_prph(priv, KDR_SCD_INTERRUPT_MASK,
				(1 << priv->hw_setting.max_txq_num) - 1);
 
 	/* Activate all Tx DMA/FIFO channels */
-	iwl4965_write_prph(priv, KDR_SCD_TXFACT,
+	iwl_write_prph(priv, KDR_SCD_TXFACT,
				SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
 
 	iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
@@ -1870,7 +1867,7 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
 		iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
 	}
 
-	iwl4965_release_nic_access(priv);
+	iwl_release_nic_access(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return ret;
@@ -2929,22 +2926,22 @@ int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, struct iwl4965_tx_queue *txq
 	int txq_id = txq->q.id;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	rc = iwl4965_grab_nic_access(priv);
+	rc = iwl_grab_nic_access(priv);
 	if (rc) {
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return rc;
 	}
 
 	/* Circular buffer (TFD queue in DRAM) physical base address */
-	iwl4965_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
+	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			     txq->q.dma_addr >> 8);
 
 	/* Enable DMA channel, using same id as for TFD queue */
-	iwl4965_write_direct32(
+	iwl_write_direct32(
		priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
		IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
-	iwl4965_release_nic_access(priv);
+	iwl_release_nic_access(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return 0;
@@ -4259,7 +4256,7 @@ static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
 {
 	/* Simply stop the queue, but don't change any configuration;
 	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
-	iwl4965_write_prph(priv,
+	iwl_write_prph(priv,
		KDR_SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
@@ -4280,24 +4277,24 @@ static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
 		return -EINVAL;
 	}
 
-	ret = iwl4965_grab_nic_access(priv);
+	ret = iwl_grab_nic_access(priv);
 	if (ret)
 		return ret;
 
 	iwl4965_tx_queue_stop_scheduler(priv, txq_id);
 
-	iwl4965_clear_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+	iwl_clear_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
 
 	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
 	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
 	/* supposes that ssn_idx is valid (!= 0xFFF) */
 	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
 
-	iwl4965_clear_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
+	iwl_clear_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
 	iwl4965_txq_ctx_deactivate(priv, txq_id);
 	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
 
-	iwl4965_release_nic_access(priv);
+	iwl_release_nic_access(priv);
 
 	return 0;
 }
@@ -4432,14 +4429,14 @@ static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
 	tbl_dw_addr = priv->scd_base_addr +
			SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
 
-	tbl_dw = iwl4965_read_targ_mem(priv, tbl_dw_addr);
+	tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
 
 	if (txq_id & 0x1)
 		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
 	else
 		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
 
-	iwl4965_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
+	iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
 
 	return 0;
 }
@@ -4469,7 +4466,7 @@ static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
 	iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid);
 
 	spin_lock_irqsave(&priv->lock, flags);
-	rc = iwl4965_grab_nic_access(priv);
+	rc = iwl_grab_nic_access(priv);
 	if (rc) {
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return rc;
@@ -4482,7 +4479,7 @@ static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
 	iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
 
 	/* Set this queue as a chain-building queue */
-	iwl4965_set_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+	iwl_set_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
 
 	/* Place first TFD at index corresponding to start sequence number.
 	 * Assumes that ssn_idx is valid (!= 0xFFF) */
@@ -4491,22 +4488,22 @@ static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
 	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
 
 	/* Set up Tx window size and frame limit for this queue */
-	iwl4965_write_targ_mem(priv,
+	iwl_write_targ_mem(priv,
		priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id),
		(SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
		SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
 
-	iwl4965_write_targ_mem(priv, priv->scd_base_addr +
+	iwl_write_targ_mem(priv, priv->scd_base_addr +
		SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
		(SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
		& SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
 
-	iwl4965_set_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
+	iwl_set_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
 
 	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
 	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
 
-	iwl4965_release_nic_access(priv);
+	iwl_release_nic_access(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return 0;