author		Mohamed Abbas <mohamed.abbas@intel.com>		2009-05-22 14:01:47 -0400
committer	John W. Linville <linville@tuxdriver.com>	2009-05-22 14:06:04 -0400
commit		a8b50a0a966d7ac313f624c6ab4996231a5fe25a (patch)
tree		8c000ee1d429f1e639fde9613cb48b268aee1ff6 /drivers/net/wireless/iwlwifi/iwl-3945.c
parent		0848e297c2107dbc12a91a1709c879c73bd188d8 (diff)
iwlcore: register locks
Add a new lock to be used when accessing certain registers, and move the register lock and iwl_grab_nic_access() inside the register-access functions. This prevents callers from forgetting to hold the locks and NIC access in the right way and makes the code easier to maintain. We currently overuse the priv->lock spinlock; we probably need to add a new lock for the Tx queue, and after that we may be able to convert most of these locks to BH type, keeping only priv->lock as an irq-type lock.

Signed-off-by: Mohamed Abbas <mohamed.abbas@intel.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
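To make the new calling convention concrete, here is a minimal sketch (not the actual iwl-io.h change, which lives outside this file) of the before/after pattern for a direct register write. The reg_lock field and the __iwl_write_direct32() raw helper are assumed names used only for illustration; the real accessor names come from iwl-io.h.

/*
 * Illustrative sketch only -- the real change is in iwlcore, not in
 * iwl-3945.c.  "reg_lock" and "__iwl_write_direct32" are assumed names
 * for the new per-device register lock and the raw (unlocked) write helper.
 */

/* Before: every caller wrapped direct register access itself. */
static void caller_before(struct iwl_priv *priv, u32 reg, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_grab_nic_access(priv)) {	/* non-zero means failure */
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}
	__iwl_write_direct32(priv, reg, val);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

/* After: the accessor owns the locking, so callers simply write. */
static void iwl_write_direct32_sketch(struct iwl_priv *priv, u32 reg, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->reg_lock, flags);	/* dedicated register lock */
	if (!iwl_grab_nic_access(priv)) {
		__iwl_write_direct32(priv, reg, val);
		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->reg_lock, flags);
}

/* Callers such as iwl3945_rx_init() then reduce to plain calls:
 *	iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->dma_addr);
 * with no local flags, no priv->lock, and no grab/release boilerplate,
 * which is exactly what the hunks below remove.
 */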
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-3945.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-3945.c	144
1 file changed, 16 insertions, 128 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 5b0c6e5bda92..8bed0445ff5d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -98,7 +98,6 @@ const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
  * ... and set IWL_EVT_DISABLE to 1. */
 void iwl3945_disable_events(struct iwl_priv *priv)
 {
-	int ret;
 	int i;
 	u32 base;	/* SRAM address of event log header */
 	u32 disable_ptr; /* SRAM address of event-disable bitmap array */
@@ -159,26 +158,17 @@ void iwl3945_disable_events(struct iwl_priv *priv)
 		return;
 	}
 
-	ret = iwl_grab_nic_access(priv);
-	if (ret) {
-		IWL_WARN(priv, "Can not read from adapter at this time.\n");
-		return;
-	}
-
 	disable_ptr = iwl_read_targ_mem(priv, base + (4 * sizeof(u32)));
 	array_size = iwl_read_targ_mem(priv, base + (5 * sizeof(u32)));
-	iwl_release_nic_access(priv);
 
 	if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
 		IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
 			       disable_ptr);
-		ret = iwl_grab_nic_access(priv);
 		for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
 			iwl_write_targ_mem(priv,
 					   disable_ptr + (i * sizeof(u32)),
 					   evt_disable[i]);
 
-		iwl_release_nic_access(priv);
 	} else {
 		IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
 		IWL_DEBUG_INFO(priv, " by writing \"1\"s into disable bitmap\n");
@@ -908,55 +898,30 @@ u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
 
 static int iwl3945_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
 {
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	ret = iwl_grab_nic_access(priv);
-	if (ret) {
-		spin_unlock_irqrestore(&priv->lock, flags);
-		return ret;
-	}
-
 	if (src == IWL_PWR_SRC_VAUX) {
 		if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
 			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
 					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
 					~APMG_PS_CTRL_MSK_PWR_SRC);
-			iwl_release_nic_access(priv);
 
 			iwl_poll_bit(priv, CSR_GPIO_IN,
 				     CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
 				     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
-		} else {
-			iwl_release_nic_access(priv);
 		}
 	} else {
 		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
 				APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
 				~APMG_PS_CTRL_MSK_PWR_SRC);
 
-		iwl_release_nic_access(priv);
 		iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
 			     CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */
 	}
-	spin_unlock_irqrestore(&priv->lock, flags);
 
-	return ret;
+	return 0;
 }
 
 static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 {
-	int rc;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	rc = iwl_grab_nic_access(priv);
-	if (rc) {
-		spin_unlock_irqrestore(&priv->lock, flags);
-		return rc;
-	}
-
 	iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->dma_addr);
 	iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
 	iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
@@ -973,23 +938,11 @@ static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 	/* fake read to flush all prev I/O */
 	iwl_read_direct32(priv, FH39_RSSR_CTRL);
 
-	iwl_release_nic_access(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
 	return 0;
 }
 
 static int iwl3945_tx_reset(struct iwl_priv *priv)
 {
-	int rc;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	rc = iwl_grab_nic_access(priv);
-	if (rc) {
-		spin_unlock_irqrestore(&priv->lock, flags);
-		return rc;
-	}
 
 	/* bypass mode */
 	iwl_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
@@ -1017,8 +970,6 @@ static int iwl3945_tx_reset(struct iwl_priv *priv)
 			FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
 			FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
 
-	iwl_release_nic_access(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return 0;
 }
@@ -1061,7 +1012,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
 
 static int iwl3945_apm_init(struct iwl_priv *priv)
 {
-	int ret = 0;
+	int ret;
 
 	iwl_power_initialize(priv);
 
@@ -1083,10 +1034,6 @@ static int iwl3945_apm_init(struct iwl_priv *priv)
 		goto out;
 	}
 
-	ret = iwl_grab_nic_access(priv);
-	if (ret)
-		goto out;
-
 	/* enable DMA */
 	iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
 			APMG_CLK_VAL_BSM_CLK_RQT);
@@ -1097,7 +1044,6 @@ static int iwl3945_apm_init(struct iwl_priv *priv)
 	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
 			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 
-	iwl_release_nic_access(priv);
 out:
 	return ret;
 }
@@ -1196,22 +1142,13 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
 
 	iwl3945_rx_init(priv, rxq);
 
-	spin_lock_irqsave(&priv->lock, flags);
 
 	/* Look at using this instead:
 	rxq->need_update = 1;
 	iwl_rx_queue_update_write_ptr(priv, rxq);
 	*/
 
-	rc = iwl_grab_nic_access(priv);
-	if (rc) {
-		spin_unlock_irqrestore(&priv->lock, flags);
-		return rc;
-	}
 	iwl_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
-	iwl_release_nic_access(priv);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
 
 	rc = iwl3945_txq_ctx_reset(priv);
 	if (rc)
@@ -1243,14 +1180,6 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
 void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
 {
 	int txq_id;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	if (iwl_grab_nic_access(priv)) {
-		spin_unlock_irqrestore(&priv->lock, flags);
-		iwl3945_hw_txq_ctx_free(priv);
-		return;
-	}
 
 	/* stop SCD */
 	iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
@@ -1263,9 +1192,6 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
 			    1000);
 	}
 
-	iwl_release_nic_access(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
 	iwl3945_hw_txq_ctx_free(priv);
 }
 
@@ -1310,12 +1236,8 @@ static void iwl3945_apm_stop(struct iwl_priv *priv)
 
 static int iwl3945_apm_reset(struct iwl_priv *priv)
 {
-	int rc;
-	unsigned long flags;
-
 	iwl3945_apm_stop_master(priv);
 
-	spin_lock_irqsave(&priv->lock, flags);
 
 	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
 	udelay(10);
@@ -1325,36 +1247,31 @@ static int iwl3945_apm_reset(struct iwl_priv *priv)
 	iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
 			    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
 
-	rc = iwl_grab_nic_access(priv);
-	if (!rc) {
-		iwl_write_prph(priv, APMG_CLK_CTRL_REG,
-				APMG_CLK_VAL_BSM_CLK_RQT);
+	iwl_write_prph(priv, APMG_CLK_CTRL_REG,
+			APMG_CLK_VAL_BSM_CLK_RQT);
 
-		iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
-		iwl_write_prph(priv, APMG_RTC_INT_STT_REG,
-				0xFFFFFFFF);
+	iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
+	iwl_write_prph(priv, APMG_RTC_INT_STT_REG,
+			0xFFFFFFFF);
 
-		/* enable DMA */
-		iwl_write_prph(priv, APMG_CLK_EN_REG,
-				APMG_CLK_VAL_DMA_CLK_RQT |
-				APMG_CLK_VAL_BSM_CLK_RQT);
-		udelay(10);
+	/* enable DMA */
+	iwl_write_prph(priv, APMG_CLK_EN_REG,
+			APMG_CLK_VAL_DMA_CLK_RQT |
+			APMG_CLK_VAL_BSM_CLK_RQT);
+	udelay(10);
 
-		iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
-				APMG_PS_CTRL_VAL_RESET_REQ);
-		udelay(5);
-		iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
-				APMG_PS_CTRL_VAL_RESET_REQ);
-		iwl_release_nic_access(priv);
-	}
+	iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
+			APMG_PS_CTRL_VAL_RESET_REQ);
+	udelay(5);
+	iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
+			APMG_PS_CTRL_VAL_RESET_REQ);
 
 	/* Clear the 'host command active' bit... */
 	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
 
 	wake_up_interruptible(&priv->wait_command_queue);
-	spin_unlock_irqrestore(&priv->lock, flags);
 
-	return rc;
+	return 0;
 }
 
 /**
@@ -2500,14 +2417,6 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
 int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
 {
 	int rc;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	rc = iwl_grab_nic_access(priv);
-	if (rc) {
-		spin_unlock_irqrestore(&priv->lock, flags);
-		return rc;
-	}
 
 	iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
 	rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
@@ -2515,28 +2424,17 @@ int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
 	if (rc < 0)
 		IWL_ERR(priv, "Can't stop Rx DMA.\n");
 
-	iwl_release_nic_access(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
 	return 0;
 }
 
 int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 {
-	int rc;
-	unsigned long flags;
 	int txq_id = txq->q.id;
 
 	struct iwl3945_shared *shared_data = priv->shared_virt;
 
 	shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
 
-	spin_lock_irqsave(&priv->lock, flags);
-	rc = iwl_grab_nic_access(priv);
-	if (rc) {
-		spin_unlock_irqrestore(&priv->lock, flags);
-		return rc;
-	}
 	iwl_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
 	iwl_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
 
@@ -2546,11 +2444,9 @@ int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 			 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
 			 FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
 			 FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
-	iwl_release_nic_access(priv);
 
 	/* fake read to flush all prev. writes */
 	iwl_read32(priv, FH39_TSSR_CBB_BASE);
-	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return 0;
 }
@@ -2858,10 +2754,6 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
 	inst_len = priv->ucode_init.len;
 	data_len = priv->ucode_init_data.len;
 
-	rc = iwl_grab_nic_access(priv);
-	if (rc)
-		return rc;
-
 	iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
 	iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
 	iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
@@ -2875,10 +2767,8 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
 				le32_to_cpu(*image));
 
 	rc = iwl3945_verify_bsm(priv);
-	if (rc) {
-		iwl_release_nic_access(priv);
+	if (rc)
 		return rc;
-	}
 
 	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
 	iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
@@ -2910,8 +2800,6 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
 	iwl_write_prph(priv, BSM_WR_CTRL_REG,
 			  BSM_WR_CTRL_REG_BIT_START_EN);
 
-	iwl_release_nic_access(priv);
-
 	return 0;
 }
 