Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie/trans.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/pcie/trans.c | 410
1 file changed, 298 insertions, 112 deletions
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index f9507807b486..dcfd6d866d09 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -75,6 +75,20 @@
 #include "iwl-agn-hw.h"
 #include "internal.h"
 
+static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
+{
+        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+                    ((reg & 0x0000ffff) | (2 << 28)));
+        return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
+}
+
+static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
+{
+        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
+        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+                    ((reg & 0x0000ffff) | (3 << 28)));
+}
+
 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 {
         if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
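The two helpers added at the top of this hunk reach shared (SHR) registers indirectly through the HEEP_CTRL_WRD_PCIEX control/data register pair; the 2 and 3 placed into bits 31:28 of the control word appear to select the read and write commands (inferred from the function names, the patch does not spell this out). A minimal sketch of the read-modify-write pattern the rest of the patch builds on them, with an illustrative helper name that is not part of the patch:

/* Illustration only: mirrors the LP XTAL sequence later in this patch.
 * Read a shared APMG register through the indirect CTRL/DATA pair, set a
 * bit, and write the value back. */
static void example_shr_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
{
        u32 val;

        val = iwl_trans_pcie_read_shr(trans, reg);
        iwl_trans_pcie_write_shr(trans, reg, val | mask);
}

Called as example_shr_set_bit(trans, SHR_APMG_XTAL_CFG_REG, SHR_APMG_XTAL_CFG_XTAL_ON_REQ), this reproduces the "Force APMG XTAL" step of iwl_pcie_apm_lp_xtal_enable() added further down.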
@@ -89,6 +103,7 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 
 /* PCI registers */
 #define PCI_CFG_RETRY_TIMEOUT 0x041
+#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
 
 static void iwl_pcie_apm_config(struct iwl_trans *trans)
 {
@@ -132,8 +147,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
          */
 
         /* Disable L0S exit timer (platform NMI Work/Around) */
-        iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
-                    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+        if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+                iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+                            CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
 
         /*
          * Disable L0s without affecting L1;
@@ -203,19 +219,23 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
         /*
          * Enable DMA clock and wait for it to stabilize.
          *
-         * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
-         * do not disable clocks. This preserves any hardware bits already
-         * set by default in "CLK_CTRL_REG" after reset.
+         * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
+         * bits do not disable clocks. This preserves any hardware
+         * bits already set by default in "CLK_CTRL_REG" after reset.
          */
-        iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
-        udelay(20);
+        if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+                iwl_write_prph(trans, APMG_CLK_EN_REG,
+                               APMG_CLK_VAL_DMA_CLK_RQT);
+                udelay(20);
 
-        /* Disable L1-Active */
-        iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
-                          APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+                /* Disable L1-Active */
+                iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+                                  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 
-        /* Clear the interrupt in APMG if the NIC is in RFKILL */
-        iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
+                /* Clear the interrupt in APMG if the NIC is in RFKILL */
+                iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
+                               APMG_RTC_INT_STT_RFKILL);
+        }
 
         set_bit(STATUS_DEVICE_ENABLED, &trans->status);
 
@@ -223,6 +243,116 @@ out:
         return ret;
 }
 
+/*
+ * Enable LP XTAL to avoid HW bug where device may consume much power if
+ * FW is not loaded after device reset. LP XTAL is disabled by default
+ * after device HW reset. Do it only if XTAL is fed by internal source.
+ * Configure device's "persistence" mode to avoid resetting XTAL again when
+ * SHRD_HW_RST occurs in S3.
+ */
+static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
+{
+        int ret;
+        u32 apmg_gp1_reg;
+        u32 apmg_xtal_cfg_reg;
+        u32 dl_cfg_reg;
+
+        /* Force XTAL ON */
+        __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+                                 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+
+        /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+        iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+        udelay(10);
+
+        /*
+         * Set "initialization complete" bit to move adapter from
+         * D0U* --> D0A* (powered-up active) state.
+         */
+        iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+        /*
+         * Wait for clock stabilization; once stabilized, access to
+         * device-internal resources is possible.
+         */
+        ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+                           25000);
+        if (WARN_ON(ret < 0)) {
+                IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
+                /* Release XTAL ON request */
+                __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+                                           CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+                return;
+        }
+
+        /*
+         * Clear "disable persistence" to avoid LP XTAL resetting when
+         * SHRD_HW_RST is applied in S3.
+         */
+        iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+                            APMG_PCIDEV_STT_VAL_PERSIST_DIS);
+
+        /*
+         * Force APMG XTAL to be active to prevent its disabling by HW
+         * caused by APMG idle state.
+         */
+        apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
+                                                    SHR_APMG_XTAL_CFG_REG);
+        iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+                                 apmg_xtal_cfg_reg |
+                                 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+
+        /*
+         * Reset entire device again - do controller reset (results in
+         * SHRD_HW_RST). Turn MAC off before proceeding.
+         */
+        iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+        udelay(10);
+
+        /* Enable LP XTAL by indirect access through CSR */
+        apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
+        iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
+                                 SHR_APMG_GP1_WF_XTAL_LP_EN |
+                                 SHR_APMG_GP1_CHICKEN_BIT_SELECT);
+
+        /* Clear delay line clock power up */
+        dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
+        iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
+                                 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
+
+        /*
+         * Enable persistence mode to avoid LP XTAL resetting when
+         * SHRD_HW_RST is applied in S3.
+         */
+        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+                    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+
+        /*
+         * Clear "initialization complete" bit to move adapter from
+         * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+         */
+        iwl_clear_bit(trans, CSR_GP_CNTRL,
+                      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+        /* Activates XTAL resources monitor */
+        __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
+                                 CSR_MONITOR_XTAL_RESOURCES);
+
+        /* Release XTAL ON request */
+        __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+                                   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+        udelay(10);
+
+        /* Release APMG XTAL */
+        iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+                                 apmg_xtal_cfg_reg &
+                                 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+}
+
 static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
 {
         int ret = 0;
@@ -250,6 +380,11 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans)
         /* Stop device's DMA activity */
         iwl_pcie_apm_stop_master(trans);
 
+        if (trans->cfg->lp_xtal_workaround) {
+                iwl_pcie_apm_lp_xtal_enable(trans);
+                return;
+        }
+
         /* Reset the entire device */
         iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
 
@@ -273,7 +408,8 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
 
         spin_unlock(&trans_pcie->irq_lock);
 
-        iwl_pcie_set_pwr(trans, false);
+        if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+                iwl_pcie_set_pwr(trans, false);
 
         iwl_op_mode_nic_config(trans->op_mode);
 
@@ -435,78 +571,106 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
         return ret;
 }
 
-static int iwl_pcie_secure_set(struct iwl_trans *trans, int cpu)
+static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
+                                              const struct fw_img *image,
+                                              int cpu,
+                                              int *first_ucode_section)
 {
         int shift_param;
-        u32 address;
-        int ret = 0;
+        int i, ret = 0;
+        u32 last_read_idx = 0;
 
         if (cpu == 1) {
                 shift_param = 0;
-                address = CSR_SECURE_BOOT_CPU1_STATUS_ADDR;
+                *first_ucode_section = 0;
         } else {
                 shift_param = 16;
-                address = CSR_SECURE_BOOT_CPU2_STATUS_ADDR;
+                (*first_ucode_section)++;
         }
 
-        /* set CPU to started */
-        iwl_trans_set_bits_mask(trans,
-                                CSR_UCODE_LOAD_STATUS_ADDR,
-                                CSR_CPU_STATUS_LOADING_STARTED << shift_param,
-                                1);
-
-        /* set last complete descriptor number */
-        iwl_trans_set_bits_mask(trans,
-                                CSR_UCODE_LOAD_STATUS_ADDR,
-                                CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED
-                                << shift_param,
-                                1);
-
-        /* set last loaded block */
-        iwl_trans_set_bits_mask(trans,
-                                CSR_UCODE_LOAD_STATUS_ADDR,
-                                CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK
-                                << shift_param,
-                                1);
+        for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+                last_read_idx = i;
 
+                if (!image->sec[i].data ||
+                    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+                        IWL_DEBUG_FW(trans,
+                                     "Break since Data not valid or Empty section, sec = %d\n",
+                                     i);
+                        break;
+                }
+
+                if (i == (*first_ucode_section) + 1)
+                        /* set CPU to started */
+                        iwl_set_bits_prph(trans,
+                                          CSR_UCODE_LOAD_STATUS_ADDR,
+                                          LMPM_CPU_HDRS_LOADING_COMPLETED
+                                          << shift_param);
+
+                ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+                if (ret)
+                        return ret;
+        }
         /* image loading complete */
-        iwl_trans_set_bits_mask(trans,
-                                CSR_UCODE_LOAD_STATUS_ADDR,
-                                CSR_CPU_STATUS_LOADING_COMPLETED
-                                << shift_param,
-                                1);
+        iwl_set_bits_prph(trans,
+                          CSR_UCODE_LOAD_STATUS_ADDR,
+                          LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);
+
+        *first_ucode_section = last_read_idx;
 
-        /* set FH_TCSR_0_REG */
-        iwl_trans_set_bits_mask(trans, FH_TCSR_0_REG0, 0x00400000, 1);
+        return 0;
+}
 
-        /* verify image verification started */
-        ret = iwl_poll_bit(trans, address,
-                           CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
-                           CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
-                           CSR_SECURE_TIME_OUT);
-        if (ret < 0) {
-                IWL_ERR(trans, "secure boot process didn't start\n");
-                return ret;
+static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
+                                      const struct fw_img *image,
+                                      int cpu,
+                                      int *first_ucode_section)
+{
+        int shift_param;
+        int i, ret = 0;
+        u32 last_read_idx = 0;
+
+        if (cpu == 1) {
+                shift_param = 0;
+                *first_ucode_section = 0;
+        } else {
+                shift_param = 16;
+                (*first_ucode_section)++;
         }
 
-        /* wait for image verification to complete */
-        ret = iwl_poll_bit(trans, address,
-                           CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
-                           CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
-                           CSR_SECURE_TIME_OUT);
+        for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+                last_read_idx = i;
 
-        if (ret < 0) {
-                IWL_ERR(trans, "Time out on secure boot process\n");
-                return ret;
+                if (!image->sec[i].data ||
+                    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+                        IWL_DEBUG_FW(trans,
+                                     "Break since Data not valid or Empty section, sec = %d\n",
+                                     i);
+                        break;
+                }
+
+                ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+                if (ret)
+                        return ret;
         }
 
+        if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+                iwl_set_bits_prph(trans,
+                                  CSR_UCODE_LOAD_STATUS_ADDR,
+                                  (LMPM_CPU_UCODE_LOADING_COMPLETED |
+                                  LMPM_CPU_HDRS_LOADING_COMPLETED |
+                                  LMPM_CPU_UCODE_LOADING_STARTED) <<
+                                  shift_param);
+
+        *first_ucode_section = last_read_idx;
+
         return 0;
 }
 
 static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
                                      const struct fw_img *image)
 {
-        int i, ret = 0;
+        int ret = 0;
+        int first_ucode_section;
 
         IWL_DEBUG_FW(trans,
                      "working with %s image\n",
@@ -518,53 +682,68 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
         /* configure the ucode to be ready to get the secured image */
         if (image->is_secure) {
                 /* set secure boot inspector addresses */
-                iwl_write32(trans, CSR_SECURE_INSPECTOR_CODE_ADDR, 0);
-                iwl_write32(trans, CSR_SECURE_INSPECTOR_DATA_ADDR, 0);
-
-                /* release CPU1 reset if secure inspector image burned in OTP */
-                iwl_write32(trans, CSR_RESET, 0);
-        }
-
-        /* load to FW the binary sections of CPU1 */
-        IWL_DEBUG_INFO(trans, "Loading CPU1\n");
-        for (i = 0;
-             i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
-             i++) {
-                if (!image->sec[i].data)
-                        break;
-                ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+                iwl_write_prph(trans,
+                               LMPM_SECURE_INSPECTOR_CODE_ADDR,
+                               LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);
+
+                iwl_write_prph(trans,
+                               LMPM_SECURE_INSPECTOR_DATA_ADDR,
+                               LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);
+
+                /* set CPU1 header address */
+                iwl_write_prph(trans,
+                               LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
+                               LMPM_SECURE_CPU1_HDR_MEM_SPACE);
+
+                /* load to FW the binary Secured sections of CPU1 */
+                ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
+                                                         &first_ucode_section);
                 if (ret)
                         return ret;
-        }
 
-        /* configure the ucode to start secure process on CPU1 */
-        if (image->is_secure) {
-                /* config CPU1 to start secure protocol */
-                ret = iwl_pcie_secure_set(trans, 1);
+        } else {
+                /* load to FW the binary Non secured sections of CPU1 */
+                ret = iwl_pcie_load_cpu_sections(trans, image, 1,
+                                                 &first_ucode_section);
                 if (ret)
                         return ret;
-        } else {
-                /* Remove all resets to allow NIC to operate */
-                iwl_write32(trans, CSR_RESET, 0);
         }
 
         if (image->is_dual_cpus) {
+                /* set CPU2 header address */
+                iwl_write_prph(trans,
+                               LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
+                               LMPM_SECURE_CPU2_HDR_MEM_SPACE);
+
                 /* load to FW the binary sections of CPU2 */
-                IWL_DEBUG_INFO(trans, "working w/ DUAL CPUs - Loading CPU2\n");
-                for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
-                     i < IWL_UCODE_SECTION_MAX; i++) {
-                        if (!image->sec[i].data)
-                                break;
-                        ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
-                        if (ret)
-                                return ret;
-                }
+                if (image->is_secure)
+                        ret = iwl_pcie_load_cpu_secured_sections(
+                                                        trans, image, 2,
+                                                        &first_ucode_section);
+                else
+                        ret = iwl_pcie_load_cpu_sections(trans, image, 2,
+                                                         &first_ucode_section);
+                if (ret)
+                        return ret;
+        }
+
+        /* release CPU reset */
+        if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+                iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
+        else
+                iwl_write32(trans, CSR_RESET, 0);
 
-                if (image->is_secure) {
-                        /* set CPU2 for secure protocol */
-                        ret = iwl_pcie_secure_set(trans, 2);
-                        if (ret)
-                                return ret;
+        if (image->is_secure) {
+                /* wait for image verification to complete */
+                ret = iwl_poll_prph_bit(trans,
+                                        LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
+                                        LMPM_SECURE_BOOT_STATUS_SUCCESS,
+                                        LMPM_SECURE_BOOT_STATUS_SUCCESS,
+                                        LMPM_SECURE_TIME_OUT);
+
+                if (ret < 0) {
+                        IWL_ERR(trans, "Time out on secure boot process\n");
+                        return ret;
                 }
         }
 
@@ -591,7 +770,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
                 set_bit(STATUS_RFKILL, &trans->status);
         else
                 clear_bit(STATUS_RFKILL, &trans->status);
-        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
         if (hw_rfkill && !run_in_rfkill)
                 return -ERFKILL;
 
@@ -706,7 +885,13 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
         else
                 clear_bit(STATUS_RFKILL, &trans->status);
         if (hw_rfkill != was_hw_rfkill)
-                iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+                iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+}
+
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
+{
+        if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
+                iwl_trans_pcie_stop_device(trans);
 }
 
 static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
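The new iwl_trans_pcie_rf_kill() centralizes rfkill reporting: the op mode hook is still notified, but if the hook returns true the transport now stops the device itself. A minimal sketch of how the callers touched by this patch report an rfkill state (this mirrors the start_fw/start_hw hunks above and below; the function name is illustrative, not new driver code):

/* Illustration only: update the status bit, then let the common helper
 * notify the op mode and, if the op mode asks for it, stop the device. */
static void example_report_rfkill(struct iwl_trans *trans, bool hw_rfkill)
{
        if (hw_rfkill)
                set_bit(STATUS_RFKILL, &trans->status);
        else
                clear_bit(STATUS_RFKILL, &trans->status);
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}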
@@ -815,7 +1000,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
                 set_bit(STATUS_RFKILL, &trans->status);
         else
                 clear_bit(STATUS_RFKILL, &trans->status);
-        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 
         return 0;
 }
@@ -1158,6 +1343,7 @@ static const char *get_csr_string(int cmd)
         IWL_CMD(CSR_GIO_CHICKEN_BITS);
         IWL_CMD(CSR_ANA_PLL_CFG);
         IWL_CMD(CSR_HW_REV_WA_REG);
+        IWL_CMD(CSR_MONITOR_STATUS_REG);
         IWL_CMD(CSR_DBG_HPET_MEM_REG);
         default:
                 return "UNKNOWN";
@@ -1190,6 +1376,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
                 CSR_DRAM_INT_TBL_REG,
                 CSR_GIO_CHICKEN_BITS,
                 CSR_ANA_PLL_CFG,
+                CSR_MONITOR_STATUS_REG,
                 CSR_HW_REV_WA_REG,
                 CSR_DBG_HPET_MEM_REG
         };
@@ -1407,16 +1594,15 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
 {
         struct iwl_trans *trans = file->private_data;
         char *buf = NULL;
-        int pos = 0;
-        ssize_t ret = -EFAULT;
-
-        ret = pos = iwl_dump_fh(trans, &buf);
-        if (buf) {
-                ret = simple_read_from_buffer(user_buf,
-                                              count, ppos, buf, pos);
-                kfree(buf);
-        }
+        ssize_t ret;
 
+        ret = iwl_dump_fh(trans, &buf);
+        if (ret < 0)
+                return ret;
+        if (!buf)
+                return -EINVAL;
+        ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+        kfree(buf);
         return ret;
 }
 