aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEmmanuel Grumbach <emmanuel.grumbach@intel.com>2013-04-30 07:33:04 -0400
committerJohannes Berg <johannes.berg@intel.com>2013-05-13 12:12:01 -0400
commitc571573a027bb65ac415141f919df3c0fa0fedb4 (patch)
treeb7e1979d5a54470820779c6ef418857feb3d8626
parent7df15b1e6f5994115bee369a527b50ec3521a39b (diff)
iwlwifi: pcie: prefer to load the firmware in one shot
Users complained about allocation failures, so we loaded the firmware in small chunks (PAGE_SIZE). This makes the firmware restart considerably slower. So, always prefer to load it in one shot by allocating a big chunk of coherent memory, and use smaller chunks as a fallback solution. On my laptop, this reduces the fw loading time from 120ms to 20ms. Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com> Reviewed-by: Moshe Island <moshe.island@intel.com> Signed-off-by: Johannes Berg <johannes.berg@intel.com>
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c21
1 files changed, 14 insertions, 7 deletions
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 50ba0a468f94..e5365196e5fe 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -405,20 +405,27 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
405{ 405{
406 u8 *v_addr; 406 u8 *v_addr;
407 dma_addr_t p_addr; 407 dma_addr_t p_addr;
408 u32 offset; 408 u32 offset, chunk_sz = section->len;
409 int ret = 0; 409 int ret = 0;
410 410
411 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", 411 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
412 section_num); 412 section_num);
413 413
414 v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL); 414 v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
415 if (!v_addr) 415 GFP_KERNEL | __GFP_NOWARN);
416 return -ENOMEM; 416 if (!v_addr) {
417 IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
418 chunk_sz = PAGE_SIZE;
419 v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
420 &p_addr, GFP_KERNEL);
421 if (!v_addr)
422 return -ENOMEM;
423 }
417 424
418 for (offset = 0; offset < section->len; offset += PAGE_SIZE) { 425 for (offset = 0; offset < section->len; offset += chunk_sz) {
419 u32 copy_size; 426 u32 copy_size;
420 427
421 copy_size = min_t(u32, PAGE_SIZE, section->len - offset); 428 copy_size = min_t(u32, chunk_sz, section->len - offset);
422 429
423 memcpy(v_addr, (u8 *)section->data + offset, copy_size); 430 memcpy(v_addr, (u8 *)section->data + offset, copy_size);
424 ret = iwl_pcie_load_firmware_chunk(trans, 431 ret = iwl_pcie_load_firmware_chunk(trans,
@@ -432,7 +439,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
432 } 439 }
433 } 440 }
434 441
435 dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr); 442 dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
436 return ret; 443 return ret;
437} 444}
438 445