author		KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2009-11-17 02:21:09 -0500
committer	David Woodhouse <David.Woodhouse@intel.com>		2009-12-08 05:12:04 -0500
commit		354bb65e6e0df0aaae0e5b1ea33948d8e0b61418
tree		afcb7dca4c8362442ec50ce799290ae1211bde19	/drivers/pci
parent		1672af1164d3d50ba8908014fd34cc0b58afdc1e
Revert "Intel IOMMU: Avoid memory allocation failures in dma map api calls"
Commit eb3fa7cb51 ("Intel IOMMU: Avoid memory allocation failures in dma map api calls") said:
The Intel IOMMU driver needs memory during DMA map calls to set up its
internal page tables and other data structures. These DMA map calls are
mostly made in interrupt context or with a spinlock held by the
upper-level drivers (network/storage drivers), so to avoid memory
allocation failures under low-memory conditions, this patch performs
those allocations with the PF_MEMALLOC flag temporarily set on the
current task.
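For context, the pattern being described is the save/set/restore
sequence that this revert removes (see iommu_kmem_cache_alloc() in the
diff below):

	unsigned int flags;
	void *vaddr;

	/* save the caller's PF_MEMALLOC state, then set the flag so this
	 * allocation may dip into the emergency reserves */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	/* restore: clears PF_MEMALLOC only if it was clear on entry */
	current->flags &= (~PF_MEMALLOC | flags);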
We evaluated mempools as a backup for when kmem_cache_alloc() fails,
and found that mempools are not really useful here because:
1) We don't know for sure how much to reserve in advance.
2) Mempools are not useful for the GFP_ATOMIC case (we call the
   memory allocation functions with GFP_ATOMIC).
(akpm: point 2 is wrong...)
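akpm's correction refers to mempool_alloc()'s fallback behaviour: a
GFP_ATOMIC allocation that fails in the underlying slab cache falls
back to the pool's pre-reserved elements. A minimal sketch, where the
pool name and reserve count are illustrative and not from this patch:

	static mempool_t *iova_pool;	/* hypothetical pool for struct iova */

	static int iova_pool_init(struct kmem_cache *cache)
	{
		/* pre-reserve 16 elements up front; the count is a guess,
		 * which is exactly point 1's objection */
		iova_pool = mempool_create_slab_pool(16, cache);
		return iova_pool ? 0 : -ENOMEM;
	}

	static struct iova *iova_pool_alloc(void)
	{
		/* does not sleep with GFP_ATOMIC; when the slab allocation
		 * fails, it can hand out one of the reserved elements */
		return mempool_alloc(iova_pool, GFP_ATOMIC);
	}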
The above description does not justify consuming the system's emergency
memory at all. Non-MM subsystems must not use PF_MEMALLOC: memory
reclaim itself needs a small amount of memory to make progress, and
nothing must be allowed to take that away from it. Otherwise the system
can suffer mysterious hang-ups and/or spurious OOM-killer invocations.
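The cost being objected to: in the page allocator, PF_MEMALLOC grants
ALLOC_NO_WATERMARKS, letting the task consume the reserve that reclaim
itself depends on. A rough, simplified paraphrase of the
mm/page_alloc.c logic (not code touched by this patch):

	/* simplified from gfp_to_alloc_flags() in mm/page_alloc.c */
	if (unlikely(current->flags & PF_MEMALLOC))
		/* ignore the zone watermarks entirely: the allocation may
		 * drain the last free pages, which memory reclaim needs in
		 * order to make forward progress */
		alloc_flags |= ALLOC_NO_WATERMARKS;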
Plus, akpm already pointed out what we should do.
Therefore, this patch reverts it.
Cc: Keshavamurthy Anil S <anil.s.keshavamurthy@intel.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/intel-iommu.c	25
1 file changed, 3 insertions(+), 22 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 466079535330..4e1dd40f18e3 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -387,33 +387,14 @@ static struct kmem_cache *iommu_domain_cache;
 static struct kmem_cache *iommu_devinfo_cache;
 static struct kmem_cache *iommu_iova_cache;
 
-static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
-{
-	unsigned int flags;
-	void *vaddr;
-
-	/* trying to avoid low memory issues */
-	flags = current->flags & PF_MEMALLOC;
-	current->flags |= PF_MEMALLOC;
-	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
-	current->flags &= (~PF_MEMALLOC | flags);
-	return vaddr;
-}
-
-
 static inline void *alloc_pgtable_page(int node)
 {
-	unsigned int flags;
 	struct page *page;
 	void *vaddr = NULL;
 
-	/* trying to avoid low memory issues */
-	flags = current->flags & PF_MEMALLOC;
-	current->flags |= PF_MEMALLOC;
 	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
 	if (page)
 		vaddr = page_address(page);
-	current->flags &= (~PF_MEMALLOC | flags);
 	return vaddr;
 }
 
@@ -424,7 +405,7 @@ static inline void free_pgtable_page(void *vaddr)
 
 static inline void *alloc_domain_mem(void)
 {
-	return iommu_kmem_cache_alloc(iommu_domain_cache);
+	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
 }
 
 static void free_domain_mem(void *vaddr)
@@ -434,7 +415,7 @@ static void free_domain_mem(void *vaddr)
 
 static inline void * alloc_devinfo_mem(void)
 {
-	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
+	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
 }
 
 static inline void free_devinfo_mem(void *vaddr)
@@ -444,7 +425,7 @@ static inline void free_devinfo_mem(void *vaddr)
 
 struct iova *alloc_iova_mem(void)
 {
-	return iommu_kmem_cache_alloc(iommu_iova_cache);
+	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
 }
 
 void free_iova_mem(struct iova *iova)