author     Keshavamurthy, Anil S <anil.s.keshavamurthy@intel.com>    2007-10-21 19:41:52 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>      2007-10-22 11:13:18 -0400
commit     eb3fa7cb51a2a35cd95eb92e65d8039c779468a5
tree       929311227a650db28238888b21169dd4fa26f5f8   /drivers/pci/intel-iommu.c
parent     ba39592764ed20cee09aae5352e603a27bf56b0d
Intel IOMMU: Avoid memory allocation failures in dma map api calls
The Intel IOMMU driver needs memory during DMA map calls to set up its internal page tables and other data structures. These DMA map calls are mostly made in interrupt context or with a spinlock held by the upper-level drivers (network/storage drivers), so to avoid allocation failures under low-memory conditions this patch temporarily sets the PF_MEMALLOC flag on the current task before making the allocation calls.

We evaluated mempools as a backup for when kmem_cache_alloc() fails and found that mempools are not really useful here because
1) we don't know for sure how much to reserve in advance, and
2) mempools are not useful for the GFP_ATOMIC case (we call the memory allocation functions with GFP_ATOMIC).

(akpm: point 2 is wrong...)

With PF_MEMALLOC set in current->flags, the VM subsystem skips the watermark checks before allocating memory, thus guaranteeing memory down to the last free page. Further, looking at __alloc_pages() in mm/page_alloc.c, this flag is only useful in non-interrupt context. If we are in interrupt context and a memory allocation in the IOMMU driver fails for some reason, the DMA map APIs return failure and it is up to the higher-level drivers to retry. If an upper-level driver then programs the controller with a bogus DMA virtual address, the IOMMU will block that DMA transaction, preventing any corruption of main memory.

So far in our test scenarios we have been unable to trigger a memory allocation failure inside the DMA map API calls.

Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Muli Ben-Yehuda <muli@il.ibm.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Ashok Raj <ashok.raj@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Greg KH <greg@kroah.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
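For reference, the heart of the change is the PF_MEMALLOC save/set/restore idiom used around each allocation. The small user-space C sketch below is not kernel code; DEMO_MEMALLOC, alloc_with_flag and task_flags are made-up names for illustration only. It just demonstrates that the bit-masking sequence used in the patch restores the flag to whatever state it had before the allocation, whether it was previously set or clear.

#include <assert.h>
#include <stdio.h>

#define DEMO_MEMALLOC 0x0800u   /* stand-in for the kernel's PF_MEMALLOC bit */

/*
 * Mirrors the save/set/restore sequence iommu_kmem_cache_alloc() wraps
 * around kmem_cache_alloc(): remember whether the flag was already set,
 * force it on for the duration of the allocation, then put it back.
 */
static unsigned int alloc_with_flag(unsigned int task_flags)
{
        unsigned int saved = task_flags & DEMO_MEMALLOC;  /* save old state */

        task_flags |= DEMO_MEMALLOC;                      /* force flag on */
        /* ... the allocation itself would happen here ... */
        task_flags &= (~DEMO_MEMALLOC | saved);           /* restore old state */

        return task_flags;
}

int main(void)
{
        /* Flag initially clear: it must end up clear again. */
        assert((alloc_with_flag(0x0003u) & DEMO_MEMALLOC) == 0);

        /* Flag initially set: it must still be set afterwards. */
        assert((alloc_with_flag(0x0003u | DEMO_MEMALLOC) & DEMO_MEMALLOC) != 0);

        puts("flag state restored in both cases");
        return 0;
}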
Diffstat (limited to 'drivers/pci/intel-iommu.c')

 drivers/pci/intel-iommu.c | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 93ed771b3254..05630b44dbfc 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -85,9 +85,31 @@ static struct kmem_cache *iommu_domain_cache;
 static struct kmem_cache *iommu_devinfo_cache;
 static struct kmem_cache *iommu_iova_cache;
 
+static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
+{
+	unsigned int flags;
+	void *vaddr;
+
+	/* trying to avoid low memory issues */
+	flags = current->flags & PF_MEMALLOC;
+	current->flags |= PF_MEMALLOC;
+	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
+	current->flags &= (~PF_MEMALLOC | flags);
+	return vaddr;
+}
+
+
 static inline void *alloc_pgtable_page(void)
 {
-	return (void *)get_zeroed_page(GFP_ATOMIC);
+	unsigned int flags;
+	void *vaddr;
+
+	/* trying to avoid low memory issues */
+	flags = current->flags & PF_MEMALLOC;
+	current->flags |= PF_MEMALLOC;
+	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
+	current->flags &= (~PF_MEMALLOC | flags);
+	return vaddr;
 }
 
 static inline void free_pgtable_page(void *vaddr)
@@ -97,7 +119,7 @@ static inline void free_pgtable_page(void *vaddr)
 
 static inline void *alloc_domain_mem(void)
 {
-	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
+	return iommu_kmem_cache_alloc(iommu_domain_cache);
 }
 
 static inline void free_domain_mem(void *vaddr)
@@ -107,7 +129,7 @@ static inline void free_domain_mem(void *vaddr)
 
 static inline void * alloc_devinfo_mem(void)
 {
-	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
+	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
 }
 
 static inline void free_devinfo_mem(void *vaddr)
@@ -117,7 +139,7 @@ static inline void free_devinfo_mem(void *vaddr)
 
 struct iova *alloc_iova_mem(void)
 {
-	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
+	return iommu_kmem_cache_alloc(iommu_iova_cache);
 }
 
 void free_iova_mem(struct iova *iova)