author     Linus Torvalds <torvalds@linux-foundation.org>  2016-01-07 15:56:23 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-01-07 15:56:23 -0500
commit     1d8b0e79083d229829a408cd016dc4a038f637dd
tree       120e466200b31570e59357c0da9ebc1eb938fc5f
parent     2626820d838f9e98f323bf47b4fb7722d1c52e53
parent     164afb1d85b872907cfac048b46c094db596d529
Merge tag 'iommu-fixes-v4.4-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU fixes from Joerg Roedel:

 - Two build issues, one in the ipmmu-vmsa driver and one for the new
   generic dma-api implementation used on arm64

 - A performance fix for said dma-api implementation

 - An issue caused by a wrong offset in map_sg in the same code as
   above

* tag 'iommu-fixes-v4.4-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/dma: Use correct offset in map_sg
  iommu/ipmmu-vmsa: Don't truncate ttbr if LPAE is not enabled
  iommu/dma: Avoid unlikely high-order allocations
  iommu/dma: Add some missing #includes
 drivers/iommu/dma-iommu.c  | 11 ++++++++---
 drivers/iommu/ipmmu-vmsa.c |  2 +-
 2 files changed, 9 insertions(+), 4 deletions(-)
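The map_sg change in the first file below swaps which offset gets stashed in sg_dma_address() before the IOVA region is allocated. What follows is a minimal userspace sketch of the granule arithmetic involved, not kernel code: it assumes a 4 KiB IOVA granule with a larger kernel page size (e.g. 64 KiB pages on arm64) so that the page offset and the granule offset actually differ, and iova_off()/iova_al() are hypothetical stand-ins for the kernel's iova_offset()/iova_align() helpers. The point is that only the offset within the granule may be added back to the mapped base later; the granule-aligned part of the page offset stays in s->offset and is covered by the mapping itself.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for iova_offset()/iova_align(). */
static uint64_t iova_off(uint64_t granule, uint64_t x) { return x & (granule - 1); }
static uint64_t iova_al(uint64_t granule, uint64_t x)  { return (x + granule - 1) & ~(granule - 1); }

int main(void)
{
	uint64_t granule  = 4096;       /* IOVA granule (IOMMU page size), assumption */
	uint64_t page_off = 0x2345;     /* segment offset within a 64 KiB page, example */
	uint64_t length   = 0x1000;     /* segment length, example */
	uint64_t dma_base = 0x80000000; /* pretend IOVA where the aligned region lands */

	uint64_t s_offset = iova_off(granule, page_off); /* offset within the granule */
	uint64_t aligned  = page_off - s_offset;         /* stays in s->offset, gets mapped */

	/* The mapping covers [dma_base, dma_base + iova_al(length + s_offset)). */
	printf("mapped length:    %#" PRIx64 "\n", iova_al(granule, length + s_offset));

	/* Fixed code stashes s_offset, so the final address is dma_base + s_offset. */
	printf("correct dma addr: %#" PRIx64 "\n", dma_base + s_offset);

	/* Stashing the full page offset would count the aligned part twice. */
	printf("buggy dma addr:   %#" PRIx64 " (off by %#" PRIx64 ")\n",
	       dma_base + page_off, aligned);
	return 0;
}

Run as a plain C program, the buggy address comes out higher than the correct one by exactly the granule-aligned part of the page offset.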
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 3a20db4f8604..72d6182666cb 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -21,10 +21,13 @@
 
 #include <linux/device.h>
 #include <linux/dma-iommu.h>
+#include <linux/gfp.h>
 #include <linux/huge_mm.h>
 #include <linux/iommu.h>
 #include <linux/iova.h>
 #include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
 
 int iommu_dma_init(void)
 {
@@ -191,6 +194,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
 {
 	struct page **pages;
 	unsigned int i = 0, array_size = count * sizeof(*pages);
+	unsigned int order = MAX_ORDER;
 
 	if (array_size <= PAGE_SIZE)
 		pages = kzalloc(array_size, GFP_KERNEL);
@@ -204,14 +208,15 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
 
 	while (count) {
 		struct page *page = NULL;
-		int j, order = __fls(count);
+		int j;
 
 		/*
 		 * Higher-order allocations are a convenience rather
 		 * than a necessity, hence using __GFP_NORETRY until
 		 * falling back to single-page allocations.
 		 */
-		for (order = min(order, MAX_ORDER); order > 0; order--) {
+		for (order = min_t(unsigned int, order, __fls(count));
+		     order > 0; order--) {
 			page = alloc_pages(gfp | __GFP_NORETRY, order);
 			if (!page)
 				continue;
@@ -453,7 +458,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		size_t s_offset = iova_offset(iovad, s->offset);
 		size_t s_length = s->length;
 
-		sg_dma_address(s) = s->offset;
+		sg_dma_address(s) = s_offset;
 		sg_dma_len(s) = s_length;
 		s->offset -= s_offset;
 		s_length = iova_align(iovad, s_length + s_offset);
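The allocation hunk above hoists order out of the per-iteration scope so that, once a given order has failed, later passes never retry anything larger, while __GFP_NORETRY keeps each attempt cheap. Below is a minimal userspace sketch of that order-selection and fallback logic only, under stated assumptions: try_alloc_order() is a hypothetical stand-in for alloc_pages(gfp | __GFP_NORETRY, order), MAX_ORDER is taken as 11, and the page splitting done by the real function is omitted.

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER 11 /* assumption: common kernel default */

/* Hypothetical allocator: pretend fragmentation makes orders above 2 fail. */
static bool try_alloc_order(unsigned int order)
{
	return order <= 2;
}

/* floor(log2(x)) for x > 0, mirroring the kernel's __fls(). */
static unsigned int fls_floor(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int count = 1000;      /* pages still needed */
	unsigned int order = MAX_ORDER; /* cap carried across iterations */

	while (count) {
		unsigned int got = 0;

		/* Never retry an order bigger than what is left or what already failed. */
		if (order > fls_floor(count))
			order = fls_floor(count);
		for (; order > 0; order--) {
			if (try_alloc_order(order)) {
				got = 1u << order;
				break;
			}
		}
		if (!got) {
			/* Final fallback: a plain single-page allocation. */
			if (!try_alloc_order(0))
				return 1;
			got = 1;
		}
		count -= got;
		printf("order %u -> %u page(s), %u left\n", order, got, count);
	}
	return 0;
}

Because the cap only ever ratchets downwards, a fragmented system quickly settles on the largest order that still succeeds instead of re-attempting doomed high-order allocations for every chunk.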
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 8cf605fa9946..dfb868e2d129 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -295,7 +295,7 @@ static struct iommu_gather_ops ipmmu_gather_ops = {
 
 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 {
-	phys_addr_t ttbr;
+	u64 ttbr;
 
 	/*
 	 * Allocate the page table operations.
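The ipmmu-vmsa change matters because phys_addr_t is only 32 bits wide when LPAE is not enabled, while the TTBR value taken from the page-table configuration is 64-bit; storing it in a phys_addr_t local silently dropped the upper word before the driver split the value into its lower/upper TTBR0 register writes. A minimal userspace sketch of the truncation, assuming a 32-bit typedef as a stand-in for a !CONFIG_ARM_LPAE build and an illustrative TTBR value:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for phys_addr_t in a !CONFIG_ARM_LPAE build. */
typedef uint32_t phys_addr_t_no_lpae;

int main(void)
{
	/* Example TTBR with significant bits above bit 31 (illustrative value). */
	uint64_t cfg_ttbr = 0x0000000140001000ULL;

	phys_addr_t_no_lpae old_ttbr = (phys_addr_t_no_lpae)cfg_ttbr; /* truncated */
	uint64_t new_ttbr = cfg_ttbr;                                  /* full value */

	/* The register pair is programmed from the low and high words. */
	printf("old: lo=%#010" PRIx32 " hi=%#010" PRIx32 "\n",
	       (uint32_t)old_ttbr, (uint32_t)((uint64_t)old_ttbr >> 32));
	printf("new: lo=%#010" PRIx32 " hi=%#010" PRIx32 "\n",
	       (uint32_t)new_ttbr, (uint32_t)(new_ttbr >> 32));
	return 0;
}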