about summary refs log tree commit diff stats
path: root/arch/arm/mm/dma-mapping.c
diff options
context:
space:
mode:
author    Tomasz Figa <tfiga@chromium.org>  2015-04-01 02:26:33 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2015-04-02 04:58:25 -0400
commit49f28aa6b0d0735dbe5f04263c49a199ed0c5bb7 (patch)
treea1ff240e7ac0d1b3373a81ca3820d7b7a7b198fe /arch/arm/mm/dma-mapping.c
parent8defb3367fcd19d1af64c07792aade0747b54e0f (diff)
ARM: 8337/1: mm: Do not invoke OOM for higher order IOMMU DMA allocations
IOMMU should be able to use single pages as well as bigger blocks, so if higher order allocations fail, we should not affect the state of the system with events such as the OOM killer, but rather fall back to order-0 allocations.

This patch changes the behavior of the ARM IOMMU DMA allocator to use __GFP_NORETRY, which bypasses OOM invocation, for orders higher than zero and, only if that fails, fall back to a normal order-0 allocation which might invoke the OOM killer.

Signed-off-by: Tomasz Figa <tfiga@chromium.org>
Reviewed-by: Doug Anderson <dianders@chromium.org>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--  arch/arm/mm/dma-mapping.c | 27
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c27447653903..f9941cd689e9 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1135,13 +1135,28 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
 
 	while (count) {
-		int j, order = __fls(count);
+		int j, order;
+
+		for (order = __fls(count); order > 0; --order) {
+			/*
+			 * We do not want OOM killer to be invoked as long
+			 * as we can fall back to single pages, so we force
+			 * __GFP_NORETRY for orders higher than zero.
+			 */
+			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
+			if (pages[i])
+				break;
+		}
 
-		pages[i] = alloc_pages(gfp, order);
-		while (!pages[i] && order)
-			pages[i] = alloc_pages(gfp, --order);
-		if (!pages[i])
-			goto error;
+		if (!pages[i]) {
+			/*
+			 * Fall back to single page allocation.
+			 * Might invoke OOM killer as last resort.
+			 */
+			pages[i] = alloc_pages(gfp, 0);
+			if (!pages[i])
+				goto error;
+		}
 
 		if (order) {
 			split_page(pages[i], order);