Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	11
1 file changed, 0 insertions(+), 11 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 9b8a01d941cb..a58bbebb3070 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1290,13 +1290,6 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
 	return addr;
 }
 
-#ifdef CONFIG_PREEMPT
-# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
-#else
-/* No preempt: go for improved straight-line efficiency */
-# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
-#endif
-
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlb: address of the caller's struct mmu_gather
@@ -1310,10 +1303,6 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
  *
  * Unmap all pages in the vma list.
  *
- * We aim to not hold locks for too long (for scheduling latency reasons).
- * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
- * return the ending mmu_gather to the caller.
- *
  * Only addresses between `start' and `end' will be unmapped.
  *
  * The VMA list must be sorted in ascending virtual address order.
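
For context on what is being deleted: ZAP_BLOCK_SIZE bounded how much of an
address range a single zap pass would cover, so that locks were not held (and
preemption was not delayed) for too long; the #ifdef picked small chunks under
CONFIG_PREEMPT and large chunks otherwise for straight-line efficiency. Below
is a minimal user-space sketch of that chunking pattern. It is illustrative
only: zap_chunk() and cond_resched_point() are hypothetical stand-ins, not
kernel functions, and the real code threaded an mmu_gather through the loop.

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	/* Mirror of the removed policy: small chunks when kernel preemption
	 * is on, large chunks for straight-line efficiency when it is off. */
	#ifdef CONFIG_PREEMPT
	# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
	#else
	# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
	#endif

	/* Hypothetical stand-in for the real per-chunk unmap work. */
	static void zap_chunk(unsigned long start, unsigned long end)
	{
		printf("zap [%#lx, %#lx)\n", start, end);
	}

	/* In the kernel this point would drop locks and cond_resched();
	 * here it is a no-op placeholder. */
	static void cond_resched_point(void)
	{
	}

	/* Walk [start, end) in ZAP_BLOCK_SIZE pieces so no single pass
	 * runs for too long before offering a reschedule point. */
	static void zap_range(unsigned long start, unsigned long end)
	{
		while (start < end) {
			unsigned long block_end = start + ZAP_BLOCK_SIZE;

			if (block_end > end)
				block_end = end;
			zap_chunk(start, block_end);
			cond_resched_point();
			start = block_end;
		}
	}

	int main(void)
	{
		/* Zap a 10-page range. */
		zap_range(0x100000, 0x100000 + 10 * PAGE_SIZE);
		return 0;
	}

Compiled as-is, the 10-page range is covered in a single pass; building with
-DCONFIG_PREEMPT shrinks the block to 8 pages and splits the work in two,
which is exactly the latency-versus-throughput trade-off the removed #ifdef
encoded. The commit deletes both the macro and the comment documenting it, so
the latency-bounding responsibility evidently moves elsewhere; the hunk
context alone does not show where.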