author     Andrew Morton <akpm@linux-foundation.org>     2011-07-25 20:12:16 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-07-25 23:57:09 -0400
commit     6ac47520063b230641a64062b8a229201cd0a3a8
tree       e26a25226f980a50468f001bbd3243d74d0d9768  /mm/memory.c
parent     32f84528fbb5177275193a3311be8756f0cbd62c
mm/memory.c: remove ZAP_BLOCK_SIZE
ZAP_BLOCK_SIZE became unused in the preemptible-mmu_gather work ("mm:
Remove i_mmap_lock lockbreak"). So zap it.
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 11 -----------
1 file changed, 0 insertions(+), 11 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 9b8a01d941cb..a58bbebb3070 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1290,13 +1290,6 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
 	return addr;
 }
 
-#ifdef CONFIG_PREEMPT
-# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
-#else
-/* No preempt: go for improved straight-line efficiency */
-# define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
-#endif
-
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlb: address of the caller's struct mmu_gather
@@ -1310,10 +1303,6 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
  *
  * Unmap all pages in the vma list.
  *
- * We aim to not hold locks for too long (for scheduling latency reasons).
- * So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to
- * return the ending mmu_gather to the caller.
- *
  * Only addresses between `start' and `end' will be unmapped.
  *
  * The VMA list must be sorted in ascending virtual address order.
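
The comment removed in the second hunk described zapping in ZAP_BLOCK_SIZE bytecounts so that no single pass held locks across an entire large unmap. Below is a minimal, self-contained userspace sketch of that chunked pattern, purely for illustration: zap_block(), the sample range, and the 8-page block size are assumptions of the example, not kernel code.

/*
 * Standalone illustration (not kernel code) of the chunked-zap pattern the
 * removed comment described: tear down a large range in fixed-size blocks
 * so no single pass holds a lock for too long.  zap_block() and the sample
 * range below are hypothetical; only the block-size idea mirrors the patch.
 */
#include <stdio.h>

#define PAGE_SIZE      4096UL
#define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)	/* the old CONFIG_PREEMPT choice */

/* Pretend to unmap one block of the range. */
static void zap_block(unsigned long start, unsigned long end)
{
	printf("zap %#lx-%#lx\n", start, end);
}

static void zap_range_chunked(unsigned long start, unsigned long end)
{
	while (start < end) {
		unsigned long block_end = start + ZAP_BLOCK_SIZE;

		if (block_end > end)
			block_end = end;
		/*
		 * Take the lock, zap one block, drop the lock: latency is
		 * bounded by ZAP_BLOCK_SIZE rather than by the whole range.
		 */
		zap_block(start, block_end);
		start = block_end;
	}
}

int main(void)
{
	/* 20 pages starting at an arbitrary address: expect three blocks */
	zap_range_chunked(0x100000UL, 0x100000UL + 20 * PAGE_SIZE);
	return 0;
}

As the commit message notes, the preemptible-mmu_gather series removed the need for this caller-level batching, so the constant had no remaining users and could simply be deleted.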