author    Andrea Arcangeli <aarcange@redhat.com>         2011-03-22 19:33:10 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2011-03-22 20:44:05 -0400
commit    b2eef8c0d09101bbbff2531c097543aedde0b525 (patch)
tree      ba0cbc88a8f66dc53c915509d90b3c4eeae8e7f1 /mm/compaction.c
parent    602605a42ea4c299aeed4d806c49fb9dd18cd204 (diff)
mm: compaction: minimise the time IRQs are disabled while isolating pages for migration
compaction_alloc() isolates pages for migration in isolate_migratepages().
While it's scanning, IRQs are disabled on the mistaken assumption that the
scanning should be short. Tests show this to be true for the most part, but
contention times on the LRU lock can be increased. Before this patch, the
IRQ-disabled times for a simple test looked like:

  Total sampled time IRQs off (not real total time): 5493
  Event shrink_inactive_list..shrink_zone    1596 us count 1
  Event shrink_inactive_list..shrink_zone    1530 us count 1
  Event shrink_inactive_list..shrink_zone     956 us count 1
  Event shrink_inactive_list..shrink_zone     541 us count 1
  Event shrink_inactive_list..shrink_zone     531 us count 1
  Event split_huge_page..add_to_swap          232 us count 1
  Event save_args..call_softirq                36 us count 1
  Event save_args..call_softirq                35 us count 2
  Event __wake_up..__wake_up                    1 us count 1

This patch reduces the worst-case IRQs-disabled latencies by releasing the
lock every SWAP_CLUSTER_MAX pages scanned and releasing the CPU if
necessary. The cost is that the process performing compaction will be
slower, but IRQs being disabled for too long has worse consequences, as the
following report shows:

  Total sampled time IRQs off (not real total time): 4367
  Event shrink_inactive_list..shrink_zone     881 us count 1
  Event shrink_inactive_list..shrink_zone     875 us count 1
  Event shrink_inactive_list..shrink_zone     868 us count 1
  Event shrink_inactive_list..shrink_zone     555 us count 1
  Event split_huge_page..add_to_swap          495 us count 1
  Event compact_zone..compact_zone_order      269 us count 1
  Event split_huge_page..add_to_swap          266 us count 1
  Event shrink_inactive_list..shrink_zone      85 us count 1
  Event save_args..call_softirq                36 us count 2
  Event __wake_up..__wake_up                    1 us count 1

[akpm@linux-foundation.org: simplify with s/unlocked/locked/]
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Arthur Marsh <arthur.marsh@internode.on.net>
Cc: Clemens Ladisch <cladisch@googlemail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c  18
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index b27802e04b91..021a2960ef9e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -277,9 +277,27 @@ static unsigned long isolate_migratepages(struct zone *zone,
 	}
 
 	/* Time to isolate some pages for migration */
+	cond_resched();
 	spin_lock_irq(&zone->lru_lock);
 	for (; low_pfn < end_pfn; low_pfn++) {
 		struct page *page;
+		bool locked = true;
+
+		/* give a chance to irqs before checking need_resched() */
+		if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
+			spin_unlock_irq(&zone->lru_lock);
+			locked = false;
+		}
+		if (need_resched() || spin_is_contended(&zone->lru_lock)) {
+			if (locked)
+				spin_unlock_irq(&zone->lru_lock);
+			cond_resched();
+			spin_lock_irq(&zone->lru_lock);
+			if (fatal_signal_pending(current))
+				break;
+		} else if (!locked)
+			spin_lock_irq(&zone->lru_lock);
+
 		if (!pfn_valid_within(low_pfn))
 			continue;
 		nr_scanned++;
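
As an aside, the locking pattern the patch introduces (do batched work under
a contended lock, periodically dropping it and yielding) is general. Below is
a minimal userspace sketch of that pattern, not the kernel code: a pthread
mutex and sched_yield() stand in for the IRQ-disabling zone->lru_lock and
cond_resched(), and BATCH, scan_lock and scan_range() are illustrative names
playing the role of SWAP_CLUSTER_MAX, the LRU lock and the migration scan.

#include <pthread.h>
#include <sched.h>

#define BATCH 32	/* stands in for SWAP_CLUSTER_MAX */

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long nr_scanned;	/* toy work done under the lock */

static void scan_range(unsigned long start, unsigned long end)
{
	unsigned long i;

	pthread_mutex_lock(&scan_lock);
	for (i = start; i < end; i++) {
		/*
		 * Every BATCH iterations, drop the lock and yield the
		 * CPU so waiters (the analogue of IRQs and other
		 * lru_lock users in the patch) are not starved for the
		 * duration of the whole scan.
		 */
		if (!((i + 1) % BATCH)) {
			pthread_mutex_unlock(&scan_lock);
			sched_yield();
			pthread_mutex_lock(&scan_lock);
		}
		nr_scanned++;	/* the "work" done while locked */
	}
	pthread_mutex_unlock(&scan_lock);
}

int main(void)
{
	scan_range(0, 1UL << 20);
	return 0;
}

The trade-off is the one the changelog describes: the scanner itself gets
slower, since it now pays lock acquire/release and scheduling costs every
BATCH iterations, but the worst-case time anyone else waits on the lock is
bounded by one batch rather than by the whole scan.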