author		Lucas Stach <l.stach@pengutronix.de>	2017-12-14 18:32:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-12-14 19:00:48 -0500
commit		c24ad77d962c31af92f2b731dad2104cbf3fbb03 (patch)
tree		4a2121e76b9561c1cdd85d58c41192c570b284b0 /mm/page_alloc.c
parent		183f24aa5b76e37da690b2def41cc70f0792ce09 (diff)
mm/page_alloc.c: avoid excessive IRQ disabled times in free_unref_page_list()
Since commit 9cca35d42eb6 ("mm, page_alloc: enable/disable IRQs once when freeing a list of pages") we see excessive IRQ disabled times of up to 25ms on an embedded ARM system (tracing overhead included). This is due to graphics buffers being freed back to the system via release_pages(). Graphics buffers can be huge, so it's not hard to hit cases where the list of pages to free has 2048 entries. Disabling IRQs while freeing all those pages is clearly not a good idea.

Introduce a batch limit, which allows IRQ servicing once every few pages. The batch count is the same as used in other parts of the MM subsystem when dealing with IRQ disabled regions.

Link: http://lkml.kernel.org/r/20171207170314.4419-1-l.stach@pengutronix.de
Fixes: 9cca35d42eb6 ("mm, page_alloc: enable/disable IRQs once when freeing a list of pages")
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
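In distilled form, the patch applies the common kernel pattern of briefly re-opening the IRQ window inside a long loop. The fragment below is an illustrative sketch only and not part of the change: free_one() is a hypothetical stand-in for the per-page freeing work, while local_irq_save()/local_irq_restore() and SWAP_CLUSTER_MAX (the batch size used elsewhere in MM, 32) are the real primitives the patch relies on.

#include <linux/irqflags.h>	/* local_irq_save()/local_irq_restore() */
#include <linux/swap.h>		/* SWAP_CLUSTER_MAX */
#include <linux/list.h>
#include <linux/mm_types.h>	/* struct page */

/* Hypothetical helper standing in for the real per-page freeing work. */
static void free_one(struct page *page);

static void free_many(struct list_head *list)
{
	struct page *page, *next;
	unsigned long flags;
	int batch_count = 0;

	local_irq_save(flags);
	list_for_each_entry_safe(page, next, list, lru) {
		free_one(page);

		/*
		 * After every SWAP_CLUSTER_MAX pages, re-enable IRQs for a
		 * moment so pending interrupts can be serviced, then disable
		 * them again before continuing with the list.
		 */
		if (++batch_count == SWAP_CLUSTER_MAX) {
			local_irq_restore(flags);
			batch_count = 0;
			local_irq_save(flags);
		}
	}
	local_irq_restore(flags);
}

This keeps the per-page fast path (IRQs toggled once per batch rather than once per page) while bounding the worst-case IRQ-off latency to the time needed to free SWAP_CLUSTER_MAX pages.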
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	11
1 file changed, 11 insertions, 0 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 73f5d4556b3d..7e5e775e97f4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2684,6 +2684,7 @@ void free_unref_page_list(struct list_head *list)
 {
 	struct page *page, *next;
 	unsigned long flags, pfn;
+	int batch_count = 0;
 
 	/* Prepare pages for freeing */
 	list_for_each_entry_safe(page, next, list, lru) {
@@ -2700,6 +2701,16 @@ void free_unref_page_list(struct list_head *list)
 		set_page_private(page, 0);
 		trace_mm_page_free_batched(page);
 		free_unref_page_commit(page, pfn);
+
+		/*
+		 * Guard against excessive IRQ disabled times when we get
+		 * a large list of pages to free.
+		 */
+		if (++batch_count == SWAP_CLUSTER_MAX) {
+			local_irq_restore(flags);
+			batch_count = 0;
+			local_irq_save(flags);
+		}
 	}
 	local_irq_restore(flags);
 }