path: root/mm/compaction.c
author		Rik van Riel <riel@redhat.com>	2012-03-21 19:33:52 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 20:54:56 -0400
commit		7be62de99adcab4449d416977b4274985c5fe023 (patch)
tree		20ae021ec9811ad730e6a17a3530d3aa6b5027d0 /mm/compaction.c
parent		fe2c2a106663130a5ab45cb0e3414b52df2fff0c (diff)
vmscan: kswapd carefully call compaction
With CONFIG_COMPACTION enabled, kswapd does not try to free contiguous pages, even when it is woken for a higher-order request.

This can be bad for e.g. jumbo-frame network allocations, which are done from interrupt context and cannot compact memory themselves. Higher allocation failure rates than before have been observed in the network receive path on kernels with compaction enabled.

Teach kswapd to defragment the memory zones in a node, but only if required and compaction is not deferred in a zone.

[akpm@linux-foundation.org: reduce scope of zones_need_compaction]
Signed-off-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
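The kswapd-side hook lives in mm/vmscan.c and is therefore not part of this file's diff. As a rough, illustrative sketch only (the helper name kswapd_try_compact below is hypothetical and not taken from the patch), a caller in kswapd's balance path could use the new compact_pgdat() entry point along these lines:

/*
 * Illustrative sketch, not the actual vmscan.c hunk: once kswapd has
 * balanced a node for a higher-order wakeup, it can ask the compaction
 * code to defragment the node's zones.  __compact_pgdat() skips any
 * zone where compaction is currently deferred, so this call stays
 * cheap when compaction recently failed.
 */
static void kswapd_try_compact(pg_data_t *pgdat, int order)
{
	/* Order-0 wakeups never need contiguous pages. */
	if (order <= 0)
		return;

	/*
	 * Async (sync == false) compaction of the whole node; each zone
	 * is only compacted if its deferral state allows it.
	 */
	compact_pgdat(pgdat, order);
}

Keeping the deferral check inside __compact_pgdat() means such a caller does not have to inspect per-zone deferral state itself; the proc-trigger path (compact_node()) passes order == -1 and sync == true to force full, synchronous compaction regardless.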
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--	mm/compaction.c	53
1 file changed, 35 insertions(+), 18 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index d9ebebe1a2aa..36f0f61f4a24 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -675,44 +675,61 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 
 
 /* Compact all zones within a node */
-static int compact_node(int nid)
+static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
 {
 	int zoneid;
-	pg_data_t *pgdat;
 	struct zone *zone;
 
-	if (nid < 0 || nid >= nr_node_ids || !node_online(nid))
-		return -EINVAL;
-	pgdat = NODE_DATA(nid);
-
 	/* Flush pending updates to the LRU lists */
 	lru_add_drain_all();
 
 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
-		struct compact_control cc = {
-			.nr_freepages = 0,
-			.nr_migratepages = 0,
-			.order = -1,
-			.sync = true,
-		};
 
 		zone = &pgdat->node_zones[zoneid];
 		if (!populated_zone(zone))
 			continue;
 
-		cc.zone = zone;
-		INIT_LIST_HEAD(&cc.freepages);
-		INIT_LIST_HEAD(&cc.migratepages);
+		cc->nr_freepages = 0;
+		cc->nr_migratepages = 0;
+		cc->zone = zone;
+		INIT_LIST_HEAD(&cc->freepages);
+		INIT_LIST_HEAD(&cc->migratepages);
 
-		compact_zone(zone, &cc);
+		if (cc->order < 0 || !compaction_deferred(zone))
+			compact_zone(zone, cc);
 
-		VM_BUG_ON(!list_empty(&cc.freepages));
-		VM_BUG_ON(!list_empty(&cc.migratepages));
+		VM_BUG_ON(!list_empty(&cc->freepages));
+		VM_BUG_ON(!list_empty(&cc->migratepages));
 	}
 
 	return 0;
 }
 
+int compact_pgdat(pg_data_t *pgdat, int order)
+{
+	struct compact_control cc = {
+		.order = order,
+		.sync = false,
+	};
+
+	return __compact_pgdat(pgdat, &cc);
+}
+
+static int compact_node(int nid)
+{
+	pg_data_t *pgdat;
+	struct compact_control cc = {
+		.order = -1,
+		.sync = true,
+	};
+
+	if (nid < 0 || nid >= nr_node_ids || !node_online(nid))
+		return -EINVAL;
+	pgdat = NODE_DATA(nid);
+
+	return __compact_pgdat(pgdat, &cc);
+}
+
 /* Compact all nodes in the system */
 static int compact_nodes(void)
 {