author		Michal Hocko <mhocko@suse.com>	2017-01-10 19:57:39 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-01-10 21:31:54 -0500
commit		2df26639e708a88dcc22171949da638a9998f3bc (patch)
tree		43abfc9c954130a1116fa13b200fad312f6d7cef /mm/page_alloc.c
parent		f931ab479dd24cf7a2c6e2df19778406892591fb (diff)
mm: fix remote numa hits statistics
Jia He has noticed that commit b9f00e147f27 ("mm, page_alloc: reduce branches in zone_statistics") has an unintended side effect: remote node allocation requests are accounted as NUMA_MISS rather than NUMA_HIT and NUMA_OTHER if such a request doesn't use __GFP_OTHER_NODE.  Potentially many allocations are affected because the flag is used very rarely while we have many users of __alloc_pages_node.

Fix this by simply ignoring __GFP_OTHER_NODE (it can be removed in a follow up patch) and treating all allocations that were satisfied from the preferred zone's node as NUMA_HIT, because this is the same node we requested the allocation from in most cases.  If this is not the local node then we just account it as NUMA_OTHER rather than NUMA_LOCAL.

One downside is that an allocation request for a node which is outside of the mempolicy nodemask would be reported as a hit, which is a bit weird, but that was already the case before b9f00e147f27.

Fixes: b9f00e147f27 ("mm, page_alloc: reduce branches in zone_statistics")
Link: http://lkml.kernel.org/r/20170102153057.9451-2-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: Jia He <hejianet@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz> # with cbmc[1] superpowers
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Taku Izumi <izumi.taku@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
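The accounting rules after this change can be summarised with a small user-space model (illustration only; the function, the parameter names and the printed counter names are stand-ins mirroring the kernel code, not kernel API):

/* Model of the post-patch zone_statistics() decision: zone_node is the node
 * the page actually came from, preferred_node is the node of the preferred
 * zone, local_node is the node of the allocating CPU. */
#include <stdio.h>

static void account(int zone_node, int preferred_node, int local_node)
{
	/* hit/miss is decided against the preferred zone's node ... */
	const char *hit = (zone_node == preferred_node)
		? "NUMA_HIT"
		: "NUMA_MISS (on zone) + NUMA_FOREIGN (on preferred zone)";
	/* ... while local/other is decided against the running CPU's node;
	 * __GFP_OTHER_NODE no longer plays any role. */
	const char *loc = (zone_node == local_node) ? "NUMA_LOCAL" : "NUMA_OTHER";

	printf("zone=%d preferred=%d cpu=%d -> %s + %s\n",
	       zone_node, preferred_node, local_node, hit, loc);
}

int main(void)
{
	account(0, 0, 0);	/* ordinary local allocation                 */
	account(1, 1, 0);	/* explicit __alloc_pages_node(1) from CPU 0 */
	account(1, 0, 0);	/* fallback to a remote node                 */
	return 0;
}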
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	15
1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2c6d5f64feca..cba2a64792e6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2583,30 +2583,23 @@ int __isolate_free_page(struct page *page, unsigned int order)
  * Update NUMA hit/miss statistics
  *
  * Must be called with interrupts disabled.
- *
- * When __GFP_OTHER_NODE is set assume the node of the preferred
- * zone is the local node. This is useful for daemons who allocate
- * memory on behalf of other processes.
  */
 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
 								gfp_t flags)
 {
 #ifdef CONFIG_NUMA
-	int local_nid = numa_node_id();
 	enum zone_stat_item local_stat = NUMA_LOCAL;
 
-	if (unlikely(flags & __GFP_OTHER_NODE)) {
+	if (z->node != numa_node_id())
 		local_stat = NUMA_OTHER;
-		local_nid = preferred_zone->node;
-	}
 
-	if (z->node == local_nid) {
+	if (z->node == preferred_zone->node)
 		__inc_zone_state(z, NUMA_HIT);
-		__inc_zone_state(z, local_stat);
-	} else {
+	else {
 		__inc_zone_state(z, NUMA_MISS);
 		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
 	}
+	__inc_zone_state(z, local_stat);
 #endif
 }
 
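The counters updated above are exported per node through sysfs; a minimal reader, assuming the standard /sys/devices/system/node/nodeN/numastat layout, could look like this (illustration only, not part of the patch):

/* Dump the per-node NUMA allocation counters (numa_hit, numa_miss,
 * numa_foreign, local_node, other_node, ...) that zone_statistics() feeds. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/node/node0/numastat", "r");
	char line[128];

	if (!f) {
		perror("numastat");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}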