summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMel Gorman <mgorman@techsingularity.net>2016-05-19 20:13:27 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-05-19 22:12:14 -0400
commit060e74173f292fb3e0398b3dca8765568d195ff1 (patch)
tree2b7a8c91c724ee6e0c256cdc0e71b919fcc56712
parentb9f00e147f27d86691f7f52a3c8126d25432477c (diff)
mm, page_alloc: inline zone_statistics
zone_statistics has one call-site but it's a public function. Make it
static and inline.

The performance difference on a page allocator microbenchmark is:

                                           4.6.0-rc2                  4.6.0-rc2
                                    statbranch-v1r20           statinline-v1r20
Min      alloc-odr0-1               419.00 (  0.00%)           412.00 (  1.67%)
Min      alloc-odr0-2               305.00 (  0.00%)           301.00 (  1.31%)
Min      alloc-odr0-4               250.00 (  0.00%)           247.00 (  1.20%)
Min      alloc-odr0-8               219.00 (  0.00%)           215.00 (  1.83%)
Min      alloc-odr0-16              203.00 (  0.00%)           199.00 (  1.97%)
Min      alloc-odr0-32              195.00 (  0.00%)           191.00 (  2.05%)
Min      alloc-odr0-64              191.00 (  0.00%)           187.00 (  2.09%)
Min      alloc-odr0-128             189.00 (  0.00%)           185.00 (  2.12%)
Min      alloc-odr0-256             198.00 (  0.00%)           193.00 (  2.53%)
Min      alloc-odr0-512             210.00 (  0.00%)           207.00 (  1.43%)
Min      alloc-odr0-1024            216.00 (  0.00%)           213.00 (  1.39%)
Min      alloc-odr0-2048            221.00 (  0.00%)           220.00 (  0.45%)
Min      alloc-odr0-4096            227.00 (  0.00%)           226.00 (  0.44%)
Min      alloc-odr0-8192            232.00 (  0.00%)           229.00 (  1.29%)
Min      alloc-odr0-16384           232.00 (  0.00%)           229.00 (  1.29%)

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/vmstat.h2
-rw-r--r--mm/page_alloc.c31
-rw-r--r--mm/vmstat.c29
3 files changed, 31 insertions, 31 deletions
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 02fce415b3d9..d2da8e053210 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -163,12 +163,10 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
163#ifdef CONFIG_NUMA 163#ifdef CONFIG_NUMA
164 164
165extern unsigned long node_page_state(int node, enum zone_stat_item item); 165extern unsigned long node_page_state(int node, enum zone_stat_item item);
166extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
167 166
168#else 167#else
169 168
170#define node_page_state(node, item) global_page_state(item) 169#define node_page_state(node, item) global_page_state(item)
171#define zone_statistics(_zl, _z, gfp) do { } while (0)
172 170
173#endif /* CONFIG_NUMA */ 171#endif /* CONFIG_NUMA */
174 172
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7be1ce8b6be0..36384baa74e1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2355,6 +2355,37 @@ int split_free_page(struct page *page)
2355} 2355}
2356 2356
2357/* 2357/*
2358 * Update NUMA hit/miss statistics
2359 *
2360 * Must be called with interrupts disabled.
2361 *
2362 * When __GFP_OTHER_NODE is set assume the node of the preferred
2363 * zone is the local node. This is useful for daemons who allocate
2364 * memory on behalf of other processes.
2365 */
2366static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
2367 gfp_t flags)
2368{
2369#ifdef CONFIG_NUMA
2370 int local_nid = numa_node_id();
2371 enum zone_stat_item local_stat = NUMA_LOCAL;
2372
2373 if (unlikely(flags & __GFP_OTHER_NODE)) {
2374 local_stat = NUMA_OTHER;
2375 local_nid = preferred_zone->node;
2376 }
2377
2378 if (z->node == local_nid) {
2379 __inc_zone_state(z, NUMA_HIT);
2380 __inc_zone_state(z, local_stat);
2381 } else {
2382 __inc_zone_state(z, NUMA_MISS);
2383 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
2384 }
2385#endif
2386}
2387
2388/*
2358 * Allocate a page from the given zone. Use pcplists for order-0 allocations. 2389 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
2359 */ 2390 */
2360static inline 2391static inline
diff --git a/mm/vmstat.c b/mm/vmstat.c
index d585de27e960..f1a73bfb77b5 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -570,35 +570,6 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
570 570
571#ifdef CONFIG_NUMA 571#ifdef CONFIG_NUMA
572/* 572/*
573 * zonelist = the list of zones passed to the allocator
574 * z = the zone from which the allocation occurred.
575 *
576 * Must be called with interrupts disabled.
577 *
578 * When __GFP_OTHER_NODE is set assume the node of the preferred
579 * zone is the local node. This is useful for daemons who allocate
580 * memory on behalf of other processes.
581 */
582void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
583{
584 int local_nid = numa_node_id();
585 enum zone_stat_item local_stat = NUMA_LOCAL;
586
587 if (unlikely(flags & __GFP_OTHER_NODE)) {
588 local_stat = NUMA_OTHER;
589 local_nid = preferred_zone->node;
590 }
591
592 if (z->node == local_nid) {
593 __inc_zone_state(z, NUMA_HIT);
594 __inc_zone_state(z, local_stat);
595 } else {
596 __inc_zone_state(z, NUMA_MISS);
597 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
598 }
599}
600
601/*
602 * Determine the per node value of a stat item. 573 * Determine the per node value of a stat item.
603 */ 574 */
604unsigned long node_page_state(int node, enum zone_stat_item item) 575unsigned long node_page_state(int node, enum zone_stat_item item)