diff options
author | Christoph Lameter <cl@linux-foundation.org> | 2010-07-09 15:07:10 -0400 |
---|---|---|
committer | Pekka Enberg <penberg@cs.helsinki.fi> | 2010-07-16 04:13:06 -0400 |
commit | 2154a336381f85f5390d9a84c6cf4a7d2847b6ed (patch) | |
tree | 6b4f2136878a578e8675f042bd4a361a8ca253df /mm/slub.c | |
parent | 1c5474a65bf15a4cb162dfff86d6d0b5a08a740c (diff) |
slub: Use a constant for an unspecified node.
kmalloc_node() and friends can be passed a constant -1 to indicate
that no choice was made for the node from which the object needs to
come.
Use NUMA_NO_NODE instead of -1.
CC: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 14 |
1 file changed, 7 insertions, 7 deletions
@@ -1073,7 +1073,7 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node, | |||
1073 | 1073 | ||
1074 | flags |= __GFP_NOTRACK; | 1074 | flags |= __GFP_NOTRACK; |
1075 | 1075 | ||
1076 | if (node == -1) | 1076 | if (node == NUMA_NO_NODE) |
1077 | return alloc_pages(flags, order); | 1077 | return alloc_pages(flags, order); |
1078 | else | 1078 | else |
1079 | return alloc_pages_exact_node(node, flags, order); | 1079 | return alloc_pages_exact_node(node, flags, order); |
@@ -1387,7 +1387,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) | |||
1387 | static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node) | 1387 | static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node) |
1388 | { | 1388 | { |
1389 | struct page *page; | 1389 | struct page *page; |
1390 | int searchnode = (node == -1) ? numa_node_id() : node; | 1390 | int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node; |
1391 | 1391 | ||
1392 | page = get_partial_node(get_node(s, searchnode)); | 1392 | page = get_partial_node(get_node(s, searchnode)); |
1393 | if (page || (flags & __GFP_THISNODE)) | 1393 | if (page || (flags & __GFP_THISNODE)) |
@@ -1515,7 +1515,7 @@ static void flush_all(struct kmem_cache *s) | |||
1515 | static inline int node_match(struct kmem_cache_cpu *c, int node) | 1515 | static inline int node_match(struct kmem_cache_cpu *c, int node) |
1516 | { | 1516 | { |
1517 | #ifdef CONFIG_NUMA | 1517 | #ifdef CONFIG_NUMA |
1518 | if (node != -1 && c->node != node) | 1518 | if (node != NUMA_NO_NODE && c->node != node) |
1519 | return 0; | 1519 | return 0; |
1520 | #endif | 1520 | #endif |
1521 | return 1; | 1521 | return 1; |
@@ -1727,7 +1727,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, | |||
1727 | 1727 | ||
1728 | void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) | 1728 | void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) |
1729 | { | 1729 | { |
1730 | void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_); | 1730 | void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); |
1731 | 1731 | ||
1732 | trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags); | 1732 | trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags); |
1733 | 1733 | ||
@@ -1738,7 +1738,7 @@ EXPORT_SYMBOL(kmem_cache_alloc); | |||
1738 | #ifdef CONFIG_TRACING | 1738 | #ifdef CONFIG_TRACING |
1739 | void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags) | 1739 | void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags) |
1740 | { | 1740 | { |
1741 | return slab_alloc(s, gfpflags, -1, _RET_IP_); | 1741 | return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); |
1742 | } | 1742 | } |
1743 | EXPORT_SYMBOL(kmem_cache_alloc_notrace); | 1743 | EXPORT_SYMBOL(kmem_cache_alloc_notrace); |
1744 | #endif | 1744 | #endif |
@@ -2728,7 +2728,7 @@ void *__kmalloc(size_t size, gfp_t flags) | |||
2728 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 2728 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
2729 | return s; | 2729 | return s; |
2730 | 2730 | ||
2731 | ret = slab_alloc(s, flags, -1, _RET_IP_); | 2731 | ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_); |
2732 | 2732 | ||
2733 | trace_kmalloc(_RET_IP_, ret, size, s->size, flags); | 2733 | trace_kmalloc(_RET_IP_, ret, size, s->size, flags); |
2734 | 2734 | ||
@@ -3312,7 +3312,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) | |||
3312 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3312 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
3313 | return s; | 3313 | return s; |
3314 | 3314 | ||
3315 | ret = slab_alloc(s, gfpflags, -1, caller); | 3315 | ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller); |
3316 | 3316 | ||
3317 | /* Honor the call site pointer we received. */ | 3317 | /* Honor the call site pointer we received. */ |
3318 | trace_kmalloc(caller, ret, size, s->size, gfpflags); | 3318 | trace_kmalloc(caller, ret, size, s->size, gfpflags); |