aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2008-01-08 02:20:26 -0500
committerChristoph Lameter <clameter@sgi.com>2008-02-04 13:56:02 -0500
commit9824601ead957a29e35d539e43266c003f7b085b (patch)
tree13df23987102e39fce77d64f60e499401444a905 /mm
parentf61396aed90acb033952531c522d1010f87e24f4 (diff)
SLUB: rename defrag to remote_node_defrag_ratio
The NUMA defrag works by allocating objects from partial slabs on remote
nodes.  Rename it to remote_node_defrag_ratio to be clear about this.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/slub.c17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 9aa12b54ad1b..5146e2779c11 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1295,7 +1295,8 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 	 * expensive if we do it every time we are trying to find a slab
 	 * with available objects.
 	 */
-	if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio)
+	if (!s->remote_node_defrag_ratio ||
+			get_cycles() % 1024 > s->remote_node_defrag_ratio)
 		return NULL;
 
 	zonelist = &NODE_DATA(slab_node(current->mempolicy))
@@ -2209,7 +2210,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 
 	s->refcount = 1;
 #ifdef CONFIG_NUMA
-	s->defrag_ratio = 100;
+	s->remote_node_defrag_ratio = 100;
 #endif
 	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
 		goto error;
@@ -3847,21 +3848,21 @@ static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
 SLAB_ATTR_RO(free_calls);
 
 #ifdef CONFIG_NUMA
-static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
+static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", s->defrag_ratio / 10);
+	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
 }
 
-static ssize_t defrag_ratio_store(struct kmem_cache *s,
+static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
 	int n = simple_strtoul(buf, NULL, 10);
 
 	if (n < 100)
-		s->defrag_ratio = n * 10;
+		s->remote_node_defrag_ratio = n * 10;
 	return length;
 }
-SLAB_ATTR(defrag_ratio);
+SLAB_ATTR(remote_node_defrag_ratio);
 #endif
 
 static struct attribute * slab_attrs[] = {
@@ -3892,7 +3893,7 @@ static struct attribute * slab_attrs[] = {
 	&cache_dma_attr.attr,
 #endif
 #ifdef CONFIG_NUMA
-	&defrag_ratio_attr.attr,
+	&remote_node_defrag_ratio_attr.attr,
 #endif
 	NULL
 };