author      Christoph Lameter <clameter@sgi.com>                  2007-07-17 07:03:20 -0400
committer   Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-17 13:23:01 -0400
commit      68dff6a9af9f27df5aeee6d0339818b0e36c1b51 (patch)
tree        f25cc5e85925a8901e301e8f8d5d04188f27c0ee /mm
parent      5b95a4acf157eee552e013795b54eaa2ab1ee4a1 (diff)
SLUB slab validation: Move tracking information alloc outside of lock
We currently have to do a GFP_ATOMIC allocation because the list_lock is
already taken when we first allocate memory for tracking allocation
information.  It would be better if we could avoid atomic allocations.

Allocate a tracking table of the size that is usually sufficient (one page)
before we take the list_lock.  We then only need the atomic allocation if
the table has to be resized to grow beyond a page (mostly only needed on
large NUMA systems, because cpus and nodes are tracked; otherwise the table
stays small).
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
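
As a rough illustration of the pattern the patch moves to (a sketch only, not
the actual mm/slub.c code; the names my_track, my_track_alloc, my_collect and
example_lock are invented for illustration), the common-case table is
allocated with GFP_KERNEL before the spinlock is taken, and GFP_ATOMIC would
only be needed if the table had to grow while the lock is held:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

/* Invented stand-in for the tracking table (not struct loc_track). */
struct my_track {
	unsigned long count;
	unsigned long max;
	unsigned long *entries;
};

static DEFINE_SPINLOCK(example_lock);

/* Allocate room for 'max' entries; the caller picks the GFP context. */
static int my_track_alloc(struct my_track *t, unsigned long max, gfp_t flags)
{
	unsigned long *p;

	p = (void *)__get_free_pages(flags, get_order(max * sizeof(*p)));
	if (!p)
		return 0;
	t->entries = p;
	t->max = max;
	return 1;
}

static int my_collect(void)
{
	struct my_track t = { 0, 0, NULL };

	/* No lock held yet, so a sleeping GFP_KERNEL allocation is fine. */
	if (!my_track_alloc(&t, PAGE_SIZE / sizeof(unsigned long), GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&example_lock);
	/*
	 * ... record entries into t; only if t.count reached t.max would
	 * my_track_alloc(&t, 2 * t.max, GFP_ATOMIC) be needed, because
	 * sleeping is not allowed while the spinlock is held ...
	 */
	spin_unlock(&example_lock);

	free_pages((unsigned long)t.entries,
			get_order(t.max * sizeof(*t.entries)));
	return 0;
}
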
Diffstat (limited to 'mm')
-rw-r--r--   mm/slub.c   17
1 file changed, 7 insertions, 10 deletions
@@ -2902,18 +2902,14 @@ static void free_loc_track(struct loc_track *t)
 		get_order(sizeof(struct location) * t->max));
 }
 
-static int alloc_loc_track(struct loc_track *t, unsigned long max)
+static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
 {
 	struct location *l;
 	int order;
 
-	if (!max)
-		max = PAGE_SIZE / sizeof(struct location);
-
 	order = get_order(sizeof(struct location) * max);
 
-	l = (void *)__get_free_pages(GFP_ATOMIC, order);
-
+	l = (void *)__get_free_pages(flags, order);
 	if (!l)
 		return 0;
 
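For reference on the sizing that this hunk pushes out to the caller:
__get_free_pages() hands out 2^order contiguous pages, and get_order() rounds
a byte count up to the smallest such order, so the one-page default that moved
to the caller is already the smallest allocation __get_free_pages() can make.
A hedged sketch of that arithmetic (the helper name show_order is invented and
nothing is assumed about the real sizeof(struct location)):

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/* Illustration only: map an entry count to an allocation order. */
static void show_order(unsigned long nr_entries, size_t entry_size)
{
	size_t bytes = nr_entries * entry_size;
	/* Smallest order such that (PAGE_SIZE << order) >= bytes. */
	int order = get_order(bytes);

	pr_info("%lu entries x %zu bytes -> order %d (%lu bytes backing them)\n",
		nr_entries, entry_size, order, PAGE_SIZE << order);
}
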
@@ -2979,7 +2975,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 	/*
 	 * Not found. Insert new tracking element.
 	 */
-	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max))
+	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
 		return 0;
 
 	l = t->loc + pos;
@@ -3022,11 +3018,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
 {
 	int n = 0;
 	unsigned long i;
-	struct loc_track t;
+	struct loc_track t = { 0, 0, NULL };
 	int node;
 
-	t.count = 0;
-	t.max = 0;
+	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
+			GFP_KERNEL))
+		return sprintf(buf, "Out of memory\n");
 
 	/* Push back cpu slabs */
 	flush_all(s);
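
The two GFP flags at the resulting call sites reflect the allocation context:
the GFP_KERNEL preallocation above runs before list_lock is taken and may
sleep and reclaim, while the GFP_ATOMIC resize path in add_location() runs
with the lock held and must not sleep. A hedged sketch of that distinction
(demo_lock and gfp_context_example are invented names, not slub.c code):

#include <linux/gfp.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Illustration only: which GFP flag is legal in which context. */
static void gfp_context_example(void)
{
	unsigned long nolock_page, locked_page;

	/* No lock held: GFP_KERNEL may sleep and trigger reclaim. */
	nolock_page = __get_free_pages(GFP_KERNEL, 0);

	spin_lock(&demo_lock);
	/*
	 * Sleeping is forbidden while a spinlock is held, so only
	 * GFP_ATOMIC is usable here; it never sleeps but is more
	 * likely to fail under memory pressure.
	 */
	locked_page = __get_free_pages(GFP_ATOMIC, 0);
	spin_unlock(&demo_lock);

	if (locked_page)
		free_pages(locked_page, 0);
	if (nolock_page)
		free_pages(nolock_page, 0);
}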