author		Christoph Lameter <clameter@sgi.com>	2007-05-09 05:32:40 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-09 15:30:45 -0400
commit		7656c72b5a631452ace361037ccf8384454d0f72 (patch)
tree		de14c2fe8b9145f8c618f2e11b6ebc2d2e047de0 /mm/slub.c
parent		672bba3a4b2e65ed95ebd0cf764bd628bd1da74f (diff)
SLUB: add macros for scanning objects in a slab
Scanning of objects happens in a number of functions. Consolidate that code.
Use DECLARE_BITMAP instead of open-coding the bitmap declarations.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	75
1 file changed, 44 insertions(+), 31 deletions(-)
@@ -209,6 +209,38 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 }
 
 /*
+ * Slow version of get and set free pointer.
+ *
+ * This version requires touching the cache lines of kmem_cache which
+ * we avoid to do in the fast alloc free paths. There we obtain the offset
+ * from the page struct.
+ */
+static inline void *get_freepointer(struct kmem_cache *s, void *object)
+{
+	return *(void **)(object + s->offset);
+}
+
+static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
+{
+	*(void **)(object + s->offset) = fp;
+}
+
+/* Loop over all objects in a slab */
+#define for_each_object(__p, __s, __addr) \
+	for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
+			__p += (__s)->size)
+
+/* Scan freelist */
+#define for_each_free_object(__p, __s, __free) \
+	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
+
+/* Determine object index from a given position */
+static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
+{
+	return (p - addr) / s->size;
+}
+
+/*
  * Object debugging
  */
 static void print_section(char *text, u8 *addr, unsigned int length)
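The new helpers are simple enough to exercise outside the kernel. The
following userspace sketch reproduces for_each_object() and slab_index()
over a fake slab. It is illustrative only: toy_cache is a hypothetical
stand-in for the two kmem_cache fields the macros touch, and char *
arithmetic replaces the kernel's void * arithmetic (a GCC extension).

#include <stdio.h>

/* Hypothetical stand-in for the kmem_cache fields used below. */
struct toy_cache {
	int size;	/* bytes per object slot */
	int objects;	/* object slots per slab */
};

/* Same shape as the kernel macro, with char * doing the pointer math. */
#define for_each_object(__p, __s, __addr) \
	for (__p = (__addr); \
	     __p < (__addr) + (__s)->objects * (__s)->size; \
	     __p += (__s)->size)

static int slab_index(char *p, struct toy_cache *s, char *addr)
{
	return (p - addr) / s->size;
}

int main(void)
{
	struct toy_cache s = { .size = 64, .objects = 4 };
	char slab[4 * 64];	/* fake slab memory */
	char *p;

	/* Visits slab+0, slab+64, slab+128, slab+192, i.e. indexes 0..3. */
	for_each_object(p, &s, slab)
		printf("offset %ld -> index %d\n",
		       (long)(p - slab), slab_index(p, &s, slab));
	return 0;
}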
@@ -244,23 +276,6 @@ static void print_section(char *text, u8 *addr, unsigned int length)
 }
 
 /*
- * Slow version of get and set free pointer.
- *
- * This version requires touching the cache lines of kmem_cache which
- * we avoid to do in the fast alloc free paths. There we obtain the offset
- * from the page struct.
- */
-static void *get_freepointer(struct kmem_cache *s, void *object)
-{
-	return *(void **)(object + s->offset);
-}
-
-static void set_freepointer(struct kmem_cache *s, void *object, void *fp)
-{
-	*(void **)(object + s->offset) = fp;
-}
-
-/*
  * Tracking user of a slab.
  */
 struct track {
@@ -852,7 +867,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	memset(start, POISON_INUSE, PAGE_SIZE << s->order);
 
 	last = start;
-	for (p = start + s->size; p < end; p += s->size) {
+	for_each_object(p, s, start) {
 		setup_object(s, page, last);
 		set_freepointer(s, last, p);
 		last = p;
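This hunk is the initial freelist construction: each pass links the
previous object to the current one with set_freepointer(), so the objects
end up chained in address order; the chain is terminated with a NULL free
pointer just after the loop, outside this hunk. A self-contained sketch of
the same chaining, again with a hypothetical toy_cache in place of
kmem_cache:

#include <stdio.h>

/* Hypothetical stand-ins; the real code uses struct kmem_cache. */
struct toy_cache {
	int size;	/* bytes per object slot */
	int objects;	/* object slots per slab */
	int offset;	/* free-pointer location inside a free object */
};

static void *get_freepointer(struct toy_cache *s, void *object)
{
	return *(void **)((char *)object + s->offset);
}

static void set_freepointer(struct toy_cache *s, void *object, void *fp)
{
	*(void **)((char *)object + s->offset) = fp;
}

#define for_each_object(__p, __s, __addr) \
	for (__p = (__addr); \
	     __p < (__addr) + (__s)->objects * (__s)->size; \
	     __p += (__s)->size)

int main(void)
{
	struct toy_cache s = { .size = 32, .objects = 4, .offset = 0 };
	char slab[4 * 32];
	char *p, *last;
	void *q;

	/*
	 * Mirror the new_slab() loop: the first pass harmlessly links the
	 * first object to itself (immediately overwritten), then each pass
	 * relinks last to the current object, giving an address-ordered chain.
	 */
	last = slab;
	for_each_object(p, &s, slab) {
		set_freepointer(&s, last, p);
		last = p;
	}
	set_freepointer(&s, last, NULL);	/* terminate the chain */

	/* Walk the freelist: offsets 0, 32, 64, 96. */
	for (q = slab; q; q = get_freepointer(&s, q))
		printf("free object at offset %ld\n", (long)((char *)q - slab));
	return 0;
}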
@@ -873,12 +888,10 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	int pages = 1 << s->order;
 
 	if (unlikely(PageError(page) || s->dtor)) {
-		void *start = page_address(page);
-		void *end = start + (pages << PAGE_SHIFT);
 		void *p;
 
 		slab_pad_check(s, page);
-		for (p = start; p <= end - s->size; p += s->size) {
+		for_each_object(p, s, page_address(page)) {
 			if (s->dtor)
 				s->dtor(p, s, 0);
 			check_object(s, page, p, 0);
@@ -2583,7 +2596,7 @@ static int validate_slab(struct kmem_cache *s, struct page *page)
 {
 	void *p;
 	void *addr = page_address(page);
-	unsigned long map[BITS_TO_LONGS(s->objects)];
+	DECLARE_BITMAP(map, s->objects);
 
 	if (!check_slab(s, page) ||
 			!on_freelist(s, page, NULL))
@@ -2592,14 +2605,14 @@ static int validate_slab(struct kmem_cache *s, struct page *page)
 	/* Now we know that a valid freelist exists */
 	bitmap_zero(map, s->objects);
 
-	for(p = page->freelist; p; p = get_freepointer(s, p)) {
-		set_bit((p - addr) / s->size, map);
+	for_each_free_object(p, s, page->freelist) {
+		set_bit(slab_index(p, s, addr), map);
 		if (!check_object(s, page, p, 0))
 			return 0;
 	}
 
-	for(p = addr; p < addr + s->objects * s->size; p += s->size)
-		if (!test_bit((p - addr) / s->size, map))
+	for_each_object(p, s, addr)
+		if (!test_bit(slab_index(p, s, addr), map))
 			if (!check_object(s, page, p, 1))
 				return 0;
 	return 1;
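validate_slab() is a two-pass classification: pass one marks every object
reachable from the freelist in a bitmap, pass two treats anything unmarked
as allocated and checks it accordingly. DECLARE_BITMAP(name, bits) expands
to unsigned long name[BITS_TO_LONGS(bits)], so the switch away from the
open-coded array changes nothing at runtime. Below is a userspace sketch of
the same two-pass idea; the bitmap helpers are deliberately minimal
stand-ins for the kernel API, not its implementation.

#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG		(8 * (int)sizeof(long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
/* Equivalent expansion to the kernel's DECLARE_BITMAP(name, bits). */
#define DECLARE_BITMAP(name, bits)	unsigned long name[BITS_TO_LONGS(bits)]

/* Minimal stand-ins for the kernel bitmap helpers. */
static void set_bit(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit(int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	enum { OBJECTS = 70 };		/* spans more than one long */
	DECLARE_BITMAP(map, OBJECTS);
	int freelist[] = { 3, 17, 65 };	/* pretend these indexes are free */
	unsigned int i;

	memset(map, 0, sizeof(map));	/* bitmap_zero() equivalent */

	/* Pass 1: mark free objects, as validate_slab() does. */
	for (i = 0; i < sizeof(freelist) / sizeof(freelist[0]); i++)
		set_bit(freelist[i], map);

	/* Pass 2: everything unmarked is considered allocated. */
	for (i = 0; i < OBJECTS; i++)
		if (!test_bit(i, map))
			printf("object %u is allocated\n", i);
	return 0;
}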
@@ -2771,15 +2784,15 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
 		struct page *page, enum track_item alloc)
 {
 	void *addr = page_address(page);
-	unsigned long map[BITS_TO_LONGS(s->objects)];
+	DECLARE_BITMAP(map, s->objects);
 	void *p;
 
 	bitmap_zero(map, s->objects);
-	for (p = page->freelist; p; p = get_freepointer(s, p))
-		set_bit((p - addr) / s->size, map);
+	for_each_free_object(p, s, page->freelist)
+		set_bit(slab_index(p, s, addr), map);
 
-	for (p = addr; p < addr + s->objects * s->size; p += s->size)
-		if (!test_bit((p - addr) / s->size, map)) {
+	for_each_object(p, s, addr)
+		if (!test_bit(slab_index(p, s, addr), map)) {
 			void *addr = get_track(s, p, alloc)->addr;
 
 			add_location(t, s, addr);