-rw-r--r--  mm/slab.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
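This excerpt is from a patch to the SLAB allocator that shrinks the per-object freelist index in mm/slab.c from unsigned int to a dedicated freelist_idx_t type. The typedef itself falls outside the hunks shown; in keeping with the unsigned char parameters introduced below, it is presumably byte sized, along the lines of:

/* Assumed definition, not visible in the hunks shown: a byte-sized
 * index into a slab's freelist, which caps objects-per-slab at 256. */
typedef unsigned char freelist_idx_t;

Since every slab stores one index per object in its management area, a byte-sized index cuts that area to a quarter of its former size, freeing room for objects.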
@@ -634,8 +634,8 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 
 	} else {
 		nr_objs = calculate_nr_objs(slab_size, buffer_size,
-					sizeof(unsigned int), align);
-		mgmt_size = ALIGN(nr_objs * sizeof(unsigned int), align);
+					sizeof(freelist_idx_t), align);
+		mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align);
 	}
 	*num = nr_objs;
 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
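Here is a minimal userspace sketch of the arithmetic cache_estimate() performs in the hunk above, assuming freelist_idx_t is the byte-sized type sketched earlier; nr_objs() below is a simplified stand-in for the kernel's calculate_nr_objs(), not its exact implementation:

#include <stdio.h>
#include <stddef.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Simplified stand-in for the kernel's calculate_nr_objs(): how many
 * buffer_size-byte objects fit in slab_size bytes alongside one
 * idx_size-byte freelist entry per object, with the freelist area
 * padded to align. */
static size_t nr_objs(size_t slab_size, size_t buffer_size,
		      size_t idx_size, size_t align)
{
	size_t n = slab_size / (buffer_size + idx_size);

	while (n && ALIGN(n * idx_size, align) + n * buffer_size > slab_size)
		n--;
	return n;
}

int main(void)
{
	size_t slab = 4096, buf = 64, align = 8;	/* hypothetical geometry */
	size_t n_int  = nr_objs(slab, buf, sizeof(unsigned int), align);
	size_t n_byte = nr_objs(slab, buf, sizeof(unsigned char), align);

	printf("4-byte index: %zu objs, mgmt %zu bytes\n",
	       n_int, ALIGN(n_int * sizeof(unsigned int), align));
	printf("1-byte index: %zu objs, mgmt %zu bytes\n",
	       n_byte, ALIGN(n_byte * sizeof(unsigned char), align));
	return 0;
}

With this hypothetical 4096-byte slab of 64-byte objects at 8-byte alignment, the sketch prints 60 objects with 240 bytes of management data for the old 4-byte index, versus 63 objects with 64 bytes for the byte index: three extra objects per slab.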
@@ -2038,7 +2038,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		 * looping condition in cache_grow().
 		 */
 		offslab_limit = size;
-		offslab_limit /= sizeof(unsigned int);
+		offslab_limit /= sizeof(freelist_idx_t);
 
 		if (num > offslab_limit)
 			break;
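The hunk above raises the cap on objects per off-slab slab: roughly speaking, the off-slab freelist must fit in an allocation no larger than the objects it manages (hence the cache_grow() looping concern in the comment), so the cap is size divided by the index width. A toy illustration, with a hypothetical 256-byte object size:

#include <stdio.h>

int main(void)
{
	unsigned long size = 256;	/* hypothetical object size */

	/* An off-slab freelist must fit in an allocation of at most
	 * `size` bytes, so objects per slab are capped at
	 * size / sizeof(index). */
	printf("cap with 4-byte index: %lu\n", size / sizeof(unsigned int));
	printf("cap with 1-byte index: %lu\n", size / sizeof(unsigned char));
	return 0;
}

Shrinking the index from four bytes to one quadruples the cap, here from 64 to 256.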
@@ -2286,7 +2286,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		return -E2BIG;
 
 	freelist_size =
-		ALIGN(cachep->num * sizeof(unsigned int), cachep->align);
+		ALIGN(cachep->num * sizeof(freelist_idx_t), cachep->align);
 
 	/*
 	 * If the slab has been placed off-slab, and we have enough space then
@@ -2299,7 +2299,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 
 	if (flags & CFLGS_OFF_SLAB) {
 		/* really off slab. No need for manual alignment */
-		freelist_size = cachep->num * sizeof(unsigned int);
+		freelist_size = cachep->num * sizeof(freelist_idx_t);
 
 #ifdef CONFIG_PAGE_POISONING
 	/* If we're going to use the generic kernel_map_pages()
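The two __kmem_cache_create() hunks above compute freelist_size for the two placements. A small sketch of the difference, reusing the assumed byte-sized index and a hypothetical geometry: an on-slab freelist is padded out to the cache's alignment because objects follow it in the same page, while an off-slab freelist sits in its own allocation and, as the kernel comment notes, needs no manual alignment:

#include <stdio.h>
#include <stddef.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

typedef unsigned char freelist_idx_t;	/* assumed byte-sized, as above */

int main(void)
{
	size_t num = 63, align = 8;	/* hypothetical cache geometry */

	/* On-slab: the freelist shares the page with the objects, so it
	 * is padded out to the cache's alignment (first hunk above). */
	printf("on-slab freelist_size:  %zu\n",
	       ALIGN(num * sizeof(freelist_idx_t), align));

	/* Off-slab: the freelist is its own allocation, so no manual
	 * alignment is needed (second hunk above). */
	printf("off-slab freelist_size: %zu\n", num * sizeof(freelist_idx_t));
	return 0;
}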
@@ -2569,15 +2569,15 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
 	return freelist;
 }
 
-static inline unsigned int get_free_obj(struct page *page, unsigned int idx)
+static inline freelist_idx_t get_free_obj(struct page *page, unsigned char idx)
 {
-	return ((unsigned int *)page->freelist)[idx];
+	return ((freelist_idx_t *)page->freelist)[idx];
 }
 
 static inline void set_free_obj(struct page *page,
-					unsigned int idx, unsigned int val)
+					unsigned char idx, freelist_idx_t val)
 {
-	((unsigned int *)(page->freelist))[idx] = val;
+	((freelist_idx_t *)(page->freelist))[idx] = val;
 }
 
 static void cache_init_objs(struct kmem_cache *cachep,
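Finally, a self-contained sketch of the accessor pattern the last hunk converts; struct page here is a hypothetical stand-in carrying only the freelist pointer, not the kernel's real structure:

#include <stdio.h>

typedef unsigned char freelist_idx_t;	/* assumed byte-sized, as above */

/* Hypothetical stand-in for the kernel's struct page, carrying only
 * the freelist pointer these accessors touch. */
struct page {
	void *freelist;
};

static inline freelist_idx_t get_free_obj(struct page *page, unsigned char idx)
{
	return ((freelist_idx_t *)page->freelist)[idx];
}

static inline void set_free_obj(struct page *page,
				unsigned char idx, freelist_idx_t val)
{
	((freelist_idx_t *)page->freelist)[idx] = val;
}

int main(void)
{
	struct page pg;
	freelist_idx_t slots[4];
	unsigned char i;

	pg.freelist = slots;

	/* A fresh slab's freelist holds every object index in order. */
	for (i = 0; i < 4; i++)
		set_free_obj(&pg, i, i);

	for (i = 0; i < 4; i++)
		printf("slot %u -> obj %u\n", (unsigned)i,
		       (unsigned)get_free_obj(&pg, i));
	return 0;
}

Note that both the stored value and the idx position argument shrink to a byte, consistent with the 256-objects-per-slab cap the type implies.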