diff options
Diffstat (limited to 'mm/slab.c')
| -rw-r--r-- | mm/slab.c | 23 |
1 file changed, 11 insertions, 12 deletions
| @@ -252,8 +252,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent) | |||
| 252 | MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ | 252 | MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ |
| 253 | } while (0) | 253 | } while (0) |
| 254 | 254 | ||
| 255 | #define CFLGS_OBJFREELIST_SLAB (0x40000000UL) | 255 | #define CFLGS_OBJFREELIST_SLAB ((slab_flags_t __force)0x40000000UL) |
| 256 | #define CFLGS_OFF_SLAB (0x80000000UL) | 256 | #define CFLGS_OFF_SLAB ((slab_flags_t __force)0x80000000UL) |
| 257 | #define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB) | 257 | #define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB) |
| 258 | #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) | 258 | #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) |
| 259 | 259 | ||
| @@ -441,7 +441,7 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) | |||
| 441 | * Calculate the number of objects and left-over bytes for a given buffer size. | 441 | * Calculate the number of objects and left-over bytes for a given buffer size. |
| 442 | */ | 442 | */ |
| 443 | static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size, | 443 | static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size, |
| 444 | unsigned long flags, size_t *left_over) | 444 | slab_flags_t flags, size_t *left_over) |
| 445 | { | 445 | { |
| 446 | unsigned int num; | 446 | unsigned int num; |
| 447 | size_t slab_size = PAGE_SIZE << gfporder; | 447 | size_t slab_size = PAGE_SIZE << gfporder; |
| @@ -1759,7 +1759,7 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list) | |||
| 1759 | * towards high-order requests, this should be changed. | 1759 | * towards high-order requests, this should be changed. |
| 1760 | */ | 1760 | */ |
| 1761 | static size_t calculate_slab_order(struct kmem_cache *cachep, | 1761 | static size_t calculate_slab_order(struct kmem_cache *cachep, |
| 1762 | size_t size, unsigned long flags) | 1762 | size_t size, slab_flags_t flags) |
| 1763 | { | 1763 | { |
| 1764 | size_t left_over = 0; | 1764 | size_t left_over = 0; |
| 1765 | int gfporder; | 1765 | int gfporder; |
| @@ -1886,8 +1886,8 @@ static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
| 1886 | return 0; | 1886 | return 0; |
| 1887 | } | 1887 | } |
| 1888 | 1888 | ||
| 1889 | unsigned long kmem_cache_flags(unsigned long object_size, | 1889 | slab_flags_t kmem_cache_flags(unsigned long object_size, |
| 1890 | unsigned long flags, const char *name, | 1890 | slab_flags_t flags, const char *name, |
| 1891 | void (*ctor)(void *)) | 1891 | void (*ctor)(void *)) |
| 1892 | { | 1892 | { |
| 1893 | return flags; | 1893 | return flags; |
| @@ -1895,7 +1895,7 @@ unsigned long kmem_cache_flags(unsigned long object_size, | |||
| 1895 | 1895 | ||
| 1896 | struct kmem_cache * | 1896 | struct kmem_cache * |
| 1897 | __kmem_cache_alias(const char *name, size_t size, size_t align, | 1897 | __kmem_cache_alias(const char *name, size_t size, size_t align, |
| 1898 | unsigned long flags, void (*ctor)(void *)) | 1898 | slab_flags_t flags, void (*ctor)(void *)) |
| 1899 | { | 1899 | { |
| 1900 | struct kmem_cache *cachep; | 1900 | struct kmem_cache *cachep; |
| 1901 | 1901 | ||
| @@ -1913,7 +1913,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align, | |||
| 1913 | } | 1913 | } |
| 1914 | 1914 | ||
| 1915 | static bool set_objfreelist_slab_cache(struct kmem_cache *cachep, | 1915 | static bool set_objfreelist_slab_cache(struct kmem_cache *cachep, |
| 1916 | size_t size, unsigned long flags) | 1916 | size_t size, slab_flags_t flags) |
| 1917 | { | 1917 | { |
| 1918 | size_t left; | 1918 | size_t left; |
| 1919 | 1919 | ||
| @@ -1936,7 +1936,7 @@ static bool set_objfreelist_slab_cache(struct kmem_cache *cachep, | |||
| 1936 | } | 1936 | } |
| 1937 | 1937 | ||
| 1938 | static bool set_off_slab_cache(struct kmem_cache *cachep, | 1938 | static bool set_off_slab_cache(struct kmem_cache *cachep, |
| 1939 | size_t size, unsigned long flags) | 1939 | size_t size, slab_flags_t flags) |
| 1940 | { | 1940 | { |
| 1941 | size_t left; | 1941 | size_t left; |
| 1942 | 1942 | ||
| @@ -1970,7 +1970,7 @@ static bool set_off_slab_cache(struct kmem_cache *cachep, | |||
| 1970 | } | 1970 | } |
| 1971 | 1971 | ||
| 1972 | static bool set_on_slab_cache(struct kmem_cache *cachep, | 1972 | static bool set_on_slab_cache(struct kmem_cache *cachep, |
| 1973 | size_t size, unsigned long flags) | 1973 | size_t size, slab_flags_t flags) |
| 1974 | { | 1974 | { |
| 1975 | size_t left; | 1975 | size_t left; |
| 1976 | 1976 | ||
| @@ -2006,8 +2006,7 @@ static bool set_on_slab_cache(struct kmem_cache *cachep, | |||
| 2006 | * cacheline. This can be beneficial if you're counting cycles as closely | 2006 | * cacheline. This can be beneficial if you're counting cycles as closely |
| 2007 | * as davem. | 2007 | * as davem. |
| 2008 | */ | 2008 | */ |
| 2009 | int | 2009 | int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags) |
| 2010 | __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | ||
| 2011 | { | 2010 | { |
| 2012 | size_t ralign = BYTES_PER_WORD; | 2011 | size_t ralign = BYTES_PER_WORD; |
| 2013 | gfp_t gfp; | 2012 | gfp_t gfp; |
