Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c | 105
1 file changed, 42 insertions, 63 deletions
@@ -102,44 +102,12 @@
  * the fast path and disables lockless freelists.
  */
 
-#define FROZEN (1 << PG_active)
-
 #ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG (1 << PG_error)
+#define SLABDEBUG 1
 #else
 #define SLABDEBUG 0
 #endif
 
-static inline int SlabFrozen(struct page *page)
-{
-	return page->flags & FROZEN;
-}
-
-static inline void SetSlabFrozen(struct page *page)
-{
-	page->flags |= FROZEN;
-}
-
-static inline void ClearSlabFrozen(struct page *page)
-{
-	page->flags &= ~FROZEN;
-}
-
-static inline int SlabDebug(struct page *page)
-{
-	return page->flags & SLABDEBUG;
-}
-
-static inline void SetSlabDebug(struct page *page)
-{
-	page->flags |= SLABDEBUG;
-}
-
-static inline void ClearSlabDebug(struct page *page)
-{
-	page->flags &= ~SLABDEBUG;
-}
-
 /*
  * Issues still to be resolved:
  *
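Note: with the private FROZEN and SLABDEBUG bits in page->flags gone, the frozen and debug state is tracked through generic page-flag accessors (PageSlubFrozen(), __SetPageSlubFrozen(), and so on) used throughout the hunks below. Those accessors are not defined in this file; a minimal sketch of the behaviour this patch relies on, assuming dedicated PG_slub_frozen/PG_slub_debug bits generated by the __PAGEFLAG() machinery in include/linux/page-flags.h, would be:

/*
 * Sketch only: the real definitions live in include/linux/page-flags.h;
 * the bit names here are assumptions for illustration.
 */
static inline int PageSlubFrozen(struct page *page)
{
	return test_bit(PG_slub_frozen, &page->flags);
}

static inline void __SetPageSlubFrozen(struct page *page)
{
	__set_bit(PG_slub_frozen, &page->flags);	/* non-atomic set */
}

static inline void __ClearPageSlubFrozen(struct page *page)
{
	__clear_bit(PG_slub_frozen, &page->flags);	/* non-atomic clear */
}

The double-underscore variants are the non-atomic forms, which is enough here because the slab is either freshly allocated or held under slab_lock() wherever the bits are changed.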
@@ -971,7 +939,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
 	}
 
 	/* Special debug activities for freeing objects */
-	if (!SlabFrozen(page) && !page->freelist)
+	if (!PageSlubFrozen(page) && !page->freelist)
 		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
@@ -1044,7 +1012,7 @@ __setup("slub_debug", setup_slub_debug);
 
 static unsigned long kmem_cache_flags(unsigned long objsize,
 	unsigned long flags, const char *name,
-	void (*ctor)(struct kmem_cache *, void *))
+	void (*ctor)(void *))
 {
 	/*
 	 * Enable debugging if selected on the kernel commandline.
@@ -1072,7 +1040,7 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
 static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long objsize,
 	unsigned long flags, const char *name,
-	void (*ctor)(struct kmem_cache *, void *))
+	void (*ctor)(void *))
 {
 	return flags;
 }
@@ -1135,7 +1103,7 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 {
 	setup_object_debug(s, page, object);
 	if (unlikely(s->ctor))
-		s->ctor(s, object);
+		s->ctor(object);
 }
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1157,7 +1125,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	page->flags |= 1 << PG_slab;
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
 			SLAB_STORE_USER | SLAB_TRACE))
-		SetSlabDebug(page);
+		__SetPageSlubDebug(page);
 
 	start = page_address(page);
 
@@ -1184,14 +1152,14 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	int order = compound_order(page);
 	int pages = 1 << order;
 
-	if (unlikely(SlabDebug(page))) {
+	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
 		void *p;
 
 		slab_pad_check(s, page);
 		for_each_object(p, s, page_address(page),
 						page->objects)
 			check_object(s, page, p, 0);
-		ClearSlabDebug(page);
+		__ClearPageSlubDebug(page);
 	}
 
 	mod_zone_page_state(page_zone(page),
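Note: each former SlabDebug(page) test becomes SLABDEBUG && PageSlubDebug(page). Because SLABDEBUG is now a plain 0/1 constant rather than a flag mask, the extra guard lets the compiler discard the whole debug branch when CONFIG_SLUB_DEBUG is disabled. Illustration only (run_debug_checks() is a hypothetical stand-in, not from the patch):

	/*
	 * With CONFIG_SLUB_DEBUG unset, SLABDEBUG is 0, the condition is
	 * constant-false, and the branch, including the PageSlubDebug()
	 * test, is compiled out entirely.
	 */
	if (SLABDEBUG && PageSlubDebug(page))
		run_debug_checks(s, page);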
@@ -1288,7 +1256,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
 	if (slab_trylock(page)) {
 		list_del(&page->lru);
 		n->nr_partial--;
-		SetSlabFrozen(page);
+		__SetPageSlubFrozen(page);
 		return 1;
 	}
 	return 0;
@@ -1361,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		n = get_node(s, zone_to_nid(zone));
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-				n->nr_partial > MIN_PARTIAL) {
+				n->nr_partial > n->min_partial) {
 			page = get_partial_node(n);
 			if (page)
 				return page;
@@ -1398,7 +1366,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 	struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
 
-	ClearSlabFrozen(page);
+	__ClearPageSlubFrozen(page);
 	if (page->inuse) {
 
 		if (page->freelist) {
@@ -1406,13 +1374,14 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 		} else {
 			stat(c, DEACTIVATE_FULL);
-			if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
+			if (SLABDEBUG && PageSlubDebug(page) &&
+					(s->flags & SLAB_STORE_USER))
 				add_full(n, page);
 		}
 		slab_unlock(page);
 	} else {
 		stat(c, DEACTIVATE_EMPTY);
-		if (n->nr_partial < MIN_PARTIAL) {
+		if (n->nr_partial < n->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
@@ -1551,7 +1520,7 @@ load_freelist:
 	object = c->page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(SlabDebug(c->page)))
+	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
 		goto debug;
 
 	c->freelist = object[c->offset];
@@ -1588,7 +1557,7 @@ new_slab:
 		if (c->page)
 			flush_slab(s, c);
 		slab_lock(new);
-		SetSlabFrozen(new);
+		__SetPageSlubFrozen(new);
 		c->page = new;
 		goto load_freelist;
 	}
@@ -1674,7 +1643,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	stat(c, FREE_SLOWPATH);
 	slab_lock(page);
 
-	if (unlikely(SlabDebug(page)))
+	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
 		goto debug;
 
 checks_ok:
@@ -1682,7 +1651,7 @@ checks_ok:
 	page->freelist = object;
 	page->inuse--;
 
-	if (unlikely(SlabFrozen(page))) {
+	if (unlikely(PageSlubFrozen(page))) {
 		stat(c, FREE_FROZEN);
 		goto out_unlock;
 	}
@@ -1944,9 +1913,21 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 #endif
 }
 
-static void init_kmem_cache_node(struct kmem_cache_node *n)
+static void
+init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
 	n->nr_partial = 0;
+
+	/*
+	 * The larger the object size is, the more pages we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	n->min_partial = ilog2(s->size);
+	if (n->min_partial < MIN_PARTIAL)
+		n->min_partial = MIN_PARTIAL;
+	else if (n->min_partial > MAX_PARTIAL)
+		n->min_partial = MAX_PARTIAL;
+
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
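Note: this is the hunk that makes the partial-list threshold per node. The n->min_partial value consulted in get_any_partial() and unfreeze_slab() above now grows with the object size and is clamped to the MIN_PARTIAL and MAX_PARTIAL constants defined earlier in slub.c. A worked example of the clamp, as a hypothetical helper (the patch open-codes it in init_kmem_cache_node(), and the bounds of 5 and 10 are assumed here):

/* Illustration only; assumes MIN_PARTIAL == 5 and MAX_PARTIAL == 10. */
static unsigned long calc_min_partial(unsigned long size)
{
	unsigned long min = ilog2(size);	/* e.g. size 64 -> 6 */

	if (min < MIN_PARTIAL)
		min = MIN_PARTIAL;	/* small objects: size 16 gives ilog2 == 4, raised to 5 */
	else if (min > MAX_PARTIAL)
		min = MAX_PARTIAL;	/* large objects: size 4096 gives ilog2 == 12, capped at 10 */
	return min;
}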
@@ -2118,7 +2099,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	init_object(kmalloc_caches, n, 1);
 	init_tracking(kmalloc_caches, n);
 #endif
-	init_kmem_cache_node(n);
+	init_kmem_cache_node(n, kmalloc_caches);
 	inc_slabs_node(kmalloc_caches, node, page->objects);
 
 	/*
@@ -2175,7 +2156,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 
 		}
 		s->node[node] = n;
-		init_kmem_cache_node(n);
+		init_kmem_cache_node(n, s);
 	}
 	return 1;
 }
@@ -2186,7 +2167,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 
 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
-	init_kmem_cache_node(&s->local_node);
+	init_kmem_cache_node(&s->local_node, s);
 	return 1;
 }
 #endif
@@ -2317,7 +2298,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 		const char *name, size_t size,
 		size_t align, unsigned long flags,
-		void (*ctor)(struct kmem_cache *, void *))
+		void (*ctor)(void *))
 {
 	memset(s, 0, kmem_size);
 	s->name = name;
@@ -2746,7 +2727,6 @@ size_t ksize(const void *object)
 	 */
 	return s->size;
 }
-EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
@@ -2921,7 +2901,7 @@ static int slab_mem_going_online_callback(void *arg)
 			ret = -ENOMEM;
 			goto out;
 		}
-		init_kmem_cache_node(n);
+		init_kmem_cache_node(n, s);
 		s->node[nid] = n;
 	}
 out:
@@ -3073,7 +3053,7 @@ static int slab_unmergeable(struct kmem_cache *s)
 
 static struct kmem_cache *find_mergeable(size_t size,
 		size_t align, unsigned long flags, const char *name,
-		void (*ctor)(struct kmem_cache *, void *))
+		void (*ctor)(void *))
 {
 	struct kmem_cache *s;
 
@@ -3113,8 +3093,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 }
 
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
-		size_t align, unsigned long flags,
-		void (*ctor)(struct kmem_cache *, void *))
+		size_t align, unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *s;
 
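Note: the exported kmem_cache_create() prototype changes along with the internal helpers above; constructors now receive only the object pointer, dropping the unused kmem_cache argument. A hypothetical caller updated to the new signature (the "foo" cache and foo_init_once() are illustrative, not part of this patch) could look like:

struct foo {
	spinlock_t lock;
	struct list_head list;
};

static struct kmem_cache *foo_cache;

/* New-style constructor: one argument, the object being initialized. */
static void foo_init_once(void *p)
{
	struct foo *f = p;

	spin_lock_init(&f->lock);
	INIT_LIST_HEAD(&f->list);
}

static int __init foo_cache_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, foo_init_once);
	return foo_cache ? 0 : -ENOMEM;
}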
@@ -3317,12 +3296,12 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page,
 			s->name, page);
 
 	if (s->flags & DEBUG_DEFAULT_FLAGS) {
-		if (!SlabDebug(page))
-			printk(KERN_ERR "SLUB %s: SlabDebug not set "
+		if (!PageSlubDebug(page))
+			printk(KERN_ERR "SLUB %s: SlubDebug not set "
 				"on slab 0x%p\n", s->name, page);
 	} else {
-		if (SlabDebug(page))
-			printk(KERN_ERR "SLUB %s: SlabDebug set on "
+		if (PageSlubDebug(page))
+			printk(KERN_ERR "SLUB %s: SlubDebug set on "
 				"slab 0x%p\n", s->name, page);
 	}
 }