-rw-r--r--   include/linux/page-flags.h |  2
-rw-r--r--   mm/slab.c                  |  9
-rw-r--r--   mm/slob.c                  |  6
-rw-r--r--   mm/slub.c                  | 87
-rw-r--r--   mm/util.c                  |  4
5 files changed, 84 insertions, 24 deletions
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index d6792f88a176..e2e5ce543595 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -118,7 +118,6 @@ enum pageflags {
 	PG_savepinned = PG_dirty,
 
 	/* SLOB */
-	PG_slob_page = PG_active,
 	PG_slob_free = PG_private,
 
 	/* SLUB */
@@ -201,7 +200,6 @@ PAGEFLAG(SavePinned, savepinned);	/* Xen */
 PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
 PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
 
-__PAGEFLAG(SlobPage, slob_page)
 __PAGEFLAG(SlobFree, slob_free)
 
 __PAGEFLAG(SlubFrozen, slub_frozen)
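Note: the PageSlobPage()/__SetPageSlobPage()/__ClearPageSlobPage() helpers dropped here were generated by the removed __PAGEFLAG(SlobPage, slob_page) line; SLOB now reuses the generic PG_slab accessors instead (see the mm/slob.c hunk below). As a rough stand-alone model of that macro pattern (not the kernel's exact definitions, which operate on struct page bitops):

/* Simplified userspace sketch of the __PAGEFLAG() idea: one macro invocation
 * generates a test/set/clear accessor trio for a bit in page->flags. */
#include <stdio.h>

struct page { unsigned long flags; };

enum pageflags { PG_slab, PG_slob_free, PG_private };

#define __PAGEFLAG(uname, lname)                                        \
static inline int Page##uname(const struct page *page)                  \
{ return (page->flags >> PG_##lname) & 1UL; }                           \
static inline void __SetPage##uname(struct page *page)                  \
{ page->flags |= 1UL << PG_##lname; }                                   \
static inline void __ClearPage##uname(struct page *page)                \
{ page->flags &= ~(1UL << PG_##lname); }

__PAGEFLAG(Slab, slab)
__PAGEFLAG(SlobFree, slob_free)

int main(void)
{
	struct page p = { 0 };

	__SetPageSlab(&p);		/* roughly what set_slob_page() now does */
	printf("PageSlab=%d PageSlobFree=%d\n", PageSlab(&p), PageSlobFree(&p));
	__ClearPageSlab(&p);
	return 0;
}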
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2308,6 +2308,15 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		/* really off slab. No need for manual alignment */
 		slab_size =
 		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
+
+#ifdef CONFIG_PAGE_POISONING
+		/* If we're going to use the generic kernel_map_pages()
+		 * poisoning, then it's going to smash the contents of
+		 * the redzone and userword anyhow, so switch them off.
+		 */
+		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
+			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
+#endif
 	}
 
 	cachep->colour_off = cache_line_size();
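The new check drops SLAB_RED_ZONE and SLAB_STORE_USER for page-multiple objects when SLAB_POISON is set, since CONFIG_PAGE_POISONING's kernel_map_pages() poisoning would clobber those debug areas anyway. A minimal stand-alone sketch of just that flag adjustment, using illustrative flag values rather than the kernel's actual bit assignments:

/* Userspace model of the debug-flag adjustment above; flag values are
 * illustrative only. */
#include <stdio.h>

#define PAGE_SIZE        4096UL
#define SLAB_POISON      0x00000800UL
#define SLAB_RED_ZONE    0x00000400UL
#define SLAB_STORE_USER  0x00010000UL

static unsigned long adjust_debug_flags(unsigned long size, unsigned long flags)
{
	/* Whole-page objects get poisoned page-wise, which would overwrite
	 * the red zone and the user-tracking word, so turn those off. */
	if (size % PAGE_SIZE == 0 && (flags & SLAB_POISON))
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
	return flags;
}

int main(void)
{
	unsigned long f = SLAB_POISON | SLAB_RED_ZONE | SLAB_STORE_USER;

	printf("flags before: %#lx, after: %#lx\n",
	       f, adjust_debug_flags(PAGE_SIZE, f));
	return 0;
}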
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -133,17 +133,17 @@ static LIST_HEAD(free_slob_large);
  */
 static inline int is_slob_page(struct slob_page *sp)
 {
-	return PageSlobPage((struct page *)sp);
+	return PageSlab((struct page *)sp);
 }
 
 static inline void set_slob_page(struct slob_page *sp)
 {
-	__SetPageSlobPage((struct page *)sp);
+	__SetPageSlab((struct page *)sp);
 }
 
 static inline void clear_slob_page(struct slob_page *sp)
 {
-	__ClearPageSlobPage((struct page *)sp);
+	__ClearPageSlab((struct page *)sp);
 }
 
 static inline struct slob_page *slob_page(const void *addr)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -840,6 +840,11 @@ static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 	return atomic_long_read(&n->nr_slabs);
 }
 
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+{
+	return atomic_long_read(&n->nr_slabs);
+}
+
 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);
@@ -1058,6 +1063,8 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 
 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 							{ return 0; }
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+							{ return 0; }
 static inline void inc_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
@@ -1514,6 +1521,65 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
 	return 1;
 }
 
+static int count_free(struct page *page)
+{
+	return page->objects - page->inuse;
+}
+
+static unsigned long count_partial(struct kmem_cache_node *n,
+					int (*get_count)(struct page *))
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += get_count(page);
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
+static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
+{
+#ifdef CONFIG_SLUB_DEBUG
+	return atomic_long_read(&n->total_objects);
+#else
+	return 0;
+#endif
+}
+
+static noinline void
+slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
+{
+	int node;
+
+	printk(KERN_WARNING
+		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+		nid, gfpflags);
+	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
+		"default order: %d, min order: %d\n", s->name, s->objsize,
+		s->size, oo_order(s->oo), oo_order(s->min));
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+		unsigned long nr_slabs;
+		unsigned long nr_objs;
+		unsigned long nr_free;
+
+		if (!n)
+			continue;
+
+		nr_free = count_partial(n, count_free);
+		nr_slabs = node_nr_slabs(n);
+		nr_objs = node_nr_objs(n);
+
+		printk(KERN_WARNING
+			"  node %d: slabs: %ld, objs: %ld, free: %ld\n",
+			node, nr_slabs, nr_objs, nr_free);
+	}
+}
+
 /*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
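count_partial() walks a node's partial list under list_lock and sums a per-page quantity supplied as a function pointer, so the same walker serves count_free() here and count_inuse()/count_total() in the debug code further down. A stand-alone userspace sketch of that callback pattern (plain array instead of the partial list, no locking, names are illustrative):

/* One list walker, different per-page counters passed as function pointers. */
#include <stdio.h>

struct page { int objects; int inuse; };

static int count_free(const struct page *page)  { return page->objects - page->inuse; }
static int count_inuse(const struct page *page) { return page->inuse; }
static int count_total(const struct page *page) { return page->objects; }

static unsigned long count_partial(const struct page *pages, int nr,
				   int (*get_count)(const struct page *))
{
	unsigned long x = 0;
	int i;

	/* the kernel version walks n->partial under n->list_lock instead */
	for (i = 0; i < nr; i++)
		x += get_count(&pages[i]);
	return x;
}

int main(void)
{
	struct page partial[] = { { 32, 10 }, { 32, 31 }, { 16, 0 } };
	int nr = sizeof(partial) / sizeof(partial[0]);

	printf("free=%lu inuse=%lu total=%lu\n",
	       count_partial(partial, nr, count_free),
	       count_partial(partial, nr, count_inuse),
	       count_partial(partial, nr, count_total));
	return 0;
}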
@@ -1595,6 +1661,8 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
+	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+		slab_out_of_memory(s, gfpflags, node);
 	return NULL;
debug:
 	if (!alloc_debug_processing(s, c->page, object, addr))
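The diagnostic is only emitted when the caller did not pass __GFP_NOWARN and printk_ratelimit() allows it, so repeated allocation failures do not flood the log. A hedged userspace sketch of that guard pattern, with a trivial once-per-second stand-in for printk_ratelimit() and an illustrative flag value:

/* "Warn unless told not to, and rate-limit it" in miniature. */
#include <stdio.h>
#include <time.h>

#define GFP_NOWARN 0x1u		/* illustrative, not the kernel's value */

static int ratelimit(void)
{
	static time_t last;
	time_t now = time(NULL);

	if (now == last)	/* at most one message per second */
		return 0;
	last = now;
	return 1;
}

static void *alloc_or_warn(unsigned int gfpflags)
{
	void *p = NULL;		/* pretend the allocation failed */

	if (!p && !(gfpflags & GFP_NOWARN) && ratelimit())
		fprintf(stderr, "allocation failed (gfp=%#x)\n", gfpflags);
	return p;
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		alloc_or_warn(0);	/* only the first message prints this second */
	alloc_or_warn(GFP_NOWARN);	/* caller asked for silence */
	return 0;
}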
@@ -3368,20 +3436,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 
 #ifdef CONFIG_SLUB_DEBUG
-static unsigned long count_partial(struct kmem_cache_node *n,
-					int (*get_count)(struct page *))
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 static int count_inuse(struct page *page)
 {
 	return page->inuse;
@@ -3392,11 +3446,6 @@ static int count_total(struct page *page)
 	return page->objects;
 }
 
-static int count_free(struct page *page)
-{
-	return page->objects - page->inuse;
-}
-
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
 {
diff --git a/mm/util.c b/mm/util.c
--- a/mm/util.c
+++ b/mm/util.c
@@ -168,6 +168,10 @@ EXPORT_SYMBOL(krealloc);
  *
  * The memory of the object @p points to is zeroed before freed.
  * If @p is %NULL, kzfree() does nothing.
+ *
+ * Note: this function zeroes the whole allocated buffer which can be a good
+ * deal bigger than the requested buffer size passed to kmalloc(). So be
+ * careful when using this function in performance sensitive code.
+ */
 void kzfree(const void *p)
 {
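The added note reflects that kzfree() clears ksize(p) bytes, i.e. the full slab object, which may be larger than the size originally requested from kmalloc(). A hedged userspace sketch of that behaviour, with glibc's malloc_usable_size() standing in for ksize() (the exact rounding depends on the allocator):

/* kzfree()-like helper: zero the whole usable allocation before freeing it. */
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void kzfree_like(void *p)
{
	size_t ks;

	if (!p)
		return;
	ks = malloc_usable_size(p);	/* whole buffer, often > requested size */
	memset(p, 0, ks);
	free(p);
}

int main(void)
{
	char *secret = malloc(10);	/* request 10 bytes ... */

	printf("requested 10, usable %zu\n", malloc_usable_size(secret));
	strcpy(secret, "hunter2");
	kzfree_like(secret);		/* ... but the full usable size is wiped */
	return 0;
}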
