Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c | 105
1 file changed, 82 insertions(+), 23 deletions(-)
@@ -840,6 +840,11 @@ static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 	return atomic_long_read(&n->nr_slabs);
 }
 
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+{
+	return atomic_long_read(&n->nr_slabs);
+}
+
 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);
@@ -1058,6 +1063,8 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 
 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 							{ return 0; }
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+							{ return 0; }
 static inline void inc_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
@@ -1514,6 +1521,65 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
 	return 1;
 }
 
+static int count_free(struct page *page)
+{
+	return page->objects - page->inuse;
+}
+
+static unsigned long count_partial(struct kmem_cache_node *n,
+					int (*get_count)(struct page *))
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += get_count(page);
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
+static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
+{
+#ifdef CONFIG_SLUB_DEBUG
+	return atomic_long_read(&n->total_objects);
+#else
+	return 0;
+#endif
+}
+
+static noinline void
+slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
+{
+	int node;
+
+	printk(KERN_WARNING
+		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+		nid, gfpflags);
+	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
+		"default order: %d, min order: %d\n", s->name, s->objsize,
+		s->size, oo_order(s->oo), oo_order(s->min));
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+		unsigned long nr_slabs;
+		unsigned long nr_objs;
+		unsigned long nr_free;
+
+		if (!n)
+			continue;
+
+		nr_free = count_partial(n, count_free);
+		nr_slabs = node_nr_slabs(n);
+		nr_objs = node_nr_objs(n);
+
+		printk(KERN_WARNING
+			"  node %d: slabs: %ld, objs: %ld, free: %ld\n",
+			node, nr_slabs, nr_objs, nr_free);
+	}
+}
+
 /*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
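Note: count_free() and count_partial() are introduced here, outside the CONFIG_SLUB_DEBUG section they previously lived in, because the new slab_out_of_memory() report must build in every configuration. count_partial() takes a per-page callback, so one walk of the node's partial list under n->list_lock can produce different statistics; the validation code later in the file already supplies count_inuse() and count_total() the same way. An illustrative use of the helper, not part of this patch:

	/* Hypothetical debug snippet, assuming a valid struct kmem_cache_node *n */
	unsigned long free_objs  = count_partial(n, count_free);   /* unused slots on partial slabs */
	unsigned long used_objs  = count_partial(n, count_inuse);  /* allocated objects */
	unsigned long total_objs = count_partial(n, count_total);  /* all object slots on partial slabs */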
@@ -1595,6 +1661,8 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
+	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+		slab_out_of_memory(s, gfpflags, node);
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, c->page, object, addr))
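The report is opt-out and rate-limited: it fires only when the caller did not pass __GFP_NOWARN and printk_ratelimit() allows it, mirroring the page allocator's behaviour. A hedged example of a caller that suppresses it (not taken from this patch):

	/* Hypothetical caller with its own fallback path: __GFP_NOWARN keeps
	 * both the SLUB report and the page allocator warning quiet. */
	buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!buf)
		buf = vmalloc(size);	/* fall back to vmalloc()-backed memory */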
@@ -2636,6 +2704,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	struct kmem_cache *s;
 	char *text;
 	size_t realsize;
+	unsigned long slabflags;
 
 	s = kmalloc_caches_dma[index];
 	if (s)
@@ -2657,10 +2726,18 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 			 (unsigned int)realsize);
 	s = kmalloc(kmem_size, flags & ~SLUB_DMA);
 
+	/*
+	 * Must defer sysfs creation to a workqueue because we don't know
+	 * what context we are called from. Before sysfs comes up, we don't
+	 * need to do anything because our sysfs initcall will start by
+	 * adding all existing slabs to sysfs.
+	 */
+	slabflags = SLAB_CACHE_DMA|SLAB_NOTRACK;
+	if (slab_state >= SYSFS)
+		slabflags |= __SYSFS_ADD_DEFERRED;
+
 	if (!s || !text || !kmem_cache_open(s, flags, text,
-			realsize, ARCH_KMALLOC_MINALIGN,
-			SLAB_CACHE_DMA|SLAB_NOTRACK|__SYSFS_ADD_DEFERRED,
-			NULL)) {
+			realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
 		kfree(s);
 		kfree(text);
 		goto unlock_out;
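The new comment spells out the constraint behind slabflags: dma_kmalloc_cache() can be reached from contexts where sysfs registration is not allowed, and before sysfs is initialized (slab_state < SYSFS) nothing needs to be done at all, since the sysfs initcall will register every existing cache. Only once slab_state >= SYSFS is the cache flagged __SYSFS_ADD_DEFERRED, and the registration then completes from the sysfs_add_work work item in process context. A minimal sketch of that mark-now, register-later pattern; the identifiers my_deferred_add, my_add_work and subsystem_ready are made up, not slub.c's:

	/* The work function runs in process context, where sleeping and sysfs
	 * registration are allowed; it would scan for and register every
	 * object that was marked as deferred. */
	static void my_deferred_add(struct work_struct *work)
	{
		/* register flagged objects with sysfs here */
	}
	static DECLARE_WORK(my_add_work, my_deferred_add);

	/* Creation path: never register directly, only mark and kick the work */
	if (subsystem_ready)			/* cf. slab_state >= SYSFS */
		schedule_work(&my_add_work);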
@@ -2669,7 +2746,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	list_add(&s->list, &slab_caches);
 	kmalloc_caches_dma[index] = s;
 
-	schedule_work(&sysfs_add_work);
+	if (slab_state >= SYSFS)
+		schedule_work(&sysfs_add_work);
 
 unlock_out:
 	up_write(&slub_lock);
@@ -3368,20 +3446,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 
 #ifdef CONFIG_SLUB_DEBUG
-static unsigned long count_partial(struct kmem_cache_node *n,
-					int (*get_count)(struct page *))
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 static int count_inuse(struct page *page)
 {
 	return page->inuse;
@@ -3392,11 +3456,6 @@ static int count_total(struct page *page)
 	return page->objects;
 }
 
-static int count_free(struct page *page)
-{
-	return page->objects - page->inuse;
-}
-
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
 {