-rw-r--r--  include/linux/mm_types.h |  21
-rw-r--r--  include/linux/slab_def.h |   4
-rw-r--r--  mm/slab.c                | 306
3 files changed, 158 insertions, 173 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 959cb369b197..95bf0c5a7eb9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
| @@ -42,18 +42,22 @@ struct page { | |||
| 42 | /* First double word block */ | 42 | /* First double word block */ |
| 43 | unsigned long flags; /* Atomic flags, some possibly | 43 | unsigned long flags; /* Atomic flags, some possibly |
| 44 | * updated asynchronously */ | 44 | * updated asynchronously */ |
| 45 | struct address_space *mapping; /* If low bit clear, points to | 45 | union { |
| 46 | * inode address_space, or NULL. | 46 | struct address_space *mapping; /* If low bit clear, points to |
| 47 | * If page mapped as anonymous | 47 | * inode address_space, or NULL. |
| 48 | * memory, low bit is set, and | 48 | * If page mapped as anonymous |
| 49 | * it points to anon_vma object: | 49 | * memory, low bit is set, and |
| 50 | * see PAGE_MAPPING_ANON below. | 50 | * it points to anon_vma object: |
| 51 | */ | 51 | * see PAGE_MAPPING_ANON below. |
| 52 | */ | ||
| 53 | void *s_mem; /* slab first object */ | ||
| 54 | }; | ||
| 55 | |||
| 52 | /* Second double word */ | 56 | /* Second double word */ |
| 53 | struct { | 57 | struct { |
| 54 | union { | 58 | union { |
| 55 | pgoff_t index; /* Our offset within mapping. */ | 59 | pgoff_t index; /* Our offset within mapping. */ |
| 56 | void *freelist; /* slub/slob first free object */ | 60 | void *freelist; /* sl[aou]b first free object */ |
| 57 | bool pfmemalloc; /* If set by the page allocator, | 61 | bool pfmemalloc; /* If set by the page allocator, |
| 58 | * ALLOC_NO_WATERMARKS was set | 62 | * ALLOC_NO_WATERMARKS was set |
| 59 | * and the low watermark was not | 63 | * and the low watermark was not |
| @@ -109,6 +113,7 @@ struct page { | |||
| 109 | }; | 113 | }; |
| 110 | atomic_t _count; /* Usage count, see below. */ | 114 | atomic_t _count; /* Usage count, see below. */ |
| 111 | }; | 115 | }; |
| 116 | unsigned int active; /* SLAB */ | ||
| 112 | }; | 117 | }; |
| 113 | }; | 118 | }; |
| 114 | 119 | ||
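For readers following the conversion, here is a small userspace sketch (not kernel code) of what the mm_types.h hunk above achieves: the bookkeeping that the old struct slab carried now reuses page-descriptor slots that are idle while the page is owned by SLAB. The struct name, field ordering and the example address are illustrative only; the real struct page keeps these members inside its existing unions and counters, which are omitted here.

/*
 * Userspace mock of the overlay idea: fields meaningless for a slab page
 * are reused to hold the slab metadata.
 */
#include <stdio.h>
#include <stddef.h>

struct mock_page {
	unsigned long flags;		/* PG_slab etc. */
	union {
		void *mapping;		/* pagecache/anon use */
		void *s_mem;		/* SLAB: first object in the slab */
	};
	union {
		unsigned long index;	/* pagecache offset */
		void *freelist;		/* SLAB: array of free object indices */
	};
	unsigned int active;		/* SLAB: objects currently allocated */
};

int main(void)
{
	struct mock_page pg = { .flags = 0 };

	pg.s_mem = (void *)0x1000;	/* pretend the slab data starts here */
	pg.active = 0;

	/* s_mem aliases mapping, so a slab page must not be handed back to
	 * the page allocator with this slot still set. */
	printf("mapping and s_mem share storage: %d\n", pg.mapping == pg.s_mem);
	printf("offsetof(active) = %zu\n", offsetof(struct mock_page, active));
	return 0;
}

Because mapping and s_mem share storage, the mm/slab.c hunk below also clears page->mapping in kmem_freepages() before returning pages to the buddy allocator.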
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cd401580bdd3..ca82e8ff89fa 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
| @@ -41,8 +41,8 @@ struct kmem_cache { | |||
| 41 | 41 | ||
| 42 | size_t colour; /* cache colouring range */ | 42 | size_t colour; /* cache colouring range */ |
| 43 | unsigned int colour_off; /* colour offset */ | 43 | unsigned int colour_off; /* colour offset */ |
| 44 | struct kmem_cache *slabp_cache; | 44 | struct kmem_cache *freelist_cache; |
| 45 | unsigned int slab_size; | 45 | unsigned int freelist_size; |
| 46 | 46 | ||
| 47 | /* constructor func */ | 47 | /* constructor func */ |
| 48 | void (*ctor)(void *obj); | 48 | void (*ctor)(void *obj); |
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
| @@ -164,21 +164,6 @@ | |||
| 164 | static bool pfmemalloc_active __read_mostly; | 164 | static bool pfmemalloc_active __read_mostly; |
| 165 | 165 | ||
| 166 | /* | 166 | /* |
| 167 | * struct slab | ||
| 168 | * | ||
| 169 | * Manages the objs in a slab. Placed either at the beginning of mem allocated | ||
| 170 | * for a slab, or allocated from an general cache. | ||
| 171 | * Slabs are chained into three list: fully used, partial, fully free slabs. | ||
| 172 | */ | ||
| 173 | struct slab { | ||
| 174 | struct { | ||
| 175 | struct list_head list; | ||
| 176 | void *s_mem; /* including colour offset */ | ||
| 177 | unsigned int active; /* num of objs active in slab */ | ||
| 178 | }; | ||
| 179 | }; | ||
| 180 | |||
| 181 | /* | ||
| 182 | * struct array_cache | 167 | * struct array_cache |
| 183 | * | 168 | * |
| 184 | * Purpose: | 169 | * Purpose: |
| @@ -405,18 +390,10 @@ static inline struct kmem_cache *virt_to_cache(const void *obj) | |||
| 405 | return page->slab_cache; | 390 | return page->slab_cache; |
| 406 | } | 391 | } |
| 407 | 392 | ||
| 408 | static inline struct slab *virt_to_slab(const void *obj) | 393 | static inline void *index_to_obj(struct kmem_cache *cache, struct page *page, |
| 409 | { | ||
| 410 | struct page *page = virt_to_head_page(obj); | ||
| 411 | |||
| 412 | VM_BUG_ON(!PageSlab(page)); | ||
| 413 | return page->slab_page; | ||
| 414 | } | ||
| 415 | |||
| 416 | static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, | ||
| 417 | unsigned int idx) | 394 | unsigned int idx) |
| 418 | { | 395 | { |
| 419 | return slab->s_mem + cache->size * idx; | 396 | return page->s_mem + cache->size * idx; |
| 420 | } | 397 | } |
| 421 | 398 | ||
| 422 | /* | 399 | /* |
| @@ -426,9 +403,9 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, | |||
| 426 | * reciprocal_divide(offset, cache->reciprocal_buffer_size) | 403 | * reciprocal_divide(offset, cache->reciprocal_buffer_size) |
| 427 | */ | 404 | */ |
| 428 | static inline unsigned int obj_to_index(const struct kmem_cache *cache, | 405 | static inline unsigned int obj_to_index(const struct kmem_cache *cache, |
| 429 | const struct slab *slab, void *obj) | 406 | const struct page *page, void *obj) |
| 430 | { | 407 | { |
| 431 | u32 offset = (obj - slab->s_mem); | 408 | u32 offset = (obj - page->s_mem); |
| 432 | return reciprocal_divide(offset, cache->reciprocal_buffer_size); | 409 | return reciprocal_divide(offset, cache->reciprocal_buffer_size); |
| 433 | } | 410 | } |
| 434 | 411 | ||
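A minimal standalone sketch of the index_to_obj()/obj_to_index() pair above, assuming a flat layout of cache->size-byte objects starting at page->s_mem. The signatures are simplified and plain division stands in for reciprocal_divide(), which the kernel uses because cache->size is constant for a given cache; the buffer and sizes below are illustrative only.

#include <assert.h>
#include <stdio.h>

/* Object i lives at s_mem + size * i ... */
static void *index_to_obj(char *s_mem, unsigned int size, unsigned int idx)
{
	return s_mem + (size_t)size * idx;
}

/* ... so the index is recovered by dividing the offset by the object size. */
static unsigned int obj_to_index(char *s_mem, unsigned int size, void *obj)
{
	return (unsigned int)(((char *)obj - s_mem) / size);
}

int main(void)
{
	char slab[4096];		/* stand-in for the slab's usable memory */
	unsigned int size = 128;	/* stand-in for cache->size */

	void *third = index_to_obj(slab, size, 3);
	assert(obj_to_index(slab, size, third) == 3);
	printf("object 3 lives at offset %td\n", (char *)third - slab);
	return 0;
}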
| @@ -590,7 +567,7 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) | |||
| 590 | 567 | ||
| 591 | static size_t slab_mgmt_size(size_t nr_objs, size_t align) | 568 | static size_t slab_mgmt_size(size_t nr_objs, size_t align) |
| 592 | { | 569 | { |
| 593 | return ALIGN(sizeof(struct slab)+nr_objs*sizeof(unsigned int), align); | 570 | return ALIGN(nr_objs * sizeof(unsigned int), align); |
| 594 | } | 571 | } |
| 595 | 572 | ||
| 596 | /* | 573 | /* |
| @@ -609,7 +586,6 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size, | |||
| 609 | * on it. For the latter case, the memory allocated for a | 586 | * on it. For the latter case, the memory allocated for a |
| 610 | * slab is used for: | 587 | * slab is used for: |
| 611 | * | 588 | * |
| 612 | * - The struct slab | ||
| 613 | * - One unsigned int for each object | 589 | * - One unsigned int for each object |
| 614 | * - Padding to respect alignment of @align | 590 | * - Padding to respect alignment of @align |
| 615 | * - @buffer_size bytes for each object | 591 | * - @buffer_size bytes for each object |
| @@ -632,8 +608,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size, | |||
| 632 | * into the memory allocation when taking the padding | 608 | * into the memory allocation when taking the padding |
| 633 | * into account. | 609 | * into account. |
| 634 | */ | 610 | */ |
| 635 | nr_objs = (slab_size - sizeof(struct slab)) / | 611 | nr_objs = (slab_size) / (buffer_size + sizeof(unsigned int)); |
| 636 | (buffer_size + sizeof(unsigned int)); | ||
| 637 | 612 | ||
| 638 | /* | 613 | /* |
| 639 | * This calculated number will be either the right | 614 | * This calculated number will be either the right |
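To make the simplified estimate above concrete, here is a worked example with assumed numbers (4 KiB slab, 128-byte objects, 64-byte alignment); none of these values come from the patch, and ALIGN_UP stands in for the kernel's ALIGN(). With the struct slab header gone, only one unsigned int of freelist space per object plus alignment padding is reserved.

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	size_t slab_size = 4096;	/* one 4 KiB page */
	size_t buffer_size = 128;	/* assumed object size */
	size_t align = 64;		/* assumed cache alignment */

	/* First guess: each object costs its size plus one freelist index. */
	size_t nr_objs = slab_size / (buffer_size + sizeof(unsigned int));

	/* Re-check with the freelist rounded up to the alignment. */
	size_t mgmt = ALIGN_UP(nr_objs * sizeof(unsigned int), align);
	if (mgmt + nr_objs * buffer_size > slab_size)
		nr_objs--;

	mgmt = ALIGN_UP(nr_objs * sizeof(unsigned int), align);
	printf("objects per slab : %zu\n", nr_objs);	/* 31 */
	printf("freelist bytes   : %zu\n", mgmt);	/* 128 */
	printf("left over        : %zu\n",
	       slab_size - mgmt - nr_objs * buffer_size);	/* 0 */
	return 0;
}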
| @@ -773,11 +748,11 @@ static struct array_cache *alloc_arraycache(int node, int entries, | |||
| 773 | return nc; | 748 | return nc; |
| 774 | } | 749 | } |
| 775 | 750 | ||
| 776 | static inline bool is_slab_pfmemalloc(struct slab *slabp) | 751 | static inline bool is_slab_pfmemalloc(struct page *page) |
| 777 | { | 752 | { |
| 778 | struct page *page = virt_to_page(slabp->s_mem); | 753 | struct page *mem_page = virt_to_page(page->s_mem); |
| 779 | 754 | ||
| 780 | return PageSlabPfmemalloc(page); | 755 | return PageSlabPfmemalloc(mem_page); |
| 781 | } | 756 | } |
| 782 | 757 | ||
| 783 | /* Clears pfmemalloc_active if no slabs have pfmalloc set */ | 758 | /* Clears pfmemalloc_active if no slabs have pfmalloc set */ |
| @@ -785,23 +760,23 @@ static void recheck_pfmemalloc_active(struct kmem_cache *cachep, | |||
| 785 | struct array_cache *ac) | 760 | struct array_cache *ac) |
| 786 | { | 761 | { |
| 787 | struct kmem_cache_node *n = cachep->node[numa_mem_id()]; | 762 | struct kmem_cache_node *n = cachep->node[numa_mem_id()]; |
| 788 | struct slab *slabp; | 763 | struct page *page; |
| 789 | unsigned long flags; | 764 | unsigned long flags; |
| 790 | 765 | ||
| 791 | if (!pfmemalloc_active) | 766 | if (!pfmemalloc_active) |
| 792 | return; | 767 | return; |
| 793 | 768 | ||
| 794 | spin_lock_irqsave(&n->list_lock, flags); | 769 | spin_lock_irqsave(&n->list_lock, flags); |
| 795 | list_for_each_entry(slabp, &n->slabs_full, list) | 770 | list_for_each_entry(page, &n->slabs_full, lru) |
| 796 | if (is_slab_pfmemalloc(slabp)) | 771 | if (is_slab_pfmemalloc(page)) |
| 797 | goto out; | 772 | goto out; |
| 798 | 773 | ||
| 799 | list_for_each_entry(slabp, &n->slabs_partial, list) | 774 | list_for_each_entry(page, &n->slabs_partial, lru) |
| 800 | if (is_slab_pfmemalloc(slabp)) | 775 | if (is_slab_pfmemalloc(page)) |
| 801 | goto out; | 776 | goto out; |
| 802 | 777 | ||
| 803 | list_for_each_entry(slabp, &n->slabs_free, list) | 778 | list_for_each_entry(page, &n->slabs_free, lru) |
| 804 | if (is_slab_pfmemalloc(slabp)) | 779 | if (is_slab_pfmemalloc(page)) |
| 805 | goto out; | 780 | goto out; |
| 806 | 781 | ||
| 807 | pfmemalloc_active = false; | 782 | pfmemalloc_active = false; |
| @@ -841,8 +816,8 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, | |||
| 841 | */ | 816 | */ |
| 842 | n = cachep->node[numa_mem_id()]; | 817 | n = cachep->node[numa_mem_id()]; |
| 843 | if (!list_empty(&n->slabs_free) && force_refill) { | 818 | if (!list_empty(&n->slabs_free) && force_refill) { |
| 844 | struct slab *slabp = virt_to_slab(objp); | 819 | struct page *page = virt_to_head_page(objp); |
| 845 | ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem)); | 820 | ClearPageSlabPfmemalloc(virt_to_head_page(page->s_mem)); |
| 846 | clear_obj_pfmemalloc(&objp); | 821 | clear_obj_pfmemalloc(&objp); |
| 847 | recheck_pfmemalloc_active(cachep, ac); | 822 | recheck_pfmemalloc_active(cachep, ac); |
| 848 | return objp; | 823 | return objp; |
| @@ -874,9 +849,9 @@ static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac, | |||
| 874 | { | 849 | { |
| 875 | if (unlikely(pfmemalloc_active)) { | 850 | if (unlikely(pfmemalloc_active)) { |
| 876 | /* Some pfmemalloc slabs exist, check if this is one */ | 851 | /* Some pfmemalloc slabs exist, check if this is one */ |
| 877 | struct slab *slabp = virt_to_slab(objp); | 852 | struct page *page = virt_to_head_page(objp); |
| 878 | struct page *page = virt_to_head_page(slabp->s_mem); | 853 | struct page *mem_page = virt_to_head_page(page->s_mem); |
| 879 | if (PageSlabPfmemalloc(page)) | 854 | if (PageSlabPfmemalloc(mem_page)) |
| 880 | set_obj_pfmemalloc(&objp); | 855 | set_obj_pfmemalloc(&objp); |
| 881 | } | 856 | } |
| 882 | 857 | ||
| @@ -1633,7 +1608,7 @@ static noinline void | |||
| 1633 | slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) | 1608 | slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) |
| 1634 | { | 1609 | { |
| 1635 | struct kmem_cache_node *n; | 1610 | struct kmem_cache_node *n; |
| 1636 | struct slab *slabp; | 1611 | struct page *page; |
| 1637 | unsigned long flags; | 1612 | unsigned long flags; |
| 1638 | int node; | 1613 | int node; |
| 1639 | 1614 | ||
| @@ -1652,15 +1627,15 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) | |||
| 1652 | continue; | 1627 | continue; |
| 1653 | 1628 | ||
| 1654 | spin_lock_irqsave(&n->list_lock, flags); | 1629 | spin_lock_irqsave(&n->list_lock, flags); |
| 1655 | list_for_each_entry(slabp, &n->slabs_full, list) { | 1630 | list_for_each_entry(page, &n->slabs_full, lru) { |
| 1656 | active_objs += cachep->num; | 1631 | active_objs += cachep->num; |
| 1657 | active_slabs++; | 1632 | active_slabs++; |
| 1658 | } | 1633 | } |
| 1659 | list_for_each_entry(slabp, &n->slabs_partial, list) { | 1634 | list_for_each_entry(page, &n->slabs_partial, lru) { |
| 1660 | active_objs += slabp->active; | 1635 | active_objs += page->active; |
| 1661 | active_slabs++; | 1636 | active_slabs++; |
| 1662 | } | 1637 | } |
| 1663 | list_for_each_entry(slabp, &n->slabs_free, list) | 1638 | list_for_each_entry(page, &n->slabs_free, lru) |
| 1664 | num_slabs++; | 1639 | num_slabs++; |
| 1665 | 1640 | ||
| 1666 | free_objects += n->free_objects; | 1641 | free_objects += n->free_objects; |
| @@ -1746,6 +1721,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page) | |||
| 1746 | BUG_ON(!PageSlab(page)); | 1721 | BUG_ON(!PageSlab(page)); |
| 1747 | __ClearPageSlabPfmemalloc(page); | 1722 | __ClearPageSlabPfmemalloc(page); |
| 1748 | __ClearPageSlab(page); | 1723 | __ClearPageSlab(page); |
| 1724 | page_mapcount_reset(page); | ||
| 1725 | page->mapping = NULL; | ||
| 1749 | 1726 | ||
| 1750 | memcg_release_pages(cachep, cachep->gfporder); | 1727 | memcg_release_pages(cachep, cachep->gfporder); |
| 1751 | if (current->reclaim_state) | 1728 | if (current->reclaim_state) |
| @@ -1910,19 +1887,19 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) | |||
| 1910 | /* Print some data about the neighboring objects, if they | 1887 | /* Print some data about the neighboring objects, if they |
| 1911 | * exist: | 1888 | * exist: |
| 1912 | */ | 1889 | */ |
| 1913 | struct slab *slabp = virt_to_slab(objp); | 1890 | struct page *page = virt_to_head_page(objp); |
| 1914 | unsigned int objnr; | 1891 | unsigned int objnr; |
| 1915 | 1892 | ||
| 1916 | objnr = obj_to_index(cachep, slabp, objp); | 1893 | objnr = obj_to_index(cachep, page, objp); |
| 1917 | if (objnr) { | 1894 | if (objnr) { |
| 1918 | objp = index_to_obj(cachep, slabp, objnr - 1); | 1895 | objp = index_to_obj(cachep, page, objnr - 1); |
| 1919 | realobj = (char *)objp + obj_offset(cachep); | 1896 | realobj = (char *)objp + obj_offset(cachep); |
| 1920 | printk(KERN_ERR "Prev obj: start=%p, len=%d\n", | 1897 | printk(KERN_ERR "Prev obj: start=%p, len=%d\n", |
| 1921 | realobj, size); | 1898 | realobj, size); |
| 1922 | print_objinfo(cachep, objp, 2); | 1899 | print_objinfo(cachep, objp, 2); |
| 1923 | } | 1900 | } |
| 1924 | if (objnr + 1 < cachep->num) { | 1901 | if (objnr + 1 < cachep->num) { |
| 1925 | objp = index_to_obj(cachep, slabp, objnr + 1); | 1902 | objp = index_to_obj(cachep, page, objnr + 1); |
| 1926 | realobj = (char *)objp + obj_offset(cachep); | 1903 | realobj = (char *)objp + obj_offset(cachep); |
| 1927 | printk(KERN_ERR "Next obj: start=%p, len=%d\n", | 1904 | printk(KERN_ERR "Next obj: start=%p, len=%d\n", |
| 1928 | realobj, size); | 1905 | realobj, size); |
| @@ -1933,11 +1910,12 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) | |||
| 1933 | #endif | 1910 | #endif |
| 1934 | 1911 | ||
| 1935 | #if DEBUG | 1912 | #if DEBUG |
| 1936 | static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp) | 1913 | static void slab_destroy_debugcheck(struct kmem_cache *cachep, |
| 1914 | struct page *page) | ||
| 1937 | { | 1915 | { |
| 1938 | int i; | 1916 | int i; |
| 1939 | for (i = 0; i < cachep->num; i++) { | 1917 | for (i = 0; i < cachep->num; i++) { |
| 1940 | void *objp = index_to_obj(cachep, slabp, i); | 1918 | void *objp = index_to_obj(cachep, page, i); |
| 1941 | 1919 | ||
| 1942 | if (cachep->flags & SLAB_POISON) { | 1920 | if (cachep->flags & SLAB_POISON) { |
| 1943 | #ifdef CONFIG_DEBUG_PAGEALLOC | 1921 | #ifdef CONFIG_DEBUG_PAGEALLOC |
| @@ -1962,7 +1940,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab | |||
| 1962 | } | 1940 | } |
| 1963 | } | 1941 | } |
| 1964 | #else | 1942 | #else |
| 1965 | static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp) | 1943 | static void slab_destroy_debugcheck(struct kmem_cache *cachep, |
| 1944 | struct page *page) | ||
| 1966 | { | 1945 | { |
| 1967 | } | 1946 | } |
| 1968 | #endif | 1947 | #endif |
| @@ -1976,11 +1955,12 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab | |||
| 1976 | * Before calling the slab must have been unlinked from the cache. The | 1955 | * Before calling the slab must have been unlinked from the cache. The |
| 1977 | * cache-lock is not held/needed. | 1956 | * cache-lock is not held/needed. |
| 1978 | */ | 1957 | */ |
| 1979 | static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) | 1958 | static void slab_destroy(struct kmem_cache *cachep, struct page *page) |
| 1980 | { | 1959 | { |
| 1981 | struct page *page = virt_to_head_page(slabp->s_mem); | 1960 | struct freelist *freelist; |
| 1982 | 1961 | ||
| 1983 | slab_destroy_debugcheck(cachep, slabp); | 1962 | freelist = page->freelist; |
| 1963 | slab_destroy_debugcheck(cachep, page); | ||
| 1984 | if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { | 1964 | if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { |
| 1985 | struct rcu_head *head; | 1965 | struct rcu_head *head; |
| 1986 | 1966 | ||
| @@ -1998,11 +1978,11 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) | |||
| 1998 | } | 1978 | } |
| 1999 | 1979 | ||
| 2000 | /* | 1980 | /* |
| 2001 | * From now on, we don't use slab management | 1981 | * From now on, we don't use freelist |
| 2002 | * although actual page can be freed in rcu context | 1982 | * although actual page can be freed in rcu context |
| 2003 | */ | 1983 | */ |
| 2004 | if (OFF_SLAB(cachep)) | 1984 | if (OFF_SLAB(cachep)) |
| 2005 | kmem_cache_free(cachep->slabp_cache, slabp); | 1985 | kmem_cache_free(cachep->freelist_cache, freelist); |
| 2006 | } | 1986 | } |
| 2007 | 1987 | ||
| 2008 | /** | 1988 | /** |
| @@ -2039,7 +2019,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep, | |||
| 2039 | * use off-slab slabs. Needed to avoid a possible | 2019 | * use off-slab slabs. Needed to avoid a possible |
| 2040 | * looping condition in cache_grow(). | 2020 | * looping condition in cache_grow(). |
| 2041 | */ | 2021 | */ |
| 2042 | offslab_limit = size - sizeof(struct slab); | 2022 | offslab_limit = size; |
| 2043 | offslab_limit /= sizeof(unsigned int); | 2023 | offslab_limit /= sizeof(unsigned int); |
| 2044 | 2024 | ||
| 2045 | if (num > offslab_limit) | 2025 | if (num > offslab_limit) |
| @@ -2162,7 +2142,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
| 2162 | int | 2142 | int |
| 2163 | __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | 2143 | __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) |
| 2164 | { | 2144 | { |
| 2165 | size_t left_over, slab_size, ralign; | 2145 | size_t left_over, freelist_size, ralign; |
| 2166 | gfp_t gfp; | 2146 | gfp_t gfp; |
| 2167 | int err; | 2147 | int err; |
| 2168 | size_t size = cachep->size; | 2148 | size_t size = cachep->size; |
| @@ -2281,22 +2261,21 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | |||
| 2281 | if (!cachep->num) | 2261 | if (!cachep->num) |
| 2282 | return -E2BIG; | 2262 | return -E2BIG; |
| 2283 | 2263 | ||
| 2284 | slab_size = ALIGN(cachep->num * sizeof(unsigned int) | 2264 | freelist_size = |
| 2285 | + sizeof(struct slab), cachep->align); | 2265 | ALIGN(cachep->num * sizeof(unsigned int), cachep->align); |
| 2286 | 2266 | ||
| 2287 | /* | 2267 | /* |
| 2288 | * If the slab has been placed off-slab, and we have enough space then | 2268 | * If the slab has been placed off-slab, and we have enough space then |
| 2289 | * move it on-slab. This is at the expense of any extra colouring. | 2269 | * move it on-slab. This is at the expense of any extra colouring. |
| 2290 | */ | 2270 | */ |
| 2291 | if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { | 2271 | if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) { |
| 2292 | flags &= ~CFLGS_OFF_SLAB; | 2272 | flags &= ~CFLGS_OFF_SLAB; |
| 2293 | left_over -= slab_size; | 2273 | left_over -= freelist_size; |
| 2294 | } | 2274 | } |
| 2295 | 2275 | ||
| 2296 | if (flags & CFLGS_OFF_SLAB) { | 2276 | if (flags & CFLGS_OFF_SLAB) { |
| 2297 | /* really off slab. No need for manual alignment */ | 2277 | /* really off slab. No need for manual alignment */ |
| 2298 | slab_size = | 2278 | freelist_size = cachep->num * sizeof(unsigned int); |
| 2299 | cachep->num * sizeof(unsigned int) + sizeof(struct slab); | ||
| 2300 | 2279 | ||
| 2301 | #ifdef CONFIG_PAGE_POISONING | 2280 | #ifdef CONFIG_PAGE_POISONING |
| 2302 | /* If we're going to use the generic kernel_map_pages() | 2281 | /* If we're going to use the generic kernel_map_pages() |
| @@ -2313,7 +2292,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | |||
| 2313 | if (cachep->colour_off < cachep->align) | 2292 | if (cachep->colour_off < cachep->align) |
| 2314 | cachep->colour_off = cachep->align; | 2293 | cachep->colour_off = cachep->align; |
| 2315 | cachep->colour = left_over / cachep->colour_off; | 2294 | cachep->colour = left_over / cachep->colour_off; |
| 2316 | cachep->slab_size = slab_size; | 2295 | cachep->freelist_size = freelist_size; |
| 2317 | cachep->flags = flags; | 2296 | cachep->flags = flags; |
| 2318 | cachep->allocflags = __GFP_COMP; | 2297 | cachep->allocflags = __GFP_COMP; |
| 2319 | if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) | 2298 | if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) |
| @@ -2322,7 +2301,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | |||
| 2322 | cachep->reciprocal_buffer_size = reciprocal_value(size); | 2301 | cachep->reciprocal_buffer_size = reciprocal_value(size); |
| 2323 | 2302 | ||
| 2324 | if (flags & CFLGS_OFF_SLAB) { | 2303 | if (flags & CFLGS_OFF_SLAB) { |
| 2325 | cachep->slabp_cache = kmalloc_slab(slab_size, 0u); | 2304 | cachep->freelist_cache = kmalloc_slab(freelist_size, 0u); |
| 2326 | /* | 2305 | /* |
| 2327 | * This is a possibility for one of the malloc_sizes caches. | 2306 | * This is a possibility for one of the malloc_sizes caches. |
| 2328 | * But since we go off slab only for object size greater than | 2307 | * But since we go off slab only for object size greater than |
| @@ -2330,7 +2309,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | |||
| 2330 | * this should not happen at all. | 2309 | * this should not happen at all. |
| 2331 | * But leave a BUG_ON for some lucky dude. | 2310 | * But leave a BUG_ON for some lucky dude. |
| 2332 | */ | 2311 | */ |
| 2333 | BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache)); | 2312 | BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache)); |
| 2334 | } | 2313 | } |
| 2335 | 2314 | ||
| 2336 | err = setup_cpu_cache(cachep, gfp); | 2315 | err = setup_cpu_cache(cachep, gfp); |
| @@ -2436,7 +2415,7 @@ static int drain_freelist(struct kmem_cache *cache, | |||
| 2436 | { | 2415 | { |
| 2437 | struct list_head *p; | 2416 | struct list_head *p; |
| 2438 | int nr_freed; | 2417 | int nr_freed; |
| 2439 | struct slab *slabp; | 2418 | struct page *page; |
| 2440 | 2419 | ||
| 2441 | nr_freed = 0; | 2420 | nr_freed = 0; |
| 2442 | while (nr_freed < tofree && !list_empty(&n->slabs_free)) { | 2421 | while (nr_freed < tofree && !list_empty(&n->slabs_free)) { |
| @@ -2448,18 +2427,18 @@ static int drain_freelist(struct kmem_cache *cache, | |||
| 2448 | goto out; | 2427 | goto out; |
| 2449 | } | 2428 | } |
| 2450 | 2429 | ||
| 2451 | slabp = list_entry(p, struct slab, list); | 2430 | page = list_entry(p, struct page, lru); |
| 2452 | #if DEBUG | 2431 | #if DEBUG |
| 2453 | BUG_ON(slabp->active); | 2432 | BUG_ON(page->active); |
| 2454 | #endif | 2433 | #endif |
| 2455 | list_del(&slabp->list); | 2434 | list_del(&page->lru); |
| 2456 | /* | 2435 | /* |
| 2457 | * Safe to drop the lock. The slab is no longer linked | 2436 | * Safe to drop the lock. The slab is no longer linked |
| 2458 | * to the cache. | 2437 | * to the cache. |
| 2459 | */ | 2438 | */ |
| 2460 | n->free_objects -= cache->num; | 2439 | n->free_objects -= cache->num; |
| 2461 | spin_unlock_irq(&n->list_lock); | 2440 | spin_unlock_irq(&n->list_lock); |
| 2462 | slab_destroy(cache, slabp); | 2441 | slab_destroy(cache, page); |
| 2463 | nr_freed++; | 2442 | nr_freed++; |
| 2464 | } | 2443 | } |
| 2465 | out: | 2444 | out: |
| @@ -2542,18 +2521,18 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep) | |||
| 2542 | * descriptors in kmem_cache_create, we search through the malloc_sizes array. | 2521 | * descriptors in kmem_cache_create, we search through the malloc_sizes array. |
| 2543 | * If we are creating a malloc_sizes cache here it would not be visible to | 2522 | * If we are creating a malloc_sizes cache here it would not be visible to |
| 2544 | * kmem_find_general_cachep till the initialization is complete. | 2523 | * kmem_find_general_cachep till the initialization is complete. |
| 2545 | * Hence we cannot have slabp_cache same as the original cache. | 2524 | * Hence we cannot have freelist_cache same as the original cache. |
| 2546 | */ | 2525 | */ |
| 2547 | static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, | 2526 | static struct freelist *alloc_slabmgmt(struct kmem_cache *cachep, |
| 2548 | struct page *page, int colour_off, | 2527 | struct page *page, int colour_off, |
| 2549 | gfp_t local_flags, int nodeid) | 2528 | gfp_t local_flags, int nodeid) |
| 2550 | { | 2529 | { |
| 2551 | struct slab *slabp; | 2530 | struct freelist *freelist; |
| 2552 | void *addr = page_address(page); | 2531 | void *addr = page_address(page); |
| 2553 | 2532 | ||
| 2554 | if (OFF_SLAB(cachep)) { | 2533 | if (OFF_SLAB(cachep)) { |
| 2555 | /* Slab management obj is off-slab. */ | 2534 | /* Slab management obj is off-slab. */ |
| 2556 | slabp = kmem_cache_alloc_node(cachep->slabp_cache, | 2535 | freelist = kmem_cache_alloc_node(cachep->freelist_cache, |
| 2557 | local_flags, nodeid); | 2536 | local_flags, nodeid); |
| 2558 | /* | 2537 | /* |
| 2559 | * If the first object in the slab is leaked (it's allocated | 2538 | * If the first object in the slab is leaked (it's allocated |
| @@ -2561,31 +2540,31 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, | |||
| 2561 | * kmemleak does not treat the ->s_mem pointer as a reference | 2540 | * kmemleak does not treat the ->s_mem pointer as a reference |
| 2562 | * to the object. Otherwise we will not report the leak. | 2541 | * to the object. Otherwise we will not report the leak. |
| 2563 | */ | 2542 | */ |
| 2564 | kmemleak_scan_area(&slabp->list, sizeof(struct list_head), | 2543 | kmemleak_scan_area(&page->lru, sizeof(struct list_head), |
| 2565 | local_flags); | 2544 | local_flags); |
| 2566 | if (!slabp) | 2545 | if (!freelist) |
| 2567 | return NULL; | 2546 | return NULL; |
| 2568 | } else { | 2547 | } else { |
| 2569 | slabp = addr + colour_off; | 2548 | freelist = addr + colour_off; |
| 2570 | colour_off += cachep->slab_size; | 2549 | colour_off += cachep->freelist_size; |
| 2571 | } | 2550 | } |
| 2572 | slabp->active = 0; | 2551 | page->active = 0; |
| 2573 | slabp->s_mem = addr + colour_off; | 2552 | page->s_mem = addr + colour_off; |
| 2574 | return slabp; | 2553 | return freelist; |
| 2575 | } | 2554 | } |
| 2576 | 2555 | ||
| 2577 | static inline unsigned int *slab_bufctl(struct slab *slabp) | 2556 | static inline unsigned int *slab_bufctl(struct page *page) |
| 2578 | { | 2557 | { |
| 2579 | return (unsigned int *) (slabp + 1); | 2558 | return (unsigned int *)(page->freelist); |
| 2580 | } | 2559 | } |
| 2581 | 2560 | ||
| 2582 | static void cache_init_objs(struct kmem_cache *cachep, | 2561 | static void cache_init_objs(struct kmem_cache *cachep, |
| 2583 | struct slab *slabp) | 2562 | struct page *page) |
| 2584 | { | 2563 | { |
| 2585 | int i; | 2564 | int i; |
| 2586 | 2565 | ||
| 2587 | for (i = 0; i < cachep->num; i++) { | 2566 | for (i = 0; i < cachep->num; i++) { |
| 2588 | void *objp = index_to_obj(cachep, slabp, i); | 2567 | void *objp = index_to_obj(cachep, page, i); |
| 2589 | #if DEBUG | 2568 | #if DEBUG |
| 2590 | /* need to poison the objs? */ | 2569 | /* need to poison the objs? */ |
| 2591 | if (cachep->flags & SLAB_POISON) | 2570 | if (cachep->flags & SLAB_POISON) |
| @@ -2621,7 +2600,7 @@ static void cache_init_objs(struct kmem_cache *cachep, | |||
| 2621 | if (cachep->ctor) | 2600 | if (cachep->ctor) |
| 2622 | cachep->ctor(objp); | 2601 | cachep->ctor(objp); |
| 2623 | #endif | 2602 | #endif |
| 2624 | slab_bufctl(slabp)[i] = i; | 2603 | slab_bufctl(page)[i] = i; |
| 2625 | } | 2604 | } |
| 2626 | } | 2605 | } |
| 2627 | 2606 | ||
| @@ -2635,13 +2614,13 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) | |||
| 2635 | } | 2614 | } |
| 2636 | } | 2615 | } |
| 2637 | 2616 | ||
| 2638 | static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, | 2617 | static void *slab_get_obj(struct kmem_cache *cachep, struct page *page, |
| 2639 | int nodeid) | 2618 | int nodeid) |
| 2640 | { | 2619 | { |
| 2641 | void *objp; | 2620 | void *objp; |
| 2642 | 2621 | ||
| 2643 | objp = index_to_obj(cachep, slabp, slab_bufctl(slabp)[slabp->active]); | 2622 | objp = index_to_obj(cachep, page, slab_bufctl(page)[page->active]); |
| 2644 | slabp->active++; | 2623 | page->active++; |
| 2645 | #if DEBUG | 2624 | #if DEBUG |
| 2646 | WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); | 2625 | WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); |
| 2647 | #endif | 2626 | #endif |
| @@ -2649,10 +2628,10 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, | |||
| 2649 | return objp; | 2628 | return objp; |
| 2650 | } | 2629 | } |
| 2651 | 2630 | ||
| 2652 | static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, | 2631 | static void slab_put_obj(struct kmem_cache *cachep, struct page *page, |
| 2653 | void *objp, int nodeid) | 2632 | void *objp, int nodeid) |
| 2654 | { | 2633 | { |
| 2655 | unsigned int objnr = obj_to_index(cachep, slabp, objp); | 2634 | unsigned int objnr = obj_to_index(cachep, page, objp); |
| 2656 | #if DEBUG | 2635 | #if DEBUG |
| 2657 | unsigned int i; | 2636 | unsigned int i; |
| 2658 | 2637 | ||
| @@ -2660,16 +2639,16 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, | |||
| 2660 | WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); | 2639 | WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); |
| 2661 | 2640 | ||
| 2662 | /* Verify double free bug */ | 2641 | /* Verify double free bug */ |
| 2663 | for (i = slabp->active; i < cachep->num; i++) { | 2642 | for (i = page->active; i < cachep->num; i++) { |
| 2664 | if (slab_bufctl(slabp)[i] == objnr) { | 2643 | if (slab_bufctl(page)[i] == objnr) { |
| 2665 | printk(KERN_ERR "slab: double free detected in cache " | 2644 | printk(KERN_ERR "slab: double free detected in cache " |
| 2666 | "'%s', objp %p\n", cachep->name, objp); | 2645 | "'%s', objp %p\n", cachep->name, objp); |
| 2667 | BUG(); | 2646 | BUG(); |
| 2668 | } | 2647 | } |
| 2669 | } | 2648 | } |
| 2670 | #endif | 2649 | #endif |
| 2671 | slabp->active--; | 2650 | page->active--; |
| 2672 | slab_bufctl(slabp)[slabp->active] = objnr; | 2651 | slab_bufctl(page)[page->active] = objnr; |
| 2673 | } | 2652 | } |
| 2674 | 2653 | ||
| 2675 | /* | 2654 | /* |
| @@ -2677,11 +2656,11 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, | |||
| 2677 | * for the slab allocator to be able to lookup the cache and slab of a | 2656 | * for the slab allocator to be able to lookup the cache and slab of a |
| 2678 | * virtual address for kfree, ksize, and slab debugging. | 2657 | * virtual address for kfree, ksize, and slab debugging. |
| 2679 | */ | 2658 | */ |
| 2680 | static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, | 2659 | static void slab_map_pages(struct kmem_cache *cache, struct page *page, |
| 2681 | struct page *page) | 2660 | struct freelist *freelist) |
| 2682 | { | 2661 | { |
| 2683 | page->slab_cache = cache; | 2662 | page->slab_cache = cache; |
| 2684 | page->slab_page = slab; | 2663 | page->freelist = freelist; |
| 2685 | } | 2664 | } |
| 2686 | 2665 | ||
| 2687 | /* | 2666 | /* |
| @@ -2691,7 +2670,7 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, | |||
| 2691 | static int cache_grow(struct kmem_cache *cachep, | 2670 | static int cache_grow(struct kmem_cache *cachep, |
| 2692 | gfp_t flags, int nodeid, struct page *page) | 2671 | gfp_t flags, int nodeid, struct page *page) |
| 2693 | { | 2672 | { |
| 2694 | struct slab *slabp; | 2673 | struct freelist *freelist; |
| 2695 | size_t offset; | 2674 | size_t offset; |
| 2696 | gfp_t local_flags; | 2675 | gfp_t local_flags; |
| 2697 | struct kmem_cache_node *n; | 2676 | struct kmem_cache_node *n; |
| @@ -2738,14 +2717,14 @@ static int cache_grow(struct kmem_cache *cachep, | |||
| 2738 | goto failed; | 2717 | goto failed; |
| 2739 | 2718 | ||
| 2740 | /* Get slab management. */ | 2719 | /* Get slab management. */ |
| 2741 | slabp = alloc_slabmgmt(cachep, page, offset, | 2720 | freelist = alloc_slabmgmt(cachep, page, offset, |
| 2742 | local_flags & ~GFP_CONSTRAINT_MASK, nodeid); | 2721 | local_flags & ~GFP_CONSTRAINT_MASK, nodeid); |
| 2743 | if (!slabp) | 2722 | if (!freelist) |
| 2744 | goto opps1; | 2723 | goto opps1; |
| 2745 | 2724 | ||
| 2746 | slab_map_pages(cachep, slabp, page); | 2725 | slab_map_pages(cachep, page, freelist); |
| 2747 | 2726 | ||
| 2748 | cache_init_objs(cachep, slabp); | 2727 | cache_init_objs(cachep, page); |
| 2749 | 2728 | ||
| 2750 | if (local_flags & __GFP_WAIT) | 2729 | if (local_flags & __GFP_WAIT) |
| 2751 | local_irq_disable(); | 2730 | local_irq_disable(); |
| @@ -2753,7 +2732,7 @@ static int cache_grow(struct kmem_cache *cachep, | |||
| 2753 | spin_lock(&n->list_lock); | 2732 | spin_lock(&n->list_lock); |
| 2754 | 2733 | ||
| 2755 | /* Make slab active. */ | 2734 | /* Make slab active. */ |
| 2756 | list_add_tail(&slabp->list, &(n->slabs_free)); | 2735 | list_add_tail(&page->lru, &(n->slabs_free)); |
| 2757 | STATS_INC_GROWN(cachep); | 2736 | STATS_INC_GROWN(cachep); |
| 2758 | n->free_objects += cachep->num; | 2737 | n->free_objects += cachep->num; |
| 2759 | spin_unlock(&n->list_lock); | 2738 | spin_unlock(&n->list_lock); |
| @@ -2808,13 +2787,13 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, | |||
| 2808 | unsigned long caller) | 2787 | unsigned long caller) |
| 2809 | { | 2788 | { |
| 2810 | unsigned int objnr; | 2789 | unsigned int objnr; |
| 2811 | struct slab *slabp; | 2790 | struct page *page; |
| 2812 | 2791 | ||
| 2813 | BUG_ON(virt_to_cache(objp) != cachep); | 2792 | BUG_ON(virt_to_cache(objp) != cachep); |
| 2814 | 2793 | ||
| 2815 | objp -= obj_offset(cachep); | 2794 | objp -= obj_offset(cachep); |
| 2816 | kfree_debugcheck(objp); | 2795 | kfree_debugcheck(objp); |
| 2817 | slabp = virt_to_slab(objp); | 2796 | page = virt_to_head_page(objp); |
| 2818 | 2797 | ||
| 2819 | if (cachep->flags & SLAB_RED_ZONE) { | 2798 | if (cachep->flags & SLAB_RED_ZONE) { |
| 2820 | verify_redzone_free(cachep, objp); | 2799 | verify_redzone_free(cachep, objp); |
| @@ -2824,10 +2803,10 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, | |||
| 2824 | if (cachep->flags & SLAB_STORE_USER) | 2803 | if (cachep->flags & SLAB_STORE_USER) |
| 2825 | *dbg_userword(cachep, objp) = (void *)caller; | 2804 | *dbg_userword(cachep, objp) = (void *)caller; |
| 2826 | 2805 | ||
| 2827 | objnr = obj_to_index(cachep, slabp, objp); | 2806 | objnr = obj_to_index(cachep, page, objp); |
| 2828 | 2807 | ||
| 2829 | BUG_ON(objnr >= cachep->num); | 2808 | BUG_ON(objnr >= cachep->num); |
| 2830 | BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); | 2809 | BUG_ON(objp != index_to_obj(cachep, page, objnr)); |
| 2831 | 2810 | ||
| 2832 | if (cachep->flags & SLAB_POISON) { | 2811 | if (cachep->flags & SLAB_POISON) { |
| 2833 | #ifdef CONFIG_DEBUG_PAGEALLOC | 2812 | #ifdef CONFIG_DEBUG_PAGEALLOC |
| @@ -2886,7 +2865,7 @@ retry: | |||
| 2886 | 2865 | ||
| 2887 | while (batchcount > 0) { | 2866 | while (batchcount > 0) { |
| 2888 | struct list_head *entry; | 2867 | struct list_head *entry; |
| 2889 | struct slab *slabp; | 2868 | struct page *page; |
| 2890 | /* Get slab alloc is to come from. */ | 2869 | /* Get slab alloc is to come from. */ |
| 2891 | entry = n->slabs_partial.next; | 2870 | entry = n->slabs_partial.next; |
| 2892 | if (entry == &n->slabs_partial) { | 2871 | if (entry == &n->slabs_partial) { |
| @@ -2896,7 +2875,7 @@ retry: | |||
| 2896 | goto must_grow; | 2875 | goto must_grow; |
| 2897 | } | 2876 | } |
| 2898 | 2877 | ||
| 2899 | slabp = list_entry(entry, struct slab, list); | 2878 | page = list_entry(entry, struct page, lru); |
| 2900 | check_spinlock_acquired(cachep); | 2879 | check_spinlock_acquired(cachep); |
| 2901 | 2880 | ||
| 2902 | /* | 2881 | /* |
| @@ -2904,23 +2883,23 @@ retry: | |||
| 2904 | * there must be at least one object available for | 2883 | * there must be at least one object available for |
| 2905 | * allocation. | 2884 | * allocation. |
| 2906 | */ | 2885 | */ |
| 2907 | BUG_ON(slabp->active >= cachep->num); | 2886 | BUG_ON(page->active >= cachep->num); |
| 2908 | 2887 | ||
| 2909 | while (slabp->active < cachep->num && batchcount--) { | 2888 | while (page->active < cachep->num && batchcount--) { |
| 2910 | STATS_INC_ALLOCED(cachep); | 2889 | STATS_INC_ALLOCED(cachep); |
| 2911 | STATS_INC_ACTIVE(cachep); | 2890 | STATS_INC_ACTIVE(cachep); |
| 2912 | STATS_SET_HIGH(cachep); | 2891 | STATS_SET_HIGH(cachep); |
| 2913 | 2892 | ||
| 2914 | ac_put_obj(cachep, ac, slab_get_obj(cachep, slabp, | 2893 | ac_put_obj(cachep, ac, slab_get_obj(cachep, page, |
| 2915 | node)); | 2894 | node)); |
| 2916 | } | 2895 | } |
| 2917 | 2896 | ||
| 2918 | /* move slabp to correct slabp list: */ | 2897 | /* move slabp to correct slabp list: */ |
| 2919 | list_del(&slabp->list); | 2898 | list_del(&page->lru); |
| 2920 | if (slabp->active == cachep->num) | 2899 | if (page->active == cachep->num) |
| 2921 | list_add(&slabp->list, &n->slabs_full); | 2900 | list_add(&page->list, &n->slabs_full); |
| 2922 | else | 2901 | else |
| 2923 | list_add(&slabp->list, &n->slabs_partial); | 2902 | list_add(&page->list, &n->slabs_partial); |
| 2924 | } | 2903 | } |
| 2925 | 2904 | ||
| 2926 | must_grow: | 2905 | must_grow: |
| @@ -3175,7 +3154,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, | |||
| 3175 | int nodeid) | 3154 | int nodeid) |
| 3176 | { | 3155 | { |
| 3177 | struct list_head *entry; | 3156 | struct list_head *entry; |
| 3178 | struct slab *slabp; | 3157 | struct page *page; |
| 3179 | struct kmem_cache_node *n; | 3158 | struct kmem_cache_node *n; |
| 3180 | void *obj; | 3159 | void *obj; |
| 3181 | int x; | 3160 | int x; |
| @@ -3195,24 +3174,24 @@ retry: | |||
| 3195 | goto must_grow; | 3174 | goto must_grow; |
| 3196 | } | 3175 | } |
| 3197 | 3176 | ||
| 3198 | slabp = list_entry(entry, struct slab, list); | 3177 | page = list_entry(entry, struct page, lru); |
| 3199 | check_spinlock_acquired_node(cachep, nodeid); | 3178 | check_spinlock_acquired_node(cachep, nodeid); |
| 3200 | 3179 | ||
| 3201 | STATS_INC_NODEALLOCS(cachep); | 3180 | STATS_INC_NODEALLOCS(cachep); |
| 3202 | STATS_INC_ACTIVE(cachep); | 3181 | STATS_INC_ACTIVE(cachep); |
| 3203 | STATS_SET_HIGH(cachep); | 3182 | STATS_SET_HIGH(cachep); |
| 3204 | 3183 | ||
| 3205 | BUG_ON(slabp->active == cachep->num); | 3184 | BUG_ON(page->active == cachep->num); |
| 3206 | 3185 | ||
| 3207 | obj = slab_get_obj(cachep, slabp, nodeid); | 3186 | obj = slab_get_obj(cachep, page, nodeid); |
| 3208 | n->free_objects--; | 3187 | n->free_objects--; |
| 3209 | /* move slabp to correct slabp list: */ | 3188 | /* move slabp to correct slabp list: */ |
| 3210 | list_del(&slabp->list); | 3189 | list_del(&page->lru); |
| 3211 | 3190 | ||
| 3212 | if (slabp->active == cachep->num) | 3191 | if (page->active == cachep->num) |
| 3213 | list_add(&slabp->list, &n->slabs_full); | 3192 | list_add(&page->lru, &n->slabs_full); |
| 3214 | else | 3193 | else |
| 3215 | list_add(&slabp->list, &n->slabs_partial); | 3194 | list_add(&page->lru, &n->slabs_partial); |
| 3216 | 3195 | ||
| 3217 | spin_unlock(&n->list_lock); | 3196 | spin_unlock(&n->list_lock); |
| 3218 | goto done; | 3197 | goto done; |
| @@ -3362,21 +3341,21 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, | |||
| 3362 | 3341 | ||
| 3363 | for (i = 0; i < nr_objects; i++) { | 3342 | for (i = 0; i < nr_objects; i++) { |
| 3364 | void *objp; | 3343 | void *objp; |
| 3365 | struct slab *slabp; | 3344 | struct page *page; |
| 3366 | 3345 | ||
| 3367 | clear_obj_pfmemalloc(&objpp[i]); | 3346 | clear_obj_pfmemalloc(&objpp[i]); |
| 3368 | objp = objpp[i]; | 3347 | objp = objpp[i]; |
| 3369 | 3348 | ||
| 3370 | slabp = virt_to_slab(objp); | 3349 | page = virt_to_head_page(objp); |
| 3371 | n = cachep->node[node]; | 3350 | n = cachep->node[node]; |
| 3372 | list_del(&slabp->list); | 3351 | list_del(&page->lru); |
| 3373 | check_spinlock_acquired_node(cachep, node); | 3352 | check_spinlock_acquired_node(cachep, node); |
| 3374 | slab_put_obj(cachep, slabp, objp, node); | 3353 | slab_put_obj(cachep, page, objp, node); |
| 3375 | STATS_DEC_ACTIVE(cachep); | 3354 | STATS_DEC_ACTIVE(cachep); |
| 3376 | n->free_objects++; | 3355 | n->free_objects++; |
| 3377 | 3356 | ||
| 3378 | /* fixup slab chains */ | 3357 | /* fixup slab chains */ |
| 3379 | if (slabp->active == 0) { | 3358 | if (page->active == 0) { |
| 3380 | if (n->free_objects > n->free_limit) { | 3359 | if (n->free_objects > n->free_limit) { |
| 3381 | n->free_objects -= cachep->num; | 3360 | n->free_objects -= cachep->num; |
| 3382 | /* No need to drop any previously held | 3361 | /* No need to drop any previously held |
| @@ -3385,16 +3364,16 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, | |||
| 3385 | * a different cache, refer to comments before | 3364 | * a different cache, refer to comments before |
| 3386 | * alloc_slabmgmt. | 3365 | * alloc_slabmgmt. |
| 3387 | */ | 3366 | */ |
| 3388 | slab_destroy(cachep, slabp); | 3367 | slab_destroy(cachep, page); |
| 3389 | } else { | 3368 | } else { |
| 3390 | list_add(&slabp->list, &n->slabs_free); | 3369 | list_add(&page->lru, &n->slabs_free); |
| 3391 | } | 3370 | } |
| 3392 | } else { | 3371 | } else { |
| 3393 | /* Unconditionally move a slab to the end of the | 3372 | /* Unconditionally move a slab to the end of the |
| 3394 | * partial list on free - maximum time for the | 3373 | * partial list on free - maximum time for the |
| 3395 | * other objects to be freed, too. | 3374 | * other objects to be freed, too. |
| 3396 | */ | 3375 | */ |
| 3397 | list_add_tail(&slabp->list, &n->slabs_partial); | 3376 | list_add_tail(&page->lru, &n->slabs_partial); |
| 3398 | } | 3377 | } |
| 3399 | } | 3378 | } |
| 3400 | } | 3379 | } |
| @@ -3434,10 +3413,10 @@ free_done: | |||
| 3434 | 3413 | ||
| 3435 | p = n->slabs_free.next; | 3414 | p = n->slabs_free.next; |
| 3436 | while (p != &(n->slabs_free)) { | 3415 | while (p != &(n->slabs_free)) { |
| 3437 | struct slab *slabp; | 3416 | struct page *page; |
| 3438 | 3417 | ||
| 3439 | slabp = list_entry(p, struct slab, list); | 3418 | page = list_entry(p, struct page, lru); |
| 3440 | BUG_ON(slabp->active); | 3419 | BUG_ON(page->active); |
| 3441 | 3420 | ||
| 3442 | i++; | 3421 | i++; |
| 3443 | p = p->next; | 3422 | p = p->next; |
| @@ -4041,7 +4020,7 @@ out: | |||
| 4041 | #ifdef CONFIG_SLABINFO | 4020 | #ifdef CONFIG_SLABINFO |
| 4042 | void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) | 4021 | void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) |
| 4043 | { | 4022 | { |
| 4044 | struct slab *slabp; | 4023 | struct page *page; |
| 4045 | unsigned long active_objs; | 4024 | unsigned long active_objs; |
| 4046 | unsigned long num_objs; | 4025 | unsigned long num_objs; |
| 4047 | unsigned long active_slabs = 0; | 4026 | unsigned long active_slabs = 0; |
| @@ -4061,22 +4040,22 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) | |||
| 4061 | check_irq_on(); | 4040 | check_irq_on(); |
| 4062 | spin_lock_irq(&n->list_lock); | 4041 | spin_lock_irq(&n->list_lock); |
| 4063 | 4042 | ||
| 4064 | list_for_each_entry(slabp, &n->slabs_full, list) { | 4043 | list_for_each_entry(page, &n->slabs_full, lru) { |
| 4065 | if (slabp->active != cachep->num && !error) | 4044 | if (page->active != cachep->num && !error) |
| 4066 | error = "slabs_full accounting error"; | 4045 | error = "slabs_full accounting error"; |
| 4067 | active_objs += cachep->num; | 4046 | active_objs += cachep->num; |
| 4068 | active_slabs++; | 4047 | active_slabs++; |
| 4069 | } | 4048 | } |
| 4070 | list_for_each_entry(slabp, &n->slabs_partial, list) { | 4049 | list_for_each_entry(page, &n->slabs_partial, lru) { |
| 4071 | if (slabp->active == cachep->num && !error) | 4050 | if (page->active == cachep->num && !error) |
| 4072 | error = "slabs_partial accounting error"; | 4051 | error = "slabs_partial accounting error"; |
| 4073 | if (!slabp->active && !error) | 4052 | if (!page->active && !error) |
| 4074 | error = "slabs_partial accounting error"; | 4053 | error = "slabs_partial accounting error"; |
| 4075 | active_objs += slabp->active; | 4054 | active_objs += page->active; |
| 4076 | active_slabs++; | 4055 | active_slabs++; |
| 4077 | } | 4056 | } |
| 4078 | list_for_each_entry(slabp, &n->slabs_free, list) { | 4057 | list_for_each_entry(page, &n->slabs_free, lru) { |
| 4079 | if (slabp->active && !error) | 4058 | if (page->active && !error) |
| 4080 | error = "slabs_free accounting error"; | 4059 | error = "slabs_free accounting error"; |
| 4081 | num_slabs++; | 4060 | num_slabs++; |
| 4082 | } | 4061 | } |
| @@ -4229,19 +4208,20 @@ static inline int add_caller(unsigned long *n, unsigned long v) | |||
| 4229 | return 1; | 4208 | return 1; |
| 4230 | } | 4209 | } |
| 4231 | 4210 | ||
| 4232 | static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s) | 4211 | static void handle_slab(unsigned long *n, struct kmem_cache *c, |
| 4212 | struct page *page) | ||
| 4233 | { | 4213 | { |
| 4234 | void *p; | 4214 | void *p; |
| 4235 | int i, j; | 4215 | int i, j; |
| 4236 | 4216 | ||
| 4237 | if (n[0] == n[1]) | 4217 | if (n[0] == n[1]) |
| 4238 | return; | 4218 | return; |
| 4239 | for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) { | 4219 | for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) { |
| 4240 | bool active = true; | 4220 | bool active = true; |
| 4241 | 4221 | ||
| 4242 | for (j = s->active; j < c->num; j++) { | 4222 | for (j = page->active; j < c->num; j++) { |
| 4243 | /* Skip freed item */ | 4223 | /* Skip freed item */ |
| 4244 | if (slab_bufctl(s)[j] == i) { | 4224 | if (slab_bufctl(page)[j] == i) { |
| 4245 | active = false; | 4225 | active = false; |
| 4246 | break; | 4226 | break; |
| 4247 | } | 4227 | } |
| @@ -4273,7 +4253,7 @@ static void show_symbol(struct seq_file *m, unsigned long address) | |||
| 4273 | static int leaks_show(struct seq_file *m, void *p) | 4253 | static int leaks_show(struct seq_file *m, void *p) |
| 4274 | { | 4254 | { |
| 4275 | struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); | 4255 | struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); |
| 4276 | struct slab *slabp; | 4256 | struct page *page; |
| 4277 | struct kmem_cache_node *n; | 4257 | struct kmem_cache_node *n; |
| 4278 | const char *name; | 4258 | const char *name; |
| 4279 | unsigned long *x = m->private; | 4259 | unsigned long *x = m->private; |
| @@ -4297,10 +4277,10 @@ static int leaks_show(struct seq_file *m, void *p) | |||
| 4297 | check_irq_on(); | 4277 | check_irq_on(); |
| 4298 | spin_lock_irq(&n->list_lock); | 4278 | spin_lock_irq(&n->list_lock); |
| 4299 | 4279 | ||
| 4300 | list_for_each_entry(slabp, &n->slabs_full, list) | 4280 | list_for_each_entry(page, &n->slabs_full, lru) |
| 4301 | handle_slab(x, cachep, slabp); | 4281 | handle_slab(x, cachep, page); |
| 4302 | list_for_each_entry(slabp, &n->slabs_partial, list) | 4282 | list_for_each_entry(page, &n->slabs_partial, lru) |
| 4303 | handle_slab(x, cachep, slabp); | 4283 | handle_slab(x, cachep, page); |
| 4304 | spin_unlock_irq(&n->list_lock); | 4284 | spin_unlock_irq(&n->list_lock); |
| 4305 | } | 4285 | } |
| 4306 | name = cachep->name; | 4286 | name = cachep->name; |
