author     Christoph Lameter <cl@linux.com>      2012-06-13 11:24:55 -0400
committer  Pekka Enberg <penberg@kernel.org>     2012-06-14 02:19:56 -0400
commit     e571b0ad3495be5793e54e21cd244c4545c49d88 (patch)
tree       4ea18038e9543483d4f31519cf95f9d633ca0ef7
parent     b5568280c9b9162b384be9d447013b74d682d4b3 (diff)
slab: Use page struct fields instead of casting
Add fields to the page struct so that it is properly documented that
slab overlays the lru fields.

This cleans up some casts in slab.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
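As a minimal standalone sketch of the technique the patch relies on: an anonymous struct placed in a union, so that typed slab fields occupy the same storage as the generic lru pointer pair. The toy_* names below are hypothetical and the layout is heavily simplified; this is not the real struct page, only an illustration of the overlay.

/* Minimal sketch, assuming a toy layout: not the real struct page. */
#include <assert.h>
#include <stdio.h>

struct toy_list_head { struct toy_list_head *next, *prev; };
struct toy_kmem_cache { int object_size; };
struct toy_slab { int inuse; };

struct toy_page {
	union {
		struct toy_list_head lru;	/* generic use: page on a list */
		struct {			/* slab fields, overlaying lru */
			struct toy_kmem_cache *slab_cache;	/* aliases lru.next */
			struct toy_slab *slab_page;		/* aliases lru.prev */
		};
	};
};

int main(void)
{
	struct toy_page page;

	/* The typed fields occupy exactly the storage of the two lru pointers. */
	assert((void *)&page.slab_cache == (void *)&page.lru.next);
	assert((void *)&page.slab_page == (void *)&page.lru.prev);
	printf("slab fields overlay the lru pointers\n");
	return 0;
}

Because the new fields alias lru.next and lru.prev exactly, slab can keep its per-page metadata where it always was while the layout is now documented in the type itself.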
-rw-r--r--    include/linux/mm_types.h    4
-rw-r--r--    mm/slab.c                   8
2 files changed, 8 insertions, 4 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5922c3452592..680a5e4e8cd5 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -110,6 +110,10 @@ struct page {
 		};
 
 		struct list_head list;	/* slobs list of pages */
+		struct {		/* slab fields */
+			struct kmem_cache *slab_cache;
+			struct slab *slab_page;
+		};
 	};
 
 	/* Remainder is not double word aligned */
diff --git a/mm/slab.c b/mm/slab.c
index e901a36e2520..af05147d7abd 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -496,25 +496,25 @@ static bool slab_max_order_set __initdata;
  */
 static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
 {
-	page->lru.next = (struct list_head *)cache;
+	page->slab_cache = cache;
 }
 
 static inline struct kmem_cache *page_get_cache(struct page *page)
 {
 	page = compound_head(page);
 	BUG_ON(!PageSlab(page));
-	return (struct kmem_cache *)page->lru.next;
+	return page->slab_cache;
 }
 
 static inline void page_set_slab(struct page *page, struct slab *slab)
 {
-	page->lru.prev = (struct list_head *)slab;
+	page->slab_page = slab;
 }
 
 static inline struct slab *page_get_slab(struct page *page)
 {
 	BUG_ON(!PageSlab(page));
-	return (struct slab *)page->lru.prev;
+	return page->slab_page;
 }
 
 static inline struct kmem_cache *virt_to_cache(const void *obj)
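As a rough illustration of why the casts in the hunk above disappear, here is a hedged userspace sketch (the toy_* types are hypothetical, not kernel code) contrasting the old accessor style, which stored the cache pointer in lru.next behind a cast, with the typed-field style this patch switches to.

/* Hedged sketch of the accessor change; toy_* types are hypothetical. */
#include <stdio.h>

struct toy_list_head { struct toy_list_head *next, *prev; };
struct toy_kmem_cache { const char *name; };
struct toy_slab { int inuse; };

struct toy_page {
	union {
		struct toy_list_head lru;
		struct {
			struct toy_kmem_cache *slab_cache;
			struct toy_slab *slab_page;
		};
	};
};

/* Old style: the cache pointer is smuggled through lru.next and cast back. */
static struct toy_kmem_cache *get_cache_casting(struct toy_page *page)
{
	return (struct toy_kmem_cache *)page->lru.next;
}

/* New style: the field already has the right type, no cast needed. */
static struct toy_kmem_cache *get_cache_typed(struct toy_page *page)
{
	return page->slab_cache;
}

int main(void)
{
	struct toy_kmem_cache cache = { .name = "toy-cache" };
	struct toy_page page;

	page.slab_cache = &cache;	/* same storage as page.lru.next */
	printf("casting: %s, typed: %s\n",
	       get_cache_casting(&page)->name, get_cache_typed(&page)->name);
	return 0;
}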