author		Christoph Lameter <clameter@sgi.com>	2007-05-06 17:49:41 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 15:12:54 -0400
commit		b49af68ff9fc5d6e0d96704a1843968b91cc73c6
tree		eb5e6d9425a9069cdfc45b09a1d0f61f1419d2c2
parent		6d7779538f765963ced45a3fa4bed7ba8d2c277d
Add virt_to_head_page and consolidate code in slab and slub
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 include/linux/mm.h |  6 ++++++
 mm/slab.c          |  9 ++++-----
 mm/slub.c          | 10 ++++------
 3 files changed, 14 insertions(+), 11 deletions(-)
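Background for the diff below: a slab can sit on a higher-order
(compound) page, and the allocators keep their per-slab metadata only
on the head page of that allocation, so every lookup from an object
pointer has to normalize the virt_to_page() result to the head page.
The new helper folds that two-step sequence into one call.  A
schematic sketch of the pieces involved (compound_head() is
paraphrased from roughly this era of include/linux/mm.h; the
PageTail()/first_page details are an assumption, not part of this
patch, and page_get_cache() is the slab.c accessor seen below):

	/* Schematic, not verbatim tree contents. */
	static inline struct page *compound_head(struct page *page)
	{
		if (unlikely(PageTail(page)))	 /* tail of a compound page? */
			return page->first_page; /* back-pointer to the head */
		return page;			 /* already a head/base page */
	}

	/* Example lookup, mirroring virt_to_cache() in the slab.c hunk:
	 * object address -> head page -> kmem_cache. */
	static struct kmem_cache *example_virt_to_cache(const void *obj)
	{
		struct page *page = virt_to_head_page(obj);
		return page_get_cache(page);	/* metadata lives on the head */
	}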
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 695b90437bbc..4670ebd1f622 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -286,6 +286,12 @@ static inline void get_page(struct page *page)
 	atomic_inc(&page->_count);
 }
 
+static inline struct page *virt_to_head_page(const void *x)
+{
+	struct page *page = virt_to_page(x);
+	return compound_head(page);
+}
+
 /*
  * Setup the page count before being freed into the page allocator for
  * the first time (boot or memory hotplug)
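With the helper in place, the remaining hunks convert the call sites:
in mm/slab.c, virt_to_cache() and virt_to_slab() pick up the new call
while page_get_slab() drops its own head-page resolution, and in
mm/slub.c, kmem_cache_free(), get_object_page(), kfree() and
krealloc() lose their open-coded compound_head() sequences.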
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -614,20 +614,19 @@ static inline void page_set_slab(struct page *page, struct slab *slab)
 
 static inline struct slab *page_get_slab(struct page *page)
 {
-	page = compound_head(page);
 	BUG_ON(!PageSlab(page));
 	return (struct slab *)page->lru.prev;
 }
 
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
-	struct page *page = virt_to_page(obj);
+	struct page *page = virt_to_head_page(obj);
 	return page_get_cache(page);
 }
 
 static inline struct slab *virt_to_slab(const void *obj)
 {
-	struct page *page = virt_to_page(obj);
+	struct page *page = virt_to_head_page(obj);
 	return page_get_slab(page);
 }
 
@@ -2876,7 +2875,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 
 	objp -= obj_offset(cachep);
 	kfree_debugcheck(objp);
-	page = virt_to_page(objp);
+	page = virt_to_head_page(objp);
 
 	slabp = page_get_slab(page);
 
@@ -3100,7 +3099,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		struct slab *slabp;
 		unsigned objnr;
 
-		slabp = page_get_slab(virt_to_page(objp));
+		slabp = page_get_slab(virt_to_head_page(objp));
 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
 		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
 	}
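Note the contract change above: page_get_slab() no longer resolves
the head page itself, so its callers are responsible for passing one,
which is why the two debug-check paths switch to the pattern (taken
verbatim from the hunk above):

	slabp = page_get_slab(virt_to_head_page(objp));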
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1323,9 +1323,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 {
 	struct page * page;
 
-	page = virt_to_page(x);
-
-	page = compound_head(page);
+	page = virt_to_head_page(x);
 
 	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
 		set_tracking(s, x, TRACK_FREE);
@@ -1336,7 +1334,7 @@ EXPORT_SYMBOL(kmem_cache_free);
 /* Figure out on which slab object the object resides */
 static struct page *get_object_page(const void *x)
 {
-	struct page *page = compound_head(virt_to_page(x));
+	struct page *page = virt_to_head_page(x);
 
 	if (!PageSlab(page))
 		return NULL;
@@ -2076,7 +2074,7 @@ void kfree(const void *x)
 	if (!x)
 		return;
 
-	page = compound_head(virt_to_page(x));
+	page = virt_to_head_page(x);
 
 	s = page->slab;
 
@@ -2112,7 +2110,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 		return NULL;
 	}
 
-	page = compound_head(virt_to_page(p));
+	page = virt_to_head_page(p);
 
 	new_cache = get_slab(new_size, flags);
 
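In mm/slub.c the same normalization means kfree() reads the cache
pointer straight from the head page (page->slab above), so objects
that land on a tail page of a higher-order slab are still freed
against the right kmem_cache; krealloc() benefits identically.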