author     Joonsoo Kim <iamjoonsoo.kim@lge.com>    2013-10-23 21:07:42 -0400
committer  Pekka Enberg <penberg@iki.fi>           2013-10-24 13:17:31 -0400
commit     68126702b419fd26ef4946e314bb3a1f57d3a53f (patch)
tree       af7acaf8d13921ab34271cb500d8454940a12e86 /mm
parent     07d417a1c6f1e386a2276b0cae8ae1d14b8a32cc (diff)
slab: overloading the RCU head over the LRU for RCU free
With build-time size checking, we can overload the RCU head over the LRU of struct page to free the pages of a slab in RCU context. This really helps to implement overloading struct slab over struct page, which eventually reduces the memory usage and cache footprint of SLAB.

Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
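To make the idea concrete outside the kernel, here is a minimal userspace C sketch of the same pattern, using simplified stand-in definitions (fake_page, fake_rcu_free and the two-pointer list_head/rcu_head below are illustrative, not the real kernel declarations): the storage of a list_head is large enough to overlay an rcu_head, a build-time assertion guards that size assumption just as the BUILD_BUG_ON added to kmem_cache_init() does in the patch, and the callback recovers the page from its embedded rcu_head the way kmem_rcu_free() uses container_of().

/* Simplified stand-ins for the kernel's two-pointer list_head and rcu_head. */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };
struct rcu_head  { struct rcu_head *next; void (*func)(struct rcu_head *); };

/* Hypothetical stand-in for struct page: lru and rcu_head share storage. */
struct fake_page {
        unsigned long flags;
        union {
                struct list_head lru;      /* normal use: list linkage */
                struct rcu_head rcu_head;  /* reused once the page is off all lists */
        };
};

/* Build-time size check, analogous to the BUILD_BUG_ON in the patch. */
_Static_assert(sizeof(struct list_head) >= sizeof(struct rcu_head),
               "lru must be large enough to hold an rcu_head");

/* Callback: recover the page from its embedded rcu_head (container_of style). */
static void fake_rcu_free(struct rcu_head *head)
{
        struct fake_page *page = (struct fake_page *)
                ((char *)head - offsetof(struct fake_page, rcu_head));
        printf("freeing page, flags=%lu\n", page->flags);
}

int main(void)
{
        struct fake_page page = { .flags = 42 };
        /* In the kernel, call_rcu(&page->rcu_head, kmem_rcu_free) defers this. */
        fake_rcu_free(&page.rcu_head);
        return 0;
}

The key design point is that the list linkage and the rcu_head are never needed at the same time: by the time a slab's page is being destroyed it is off every list, so the same storage can safely carry the RCU callback state, which is exactly what the comment added in slab_destroy() below states.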
Diffstat (limited to 'mm')
-rw-r--r--    mm/slab.c    68
1 file changed, 30 insertions, 38 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 7e1aabe2b5d8..84c4ed62c10d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -189,25 +189,6 @@ typedef unsigned int kmem_bufctl_t;
 #define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
 
 /*
- * struct slab_rcu
- *
- * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
- * arrange for kmem_freepages to be called via RCU. This is useful if
- * we need to approach a kernel structure obliquely, from its address
- * obtained without the usual locking. We can lock the structure to
- * stabilize it and check it's still at the given address, only if we
- * can be sure that the memory has not been meanwhile reused for some
- * other kind of object (which our subsystem's lock might corrupt).
- *
- * rcu_read_lock before reading the address, then rcu_read_unlock after
- * taking the spinlock within the structure expected at that address.
- */
-struct slab_rcu {
-        struct rcu_head head;
-        struct page *page;
-};
-
-/*
  * struct slab
  *
  * Manages the objs in a slab. Placed either at the beginning of mem allocated
@@ -215,14 +196,11 @@ struct slab_rcu {
  * Slabs are chained into three list: fully used, partial, fully free slabs.
  */
 struct slab {
-        union {
-                struct {
-                        struct list_head list;
-                        void *s_mem;            /* including colour offset */
-                        unsigned int inuse;     /* num of objs active in slab */
-                        kmem_bufctl_t free;
-                };
-                struct slab_rcu __slab_cover_slab_rcu;
+        struct {
+                struct list_head list;
+                void *s_mem;            /* including colour offset */
+                unsigned int inuse;     /* num of objs active in slab */
+                kmem_bufctl_t free;
         };
 };
 
@@ -1509,6 +1487,8 @@ void __init kmem_cache_init(void)
 {
         int i;
 
+        BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
+                                        sizeof(struct rcu_head));
         kmem_cache = &kmem_cache_boot;
         setup_node_pointer(kmem_cache);
 
@@ -1822,12 +1802,13 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 
 static void kmem_rcu_free(struct rcu_head *head)
 {
-        struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
-        struct kmem_cache *cachep = slab_rcu->page->slab_cache;
+        struct kmem_cache *cachep;
+        struct page *page;
 
-        kmem_freepages(cachep, slab_rcu->page);
-        if (OFF_SLAB(cachep))
-                kmem_cache_free(cachep->slabp_cache, slab_rcu);
+        page = container_of(head, struct page, rcu_head);
+        cachep = page->slab_cache;
+
+        kmem_freepages(cachep, page);
 }
 
 #if DEBUG
@@ -2048,16 +2029,27 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 
         slab_destroy_debugcheck(cachep, slabp);
         if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
-                struct slab_rcu *slab_rcu;
+                struct rcu_head *head;
+
+                /*
+                 * RCU free overloads the RCU head over the LRU.
+                 * slab_page has been overloeaded over the LRU,
+                 * however it is not used from now on so that
+                 * we can use it safely.
+                 */
+                head = (void *)&page->rcu_head;
+                call_rcu(head, kmem_rcu_free);
 
-                slab_rcu = (struct slab_rcu *)slabp;
-                slab_rcu->page = page;
-                call_rcu(&slab_rcu->head, kmem_rcu_free);
         } else {
                 kmem_freepages(cachep, page);
-                if (OFF_SLAB(cachep))
-                        kmem_cache_free(cachep->slabp_cache, slabp);
         }
+
+        /*
+         * From now on, we don't use slab management
+         * although actual page can be freed in rcu context
+         */
+        if (OFF_SLAB(cachep))
+                kmem_cache_free(cachep->slabp_cache, slabp);
 }
 
 /**