about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
authorJoonsoo Kim <iamjoonsoo.kim@lge.com>2013-10-23 21:07:42 -0400
committerPekka Enberg <penberg@iki.fi>2013-10-24 13:17:31 -0400
commit68126702b419fd26ef4946e314bb3a1f57d3a53f (patch)
treeaf7acaf8d13921ab34271cb500d8454940a12e86 /include
parent07d417a1c6f1e386a2276b0cae8ae1d14b8a32cc (diff)
slab: overloading the RCU head over the LRU for RCU free
With build-time size checking, we can overload the RCU head over the LRU of struct page to free pages of a slab in RCU context. This really helps to implement overloading the struct slab over the struct page, and this eventually reduces the memory usage and cache footprint of the SLAB. Acked-by: Andi Kleen <ak@linux.intel.com> Acked-by: Christoph Lameter <cl@linux.com> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Signed-off-by: Pekka Enberg <penberg@iki.fi>
Diffstat (limited to 'include')
-rw-r--r--include/linux/mm_types.h3
-rw-r--r--include/linux/slab.h9
2 files changed, 11 insertions, 1 deletion
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index faf4b7c1ad12..959cb369b197 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -130,6 +130,9 @@ struct page {
130 130
131 struct list_head list; /* slobs list of pages */ 131 struct list_head list; /* slobs list of pages */
132 struct slab *slab_page; /* slab fields */ 132 struct slab *slab_page; /* slab fields */
133 struct rcu_head rcu_head; /* Used by SLAB
134 * when destroying via RCU
135 */
133 }; 136 };
134 137
135 /* Remainder is not double word aligned */ 138 /* Remainder is not double word aligned */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6c5cc0ea8713..caaad51fee1f 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -51,7 +51,14 @@
51 * } 51 * }
52 * rcu_read_unlock(); 52 * rcu_read_unlock();
53 * 53 *
54 * See also the comment on struct slab_rcu in mm/slab.c. 54 * This is useful if we need to approach a kernel structure obliquely,
55 * from its address obtained without the usual locking. We can lock
56 * the structure to stabilize it and check it's still at the given address,
57 * only if we can be sure that the memory has not been meanwhile reused
58 * for some other kind of object (which our subsystem's lock might corrupt).
59 *
60 * rcu_read_lock before reading the address, then rcu_read_unlock after
61 * taking the spinlock within the structure expected at that address.
55 */ 62 */
56#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ 63#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
57#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ 64#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */