Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/mm_types.h | 24
-rw-r--r--   include/linux/slab.h     |  9
-rw-r--r--   include/linux/slab_def.h |  4
-rw-r--r--   include/linux/slub_def.h |  2
4 files changed, 27 insertions, 12 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 011eb85d7b0f..bd299418a934 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -44,18 +44,22 @@ struct page {
 	/* First double word block */
 	unsigned long flags;		/* Atomic flags, some possibly
 					 * updated asynchronously */
-	struct address_space *mapping;	/* If low bit clear, points to
-					 * inode address_space, or NULL.
-					 * If page mapped as anonymous
-					 * memory, low bit is set, and
-					 * it points to anon_vma object:
-					 * see PAGE_MAPPING_ANON below.
-					 */
+	union {
+		struct address_space *mapping;	/* If low bit clear, points to
+						 * inode address_space, or NULL.
+						 * If page mapped as anonymous
+						 * memory, low bit is set, and
+						 * it points to anon_vma object:
+						 * see PAGE_MAPPING_ANON below.
+						 */
+		void *s_mem;			/* slab first object */
+	};
+
 	/* Second double word */
 	struct {
 		union {
 			pgoff_t index;		/* Our offset within mapping. */
-			void *freelist;		/* slub/slob first free object */
+			void *freelist;		/* sl[aou]b first free object */
 			bool pfmemalloc;	/* If set by the page allocator,
 						 * ALLOC_NO_WATERMARKS was set
 						 * and the low watermark was not
@@ -111,6 +115,7 @@ struct page {
 			};
 			atomic_t _count;	/* Usage count, see below. */
 		};
+		unsigned int active;		/* SLAB */
 	};
 };
 
@@ -132,6 +137,9 @@ struct page {
 
 	struct list_head list;		/* slobs list of pages */
 	struct slab *slab_page;		/* slab fields */
+	struct rcu_head rcu_head;	/* Used by SLAB
+					 * when destroying via RCU
+					 */
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
 	pgtable_t pmd_huge_pte;		/* protected by page->ptl */
 #endif
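
The mm_types.h hunks above fold slab metadata directly into struct page: s_mem points at the slab's first object, active counts the objects handed out from that slab, and the existing freelist pointer holds the per-slab free list. The following is a minimal, illustrative sketch of how a SLAB-style allocator could use those three fields together; the types and helpers here (fake_page, fake_cache, obj_at, alloc_from_slab) are stand-ins for explanation only, not the kernel's actual code.

#include <stddef.h>

/*
 * Illustrative sketch only (simplified types, not the kernel's):
 * per-slab metadata kept in the page descriptor.  s_mem is the
 * address of the first object, freelist points at an array of free
 * object indices, and active counts objects currently in use.
 */
struct fake_page {
	void *s_mem;		/* slab first object */
	void *freelist;		/* array of free object indices */
	unsigned int active;	/* objects in use in this slab */
};

struct fake_cache {
	size_t size;		/* object size, including alignment padding */
	unsigned int num;	/* objects per slab */
};

/* Hypothetical helper: address of object 'idx' inside the slab page. */
static void *obj_at(const struct fake_cache *c, const struct fake_page *pg,
		    unsigned int idx)
{
	return (char *)pg->s_mem + c->size * idx;
}

/* Hypothetical fast path: hand out the next free object, if any. */
static void *alloc_from_slab(const struct fake_cache *c, struct fake_page *pg)
{
	unsigned int *fl = pg->freelist;

	if (pg->active >= c->num)
		return NULL;			/* slab is full */
	return obj_at(c, pg, fl[pg->active++]);
}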
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 74f105847d13..c2bba248fa63 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -53,7 +53,14 @@
  * }
  * rcu_read_unlock();
  *
- * See also the comment on struct slab_rcu in mm/slab.c.
+ * This is useful if we need to approach a kernel structure obliquely,
+ * from its address obtained without the usual locking. We can lock
+ * the structure to stabilize it and check it's still at the given address,
+ * only if we can be sure that the memory has not been meanwhile reused
+ * for some other kind of object (which our subsystem's lock might corrupt).
+ *
+ * rcu_read_lock before reading the address, then rcu_read_unlock after
+ * taking the spinlock within the structure expected at that address.
  */
 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
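
The expanded SLAB_DESTROY_BY_RCU comment describes a lockless-lookup idiom: dereference an object address under rcu_read_lock(), take the object's own spinlock to stabilize it, and re-check its identity before trusting it. This is safe only because the cache guarantees the memory is reused solely for objects of the same type. A rough sketch of that idiom follows; struct my_obj and my_hash_lookup() are hypothetical, and the sketch assumes the free path takes obj->lock (or marks the object dead under it) before handing the object back to the cache.

#include <linux/spinlock.h>
#include <linux/rcupdate.h>

/*
 * Sketch of the lookup idiom described in the comment above, for a
 * cache created with SLAB_DESTROY_BY_RCU.  The point is the ordering:
 * RCU guarantees the memory stays a my_obj for the whole read-side
 * section (it may be freed and reused, but only as another my_obj),
 * so taking obj->lock cannot corrupt an unrelated object, and the
 * key re-check catches the reuse case.
 */
struct my_obj {
	spinlock_t lock;
	unsigned long key;
	/* ... payload ... */
};

/* Hypothetical lockless lookup, e.g. an RCU-protected hash table. */
struct my_obj *my_hash_lookup(unsigned long key);

static struct my_obj *my_lookup_stable(unsigned long key)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = my_hash_lookup(key);
	if (obj) {
		spin_lock(&obj->lock);		/* stabilize the object */
		if (obj->key != key) {		/* memory was reused? */
			spin_unlock(&obj->lock);
			obj = NULL;		/* caller typically retries */
		}
	}
	rcu_read_unlock();
	return obj;	/* returned with obj->lock held, or NULL */
}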
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index e9346b4f1ef4..09bfffb08a56 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -27,8 +27,8 @@ struct kmem_cache {
 
 	size_t colour;			/* cache colouring range */
 	unsigned int colour_off;	/* colour offset */
-	struct kmem_cache *slabp_cache;
-	unsigned int slab_size;
+	struct kmem_cache *freelist_cache;
+	unsigned int freelist_size;
 
 	/* constructor func */
 	void (*ctor)(void *obj);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index cc0b67eada42..f56bfa9e4526 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -11,7 +11,7 @@
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
 	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
-	FREE_FASTPATH,		/* Free to cpu slub */
+	FREE_FASTPATH,		/* Free to cpu slab */
 	FREE_SLOWPATH,		/* Freeing not to cpu slab */
 	FREE_FROZEN,		/* Freeing to frozen slab */
 	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */