author     Christoph Lameter <cl@linux.com>   2011-06-01 13:25:48 -0400
committer  Pekka Enberg <penberg@kernel.org>  2011-07-02 06:26:53 -0400
commit     fc9bb8c768abe7ae10861c3510e01a95f98d5933 (patch)
tree       b338a0c4e5673c4005898cf6f54270a6de9db399 /include/linux/mm_types.h
parent     8cb0a5068f4108e8ca60d5e0bcfbe6901adcfaef (diff)
mm: Rearrange struct page
We need to be able to use cmpxchg_double on the freelist and object count field in struct page. Rearrange the fields in struct page according to doubleword entities so that the freelist pointer comes before the counters. Do the rearranging with a future in mind where we use more doubleword atomics to avoid locking of updates to flags/mapping or lru pointers.

Create another union to allow access to counters in struct page as a single unsigned long value.

The doublewords must be properly aligned for cmpxchg_double to work. Sadly this increases the size of struct page by one word on some architectures. But as a result struct page is now cacheline aligned on x86_64.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
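The point of the doubleword blocks is that two adjacent, properly aligned words can be compared and swapped in a single atomic step. A minimal sketch of that pattern, assuming an architecture that provides the kernel's cmpxchg_double() macro; freelist and counters are the fields this patch introduces, but the helper itself is illustrative and not part of the patch:

/*
 * Illustrative only, not added by this patch: update the SLUB freelist
 * pointer and the packed counters word in one atomic operation.  This
 * works only because the two fields form an aligned doubleword, which
 * is exactly what the rearrangement below guarantees.
 */
static inline bool slab_update_freelist(struct page *page,
		void *old_freelist, unsigned long old_counters,
		void *new_freelist, unsigned long new_counters)
{
	/* Succeeds only if both words still hold the expected old
	 * values; both are replaced or neither is. */
	return cmpxchg_double(&page->freelist, &page->counters,
			      old_freelist, old_counters,
			      new_freelist, new_counters);
}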
Diffstat (limited to 'include/linux/mm_types.h')
-rw-r--r--  include/linux/mm_types.h  |  87
1 file changed, 59 insertions(+), 28 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index e5fb2a70518..3d76a433d52 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -30,24 +30,60 @@ struct address_space;
  * moment. Note that we have no way to track which tasks are using
  * a page, though if it is a pagecache page, rmap structures can tell us
  * who is mapping it.
+ *
+ * The objects in struct page are organized in double word blocks in
+ * order to allows us to use atomic double word operations on portions
+ * of struct page. That is currently only used by slub but the arrangement
+ * allows the use of atomic double word operations on the flags/mapping
+ * and lru list pointers also.
  */
 struct page {
+	/* First double word block */
 	unsigned long flags;		/* Atomic flags, some possibly
 					 * updated asynchronously */
-	atomic_t _count;		/* Usage count, see below. */
+	struct address_space *mapping;	/* If low bit clear, points to
+					 * inode address_space, or NULL.
+					 * If page mapped as anonymous
+					 * memory, low bit is set, and
+					 * it points to anon_vma object:
+					 * see PAGE_MAPPING_ANON below.
+					 */
+	/* Second double word */
 	union {
-		atomic_t _mapcount;	/* Count of ptes mapped in mms,
-					 * to show when page is mapped
-					 * & limit reverse map searches.
-					 */
-		struct {		/* SLUB */
-			unsigned inuse:16;
-			unsigned objects:15;
-			unsigned frozen:1;
-		};
-	};
+		struct {
+			pgoff_t index;		/* Our offset within mapping. */
+			atomic_t _mapcount;	/* Count of ptes mapped in mms,
+						 * to show when page is mapped
+						 * & limit reverse map searches.
+						 */
+			atomic_t _count;	/* Usage count, see below. */
+		};
+
+		struct {			/* SLUB cmpxchg_double area */
+			void *freelist;
+			union {
+				unsigned long counters;
+				struct {
+					unsigned inuse:16;
+					unsigned objects:15;
+					unsigned frozen:1;
+					/*
+					 * Kernel may make use of this field even when slub
+					 * uses the rest of the double word!
+					 */
+					atomic_t _count;
+				};
+			};
+		};
+	};
+
+	/* Third double word block */
+	struct list_head lru;		/* Pageout list, eg. active_list
+					 * protected by zone->lru_lock !
+					 */
+
+	/* Remainder is not double word aligned */
 	union {
-		struct {
 		unsigned long private;		/* Mapping-private opaque data:
 					 	 * usually used for buffer_heads
 						 * if PagePrivate set; used for
@@ -55,27 +91,13 @@ struct page {
 						 * indicates order in the buddy
 						 * system if PG_buddy is set.
 						 */
-		struct address_space *mapping;	/* If low bit clear, points to
-						 * inode address_space, or NULL.
-						 * If page mapped as anonymous
-						 * memory, low bit is set, and
-						 * it points to anon_vma object:
-						 * see PAGE_MAPPING_ANON below.
-						 */
-		};
 #if USE_SPLIT_PTLOCKS
 	spinlock_t ptl;
 #endif
 	struct kmem_cache *slab;	/* SLUB: Pointer to slab */
 	struct page *first_page;	/* Compound tail pages */
 	};
-	union {
-		pgoff_t index;		/* Our offset within mapping. */
-		void *freelist;		/* SLUB: freelist req. slab lock */
-	};
-	struct list_head lru;		/* Pageout list, eg. active_list
-					 * protected by zone->lru_lock !
-					 */
+
 	/*
 	 * On machines where all RAM is mapped into kernel address space,
 	 * we can simply calculate the virtual address. On machines with
@@ -101,7 +123,16 @@ struct page {
 	 */
 	void *shadow;
 #endif
-};
+}
+/*
+ * If another subsystem starts using the double word pairing for atomic
+ * operations on struct page then it must change the #if to ensure
+ * proper alignment of the page struct.
+ */
+#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL)
+	__attribute__((__aligned__(2*sizeof(unsigned long))))
+#endif
+;
 
 typedef unsigned long __nocast vm_flags_t;
 
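The counters union added above gives the same bits two views: field by field through inuse/objects/frozen, and as one unsigned long through counters, which is what the counters half of a doubleword cmpxchg needs. A hedged sketch of how a caller might use that, modeled on the pattern SLUB adopts; the helper name is illustrative, not from this patch:

/*
 * Illustrative only: snapshot all the packed counters with a single
 * load, edit them through the bitfield view, and hand back one word
 * suitable for passing to cmpxchg_double() as the counters value.
 */
static inline unsigned long counters_with_frozen(struct page *page)
{
	struct page tmp;

	tmp.counters = page->counters;	/* one load captures inuse/objects/frozen */
	tmp.frozen = 1;			/* edit via the bitfield view */
	return tmp.counters;		/* single word, ready to cmpxchg */
}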