diff options
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/mm_types.h | 87 |
1 file changed, 59 insertions, 28 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index e5fb2a70518b..3d76a433d52f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -30,24 +30,60 @@ struct address_space; | |||
30 | * moment. Note that we have no way to track which tasks are using | 30 | * moment. Note that we have no way to track which tasks are using |
31 | * a page, though if it is a pagecache page, rmap structures can tell us | 31 | * a page, though if it is a pagecache page, rmap structures can tell us |
32 | * who is mapping it. | 32 | * who is mapping it. |
33 | * | ||
34 | * The objects in struct page are organized in double word blocks in | ||
35 | * order to allow us to use atomic double word operations on portions | ||
36 | * of struct page. That is currently only used by slub but the arrangement | ||
37 | * allows the use of atomic double word operations on the flags/mapping | ||
38 | * and lru list pointers also. | ||
33 | */ | 39 | */ |
34 | struct page { | 40 | struct page { |
41 | /* First double word block */ | ||
35 | unsigned long flags; /* Atomic flags, some possibly | 42 | unsigned long flags; /* Atomic flags, some possibly |
36 | * updated asynchronously */ | 43 | * updated asynchronously */ |
37 | atomic_t _count; /* Usage count, see below. */ | 44 | struct address_space *mapping; /* If low bit clear, points to |
45 | * inode address_space, or NULL. | ||
46 | * If page mapped as anonymous | ||
47 | * memory, low bit is set, and | ||
48 | * it points to anon_vma object: | ||
49 | * see PAGE_MAPPING_ANON below. | ||
50 | */ | ||
51 | /* Second double word */ | ||
38 | union { | 52 | union { |
39 | atomic_t _mapcount; /* Count of ptes mapped in mms, | 53 | struct { |
40 | * to show when page is mapped | 54 | pgoff_t index; /* Our offset within mapping. */ |
41 | * & limit reverse map searches. | 55 | atomic_t _mapcount; /* Count of ptes mapped in mms, |
56 | * to show when page is mapped | ||
57 | * & limit reverse map searches. | ||
58 | */ | ||
59 | atomic_t _count; /* Usage count, see below. */ | ||
60 | }; | ||
61 | |||
62 | struct { /* SLUB cmpxchg_double area */ | ||
63 | void *freelist; | ||
64 | union { | ||
65 | unsigned long counters; | ||
66 | struct { | ||
67 | unsigned inuse:16; | ||
68 | unsigned objects:15; | ||
69 | unsigned frozen:1; | ||
70 | /* | ||
71 | * Kernel may make use of this field even when slub | ||
72 | * uses the rest of the double word! | ||
42 | */ | 73 | */ |
43 | struct { /* SLUB */ | 74 | atomic_t _count; |
44 | unsigned inuse:16; | 75 | }; |
45 | unsigned objects:15; | 76 | }; |
46 | unsigned frozen:1; | ||
47 | }; | 77 | }; |
48 | }; | 78 | }; |
79 | |||
80 | /* Third double word block */ | ||
81 | struct list_head lru; /* Pageout list, eg. active_list | ||
82 | * protected by zone->lru_lock ! | ||
83 | */ | ||
84 | |||
85 | /* Remainder is not double word aligned */ | ||
49 | union { | 86 | union { |
50 | struct { | ||
51 | unsigned long private; /* Mapping-private opaque data: | 87 | unsigned long private; /* Mapping-private opaque data: |
52 | * usually used for buffer_heads | 88 | * usually used for buffer_heads |
53 | * if PagePrivate set; used for | 89 | * if PagePrivate set; used for |
@@ -55,27 +91,13 @@ struct page { | |||
55 | * indicates order in the buddy | 91 | * indicates order in the buddy |
56 | * system if PG_buddy is set. | 92 | * system if PG_buddy is set. |
57 | */ | 93 | */ |
58 | struct address_space *mapping; /* If low bit clear, points to | ||
59 | * inode address_space, or NULL. | ||
60 | * If page mapped as anonymous | ||
61 | * memory, low bit is set, and | ||
62 | * it points to anon_vma object: | ||
63 | * see PAGE_MAPPING_ANON below. | ||
64 | */ | ||
65 | }; | ||
66 | #if USE_SPLIT_PTLOCKS | 94 | #if USE_SPLIT_PTLOCKS |
67 | spinlock_t ptl; | 95 | spinlock_t ptl; |
68 | #endif | 96 | #endif |
69 | struct kmem_cache *slab; /* SLUB: Pointer to slab */ | 97 | struct kmem_cache *slab; /* SLUB: Pointer to slab */ |
70 | struct page *first_page; /* Compound tail pages */ | 98 | struct page *first_page; /* Compound tail pages */ |
71 | }; | 99 | }; |
72 | union { | 100 | |
73 | pgoff_t index; /* Our offset within mapping. */ | ||
74 | void *freelist; /* SLUB: freelist req. slab lock */ | ||
75 | }; | ||
76 | struct list_head lru; /* Pageout list, eg. active_list | ||
77 | * protected by zone->lru_lock ! | ||
78 | */ | ||
79 | /* | 101 | /* |
80 | * On machines where all RAM is mapped into kernel address space, | 102 | * On machines where all RAM is mapped into kernel address space, |
81 | * we can simply calculate the virtual address. On machines with | 103 | * we can simply calculate the virtual address. On machines with |
@@ -101,7 +123,16 @@ struct page { | |||
101 | */ | 123 | */ |
102 | void *shadow; | 124 | void *shadow; |
103 | #endif | 125 | #endif |
104 | }; | 126 | } |
127 | /* | ||
128 | * If another subsystem starts using the double word pairing for atomic | ||
129 | * operations on struct page then it must change the #if to ensure | ||
130 | * proper alignment of the page struct. | ||
131 | */ | ||
132 | #if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL) | ||
133 | __attribute__((__aligned__(2*sizeof(unsigned long)))) | ||
134 | #endif | ||
135 | ; | ||
105 | 136 | ||
106 | typedef unsigned long __nocast vm_flags_t; | 137 | typedef unsigned long __nocast vm_flags_t; |
107 | 138 | ||