author     Dave Hansen <haveblue@us.ibm.com>      2005-10-29 21:16:53 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-10-30 00:40:44 -0400
commit     bdc8cb984576ab5b550c8b24c6fa111a873503e3
tree       f94548988874caa10d770e6e65bc50c925abf825 /include/linux/mmzone.h
parent     208d54e5513c0c02d85af0990901354c74364d5c
[PATCH] memory hotplug locking: zone span seqlock
See the "fixup bad_range()" patch for more information, but this actually creates a the lock to protect things making assumptions about a zone's size staying constant at runtime. Signed-off-by: Dave Hansen <haveblue@us.ibm.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h | 15 +++++++++++++++
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e050d68963a1..f5fa3082fd6a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -12,6 +12,7 @@
 #include <linux/threads.h>
 #include <linux/numa.h>
 #include <linux/init.h>
+#include <linux/seqlock.h>
 #include <asm/atomic.h>
 
 /* Free memory management - zoned buddy allocator. */
@@ -137,6 +138,10 @@ struct zone {
 	 * free areas of different sizes
 	 */
 	spinlock_t		lock;
+#ifdef CONFIG_MEMORY_HOTPLUG
+	/* see spanned/present_pages for more description */
+	seqlock_t		span_seqlock;
+#endif
 	struct free_area	free_area[MAX_ORDER];
 
 
@@ -220,6 +225,16 @@ struct zone {
 	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
 	unsigned long		zone_start_pfn;
 
+	/*
+	 * zone_start_pfn, spanned_pages and present_pages are all
+	 * protected by span_seqlock.  It is a seqlock because it has
+	 * to be read outside of zone->lock, and it is done in the main
+	 * allocator path.  But, it is written quite infrequently.
+	 *
+	 * The lock is declared along with zone->lock because it is
+	 * frequently read in proximity to zone->lock.  It's good to
+	 * give them a chance of being in the same cacheline.
+	 */
 	unsigned long		spanned_pages;	/* total size, including holes */
 	unsigned long		present_pages;	/* amount of memory (excluding holes) */
 
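For the writer side, which the rest of the hotplug series supplies, span
updates happen under write_seqlock() so the readers above always observe a
consistent zone_start_pfn/spanned_pages pair. A sketch under the same
CONFIG_MEMORY_HOTPLUG assumption; grow_zone_span() here illustrates the
hot-add path and is not quoted from this commit:

    #include <linux/kernel.h>
    #include <linux/mmzone.h>
    #include <linux/seqlock.h>

    /*
     * Grow a zone to cover [start_pfn, end_pfn) during memory hot-add.
     * write_seqlock() bumps the sequence count, forcing concurrent
     * readers to retry; writes like this are rare, so the cost of
     * making every in-flight reader retry is negligible.
     */
    static void grow_zone_span(struct zone *zone,
                               unsigned long start_pfn,
                               unsigned long end_pfn)
    {
            unsigned long old_end_pfn;

            write_seqlock(&zone->span_seqlock);

            old_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
            if (start_pfn < zone->zone_start_pfn)
                    zone->zone_start_pfn = start_pfn;
            zone->spanned_pages = max(end_pfn, old_end_pfn) -
                                  zone->zone_start_pfn;

            write_sequnlock(&zone->span_seqlock);
    }

Note that span_seqlock sits next to zone->lock purely for cache locality,
as the new comment in the diff explains; the two locks protect different
data and are taken independently.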