diff options
author | Dave Hansen <haveblue@us.ibm.com> | 2005-10-29 21:16:53 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-10-30 00:40:44 -0400 |
commit | bdc8cb984576ab5b550c8b24c6fa111a873503e3 (patch) | |
tree | f94548988874caa10d770e6e65bc50c925abf825 /include | |
parent | 208d54e5513c0c02d85af0990901354c74364d5c (diff) |
[PATCH] memory hotplug locking: zone span seqlock
See the "fixup bad_range()" patch for more information, but this actually
creates the lock to protect code that makes assumptions about a zone's size
staying constant at runtime.
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/memory_hotplug.h | 39 | ||||
-rw-r--r-- | include/linux/mmzone.h | 15 |
2 files changed, 52 insertions, 2 deletions
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index e8103be9d528..4b08bc947578 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
@@ -16,13 +16,36 @@ void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) | |||
16 | static inline | 16 | static inline |
17 | void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) | 17 | void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) |
18 | { | 18 | { |
19 | spin_lock_irqrestore(&pgdat->node_size_lock, *flags); | 19 | spin_unlock_irqrestore(&pgdat->node_size_lock, *flags); |
20 | } | 20 | } |
21 | static inline | 21 | static inline |
22 | void pgdat_resize_init(struct pglist_data *pgdat) | 22 | void pgdat_resize_init(struct pglist_data *pgdat) |
23 | { | 23 | { |
24 | spin_lock_init(&pgdat->node_size_lock); | 24 | spin_lock_init(&pgdat->node_size_lock); |
25 | } | 25 | } |
26 | /* | ||
27 | * Zone resizing functions | ||
28 | */ | ||
29 | static inline unsigned zone_span_seqbegin(struct zone *zone) | ||
30 | { | ||
31 | return read_seqbegin(&zone->span_seqlock); | ||
32 | } | ||
33 | static inline int zone_span_seqretry(struct zone *zone, unsigned iv) | ||
34 | { | ||
35 | return read_seqretry(&zone->span_seqlock, iv); | ||
36 | } | ||
37 | static inline void zone_span_writelock(struct zone *zone) | ||
38 | { | ||
39 | write_seqlock(&zone->span_seqlock); | ||
40 | } | ||
41 | static inline void zone_span_writeunlock(struct zone *zone) | ||
42 | { | ||
43 | write_sequnlock(&zone->span_seqlock); | ||
44 | } | ||
45 | static inline void zone_seqlock_init(struct zone *zone) | ||
46 | { | ||
47 | seqlock_init(&zone->span_seqlock); | ||
48 | } | ||
26 | #else /* ! CONFIG_MEMORY_HOTPLUG */ | 49 | #else /* ! CONFIG_MEMORY_HOTPLUG */ |
27 | /* | 50 | /* |
28 | * Stub functions for when hotplug is off | 51 | * Stub functions for when hotplug is off |
@@ -30,5 +53,17 @@ void pgdat_resize_init(struct pglist_data *pgdat) | |||
30 | static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} | 53 | static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} |
31 | static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} | 54 | static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} |
32 | static inline void pgdat_resize_init(struct pglist_data *pgdat) {} | 55 | static inline void pgdat_resize_init(struct pglist_data *pgdat) {} |
33 | #endif | 56 | |
57 | static inline unsigned zone_span_seqbegin(struct zone *zone) | ||
58 | { | ||
59 | return 0; | ||
60 | } | ||
61 | static inline int zone_span_seqretry(struct zone *zone, unsigned iv) | ||
62 | { | ||
63 | return 0; | ||
64 | } | ||
65 | static inline void zone_span_writelock(struct zone *zone) {} | ||
66 | static inline void zone_span_writeunlock(struct zone *zone) {} | ||
67 | static inline void zone_seqlock_init(struct zone *zone) {} | ||
68 | #endif /* ! CONFIG_MEMORY_HOTPLUG */ | ||
34 | #endif /* __LINUX_MEMORY_HOTPLUG_H */ | 69 | #endif /* __LINUX_MEMORY_HOTPLUG_H */ |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index e050d68963a1..f5fa3082fd6a 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/threads.h> | 12 | #include <linux/threads.h> |
13 | #include <linux/numa.h> | 13 | #include <linux/numa.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/seqlock.h> | ||
15 | #include <asm/atomic.h> | 16 | #include <asm/atomic.h> |
16 | 17 | ||
17 | /* Free memory management - zoned buddy allocator. */ | 18 | /* Free memory management - zoned buddy allocator. */ |
@@ -137,6 +138,10 @@ struct zone { | |||
137 | * free areas of different sizes | 138 | * free areas of different sizes |
138 | */ | 139 | */ |
139 | spinlock_t lock; | 140 | spinlock_t lock; |
141 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
142 | /* see spanned/present_pages for more description */ | ||
143 | seqlock_t span_seqlock; | ||
144 | #endif | ||
140 | struct free_area free_area[MAX_ORDER]; | 145 | struct free_area free_area[MAX_ORDER]; |
141 | 146 | ||
142 | 147 | ||
@@ -220,6 +225,16 @@ struct zone { | |||
220 | /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ | 225 | /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ |
221 | unsigned long zone_start_pfn; | 226 | unsigned long zone_start_pfn; |
222 | 227 | ||
228 | /* | ||
229 | * zone_start_pfn, spanned_pages and present_pages are all | ||
230 | * protected by span_seqlock. It is a seqlock because it has | ||
231 | * to be read outside of zone->lock, and it is done in the main | ||
232 | * allocator path. But, it is written quite infrequently. | ||
233 | * | ||
234 | * The lock is declared along with zone->lock because it is | ||
235 | * frequently read in proximity to zone->lock. It's good to | ||
236 | * give them a chance of being in the same cacheline. | ||
237 | */ | ||
223 | unsigned long spanned_pages; /* total size, including holes */ | 238 | unsigned long spanned_pages; /* total size, including holes */ |
224 | unsigned long present_pages; /* amount of memory (excluding holes) */ | 239 | unsigned long present_pages; /* amount of memory (excluding holes) */ |
225 | 240 | ||