diff options
-rw-r--r--   include/linux/memory_hotplug.h | 39
-rw-r--r--   include/linux/mmzone.h         | 15
-rw-r--r--   mm/page_alloc.c                | 19
3 files changed, 66 insertions, 7 deletions
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index e8103be9d528..4b08bc947578 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -16,13 +16,36 @@ void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) | |||
16 | static inline | 16 | static inline |
17 | void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) | 17 | void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) |
18 | { | 18 | { |
19 | spin_lock_irqrestore(&pgdat->node_size_lock, *flags); | 19 | spin_unlock_irqrestore(&pgdat->node_size_lock, *flags); |
20 | } | 20 | } |
21 | static inline | 21 | static inline |
22 | void pgdat_resize_init(struct pglist_data *pgdat) | 22 | void pgdat_resize_init(struct pglist_data *pgdat) |
23 | { | 23 | { |
24 | spin_lock_init(&pgdat->node_size_lock); | 24 | spin_lock_init(&pgdat->node_size_lock); |
25 | } | 25 | } |
26 | /* | ||
27 | * Zone resizing functions | ||
28 | */ | ||
29 | static inline unsigned zone_span_seqbegin(struct zone *zone) | ||
30 | { | ||
31 | return read_seqbegin(&zone->span_seqlock); | ||
32 | } | ||
33 | static inline int zone_span_seqretry(struct zone *zone, unsigned iv) | ||
34 | { | ||
35 | return read_seqretry(&zone->span_seqlock, iv); | ||
36 | } | ||
37 | static inline void zone_span_writelock(struct zone *zone) | ||
38 | { | ||
39 | write_seqlock(&zone->span_seqlock); | ||
40 | } | ||
41 | static inline void zone_span_writeunlock(struct zone *zone) | ||
42 | { | ||
43 | write_sequnlock(&zone->span_seqlock); | ||
44 | } | ||
45 | static inline void zone_seqlock_init(struct zone *zone) | ||
46 | { | ||
47 | seqlock_init(&zone->span_seqlock); | ||
48 | } | ||
26 | #else /* ! CONFIG_MEMORY_HOTPLUG */ | 49 | #else /* ! CONFIG_MEMORY_HOTPLUG */ |
27 | /* | 50 | /* |
28 | * Stub functions for when hotplug is off | 51 | * Stub functions for when hotplug is off |
@@ -30,5 +53,17 @@ void pgdat_resize_init(struct pglist_data *pgdat) | |||
30 | static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} | 53 | static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} |
31 | static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} | 54 | static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} |
32 | static inline void pgdat_resize_init(struct pglist_data *pgdat) {} | 55 | static inline void pgdat_resize_init(struct pglist_data *pgdat) {} |
33 | #endif | 56 | |
57 | static inline unsigned zone_span_seqbegin(struct zone *zone) | ||
58 | { | ||
59 | return 0; | ||
60 | } | ||
61 | static inline int zone_span_seqretry(struct zone *zone, unsigned iv) | ||
62 | { | ||
63 | return 0; | ||
64 | } | ||
65 | static inline void zone_span_writelock(struct zone *zone) {} | ||
66 | static inline void zone_span_writeunlock(struct zone *zone) {} | ||
67 | static inline void zone_seqlock_init(struct zone *zone) {} | ||
68 | #endif /* ! CONFIG_MEMORY_HOTPLUG */ | ||
34 | #endif /* __LINUX_MEMORY_HOTPLUG_H */ | 69 | #endif /* __LINUX_MEMORY_HOTPLUG_H */ |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e050d68963a1..f5fa3082fd6a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/threads.h> | 12 | #include <linux/threads.h> |
13 | #include <linux/numa.h> | 13 | #include <linux/numa.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/seqlock.h> | ||
15 | #include <asm/atomic.h> | 16 | #include <asm/atomic.h> |
16 | 17 | ||
17 | /* Free memory management - zoned buddy allocator. */ | 18 | /* Free memory management - zoned buddy allocator. */ |
@@ -137,6 +138,10 @@ struct zone { | |||
137 | * free areas of different sizes | 138 | * free areas of different sizes |
138 | */ | 139 | */ |
139 | spinlock_t lock; | 140 | spinlock_t lock; |
141 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
142 | /* see spanned/present_pages for more description */ | ||
143 | seqlock_t span_seqlock; | ||
144 | #endif | ||
140 | struct free_area free_area[MAX_ORDER]; | 145 | struct free_area free_area[MAX_ORDER]; |
141 | 146 | ||
142 | 147 | ||
@@ -220,6 +225,16 @@ struct zone { | |||
220 | /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ | 225 | /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ |
221 | unsigned long zone_start_pfn; | 226 | unsigned long zone_start_pfn; |
222 | 227 | ||
228 | /* | ||
229 | * zone_start_pfn, spanned_pages and present_pages are all | ||
230 | * protected by span_seqlock. It is a seqlock because it has | ||
231 | * to be read outside of zone->lock, and it is done in the main | ||
232 | * allocator path. But, it is written quite infrequently. | ||
233 | * | ||
234 | * The lock is declared along with zone->lock because it is | ||
235 | * frequently read in proximity to zone->lock. It's good to | ||
236 | * give them a chance of being in the same cacheline. | ||
237 | */ | ||
223 | unsigned long spanned_pages; /* total size, including holes */ | 238 | unsigned long spanned_pages; /* total size, including holes */ |
224 | unsigned long present_pages; /* amount of memory (excluding holes) */ | 239 | unsigned long present_pages; /* amount of memory (excluding holes) */ |
225 | 240 | ||
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 32fad6d23200..817635f2ab62 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/sysctl.h> | 33 | #include <linux/sysctl.h> |
34 | #include <linux/cpu.h> | 34 | #include <linux/cpu.h> |
35 | #include <linux/cpuset.h> | 35 | #include <linux/cpuset.h> |
36 | #include <linux/memory_hotplug.h> | ||
36 | #include <linux/nodemask.h> | 37 | #include <linux/nodemask.h> |
37 | #include <linux/vmalloc.h> | 38 | #include <linux/vmalloc.h> |
38 | 39 | ||
@@ -80,12 +81,19 @@ unsigned long __initdata nr_all_pages; | |||
80 | 81 | ||
81 | static int page_outside_zone_boundaries(struct zone *zone, struct page *page) | 82 | static int page_outside_zone_boundaries(struct zone *zone, struct page *page) |
82 | { | 83 | { |
83 | if (page_to_pfn(page) >= zone->zone_start_pfn + zone->spanned_pages) | 84 | int ret = 0; |
84 | return 1; | 85 | unsigned seq; |
85 | if (page_to_pfn(page) < zone->zone_start_pfn) | 86 | unsigned long pfn = page_to_pfn(page); |
86 | return 1; | ||
87 | 87 | ||
88 | return 0; | 88 | do { |
89 | seq = zone_span_seqbegin(zone); | ||
90 | if (pfn >= zone->zone_start_pfn + zone->spanned_pages) | ||
91 | ret = 1; | ||
92 | else if (pfn < zone->zone_start_pfn) | ||
93 | ret = 1; | ||
94 | } while (zone_span_seqretry(zone, seq)); | ||
95 | |||
96 | return ret; | ||
89 | } | 97 | } |
90 | 98 | ||
91 | static int page_is_consistent(struct zone *zone, struct page *page) | 99 | static int page_is_consistent(struct zone *zone, struct page *page) |
@@ -1980,6 +1988,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat, | |||
1980 | zone->name = zone_names[j]; | 1988 | zone->name = zone_names[j]; |
1981 | spin_lock_init(&zone->lock); | 1989 | spin_lock_init(&zone->lock); |
1982 | spin_lock_init(&zone->lru_lock); | 1990 | spin_lock_init(&zone->lru_lock); |
1991 | zone_seqlock_init(zone); | ||
1983 | zone->zone_pgdat = pgdat; | 1992 | zone->zone_pgdat = pgdat; |
1984 | zone->free_pages = 0; | 1993 | zone->free_pages = 0; |
1985 | 1994 | ||