author     KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>  2010-03-05 16:41:55 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2010-03-06 14:26:25 -0500
commit     93e4a89a8c987189b168a530a331ef6d0fcf07a7 (patch)
tree       deb08017c0e4874539549d3ea9bf2d7b447a43be /include/linux/mmzone.h
parent     fc91668eaf9e7ba61e867fc2218b7e9fb67faa4f (diff)
mm: restore zone->all_unreclaimable to an independent word
commit e815af95 ("change all_unreclaimable zone member to flags") changed the
all_unreclaimable member into a bit flag. But it had an undesirable side
effect: free_one_page() is one of the hottest paths in the Linux kernel, and
adding atomic ops to it can reduce kernel performance a bit.
Thus, this patch partially reverts that commit. At the very least,
all_unreclaimable should not share a memory word with the other zone flags.
[akpm@linux-foundation.org: fix patch interaction]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Huang Shijie <shijie8@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
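To make the performance argument concrete, here is a hedged, self-contained C
sketch (userspace code using GCC atomic builtins as stand-ins for the kernel's
test_bit()/set_bit(); the struct names are illustrative, not the kernel's). It
contrasts a flag that shares a word with other bits against a field that owns
its own word, as after this patch:

/*
 * Illustration only -- not kernel code.  test_bit()/set_bit() on a shared
 * zone->flags word need atomic read-modify-write instructions; a field that
 * owns an independent word can be read and written with plain loads/stores
 * (serialized by zone->lock in the kernel).
 */
#include <stdio.h>

struct zone_bitflag {
	unsigned long flags;		/* ZONE_* bits share this word */
};

struct zone_word {
	int all_unreclaimable;		/* independent word, as after this patch */
};

#define ALL_UNRECLAIMABLE_BIT	0

static int flag_is_set(struct zone_bitflag *z)
{
	/* stand-in for test_bit(): safe against concurrent bit updates */
	return (__atomic_load_n(&z->flags, __ATOMIC_RELAXED) >>
		ALL_UNRECLAIMABLE_BIT) & 1;
}

static void flag_set(struct zone_bitflag *z)
{
	/* stand-in for set_bit(): an atomic RMW, the kind of cost this
	 * patch avoids in the free_one_page() hot path */
	__atomic_fetch_or(&z->flags, 1UL << ALL_UNRECLAIMABLE_BIT,
			  __ATOMIC_RELAXED);
}

int main(void)
{
	struct zone_bitflag zf = { 0 };
	struct zone_word zw = { 0 };

	flag_set(&zf);			/* atomic op on the shared word */
	zw.all_unreclaimable = 1;	/* plain store to the independent word */

	printf("bitflag=%d word=%d\n", flag_is_set(&zf), zw.all_unreclaimable);
	return 0;
}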
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h | 7 +------
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a01a103341bd..bc209d8b7b5c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -306,6 +306,7 @@ struct zone {
 	 * free areas of different sizes
 	 */
 	spinlock_t		lock;
+	int			all_unreclaimable; /* All pages pinned */
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/* see spanned/present_pages for more description */
 	seqlock_t		span_seqlock;
@@ -417,7 +418,6 @@ struct zone {
 } ____cacheline_internodealigned_in_smp;
 
 typedef enum {
-	ZONE_ALL_UNRECLAIMABLE,		/* all pages pinned */
 	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
 	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
 } zone_flags_t;
@@ -437,11 +437,6 @@ static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
 	clear_bit(flag, &zone->flags);
 }
 
-static inline int zone_is_all_unreclaimable(const struct zone *zone)
-{
-	return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
-}
-
 static inline int zone_is_reclaim_locked(const struct zone *zone)
 {
 	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
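With the helper removed, callers read the field directly. The following is a
minimal, hypothetical call-site sketch (the struct is stripped down and the
function name is invented for illustration; the real call sites are in the
mm/ reclaim and page-allocation paths):

/*
 * Hypothetical call site, illustration only (not copied from the kernel).
 * Before this patch a caller wrote  if (zone_is_all_unreclaimable(zone)),
 * an atomic test_bit() on zone->flags; after it, the check is a plain load
 * of the independent word.
 */
struct zone {
	int all_unreclaimable;		/* restored by this patch */
	unsigned long flags;		/* still holds the remaining zone_flags_t bits */
};

static int should_skip_zone(const struct zone *zone)
{
	return zone->all_unreclaimable != 0;
}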