| author | Dan Williams <dan.j.williams@intel.com> | 2009-09-08 20:55:21 -0400 |
|---|---|---|
| committer | Dan Williams <dan.j.williams@intel.com> | 2009-09-08 20:55:21 -0400 |
| commit | bbb20089a3275a19e475dbc21320c3742e3ca423 (patch) | |
| tree | 216fdc1cbef450ca688135c5b8969169482d9a48 /include/linux/mmzone.h | |
| parent | 3e48e656903e9fd8bc805c6a2c4264d7808d315b (diff) | |
| parent | 657a77fa7284d8ae28dfa48f1dc5d919bf5b2843 (diff) | |
Merge branch 'dmaengine' into async-tx-next
Conflicts:
crypto/async_tx/async_xor.c
drivers/dma/ioat/dma_v2.h
drivers/dma/ioat/pci.c
drivers/md/raid5.c
Diffstat (limited to 'include/linux/mmzone.h')
| -rw-r--r-- | include/linux/mmzone.h | 36 |
1 file changed, 17 insertions(+), 19 deletions(-)
```diff
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a47c879e1304..889598537370 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -50,9 +50,6 @@ extern int page_group_by_mobility_disabled;
 
 static inline int get_pageblock_migratetype(struct page *page)
 {
-	if (unlikely(page_group_by_mobility_disabled))
-		return MIGRATE_UNMOVABLE;
-
 	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
 }
 
@@ -86,13 +83,8 @@ enum zone_stat_item {
 	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
 	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
 	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
-#ifdef CONFIG_UNEVICTABLE_LRU
 	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
 	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
-#else
-	NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
-	NR_MLOCK = NR_ACTIVE_FILE,
-#endif
 	NR_ANON_PAGES,	/* Mapped anonymous pages */
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
@@ -135,11 +127,7 @@ enum lru_list {
 	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
 	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
 	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
-#ifdef CONFIG_UNEVICTABLE_LRU
 	LRU_UNEVICTABLE,
-#else
-	LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */
-#endif
 	NR_LRU_LISTS
 };
 
@@ -159,13 +147,20 @@ static inline int is_active_lru(enum lru_list l)
 
 static inline int is_unevictable_lru(enum lru_list l)
 {
-#ifdef CONFIG_UNEVICTABLE_LRU
 	return (l == LRU_UNEVICTABLE);
-#else
-	return 0;
-#endif
 }
 
+enum zone_watermarks {
+	WMARK_MIN,
+	WMARK_LOW,
+	WMARK_HIGH,
+	NR_WMARK
+};
+
+#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
+#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
+#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
+
 struct per_cpu_pages {
 	int count;		/* number of pages in the list */
 	int high;		/* high watermark, emptying needed */
```
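The hunk above replaces the old trio of per-zone fields with a named `enum zone_watermarks` and accessor macros, so callers write `min_wmark_pages(zone)` instead of touching `zone->pages_min` directly. A minimal sketch of how such a caller looks after this change, assuming this era's `zone_page_state()`/`NR_FREE_PAGES` from `linux/vmstat.h`; the wrapper function itself is hypothetical:

```c
#include <linux/mmzone.h>
#include <linux/vmstat.h>

/*
 * Hypothetical helper: nonzero when the zone's free pages have dropped
 * below its low watermark. Before this commit the comparison would have
 * read zone_page_state(z, NR_FREE_PAGES) < z->pages_low; the macro now
 * reaches the same value through the watermark[] array.
 */
static int zone_below_low_wmark(struct zone *z)
{
	return zone_page_state(z, NR_FREE_PAGES) < low_wmark_pages(z);
}
```

The remaining hunks convert `struct zone` itself to the new representation: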
```diff
@@ -278,7 +273,10 @@ struct zone_reclaim_stat {
 
 struct zone {
 	/* Fields commonly accessed by the page allocator */
-	unsigned long		pages_min, pages_low, pages_high;
+
+	/* zone watermarks, access with *_wmark_pages(zone) macros */
+	unsigned long watermark[NR_WMARK];
+
 	/*
 	 * We don't know if the memory that we're going to allocate will be freeable
 	 * or/and it will be released eventually, so to avoid totally wasting several
@@ -323,9 +321,9 @@ struct zone {
 
 	/* Fields commonly accessed by the page reclaim scanner */
 	spinlock_t		lru_lock;
-	struct {
+	struct zone_lru {
 		struct list_head list;
-		unsigned long nr_scan;
+		unsigned long nr_saved_scan;	/* accumulated for batching */
 	} lru[NR_LRU_LISTS];
 
 	struct zone_reclaim_stat reclaim_stat;
```
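The rename of `nr_scan` to `nr_saved_scan` matches its role as an accumulator: each reclaim pass adds the number of pages it would like to scan, and the scanner only runs once the saved total reaches a full batch. A hedged sketch of that batching logic, modeled on the helper added to `mm/vmscan.c` around this kernel version; treat the exact name and the `batch_size` parameter as illustrative rather than the verbatim upstream code:

```c
/*
 * Sketch of the batching idea behind zone->lru[l].nr_saved_scan:
 * sub-batch scan requests are saved up across passes and handed to
 * the scanner in one go once they amount to a full batch.
 */
static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
				       unsigned long *nr_saved_scan,
				       unsigned long batch_size)
{
	*nr_saved_scan += nr_to_scan;	/* accumulate this pass's request */
	nr_to_scan = *nr_saved_scan;

	if (nr_to_scan >= batch_size)
		*nr_saved_scan = 0;	/* large enough: consume the batch */
	else
		nr_to_scan = 0;		/* too small: keep saving */

	return nr_to_scan;
}
```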
