| field | value | date |
|---|---|---|
| author | Len Brown <len.brown@intel.com> | 2012-06-04 00:35:19 -0400 |
| committer | Len Brown <len.brown@intel.com> | 2012-06-04 00:35:19 -0400 |
| commit | 7e1bd6e38b1f30860ce25a014c6d6adfb0079f4a (patch) | |
| tree | 65c5898ba93007d4399150c7a127a670bcfbc30d /include/linux/mmzone.h | |
| parent | 301f33fbcf4ced53b3de114846ecece5d6aafeeb (diff) | |
| parent | f8f5701bdaf9134b1f90e5044a82c66324d2073f (diff) | |
Merge branch 'upstream' into bugfix-video

Update the bugfix-video branch to 3.5-rc1 so that I don't have to
resolve the conflict between these patches and upstream again.

Conflicts:
	drivers/gpu/drm/gma500/psb_drv.c

Text conflict: an added comment vs. a deleted neighboring line.
Resolved by keeping just this:

	/* igd_opregion_init(&dev_priv->opregion_dev); */
	/* acpi_video_register(); */

Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'include/linux/mmzone.h')

| mode | path | lines |
|---|---|---|
| -rw-r--r-- | include/linux/mmzone.h | 102 |

1 file changed, 72 insertions(+), 30 deletions(-)
```diff
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index dff711509661..2427706f78b4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -30,18 +30,44 @@
 /*
  * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
  * costly to service. That is between allocation orders which should
- * coelesce naturally under reasonable reclaim pressure and those which
+ * coalesce naturally under reasonable reclaim pressure and those which
  * will not.
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
-#define MIGRATE_UNMOVABLE     0
-#define MIGRATE_RECLAIMABLE   1
-#define MIGRATE_MOVABLE       2
-#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
-#define MIGRATE_RESERVE       3
-#define MIGRATE_ISOLATE       4 /* can't allocate from here */
-#define MIGRATE_TYPES         5
+enum {
+	MIGRATE_UNMOVABLE,
+	MIGRATE_RECLAIMABLE,
+	MIGRATE_MOVABLE,
+	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
+	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
+#ifdef CONFIG_CMA
+	/*
+	 * MIGRATE_CMA migration type is designed to mimic the way
+	 * ZONE_MOVABLE works. Only movable pages can be allocated
+	 * from MIGRATE_CMA pageblocks and the page allocator never
+	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
+	 *
+	 * The way to use it is to change the migratetype of a range of
+	 * pageblocks to MIGRATE_CMA, which can be done by the
+	 * __free_pageblock_cma() function. What is important though
+	 * is that the range of pageblocks must be aligned to
+	 * MAX_ORDER_NR_PAGES should the biggest page be bigger than
+	 * a single pageblock.
+	 */
+	MIGRATE_CMA,
+#endif
+	MIGRATE_ISOLATE,	/* can't allocate from here */
+	MIGRATE_TYPES
+};
+
+#ifdef CONFIG_CMA
+#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+#  define cma_wmark_pages(zone) zone->min_cma_pages
+#else
+#  define is_migrate_cma(migratetype) false
+#  define cma_wmark_pages(zone) 0
+#endif
 
 #define for_each_migratetype_order(order, type) \
 	for (order = 0; order < MAX_ORDER; order++) \
```
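
The CONFIG_CMA guard means MIGRATE_CMA only exists as an enumerator when CMA is built in, while is_migrate_cma() collapses to the constant false otherwise, so CMA-only branches compile away entirely. A minimal sketch of how a caller might branch on the new encoding (the helper name can_change_pageblock_type is hypothetical, not from this patch):

```c
#include <linux/mmzone.h>

/*
 * Hypothetical helper, for illustration only: MIGRATE_CMA and
 * MIGRATE_ISOLATE pageblocks must keep their migratetype, so
 * fallback logic must never convert them.  With !CONFIG_CMA,
 * is_migrate_cma() is the constant false and the first test
 * compiles away.
 */
static bool can_change_pageblock_type(int migratetype)
{
	if (is_migrate_cma(migratetype))
		return false;	/* CMA pageblocks stay CMA */
	if (migratetype == MIGRATE_ISOLATE)
		return false;	/* isolated pageblocks stay isolated */
	return true;
}
```
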
```diff
@@ -159,8 +185,25 @@ static inline int is_unevictable_lru(enum lru_list lru)
 	return (lru == LRU_UNEVICTABLE);
 }
 
+struct zone_reclaim_stat {
+	/*
+	 * The pageout code in vmscan.c keeps track of how many of the
+	 * mem/swap backed and file backed pages are referenced.
+	 * The higher the rotated/scanned ratio, the more valuable
+	 * that cache is.
+	 *
+	 * The anon LRU stats live in [0], file LRU stats in [1]
+	 */
+	unsigned long		recent_rotated[2];
+	unsigned long		recent_scanned[2];
+};
+
 struct lruvec {
 	struct list_head lists[NR_LRU_LISTS];
+	struct zone_reclaim_stat reclaim_stat;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+	struct zone *zone;
+#endif
 };
 
 /* Mask used at gathering information at once (see memcontrol.c) */
```
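
The comment explains what the two counters are for: reclaim compares how many scanned pages turned out to be recently referenced (rotated) against how many were scanned at all. A sketch of the kind of ratio vmscan.c derives from them; reclaim_stat_ratio is a made-up name, and the real computation in vmscan.c differs in detail:

```c
#include <linux/mmzone.h>

/*
 * Illustrative only: a high rotated/scanned ratio means pages on
 * this LRU keep getting referenced again, so that cache is valuable
 * and should be reclaimed less aggressively.  Index 0 is the anon
 * LRU, index 1 the file LRU.
 */
static unsigned long reclaim_stat_ratio(struct zone_reclaim_stat *rs,
					int file)
{
	/* +1 guards against division by zero before any scanning */
	return rs->recent_rotated[file] * 100 /
	       (rs->recent_scanned[file] + 1);
}
```
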
```diff
@@ -169,16 +212,12 @@ struct lruvec {
 #define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
 
-/* Isolate inactive pages */
-#define ISOLATE_INACTIVE	((__force isolate_mode_t)0x1)
-/* Isolate active pages */
-#define ISOLATE_ACTIVE		((__force isolate_mode_t)0x2)
 /* Isolate clean file */
-#define ISOLATE_CLEAN		((__force isolate_mode_t)0x4)
+#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
 /* Isolate unmapped file */
-#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x8)
+#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
 /* Isolate for asynchronous migration */
-#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x10)
+#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
 
 /* LRU Isolation modes. */
 typedef unsigned __bitwise__ isolate_mode_t;
```
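
With ISOLATE_INACTIVE and ISOLATE_ACTIVE gone, the surviving modes are renumbered into a dense bitmask. They still combine with bitwise OR; a small sketch under that assumption (compaction_isolate_mode is a hypothetical name, not a function in this patch):

```c
#include <linux/mmzone.h>

/* Illustration: isolate modes are independent bits that OR together. */
static isolate_mode_t compaction_isolate_mode(bool async)
{
	/* Ask for clean, unmapped file pages, which are cheap to move */
	isolate_mode_t mode = ISOLATE_CLEAN | ISOLATE_UNMAPPED;

	/* Asynchronous migration adds its own bit on top */
	if (async)
		mode |= ISOLATE_ASYNC_MIGRATE;
	return mode;
}
```
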
```diff
@@ -287,19 +326,6 @@ enum zone_type {
 #error ZONES_SHIFT -- too many zones configured adjust calculation
 #endif
 
-struct zone_reclaim_stat {
-	/*
-	 * The pageout code in vmscan.c keeps track of how many of the
-	 * mem/swap backed and file backed pages are referenced.
-	 * The higher the rotated/scanned ratio, the more valuable
-	 * that cache is.
-	 *
-	 * The anon LRU stats live in [0], file LRU stats in [1]
-	 */
-	unsigned long		recent_rotated[2];
-	unsigned long		recent_scanned[2];
-};
-
 struct zone {
 	/* Fields commonly accessed by the page allocator */
 
```
```diff
@@ -347,6 +373,13 @@ struct zone {
 	/* see spanned/present_pages for more description */
 	seqlock_t		span_seqlock;
 #endif
+#ifdef CONFIG_CMA
+	/*
+	 * CMA needs to increase watermark levels during the allocation
+	 * process to make sure that the system is not starved.
+	 */
+	unsigned long		min_cma_pages;
+#endif
 	struct free_area	free_area[MAX_ORDER];
 
 #ifndef CONFIG_SPARSEMEM
```
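
The new min_cma_pages field pairs with the cma_wmark_pages() macro from the first hunk: it raises the zone's effective watermark so ordinary allocations leave headroom while CMA carves out its range. A rough sketch of the intended effect; watermark_ok here is a hypothetical stand-in for the real zone_watermark_ok() logic, which is more involved:

```c
#include <linux/mmzone.h>

/*
 * Sketch only, assuming the simplest possible watermark test:
 * cma_wmark_pages() adds zone->min_cma_pages to the mark under
 * CONFIG_CMA and adds 0 otherwise, so the adjustment vanishes
 * entirely when CMA is not built in.
 */
static bool watermark_ok(struct zone *zone, unsigned long free_pages,
			 unsigned long mark)
{
	return free_pages >= mark + cma_wmark_pages(zone);
}
```
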
```diff
@@ -374,8 +407,6 @@ struct zone {
 	spinlock_t		lru_lock;
 	struct lruvec		lruvec;
 
-	struct zone_reclaim_stat reclaim_stat;
-
 	unsigned long		pages_scanned;	   /* since last reclaim */
 	unsigned long		flags;		   /* zone flags, see below */
 
```
```diff
@@ -701,6 +732,17 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
 				     unsigned long size,
 				     enum memmap_context context);
 
+extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
+
+static inline struct zone *lruvec_zone(struct lruvec *lruvec)
+{
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+	return lruvec->zone;
+#else
+	return container_of(lruvec, struct zone, lruvec);
+#endif
+}
+
 #ifdef CONFIG_HAVE_MEMORY_PRESENT
 void memory_present(int nid, unsigned long start, unsigned long end);
 #else
```
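
lruvec_zone() needs no stored back-pointer in the non-memcg case because every zone embeds exactly one struct lruvec, so container_of() recovers the zone by subtracting the member's offset. With CONFIG_CGROUP_MEM_RES_CTLR a memcg's lruvec is not embedded in the zone, which is why that configuration stores an explicit zone pointer instead. A small usage sketch; check_lruvec_roundtrip is illustrative, not part of the patch:

```c
#include <linux/kernel.h>
#include <linux/mmzone.h>

/* Illustration: the zone is always recoverable from its embedded lruvec */
static void check_lruvec_roundtrip(struct zone *zone)
{
	struct lruvec *lruvec = &zone->lruvec;

	/* lruvec_zone() gets us back to the enclosing zone */
	BUG_ON(lruvec_zone(lruvec) != zone);
}
```
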
