author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /include/linux/mmzone.h
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--	include/linux/mmzone.h	49
1 file changed, 32 insertions(+), 17 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3984c4eb41fd..9f7c3ebcbbad 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -104,6 +104,8 @@ enum zone_stat_item {
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
 	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
+	NR_DIRTIED,		/* page dirtyings since bootup */
+	NR_WRITTEN,		/* page writings since bootup */
 #ifdef CONFIG_NUMA
 	NUMA_HIT,		/* allocated in intended node */
 	NUMA_MISS,		/* allocated in non intended node */
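The two counters added above are ordinary members of enum zone_stat_item, so they are read through the existing vmstat accessors rather than any new API. A minimal sketch, assuming kernel context: zone_page_state() and for_each_populated_zone() are the real helpers from <linux/vmstat.h> and <linux/mmzone.h>, while print_dirty_stats() itself is hypothetical.

	#include <linux/kernel.h>
	#include <linux/mm.h>
	#include <linux/mmzone.h>
	#include <linux/vmstat.h>

	/* Hypothetical debug helper: dump the new per-zone writeback counters. */
	static void print_dirty_stats(void)
	{
		struct zone *zone;

		for_each_populated_zone(zone) {
			/* NR_DIRTIED/NR_WRITTEN are monotonic since-boot counts */
			printk(KERN_DEBUG "%s: dirtied=%lu written=%lu\n",
			       zone->name,
			       zone_page_state(zone, NR_DIRTIED),
			       zone_page_state(zone, NR_WRITTEN));
		}
	}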
@@ -112,6 +114,7 @@ enum zone_stat_item {
 	NUMA_LOCAL,		/* allocation from local node */
 	NUMA_OTHER,		/* allocation from other node */
 #endif
+	NR_ANON_TRANSPARENT_HUGEPAGES,
 	NR_VM_ZONE_STAT_ITEMS };
 
 /*
@@ -270,11 +273,6 @@ struct zone_reclaim_stat {
 	 */
 	unsigned long		recent_rotated[2];
 	unsigned long		recent_scanned[2];
-
-	/*
-	 * accumulated for batching
-	 */
-	unsigned long		nr_saved_scan[NR_LRU_LISTS];
 };
 
 struct zone {
@@ -421,6 +419,9 @@ struct zone {
 typedef enum {
 	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
 	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
+	ZONE_CONGESTED,			/* zone has many dirty pages backed by
+					 * a congested BDI
+					 */
 } zone_flags_t;
 
 static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
@@ -438,6 +439,11 @@ static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
 	clear_bit(flag, &zone->flags);
 }
 
+static inline int zone_is_reclaim_congested(const struct zone *zone)
+{
+	return test_bit(ZONE_CONGESTED, &zone->flags);
+}
+
 static inline int zone_is_reclaim_locked(const struct zone *zone)
 {
 	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
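The new accessor follows the same set/test/clear discipline as the other zone flags. A sketch of how callers would drive the flag; zone_set_flag()/zone_clear_flag() are the helpers defined just above in this header, while both functions below are hypothetical stand-ins for the real call sites in the reclaim and backing-dev code.

	/* Hypothetical writeback-side caller: back off while reclaim has
	 * marked the zone's backing device congested. */
	static bool should_throttle_writeback(struct zone *zone)
	{
		return zone_is_reclaim_congested(zone);
	}

	/* Hypothetical reclaim-side caller: flag the zone when its dirty
	 * pages sit on a congested BDI, clear it once writeback catches up. */
	static void update_zone_congestion(struct zone *zone, bool congested)
	{
		if (congested)
			zone_set_flag(zone, ZONE_CONGESTED);
		else
			zone_clear_flag(zone, ZONE_CONGESTED);
	}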
@@ -448,12 +454,6 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
-#ifdef CONFIG_SMP
-unsigned long zone_nr_free_pages(struct zone *zone);
-#else
-#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
-#endif /* CONFIG_SMP */
-
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -467,7 +467,7 @@ unsigned long zone_nr_free_pages(struct zone *zone);
 #ifdef CONFIG_NUMA
 
 /*
- * The NUMA zonelists are doubled becausse we need zonelists that restrict the
+ * The NUMA zonelists are doubled because we need zonelists that restrict the
  * allocations to a single node for GFP_THISNODE.
  *
  * [0] : Zonelist with fallback
@@ -635,6 +635,7 @@ typedef struct pglist_data {
 	wait_queue_head_t kswapd_wait;
 	struct task_struct *kswapd;
 	int kswapd_max_order;
+	enum zone_type classzone_idx;
 } pg_data_t;
 
 #define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
@@ -646,12 +647,21 @@ typedef struct pglist_data {
 #endif
 #define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))
 
+#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
+
+#define node_end_pfn(nid) ({\
+	pg_data_t *__pgdat = NODE_DATA(nid);\
+	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
+})
+
 #include <linux/memory_hotplug.h>
 
 extern struct mutex zonelists_mutex;
 void build_all_zonelists(void *data);
-void wakeup_kswapd(struct zone *zone, int order);
-int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
+bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+		int classzone_idx, int alloc_flags);
+bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
 		int classzone_idx, int alloc_flags);
 enum memmap_context {
 	MEMMAP_EARLY,
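node_start_pfn()/node_end_pfn() expose a node's page-frame range without open-coding the pgdat fields, and zone_watermark_ok() now returns bool alongside a _safe variant for low-memory situations where per-CPU counter drift matters. A sketch of using the range macros, assuming kernel context; count_valid_pfns() is hypothetical, while the macros and pfn_valid() are the ones from this header.

	#include <linux/mmzone.h>

	/* Hypothetical helper: count the backed page frames in one node,
	 * skipping holes inside the [start, end) span via pfn_valid(). */
	static unsigned long count_valid_pfns(int nid)
	{
		unsigned long pfn, n = 0;

		for (pfn = node_start_pfn(nid); pfn < node_end_pfn(nid); pfn++)
			if (pfn_valid(pfn))
				n++;
		return n;
	}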
@@ -920,9 +930,6 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 #define pfn_to_nid(pfn)		(0)
 #endif
 
-#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
-#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
-
 #ifdef CONFIG_SPARSEMEM
 
 /*
@@ -948,6 +955,12 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 #error Allocator MAX_ORDER exceeds SECTION_SIZE
 #endif
 
+#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
+#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
+
+#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
+#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
+
 struct page;
 struct page_cgroup;
 struct mem_section {
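The two alignment macros round a pfn down or up to a section boundary. A worked example under an assumed configuration (SECTION_SIZE_BITS = 27 and PAGE_SHIFT = 12, i.e. 128 MiB sections of 4 KiB pages, giving PFN_SECTION_SHIFT = 15; other architectures use different values):

	/* Worked example, assuming PFN_SECTION_SHIFT == 15:
	 *
	 *   PAGES_PER_SECTION       = 1 << 15 = 32768
	 *   PAGE_SECTION_MASK       = ~(32768 - 1)
	 *
	 *   pfn_to_section_nr(40000)  = 40000 >> 15              = 1
	 *   SECTION_ALIGN_DOWN(40000) = 40000 & ~32767           = 32768
	 *   SECTION_ALIGN_UP(40000)   = (40000 + 32767) & ~32767 = 65536
	 *
	 * i.e. pfn 40000 falls in section 1, which spans pfns [32768, 65536).
	 */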
@@ -1045,12 +1058,14 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
 	return __nr_to_section(pfn_to_section_nr(pfn));
 }
 
+#ifndef CONFIG_HAVE_ARCH_PFN_VALID
 static inline int pfn_valid(unsigned long pfn)
 {
 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
 		return 0;
 	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
 }
+#endif
 
 static inline int pfn_present(unsigned long pfn)
 {
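With the new guard, an architecture can select CONFIG_HAVE_ARCH_PFN_VALID and supply its own pfn_valid() when the generic per-section test is too coarse (for instance, memory holes inside an otherwise-valid section). A sketch of what such an override might look like, modelled loosely on the ARM approach; memblock_is_memory() is the real memblock query, but this particular definition is illustrative, not taken from any arch tree.

	#include <linux/memblock.h>

	/* Illustrative arch override (built only when the arch selects
	 * CONFIG_HAVE_ARCH_PFN_VALID): ask the boot memory map whether the
	 * frame is actually backed, instead of trusting section granularity. */
	int pfn_valid(unsigned long pfn)
	{
		return memblock_is_memory((phys_addr_t)pfn << PAGE_SHIFT);
	}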