Diffstat (limited to 'include/linux')

 include/linux/byteorder/big_endian.h     |  3
 include/linux/byteorder/little_endian.h  |  3
 include/linux/compaction.h               | 89
 include/linux/cpuset.h                   | 43
 include/linux/dynamic_debug.h            |  2
 include/linux/err.h                      | 10
 include/linux/fb.h                       |  2
 include/linux/gfp.h                      | 18
 include/linux/highmem.h                  |  2
 include/linux/ivtvfb.h                   |  1
 include/linux/kernel.h                   | 25
 include/linux/lis3lv02d.h                | 12
 include/linux/matroxfb.h                 |  3
 include/linux/memcontrol.h               | 13
 include/linux/memory_hotplug.h           |  1
 include/linux/mempolicy.h                | 15
 include/linux/migrate.h                  |  6
 include/linux/mm.h                       |  7
 include/linux/mmzone.h                   | 14
 include/linux/ratelimit.h                | 13
 include/linux/rmap.h                     | 27
 include/linux/sched.h                    |  3
 include/linux/swap.h                     | 14
 include/linux/vmstat.h                   |  4

 24 files changed, 265 insertions, 65 deletions
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
index 3c80fd7e8b56..d53a67dff018 100644
--- a/include/linux/byteorder/big_endian.h
+++ b/include/linux/byteorder/big_endian.h
@@ -7,6 +7,9 @@
 #ifndef __BIG_ENDIAN_BITFIELD
 #define __BIG_ENDIAN_BITFIELD
 #endif
+#ifndef __BYTE_ORDER
+#define __BYTE_ORDER __BIG_ENDIAN
+#endif
 
 #include <linux/types.h>
 #include <linux/swab.h>
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
index 83195fb82962..f7f8ad13adb6 100644
--- a/include/linux/byteorder/little_endian.h
+++ b/include/linux/byteorder/little_endian.h
@@ -7,6 +7,9 @@
 #ifndef __LITTLE_ENDIAN_BITFIELD
 #define __LITTLE_ENDIAN_BITFIELD
 #endif
+#ifndef __BYTE_ORDER
+#define __BYTE_ORDER __LITTLE_ENDIAN
+#endif
 
 #include <linux/types.h>
 #include <linux/swab.h>
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
new file mode 100644
index 000000000000..5ac51552d908
--- /dev/null
+++ b/include/linux/compaction.h
@@ -0,0 +1,89 @@
+#ifndef _LINUX_COMPACTION_H
+#define _LINUX_COMPACTION_H
+
+/* Return values for compact_zone() and try_to_compact_pages() */
+/* compaction didn't start as it was not possible or direct reclaim was more suitable */
+#define COMPACT_SKIPPED		0
+/* compaction should continue to another pageblock */
+#define COMPACT_CONTINUE	1
+/* direct compaction partially compacted a zone and there are suitable pages */
+#define COMPACT_PARTIAL		2
+/* The full zone was compacted */
+#define COMPACT_COMPLETE	3
+
+#ifdef CONFIG_COMPACTION
+extern int sysctl_compact_memory;
+extern int sysctl_compaction_handler(struct ctl_table *table, int write,
+			void __user *buffer, size_t *length, loff_t *ppos);
+extern int sysctl_extfrag_threshold;
+extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
+			void __user *buffer, size_t *length, loff_t *ppos);
+
+extern int fragmentation_index(struct zone *zone, unsigned int order);
+extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
+			int order, gfp_t gfp_mask, nodemask_t *mask);
+
+/* Do not skip compaction more than 64 times */
+#define COMPACT_MAX_DEFER_SHIFT 6
+
+/*
+ * Compaction is deferred when compaction fails to result in a page
+ * allocation success. 1 << compact_defer_shift compactions are skipped up
+ * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
+ */
+static inline void defer_compaction(struct zone *zone)
+{
+	zone->compact_considered = 0;
+	zone->compact_defer_shift++;
+
+	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
+		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
+}
+
+/* Returns true if compaction should be skipped this time */
+static inline bool compaction_deferred(struct zone *zone)
+{
+	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
+
+	/* Avoid possible overflow */
+	if (++zone->compact_considered > defer_limit)
+		zone->compact_considered = defer_limit;
+
+	return zone->compact_considered < (1UL << zone->compact_defer_shift);
+}
+
+#else
+static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
+			int order, gfp_t gfp_mask, nodemask_t *nodemask)
+{
+	return COMPACT_CONTINUE;
+}
+
+static inline void defer_compaction(struct zone *zone)
+{
+}
+
+static inline bool compaction_deferred(struct zone *zone)
+{
+	return 1;
+}
+
+#endif /* CONFIG_COMPACTION */
+
+#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
+extern int compaction_register_node(struct node *node);
+extern void compaction_unregister_node(struct node *node);
+
+#else
+
+static inline int compaction_register_node(struct node *node)
+{
+	return 0;
+}
+
+static inline void compaction_unregister_node(struct node *node)
+{
+}
+#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */
+
+#endif /* _LINUX_COMPACTION_H */
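The deferral helpers added above are meant to be consulted around direct compaction attempts: skip compaction while a zone is in its back-off window, and lengthen the window when compaction runs but the allocation still fails. A minimal sketch of that pattern, assuming a hypothetical caller and a hypothetical try_alloc_high_order() standing in for the allocator's own retry:

/*
 * Illustrative sketch only -- not part of this commit. Everything except the
 * compaction.h declarations (compaction_deferred, try_to_compact_pages,
 * defer_compaction, COMPACT_SKIPPED) is hypothetical.
 */
static struct page *compact_then_retry(struct zonelist *zonelist, struct zone *zone,
                                       int order, gfp_t gfp_mask, nodemask_t *nodemask)
{
        struct page *page;

        /* While deferred, 1 << compact_defer_shift attempts are skipped. */
        if (compaction_deferred(zone))
                return NULL;

        if (try_to_compact_pages(zonelist, order, gfp_mask, nodemask) == COMPACT_SKIPPED)
                return NULL;

        page = try_alloc_high_order(zone, order, gfp_mask);     /* hypothetical retry */
        if (!page)
                defer_compaction(zone); /* compaction ran but did not help: back off */

        return page;
}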
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a73454aec333..20b51cab6593 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -86,9 +86,44 @@ extern void rebuild_sched_domains(void);
 
 extern void cpuset_print_task_mems_allowed(struct task_struct *p);
 
+/*
+ * Reading current's mems_allowed and mempolicy in the fast path must be
+ * protected by get_mems_allowed().
+ */
+static inline void get_mems_allowed(void)
+{
+	current->mems_allowed_change_disable++;
+
+	/*
+	 * Ensure that reading mems_allowed and mempolicy happens after the
+	 * update of ->mems_allowed_change_disable.
+	 *
+	 * When the write-side task finds ->mems_allowed_change_disable is not 0,
+	 * it knows the read-side task is reading mems_allowed or mempolicy,
+	 * so it will clear old bits lazily.
+	 */
+	smp_mb();
+}
+
+static inline void put_mems_allowed(void)
+{
+	/*
+	 * Ensure that mems_allowed and mempolicy are read before reducing
+	 * mems_allowed_change_disable.
+	 *
+	 * The write-side task will then know that the read-side task is still
+	 * reading mems_allowed or mempolicy and will not clear old bits in the
+	 * nodemask.
+	 */
+	smp_mb();
+	--ACCESS_ONCE(current->mems_allowed_change_disable);
+}
+
 static inline void set_mems_allowed(nodemask_t nodemask)
 {
+	task_lock(current);
 	current->mems_allowed = nodemask;
+	task_unlock(current);
 }
 
 #else /* !CONFIG_CPUSETS */
@@ -187,6 +222,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 {
 }
 
+static inline void get_mems_allowed(void)
+{
+}
+
+static inline void put_mems_allowed(void)
+{
+}
+
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
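The read-side pattern these helpers are designed for looks roughly like the sketch below; node_is_allowed() is a hypothetical caller, while get_mems_allowed()/put_mems_allowed() and the node_isset() test are the real interfaces.

static bool node_is_allowed(int nid)
{
        bool allowed;

        get_mems_allowed();     /* pin mems_allowed/mempolicy against lazy clearing */
        allowed = node_isset(nid, current->mems_allowed);
        put_mems_allowed();

        return allowed;
}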
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index f8c2e1767500..b3cd4de9432b 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -28,7 +28,7 @@ struct _ddebug {
 /*
  * The flags field controls the behaviour at the callsite.
  * The bits here are changed dynamically when the user
- * writes commands to <debugfs>/dynamic_debug/ddebug
+ * writes commands to <debugfs>/dynamic_debug/control
  */
 #define _DPRINTK_FLAGS_PRINT (1<<0)  /* printk() a message using the format */
 #define _DPRINTK_FLAGS_DEFAULT 0
diff --git a/include/linux/err.h b/include/linux/err.h
index 1b12642636c7..448afc12c78a 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -19,22 +19,22 @@
 
 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
 
-static inline void *ERR_PTR(long error)
+static inline void * __must_check ERR_PTR(long error)
 {
 	return (void *) error;
 }
 
-static inline long PTR_ERR(const void *ptr)
+static inline long __must_check PTR_ERR(const void *ptr)
 {
 	return (long) ptr;
 }
 
-static inline long IS_ERR(const void *ptr)
+static inline long __must_check IS_ERR(const void *ptr)
 {
 	return IS_ERR_VALUE((unsigned long)ptr);
 }
 
-static inline long IS_ERR_OR_NULL(const void *ptr)
+static inline long __must_check IS_ERR_OR_NULL(const void *ptr)
 {
 	return !ptr || IS_ERR_VALUE((unsigned long)ptr);
 }
@@ -46,7 +46,7 @@ static inline long IS_ERR_OR_NULL(const void *ptr)
  * Explicitly cast an error-valued pointer to another pointer type in such a
  * way as to make it clear that's what's going on.
  */
-static inline void *ERR_CAST(const void *ptr)
+static inline void * __must_check ERR_CAST(const void *ptr)
 {
 	/* cast away the const */
 	return (void *) ptr;
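With __must_check on these helpers, a caller that ignores the result of IS_ERR()/PTR_ERR() now draws a compiler warning. A small usage sketch follows; widget_create()/widget_init() are hypothetical, while the err.h and slab calls are the real API.

#include <linux/err.h>
#include <linux/slab.h>

struct widget { int id; };

static struct widget *widget_create(int id)
{
        struct widget *w;

        if (id < 0)
                return ERR_PTR(-EINVAL);        /* encode an errno in the pointer */

        w = kzalloc(sizeof(*w), GFP_KERNEL);
        if (!w)
                return ERR_PTR(-ENOMEM);

        w->id = id;
        return w;
}

static int widget_init(int id)
{
        struct widget *w = widget_create(id);

        if (IS_ERR(w))
                return PTR_ERR(w);      /* recover the negative errno */

        kfree(w);
        return 0;
}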
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 1296af45169d..f3793ebc241c 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -37,7 +37,7 @@ struct dentry;
 #define FBIOGET_HWCINFO		0x4616
 #define FBIOPUT_MODEINFO	0x4617
 #define FBIOGET_DISPINFO	0x4618
-
+#define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
 
 #define FB_TYPE_PACKED_PIXELS		0	/* Packed Pixels */
 #define FB_TYPE_PLANES			1	/* Non interleaved planes */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 4c6d41333f98..975609cb8548 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -15,7 +15,7 @@ struct vm_area_struct;
  * Zone modifiers (see linux/mmzone.h - low three bits)
  *
  * Do not put any conditional on these. If necessary modify the definitions
- * without the underscores and use the consistently. The definitions here may
+ * without the underscores and use them consistently. The definitions here may
  * be used in bit comparisons.
  */
 #define __GFP_DMA	((__force gfp_t)0x01u)
@@ -101,7 +101,7 @@ struct vm_area_struct;
 			__GFP_NORETRY|__GFP_NOMEMALLOC)
 
 /* Control slab gfp mask during early boot */
-#define GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
+#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
 
 /* Control allocation constraints */
 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
@@ -152,12 +152,12 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
  * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
  * zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long
  * and there are 16 of them to cover all possible combinations of
- * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM
+ * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
  *
  * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
  * But GFP_MOVABLE is not only a zone specifier but also an allocation
  * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
- * Only 1bit of the lowest 3 bit (DMA,DMA32,HIGHMEM) can be set to "1".
+ * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
  *
  * bit       result
  * =================
@@ -187,7 +187,7 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 
 #define GFP_ZONE_TABLE ( \
 	(ZONE_NORMAL << 0 * ZONES_SHIFT) \
-	| (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT) \
+	| (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT)	\
 	| (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT) \
 	| (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT) \
 	| (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT) \
@@ -197,7 +197,7 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 )
 
 /*
- * GFP_ZONE_BAD is a bitmap for all combination of __GFP_DMA, __GFP_DMA32
+ * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32
  * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
  * entry starting with bit 0. Bit is set if the combination is not
  * allowed.
@@ -320,17 +320,17 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
 void free_pages_exact(void *virt, size_t size);
 
 #define __get_free_page(gfp_mask) \
-		__get_free_pages((gfp_mask),0)
+		__get_free_pages((gfp_mask), 0)
 
 #define __get_dma_pages(gfp_mask, order) \
-		__get_free_pages((gfp_mask) | GFP_DMA,(order))
+		__get_free_pages((gfp_mask) | GFP_DMA, (order))
 
 extern void __free_pages(struct page *page, unsigned int order);
 extern void free_pages(unsigned long addr, unsigned int order);
 extern void free_hot_cold_page(struct page *page, int cold);
 
 #define __free_page(page) __free_pages((page), 0)
-#define free_page(addr) free_pages((addr),0)
+#define free_page(addr) free_pages((addr), 0)
 
 void page_alloc_init(void);
 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
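The wrappers touched above are used as in the short sketch below; scratch_page_demo() is a hypothetical caller, while __get_free_page()/free_page() are the real macros.

static int scratch_page_demo(void)
{
        unsigned long addr = __get_free_page(GFP_KERNEL);       /* one order-0 page */

        if (!addr)
                return -ENOMEM;

        memset((void *)addr, 0, PAGE_SIZE);
        free_page(addr);
        return 0;
}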
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 74152c08ad07..caafd0561aa1 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -27,7 +27,7 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 
 #include <asm/kmap_types.h>
 
-#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)
+#ifdef CONFIG_DEBUG_HIGHMEM
 
 void debug_kmap_atomic(enum km_type type);
 
diff --git a/include/linux/ivtvfb.h b/include/linux/ivtvfb.h
index 9d88b29ddf55..e8b92f67f10d 100644
--- a/include/linux/ivtvfb.h
+++ b/include/linux/ivtvfb.h
@@ -33,6 +33,5 @@ struct ivtvfb_dma_frame {
 };
 
 #define IVTVFB_IOC_DMA_FRAME	_IOW('V', BASE_VIDIOC_PRIVATE+0, struct ivtvfb_dma_frame)
-#define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
 
 #endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index cc5e3ffe9fce..8317ec4b9f3b 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -24,9 +24,9 @@
 extern const char linux_banner[];
 extern const char linux_proc_banner[];
 
-#define USHORT_MAX	((u16)(~0U))
-#define SHORT_MAX	((s16)(USHORT_MAX>>1))
-#define SHORT_MIN	(-SHORT_MAX - 1)
+#define USHRT_MAX	((u16)(~0U))
+#define SHRT_MAX	((s16)(USHRT_MAX>>1))
+#define SHRT_MIN	((s16)(-SHRT_MAX - 1))
 #define INT_MAX		((int)(~0U>>1))
 #define INT_MIN		(-INT_MAX - 1)
 #define UINT_MAX	(~0U)
@@ -375,6 +375,8 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
 	return buf;
 }
 
+extern int hex_to_bin(char ch);
+
 #ifndef pr_fmt
 #define pr_fmt(fmt) fmt
 #endif
@@ -389,6 +391,7 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
 	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_warning(fmt, ...) \
 	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn pr_warning
 #define pr_notice(fmt, ...) \
 	printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_info(fmt, ...) \
@@ -423,14 +426,13 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
  * no local ratelimit_state used in the !PRINTK case
  */
 #ifdef CONFIG_PRINTK
-#define printk_ratelimited(fmt, ...) ({				\
-	static struct ratelimit_state _rs = {			\
-		.interval = DEFAULT_RATELIMIT_INTERVAL,		\
-		.burst = DEFAULT_RATELIMIT_BURST,		\
-	};							\
-								\
-	if (__ratelimit(&_rs))					\
-		printk(fmt, ##__VA_ARGS__);			\
+#define printk_ratelimited(fmt, ...) ({				\
+	static DEFINE_RATELIMIT_STATE(_rs,			\
+				      DEFAULT_RATELIMIT_INTERVAL, \
+				      DEFAULT_RATELIMIT_BURST);	\
+								\
+	if (__ratelimit(&_rs))					\
+		printk(fmt, ##__VA_ARGS__);			\
 })
 #else
 /* No effect, but we still get type checking even in the !PRINTK case: */
@@ -447,6 +449,7 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
 	printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_warning_ratelimited(fmt, ...) \
 	printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn_ratelimited pr_warning_ratelimited
 #define pr_notice_ratelimited(fmt, ...) \
 	printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_info_ratelimited(fmt, ...) \
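The new pr_warn/pr_warn_ratelimited names are drop-in aliases of pr_warning/pr_warning_ratelimited. A one-function example of the rate-limited form (report_overrun() is a hypothetical caller):

static void report_overrun(int queue)
{
        /* Prints at most DEFAULT_RATELIMIT_BURST times per DEFAULT_RATELIMIT_INTERVAL. */
        pr_warn_ratelimited("queue %d overrun, dropping data\n", queue);
}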
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
index f1ca0dcc1628..0e8a346424bb 100644
--- a/include/linux/lis3lv02d.h
+++ b/include/linux/lis3lv02d.h
@@ -25,12 +25,14 @@ struct lis3lv02d_platform_data {
 #define LIS3_IRQ1_FF_WU_12	(3 << 0)
 #define LIS3_IRQ1_DATA_READY	(4 << 0)
 #define LIS3_IRQ1_CLICK		(7 << 0)
+#define LIS3_IRQ1_MASK		(7 << 0)
 #define LIS3_IRQ2_DISABLE	(0 << 3)
 #define LIS3_IRQ2_FF_WU_1	(1 << 3)
 #define LIS3_IRQ2_FF_WU_2	(2 << 3)
 #define LIS3_IRQ2_FF_WU_12	(3 << 3)
 #define LIS3_IRQ2_DATA_READY	(4 << 3)
 #define LIS3_IRQ2_CLICK		(7 << 3)
+#define LIS3_IRQ2_MASK		(7 << 3)
 #define LIS3_IRQ_OPEN_DRAIN	(1 << 6)
 #define LIS3_IRQ_ACTIVE_LOW	(1 << 7)
 	unsigned char irq_cfg;
@@ -43,6 +45,15 @@ struct lis3lv02d_platform_data {
 #define LIS3_WAKEUP_Z_HI	(1 << 5)
 	unsigned char wakeup_flags;
 	unsigned char wakeup_thresh;
+	unsigned char wakeup_flags2;
+	unsigned char wakeup_thresh2;
+#define LIS3_HIPASS_CUTFF_8HZ	0
+#define LIS3_HIPASS_CUTFF_4HZ	1
+#define LIS3_HIPASS_CUTFF_2HZ	2
+#define LIS3_HIPASS_CUTFF_1HZ	3
+#define LIS3_HIPASS1_DISABLE	(1 << 2)
+#define LIS3_HIPASS2_DISABLE	(1 << 3)
+	unsigned char hipass_ctrl;
 #define LIS3_NO_MAP		0
 #define LIS3_DEV_X		1
 #define LIS3_DEV_Y		2
@@ -58,6 +69,7 @@ struct lis3lv02d_platform_data {
 	/* Limits for selftest are specified in chip data sheet */
 	s16 st_min_limits[3]; /* min pass limit x, y, z */
 	s16 st_max_limits[3]; /* max pass limit x, y, z */
+	int irq2;
 };
 
 #endif /* __LIS3LV02D_H_ */
diff --git a/include/linux/matroxfb.h b/include/linux/matroxfb.h
index 2203121a43e9..8c22a8938642 100644
--- a/include/linux/matroxfb.h
+++ b/include/linux/matroxfb.h
@@ -4,6 +4,7 @@
 #include <asm/ioctl.h>
 #include <linux/types.h>
 #include <linux/videodev2.h>
+#include <linux/fb.h>
 
 struct matroxioc_output_mode {
 	__u32	output;		/* which output */
@@ -37,7 +38,5 @@ enum matroxfb_ctrl_id {
   MATROXFB_CID_LAST
 };
 
-#define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
-
 #endif
 
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 44301c6affa8..05894795fdc1 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -25,6 +25,13 @@
 struct page;
 struct mm_struct;
 
+extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
+					struct list_head *dst,
+					unsigned long *scanned, int order,
+					int mode, struct zone *z,
+					struct mem_cgroup *mem_cont,
+					int active, int file);
+
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -64,12 +71,6 @@ extern void mem_cgroup_uncharge_cache_page(struct page *page);
 extern int mem_cgroup_shmem_charge_fallback(struct page *page,
 			struct mm_struct *mm, gfp_t gfp_mask);
 
-extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
-					struct list_head *dst,
-					unsigned long *scanned, int order,
-					int mode, struct zone *z,
-					struct mem_cgroup *mem_cont,
-					int active, int file);
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
 
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 35b07b773e6c..864035fb8f8a 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -202,6 +202,7 @@ static inline int is_mem_section_removable(unsigned long pfn,
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
+extern int mem_online_node(int nid);
 extern int add_memory(int nid, u64 start, u64 size);
 extern int arch_add_memory(int nid, u64 start, u64 size);
 extern int remove_memory(u64 start, u64 size);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 1cc966cd3e5f..7b9ef6bf45aa 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -23,6 +23,13 @@ enum {
 	MPOL_MAX,	/* always last member of enum */
 };
 
+enum mpol_rebind_step {
+	MPOL_REBIND_ONCE,	/* do the rebind work in one step, not two */
+	MPOL_REBIND_STEP1,	/* first step: set all the newly allowed nodes */
+	MPOL_REBIND_STEP2,	/* second step: clear all the disallowed nodes */
+	MPOL_REBIND_NSTEP,
+};
+
 /* Flags for set_mempolicy */
 #define MPOL_F_STATIC_NODES	(1 << 15)
 #define MPOL_F_RELATIVE_NODES	(1 << 14)
@@ -51,6 +58,7 @@ enum {
  */
 #define MPOL_F_SHARED  (1 << 0)	/* identify shared policies */
 #define MPOL_F_LOCAL   (1 << 1)	/* preferred local allocation */
+#define MPOL_F_REBINDING (1 << 2)	/* identify policies in rebinding */
 
 #ifdef __KERNEL__
 
@@ -193,8 +201,8 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
 
 extern void numa_default_policy(void);
 extern void numa_policy_init(void);
-extern void mpol_rebind_task(struct task_struct *tsk,
-					const nodemask_t *new);
+extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
+				enum mpol_rebind_step step);
 extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
 extern void mpol_fix_fork_child_flag(struct task_struct *p);
 
@@ -308,7 +316,8 @@ static inline void numa_default_policy(void)
 }
 
 static inline void mpol_rebind_task(struct task_struct *tsk,
-				const nodemask_t *new)
+				const nodemask_t *new,
+				enum mpol_rebind_step step)
 {
 }
 
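The two-step rebind is intended to keep a task's nodemask usable while it is being changed: first the newly allowed nodes are set, then the disallowed ones are cleared. A sketch of a writer using the new signature (update_task_mempolicy() is a hypothetical caller; the helper and enum values are the ones declared above):

static void update_task_mempolicy(struct task_struct *tsk, const nodemask_t *newmems)
{
        /* Step 1: set the newly allowed nodes so the mask never goes empty. */
        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);

        /* Step 2: clear the nodes that are no longer allowed. */
        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
}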
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 7f085c97c799..7238231b8dd4 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -9,7 +9,7 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 #ifdef CONFIG_MIGRATION
 #define PAGE_MIGRATION 1
 
-extern int putback_lru_pages(struct list_head *l);
+extern void putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
 extern int migrate_pages(struct list_head *l, new_page_t x,
@@ -19,17 +19,19 @@ extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
 
 extern int migrate_prep(void);
+extern int migrate_prep_local(void);
 extern int migrate_vmas(struct mm_struct *mm,
 		const nodemask_t *from, const nodemask_t *to,
 		unsigned long flags);
 #else
 #define PAGE_MIGRATION 0
 
-static inline int putback_lru_pages(struct list_head *l) { return 0; }
+static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private, int offlining) { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
+static inline int migrate_prep_local(void) { return -ENOSYS; }
 
 static inline int migrate_vmas(struct mm_struct *mm,
 		const nodemask_t *from, const nodemask_t *to,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fb19bb92b809..b969efb03787 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -13,6 +13,7 @@
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
 #include <linux/range.h>
+#include <linux/pfn.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -106,6 +107,9 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_PFN_AT_MMAP	0x40000000	/* PFNMAP vma that is fully mapped at mmap time */
 #define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
 
+/* Bits set in the VMA until the stack is in its final location */
+#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)
+
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
 #endif
@@ -334,6 +338,7 @@ void put_page(struct page *page);
 void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
+int split_free_page(struct page *page);
 
 /*
  * Compound pages have a destructor function. Provide a
@@ -591,7 +596,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
 
 static __always_inline void *lowmem_page_address(struct page *page)
 {
-	return __va(page_to_pfn(page) << PAGE_SHIFT);
+	return __va(PFN_PHYS(page_to_pfn(page)));
 }
 
 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index cf9e458e96b0..0fa491326c4a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -321,6 +321,15 @@ struct zone {
 	unsigned long		*pageblock_flags;
 #endif /* CONFIG_SPARSEMEM */
 
+#ifdef CONFIG_COMPACTION
+	/*
+	 * On compaction failure, 1<<compact_defer_shift compactions
+	 * are skipped before trying again. The number attempted since
+	 * last failure is tracked with compact_considered.
+	 */
+	unsigned int		compact_considered;
+	unsigned int		compact_defer_shift;
+#endif
 
 	ZONE_PADDING(_pad1_)
 
@@ -641,9 +650,10 @@ typedef struct pglist_data {
 
 #include <linux/memory_hotplug.h>
 
+extern struct mutex zonelists_mutex;
 void get_zone_counts(unsigned long *active, unsigned long *inactive,
 			unsigned long *free);
-void build_all_zonelists(void);
+void build_all_zonelists(void *data);
 void wakeup_kswapd(struct zone *zone, int order);
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		int classzone_idx, int alloc_flags);
@@ -972,7 +982,7 @@ struct mem_section {
 #endif
 
 #define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
-#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
+#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
 #define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
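build_all_zonelists() now takes a data pointer and, per the newly exported zonelists_mutex, is expected to be serialized by callers that rebuild zonelists at runtime. A sketch of the intended call pattern, with the surrounding hotplug context simplified and the wrapper function hypothetical:

static void rebuild_zonelists_demo(void)
{
        mutex_lock(&zonelists_mutex);
        build_all_zonelists(NULL);      /* NULL: no zone-specific data in this sketch */
        mutex_unlock(&zonelists_mutex);
}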
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 668cf1bef030..8f69d09a41a5 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -2,7 +2,7 @@
 #define _LINUX_RATELIMIT_H
 
 #include <linux/param.h>
-#include <linux/spinlock_types.h>
+#include <linux/spinlock.h>
 
 #define DEFAULT_RATELIMIT_INTERVAL	(5 * HZ)
 #define DEFAULT_RATELIMIT_BURST		10
@@ -25,6 +25,17 @@ struct ratelimit_state {
 		.burst		= burst_init,				\
 	}
 
+static inline void ratelimit_state_init(struct ratelimit_state *rs,
+					int interval, int burst)
+{
+	spin_lock_init(&rs->lock);
+	rs->interval = interval;
+	rs->burst = burst;
+	rs->printed = 0;
+	rs->missed = 0;
+	rs->begin = 0;
+}
+
 extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
 #define __ratelimit(state) ___ratelimit(state, __func__)
 
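ratelimit_state_init() lets a ratelimit_state that cannot use the static DEFINE_RATELIMIT_STATE() initializer be set up at runtime. A short sketch; the drop_rs state and the two caller functions are hypothetical, while ratelimit_state_init() and __ratelimit() are the interfaces declared above.

static struct ratelimit_state drop_rs;

static void drop_rs_setup(void)
{
        ratelimit_state_init(&drop_rs, DEFAULT_RATELIMIT_INTERVAL,
                             DEFAULT_RATELIMIT_BURST);
}

static void note_dropped_frame(void)
{
        if (__ratelimit(&drop_rs))
                pr_warn("dropping frame\n");
}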
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index d25bd224d370..77216742c178 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -26,8 +26,17 @@
  */
 struct anon_vma {
 	spinlock_t lock;	/* Serialize access to vma list */
-#ifdef CONFIG_KSM
-	atomic_t ksm_refcount;
+#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
+
+	/*
+	 * The external_refcount is taken by either KSM or page migration
+	 * to take a reference to an anon_vma when there is no
+	 * guarantee that the vma of page tables will exist for
+	 * the duration of the operation. A caller that takes
+	 * the reference is responsible for clearing up the
+	 * anon_vma if they are the last user on release
+	 */
+	atomic_t external_refcount;
 #endif
 	/*
 	 * NOTE: the LSB of the head.next is set by
@@ -61,22 +70,22 @@ struct anon_vma_chain {
 };
 
 #ifdef CONFIG_MMU
-#ifdef CONFIG_KSM
-static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
+static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
 {
-	atomic_set(&anon_vma->ksm_refcount, 0);
+	atomic_set(&anon_vma->external_refcount, 0);
 }
 
-static inline int ksm_refcount(struct anon_vma *anon_vma)
+static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
 {
-	return atomic_read(&anon_vma->ksm_refcount);
+	return atomic_read(&anon_vma->external_refcount);
 }
 #else
-static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
 {
 }
 
-static inline int ksm_refcount(struct anon_vma *anon_vma)
+static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
 {
 	return 0;
 }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b55e988988b5..c0151ffd3541 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -384,7 +384,7 @@ struct user_namespace;
  * 1-3 now and depends on arch. We use "5" as safe margin, here.
  */
 #define MAPCOUNT_ELF_CORE_MARGIN	(5)
-#define DEFAULT_MAX_MAP_COUNT	(USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
 
 extern int sysctl_max_map_count;
 
@@ -1421,6 +1421,7 @@
 #endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
+	int mems_allowed_change_disable;
 	int cpuset_mem_spread_rotor;
 #endif
 #ifdef CONFIG_CGROUPS
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ec2b7a42b45f..b6b614364dd8 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -152,6 +152,7 @@ enum {
 };
 
 #define SWAP_CLUSTER_MAX 32
+#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
 
 #define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
 #define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
@@ -224,20 +225,15 @@ static inline void lru_cache_add_anon(struct page *page)
 	__lru_cache_add(page, LRU_INACTIVE_ANON);
 }
 
-static inline void lru_cache_add_active_anon(struct page *page)
-{
-	__lru_cache_add(page, LRU_ACTIVE_ANON);
-}
-
 static inline void lru_cache_add_file(struct page *page)
 {
 	__lru_cache_add(page, LRU_INACTIVE_FILE);
 }
 
-static inline void lru_cache_add_active_file(struct page *page)
-{
-	__lru_cache_add(page, LRU_ACTIVE_FILE);
-}
+/* LRU Isolation modes. */
+#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
+#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
+#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */
 
 /* linux/mm/vmscan.c */
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 117f0dd8ad03..7f43ccdc1d38 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -43,6 +43,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
 		KSWAPD_SKIP_CONGESTION_WAIT,
 		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_COMPACTION
+		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
+		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
+#endif
 #ifdef CONFIG_HUGETLB_PAGE
 		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
 #endif
