diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-18 22:26:54 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-18 22:26:54 -0400 |
| commit | 814a2bf957739f367cbebfa1b60237387b72d0ee (patch) | |
| tree | 8d65c38d14beb8d6d2dc5b9d7f8dbe63c7cad31a /include | |
| parent | 237045fc3c67d44088f767dca5a9fa30815eba62 (diff) | |
| parent | f9310b2f9a19b7f16c7b1c1558f8b649b9b933c1 (diff) | |
Merge branch 'akpm' (patches from Andrew)
Merge second patch-bomb from Andrew Morton:
- a couple of hotfixes
- the rest of MM
- a new timer slack control in procfs
- a couple of procfs fixes
- a few misc things
- some printk tweaks
- lib/ updates, notably to radix-tree.
- add my and Nick Piggin's old userspace radix-tree test harness to
tools/testing/radix-tree/. Matthew said it was a godsend during the
radix-tree work he did.
- a few code-size improvements, switching to __always_inline where gcc
screwed up.
- partially implement character sets in sscanf
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (118 commits)
sscanf: implement basic character sets
lib/bug.c: use common WARN helper
param: convert some "on"/"off" users to strtobool
lib: add "on"/"off" support to kstrtobool
lib: update single-char callers of strtobool()
lib: move strtobool() to kstrtobool()
include/linux/unaligned: force inlining of byteswap operations
include/uapi/linux/byteorder, swab: force inlining of some byteswap operations
include/asm-generic/atomic-long.h: force inlining of some atomic_long operations
usb: common: convert to use match_string() helper
ide: hpt366: convert to use match_string() helper
ata: hpt366: convert to use match_string() helper
power: ab8500: convert to use match_string() helper
power: charger_manager: convert to use match_string() helper
drm/edid: convert to use match_string() helper
pinctrl: convert to use match_string() helper
device property: convert to use match_string() helper
lib/string: introduce match_string() helper
radix-tree tests: add test for radix_tree_iter_next
radix-tree tests: add regression3 test
...
Diffstat (limited to 'include')
36 files changed, 673 insertions, 203 deletions
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index eb1973bad80b..5e1f345b58dd 100644 --- a/include/asm-generic/atomic-long.h +++ b/include/asm-generic/atomic-long.h | |||
| @@ -98,14 +98,14 @@ ATOMIC_LONG_ADD_SUB_OP(sub, _release) | |||
| 98 | #define atomic_long_xchg(v, new) \ | 98 | #define atomic_long_xchg(v, new) \ |
| 99 | (ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new))) | 99 | (ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new))) |
| 100 | 100 | ||
| 101 | static inline void atomic_long_inc(atomic_long_t *l) | 101 | static __always_inline void atomic_long_inc(atomic_long_t *l) |
| 102 | { | 102 | { |
| 103 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; | 103 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; |
| 104 | 104 | ||
| 105 | ATOMIC_LONG_PFX(_inc)(v); | 105 | ATOMIC_LONG_PFX(_inc)(v); |
| 106 | } | 106 | } |
| 107 | 107 | ||
| 108 | static inline void atomic_long_dec(atomic_long_t *l) | 108 | static __always_inline void atomic_long_dec(atomic_long_t *l) |
| 109 | { | 109 | { |
| 110 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; | 110 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; |
| 111 | 111 | ||
| @@ -113,7 +113,7 @@ static inline void atomic_long_dec(atomic_long_t *l) | |||
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | #define ATOMIC_LONG_OP(op) \ | 115 | #define ATOMIC_LONG_OP(op) \ |
| 116 | static inline void \ | 116 | static __always_inline void \ |
| 117 | atomic_long_##op(long i, atomic_long_t *l) \ | 117 | atomic_long_##op(long i, atomic_long_t *l) \ |
| 118 | { \ | 118 | { \ |
| 119 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ | 119 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ |
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 630dd2372238..f90588abbfd4 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h | |||
| @@ -81,6 +81,12 @@ extern void warn_slowpath_null(const char *file, const int line); | |||
| 81 | do { printk(arg); __WARN_TAINT(taint); } while (0) | 81 | do { printk(arg); __WARN_TAINT(taint); } while (0) |
| 82 | #endif | 82 | #endif |
| 83 | 83 | ||
| 84 | /* used internally by panic.c */ | ||
| 85 | struct warn_args; | ||
| 86 | |||
| 87 | void __warn(const char *file, int line, void *caller, unsigned taint, | ||
| 88 | struct pt_regs *regs, struct warn_args *args); | ||
| 89 | |||
| 84 | #ifndef WARN_ON | 90 | #ifndef WARN_ON |
| 85 | #define WARN_ON(condition) ({ \ | 91 | #define WARN_ON(condition) ({ \ |
| 86 | int __ret_warn_on = !!(condition); \ | 92 | int __ret_warn_on = !!(condition); \ |
| @@ -110,9 +116,10 @@ extern void warn_slowpath_null(const char *file, const int line); | |||
| 110 | static bool __section(.data.unlikely) __warned; \ | 116 | static bool __section(.data.unlikely) __warned; \ |
| 111 | int __ret_warn_once = !!(condition); \ | 117 | int __ret_warn_once = !!(condition); \ |
| 112 | \ | 118 | \ |
| 113 | if (unlikely(__ret_warn_once)) \ | 119 | if (unlikely(__ret_warn_once && !__warned)) { \ |
| 114 | if (WARN_ON(!__warned)) \ | 120 | __warned = true; \ |
| 115 | __warned = true; \ | 121 | WARN_ON(1); \ |
| 122 | } \ | ||
| 116 | unlikely(__ret_warn_once); \ | 123 | unlikely(__ret_warn_once); \ |
| 117 | }) | 124 | }) |
| 118 | 125 | ||
| @@ -120,9 +127,10 @@ extern void warn_slowpath_null(const char *file, const int line); | |||
| 120 | static bool __section(.data.unlikely) __warned; \ | 127 | static bool __section(.data.unlikely) __warned; \ |
| 121 | int __ret_warn_once = !!(condition); \ | 128 | int __ret_warn_once = !!(condition); \ |
| 122 | \ | 129 | \ |
| 123 | if (unlikely(__ret_warn_once)) \ | 130 | if (unlikely(__ret_warn_once && !__warned)) { \ |
| 124 | if (WARN(!__warned, format)) \ | 131 | __warned = true; \ |
| 125 | __warned = true; \ | 132 | WARN(1, format); \ |
| 133 | } \ | ||
| 126 | unlikely(__ret_warn_once); \ | 134 | unlikely(__ret_warn_once); \ |
| 127 | }) | 135 | }) |
| 128 | 136 | ||
| @@ -130,9 +138,10 @@ extern void warn_slowpath_null(const char *file, const int line); | |||
| 130 | static bool __section(.data.unlikely) __warned; \ | 138 | static bool __section(.data.unlikely) __warned; \ |
| 131 | int __ret_warn_once = !!(condition); \ | 139 | int __ret_warn_once = !!(condition); \ |
| 132 | \ | 140 | \ |
| 133 | if (unlikely(__ret_warn_once)) \ | 141 | if (unlikely(__ret_warn_once && !__warned)) { \ |
| 134 | if (WARN_TAINT(!__warned, taint, format)) \ | 142 | __warned = true; \ |
| 135 | __warned = true; \ | 143 | WARN_TAINT(1, taint, format); \ |
| 144 | } \ | ||
| 136 | unlikely(__ret_warn_once); \ | 145 | unlikely(__ret_warn_once); \ |
| 137 | }) | 146 | }) |
| 138 | 147 | ||
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index c370b261c720..9401f4819891 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
| @@ -783,6 +783,23 @@ static inline int pmd_clear_huge(pmd_t *pmd) | |||
| 783 | } | 783 | } |
| 784 | #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ | 784 | #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ |
| 785 | 785 | ||
| 786 | #ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE | ||
| 787 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
| 788 | /* | ||
| 789 | * ARCHes with special requirements for evicting THP backing TLB entries can | ||
| 790 | * implement this. Otherwise also, it can help optimize normal TLB flush in | ||
| 791 | * THP regime. stock flush_tlb_range() typically has optimization to nuke the | ||
| 792 | * entire TLB if flush span is greater than a threshold, which will | ||
| 793 | * likely be true for a single huge page. Thus a single thp flush will | ||
| 794 | * invalidate the entire TLB which is not desirable. | ||
| 795 | * e.g. see arch/arc: flush_pmd_tlb_range | ||
| 796 | */ | ||
| 797 | #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) | ||
| 798 | #else | ||
| 799 | #define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG() | ||
| 800 | #endif | ||
| 801 | #endif | ||
| 802 | |||
| 786 | #endif /* !__ASSEMBLY__ */ | 803 | #endif /* !__ASSEMBLY__ */ |
| 787 | 804 | ||
| 788 | #ifndef io_remap_pfn_range | 805 | #ifndef io_remap_pfn_range |
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 89d9aa9e79bf..c67f052cc5e5 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
| @@ -82,15 +82,15 @@ struct buffer_head { | |||
| 82 | * and buffer_foo() functions. | 82 | * and buffer_foo() functions. |
| 83 | */ | 83 | */ |
| 84 | #define BUFFER_FNS(bit, name) \ | 84 | #define BUFFER_FNS(bit, name) \ |
| 85 | static inline void set_buffer_##name(struct buffer_head *bh) \ | 85 | static __always_inline void set_buffer_##name(struct buffer_head *bh) \ |
| 86 | { \ | 86 | { \ |
| 87 | set_bit(BH_##bit, &(bh)->b_state); \ | 87 | set_bit(BH_##bit, &(bh)->b_state); \ |
| 88 | } \ | 88 | } \ |
| 89 | static inline void clear_buffer_##name(struct buffer_head *bh) \ | 89 | static __always_inline void clear_buffer_##name(struct buffer_head *bh) \ |
| 90 | { \ | 90 | { \ |
| 91 | clear_bit(BH_##bit, &(bh)->b_state); \ | 91 | clear_bit(BH_##bit, &(bh)->b_state); \ |
| 92 | } \ | 92 | } \ |
| 93 | static inline int buffer_##name(const struct buffer_head *bh) \ | 93 | static __always_inline int buffer_##name(const struct buffer_head *bh) \ |
| 94 | { \ | 94 | { \ |
| 95 | return test_bit(BH_##bit, &(bh)->b_state); \ | 95 | return test_bit(BH_##bit, &(bh)->b_state); \ |
| 96 | } | 96 | } |
| @@ -99,11 +99,11 @@ static inline int buffer_##name(const struct buffer_head *bh) \ | |||
| 99 | * test_set_buffer_foo() and test_clear_buffer_foo() | 99 | * test_set_buffer_foo() and test_clear_buffer_foo() |
| 100 | */ | 100 | */ |
| 101 | #define TAS_BUFFER_FNS(bit, name) \ | 101 | #define TAS_BUFFER_FNS(bit, name) \ |
| 102 | static inline int test_set_buffer_##name(struct buffer_head *bh) \ | 102 | static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \ |
| 103 | { \ | 103 | { \ |
| 104 | return test_and_set_bit(BH_##bit, &(bh)->b_state); \ | 104 | return test_and_set_bit(BH_##bit, &(bh)->b_state); \ |
| 105 | } \ | 105 | } \ |
| 106 | static inline int test_clear_buffer_##name(struct buffer_head *bh) \ | 106 | static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \ |
| 107 | { \ | 107 | { \ |
| 108 | return test_and_clear_bit(BH_##bit, &(bh)->b_state); \ | 108 | return test_and_clear_bit(BH_##bit, &(bh)->b_state); \ |
| 109 | } \ | 109 | } \ |
diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 4cd4ddf64cc7..d7c8de583a23 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h | |||
| @@ -52,6 +52,10 @@ extern void compaction_defer_reset(struct zone *zone, int order, | |||
| 52 | bool alloc_success); | 52 | bool alloc_success); |
| 53 | extern bool compaction_restarting(struct zone *zone, int order); | 53 | extern bool compaction_restarting(struct zone *zone, int order); |
| 54 | 54 | ||
| 55 | extern int kcompactd_run(int nid); | ||
| 56 | extern void kcompactd_stop(int nid); | ||
| 57 | extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); | ||
| 58 | |||
| 55 | #else | 59 | #else |
| 56 | static inline unsigned long try_to_compact_pages(gfp_t gfp_mask, | 60 | static inline unsigned long try_to_compact_pages(gfp_t gfp_mask, |
| 57 | unsigned int order, int alloc_flags, | 61 | unsigned int order, int alloc_flags, |
| @@ -84,6 +88,18 @@ static inline bool compaction_deferred(struct zone *zone, int order) | |||
| 84 | return true; | 88 | return true; |
| 85 | } | 89 | } |
| 86 | 90 | ||
| 91 | static inline int kcompactd_run(int nid) | ||
| 92 | { | ||
| 93 | return 0; | ||
| 94 | } | ||
| 95 | static inline void kcompactd_stop(int nid) | ||
| 96 | { | ||
| 97 | } | ||
| 98 | |||
| 99 | static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) | ||
| 100 | { | ||
| 101 | } | ||
| 102 | |||
| 87 | #endif /* CONFIG_COMPACTION */ | 103 | #endif /* CONFIG_COMPACTION */ |
| 88 | 104 | ||
| 89 | #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) | 105 | #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) |
diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 6b7fd9cf5ea2..dd03e837ebb7 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h | |||
| @@ -231,7 +231,7 @@ static inline long freezable_schedule_timeout_killable_unsafe(long timeout) | |||
| 231 | * call this with locks held. | 231 | * call this with locks held. |
| 232 | */ | 232 | */ |
| 233 | static inline int freezable_schedule_hrtimeout_range(ktime_t *expires, | 233 | static inline int freezable_schedule_hrtimeout_range(ktime_t *expires, |
| 234 | unsigned long delta, const enum hrtimer_mode mode) | 234 | u64 delta, const enum hrtimer_mode mode) |
| 235 | { | 235 | { |
| 236 | int __retval; | 236 | int __retval; |
| 237 | freezer_do_not_count(); | 237 | freezer_do_not_count(); |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index bb16dfeb917e..570383a41853 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
| @@ -105,8 +105,6 @@ struct vm_area_struct; | |||
| 105 | * | 105 | * |
| 106 | * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. | 106 | * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. |
| 107 | * This takes precedence over the __GFP_MEMALLOC flag if both are set. | 107 | * This takes precedence over the __GFP_MEMALLOC flag if both are set. |
| 108 | * | ||
| 109 | * __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement. | ||
| 110 | */ | 108 | */ |
| 111 | #define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) | 109 | #define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) |
| 112 | #define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) | 110 | #define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) |
| @@ -259,7 +257,7 @@ struct vm_area_struct; | |||
| 259 | #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) | 257 | #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) |
| 260 | #define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ | 258 | #define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ |
| 261 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \ | 259 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \ |
| 262 | ~__GFP_KSWAPD_RECLAIM) | 260 | ~__GFP_RECLAIM) |
| 263 | 261 | ||
| 264 | /* Convert GFP flags to their corresponding migrate type */ | 262 | /* Convert GFP flags to their corresponding migrate type */ |
| 265 | #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) | 263 | #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) |
| @@ -333,22 +331,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) | |||
| 333 | * 0xe => BAD (MOVABLE+DMA32+HIGHMEM) | 331 | * 0xe => BAD (MOVABLE+DMA32+HIGHMEM) |
| 334 | * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) | 332 | * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) |
| 335 | * | 333 | * |
| 336 | * ZONES_SHIFT must be <= 2 on 32 bit platforms. | 334 | * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms. |
| 337 | */ | 335 | */ |
| 338 | 336 | ||
| 339 | #if 16 * ZONES_SHIFT > BITS_PER_LONG | 337 | #if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4 |
| 340 | #error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer | 338 | /* ZONE_DEVICE is not a valid GFP zone specifier */ |
| 339 | #define GFP_ZONES_SHIFT 2 | ||
| 340 | #else | ||
| 341 | #define GFP_ZONES_SHIFT ZONES_SHIFT | ||
| 342 | #endif | ||
| 343 | |||
| 344 | #if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG | ||
| 345 | #error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer | ||
| 341 | #endif | 346 | #endif |
| 342 | 347 | ||
| 343 | #define GFP_ZONE_TABLE ( \ | 348 | #define GFP_ZONE_TABLE ( \ |
| 344 | (ZONE_NORMAL << 0 * ZONES_SHIFT) \ | 349 | (ZONE_NORMAL << 0 * GFP_ZONES_SHIFT) \ |
| 345 | | (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \ | 350 | | (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT) \ |
| 346 | | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \ | 351 | | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) \ |
| 347 | | (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \ | 352 | | (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT) \ |
| 348 | | (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \ | 353 | | (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT) \ |
| 349 | | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \ | 354 | | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT) \ |
| 350 | | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \ | 355 | | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\ |
| 351 | | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \ | 356 | | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\ |
| 352 | ) | 357 | ) |
| 353 | 358 | ||
| 354 | /* | 359 | /* |
| @@ -373,8 +378,8 @@ static inline enum zone_type gfp_zone(gfp_t flags) | |||
| 373 | enum zone_type z; | 378 | enum zone_type z; |
| 374 | int bit = (__force int) (flags & GFP_ZONEMASK); | 379 | int bit = (__force int) (flags & GFP_ZONEMASK); |
| 375 | 380 | ||
| 376 | z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) & | 381 | z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) & |
| 377 | ((1 << ZONES_SHIFT) - 1); | 382 | ((1 << GFP_ZONES_SHIFT) - 1); |
| 378 | VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1); | 383 | VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1); |
| 379 | return z; | 384 | return z; |
| 380 | } | 385 | } |
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 2ead22dd74a0..c98c6539e2c2 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
| @@ -220,7 +220,7 @@ static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time | |||
| 220 | timer->node.expires = ktime_add_safe(time, delta); | 220 | timer->node.expires = ktime_add_safe(time, delta); |
| 221 | } | 221 | } |
| 222 | 222 | ||
| 223 | static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta) | 223 | static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta) |
| 224 | { | 224 | { |
| 225 | timer->_softexpires = time; | 225 | timer->_softexpires = time; |
| 226 | timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta)); | 226 | timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta)); |
| @@ -378,7 +378,7 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } | |||
| 378 | 378 | ||
| 379 | /* Basic timer operations: */ | 379 | /* Basic timer operations: */ |
| 380 | extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | 380 | extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, |
| 381 | unsigned long range_ns, const enum hrtimer_mode mode); | 381 | u64 range_ns, const enum hrtimer_mode mode); |
| 382 | 382 | ||
| 383 | /** | 383 | /** |
| 384 | * hrtimer_start - (re)start an hrtimer on the current CPU | 384 | * hrtimer_start - (re)start an hrtimer on the current CPU |
| @@ -399,7 +399,7 @@ extern int hrtimer_try_to_cancel(struct hrtimer *timer); | |||
| 399 | static inline void hrtimer_start_expires(struct hrtimer *timer, | 399 | static inline void hrtimer_start_expires(struct hrtimer *timer, |
| 400 | enum hrtimer_mode mode) | 400 | enum hrtimer_mode mode) |
| 401 | { | 401 | { |
| 402 | unsigned long delta; | 402 | u64 delta; |
| 403 | ktime_t soft, hard; | 403 | ktime_t soft, hard; |
| 404 | soft = hrtimer_get_softexpires(timer); | 404 | soft = hrtimer_get_softexpires(timer); |
| 405 | hard = hrtimer_get_expires(timer); | 405 | hard = hrtimer_get_expires(timer); |
| @@ -477,10 +477,12 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block); | |||
| 477 | extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, | 477 | extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, |
| 478 | struct task_struct *tsk); | 478 | struct task_struct *tsk); |
| 479 | 479 | ||
| 480 | extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, | 480 | extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta, |
| 481 | const enum hrtimer_mode mode); | 481 | const enum hrtimer_mode mode); |
| 482 | extern int schedule_hrtimeout_range_clock(ktime_t *expires, | 482 | extern int schedule_hrtimeout_range_clock(ktime_t *expires, |
| 483 | unsigned long delta, const enum hrtimer_mode mode, int clock); | 483 | u64 delta, |
| 484 | const enum hrtimer_mode mode, | ||
| 485 | int clock); | ||
| 484 | extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); | 486 | extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); |
| 485 | 487 | ||
| 486 | /* Soft interrupt function to run the hrtimer queues: */ | 488 | /* Soft interrupt function to run the hrtimer queues: */ |
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 459fd25b378e..5307dfb3f8ec 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
| @@ -41,7 +41,8 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *, | |||
| 41 | enum transparent_hugepage_flag { | 41 | enum transparent_hugepage_flag { |
| 42 | TRANSPARENT_HUGEPAGE_FLAG, | 42 | TRANSPARENT_HUGEPAGE_FLAG, |
| 43 | TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, | 43 | TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, |
| 44 | TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, | 44 | TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, |
| 45 | TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, | ||
| 45 | TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, | 46 | TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, |
| 46 | TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, | 47 | TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, |
| 47 | TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG, | 48 | TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG, |
| @@ -71,12 +72,6 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma); | |||
| 71 | ((__vma)->vm_flags & VM_HUGEPAGE))) && \ | 72 | ((__vma)->vm_flags & VM_HUGEPAGE))) && \ |
| 72 | !((__vma)->vm_flags & VM_NOHUGEPAGE) && \ | 73 | !((__vma)->vm_flags & VM_NOHUGEPAGE) && \ |
| 73 | !is_vma_temporary_stack(__vma)) | 74 | !is_vma_temporary_stack(__vma)) |
| 74 | #define transparent_hugepage_defrag(__vma) \ | ||
| 75 | ((transparent_hugepage_flags & \ | ||
| 76 | (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \ | ||
| 77 | (transparent_hugepage_flags & \ | ||
| 78 | (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) && \ | ||
| 79 | (__vma)->vm_flags & VM_HUGEPAGE)) | ||
| 80 | #define transparent_hugepage_use_zero_page() \ | 75 | #define transparent_hugepage_use_zero_page() \ |
| 81 | (transparent_hugepage_flags & \ | 76 | (transparent_hugepage_flags & \ |
| 82 | (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) | 77 | (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) |
| @@ -101,16 +96,21 @@ static inline int split_huge_page(struct page *page) | |||
| 101 | void deferred_split_huge_page(struct page *page); | 96 | void deferred_split_huge_page(struct page *page); |
| 102 | 97 | ||
| 103 | void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | 98 | void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
| 104 | unsigned long address); | 99 | unsigned long address, bool freeze); |
| 105 | 100 | ||
| 106 | #define split_huge_pmd(__vma, __pmd, __address) \ | 101 | #define split_huge_pmd(__vma, __pmd, __address) \ |
| 107 | do { \ | 102 | do { \ |
| 108 | pmd_t *____pmd = (__pmd); \ | 103 | pmd_t *____pmd = (__pmd); \ |
| 109 | if (pmd_trans_huge(*____pmd) \ | 104 | if (pmd_trans_huge(*____pmd) \ |
| 110 | || pmd_devmap(*____pmd)) \ | 105 | || pmd_devmap(*____pmd)) \ |
| 111 | __split_huge_pmd(__vma, __pmd, __address); \ | 106 | __split_huge_pmd(__vma, __pmd, __address, \ |
| 107 | false); \ | ||
| 112 | } while (0) | 108 | } while (0) |
| 113 | 109 | ||
| 110 | |||
| 111 | void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, | ||
| 112 | bool freeze, struct page *page); | ||
| 113 | |||
| 114 | #if HPAGE_PMD_ORDER >= MAX_ORDER | 114 | #if HPAGE_PMD_ORDER >= MAX_ORDER |
| 115 | #error "hugepages can't be allocated by the buddy allocator" | 115 | #error "hugepages can't be allocated by the buddy allocator" |
| 116 | #endif | 116 | #endif |
| @@ -178,6 +178,10 @@ static inline int split_huge_page(struct page *page) | |||
| 178 | static inline void deferred_split_huge_page(struct page *page) {} | 178 | static inline void deferred_split_huge_page(struct page *page) {} |
| 179 | #define split_huge_pmd(__vma, __pmd, __address) \ | 179 | #define split_huge_pmd(__vma, __pmd, __address) \ |
| 180 | do { } while (0) | 180 | do { } while (0) |
| 181 | |||
| 182 | static inline void split_huge_pmd_address(struct vm_area_struct *vma, | ||
| 183 | unsigned long address, bool freeze, struct page *page) {} | ||
| 184 | |||
| 181 | static inline int hugepage_madvise(struct vm_area_struct *vma, | 185 | static inline int hugepage_madvise(struct vm_area_struct *vma, |
| 182 | unsigned long *vm_flags, int advice) | 186 | unsigned long *vm_flags, int advice) |
| 183 | { | 187 | { |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index f31638c6e873..f4fa2b29c38c 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -357,6 +357,7 @@ int __must_check kstrtou16(const char *s, unsigned int base, u16 *res); | |||
| 357 | int __must_check kstrtos16(const char *s, unsigned int base, s16 *res); | 357 | int __must_check kstrtos16(const char *s, unsigned int base, s16 *res); |
| 358 | int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); | 358 | int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); |
| 359 | int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); | 359 | int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); |
| 360 | int __must_check kstrtobool(const char *s, bool *res); | ||
| 360 | 361 | ||
| 361 | int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res); | 362 | int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res); |
| 362 | int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res); | 363 | int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res); |
| @@ -368,6 +369,7 @@ int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigne | |||
| 368 | int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res); | 369 | int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res); |
| 369 | int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res); | 370 | int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res); |
| 370 | int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res); | 371 | int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res); |
| 372 | int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res); | ||
| 371 | 373 | ||
| 372 | static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res) | 374 | static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res) |
| 373 | { | 375 | { |
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h index ee7229a6c06a..cb483305e1f5 100644 --- a/include/linux/list_bl.h +++ b/include/linux/list_bl.h | |||
| @@ -48,7 +48,7 @@ static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) | |||
| 48 | 48 | ||
| 49 | #define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member) | 49 | #define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member) |
| 50 | 50 | ||
| 51 | static inline int hlist_bl_unhashed(const struct hlist_bl_node *h) | 51 | static inline bool hlist_bl_unhashed(const struct hlist_bl_node *h) |
| 52 | { | 52 | { |
| 53 | return !h->pprev; | 53 | return !h->pprev; |
| 54 | } | 54 | } |
| @@ -68,7 +68,7 @@ static inline void hlist_bl_set_first(struct hlist_bl_head *h, | |||
| 68 | h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK); | 68 | h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK); |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | static inline int hlist_bl_empty(const struct hlist_bl_head *h) | 71 | static inline bool hlist_bl_empty(const struct hlist_bl_head *h) |
| 72 | { | 72 | { |
| 73 | return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK); | 73 | return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK); |
| 74 | } | 74 | } |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index f0c4bec6565b..1191d79aa495 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
| @@ -52,7 +52,10 @@ enum mem_cgroup_stat_index { | |||
| 52 | MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ | 52 | MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ |
| 53 | MEM_CGROUP_STAT_NSTATS, | 53 | MEM_CGROUP_STAT_NSTATS, |
| 54 | /* default hierarchy stats */ | 54 | /* default hierarchy stats */ |
| 55 | MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS, | 55 | MEMCG_KERNEL_STACK = MEM_CGROUP_STAT_NSTATS, |
| 56 | MEMCG_SLAB_RECLAIMABLE, | ||
| 57 | MEMCG_SLAB_UNRECLAIMABLE, | ||
| 58 | MEMCG_SOCK, | ||
| 56 | MEMCG_NR_STAT, | 59 | MEMCG_NR_STAT, |
| 57 | }; | 60 | }; |
| 58 | 61 | ||
| @@ -400,6 +403,9 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); | |||
| 400 | void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, | 403 | void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, |
| 401 | int nr_pages); | 404 | int nr_pages); |
| 402 | 405 | ||
| 406 | unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, | ||
| 407 | int nid, unsigned int lru_mask); | ||
| 408 | |||
| 403 | static inline | 409 | static inline |
| 404 | unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) | 410 | unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) |
| 405 | { | 411 | { |
| @@ -658,6 +664,13 @@ mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, | |||
| 658 | { | 664 | { |
| 659 | } | 665 | } |
| 660 | 666 | ||
| 667 | static inline unsigned long | ||
| 668 | mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, | ||
| 669 | int nid, unsigned int lru_mask) | ||
| 670 | { | ||
| 671 | return 0; | ||
| 672 | } | ||
| 673 | |||
| 661 | static inline void | 674 | static inline void |
| 662 | mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) | 675 | mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) |
| 663 | { | 676 | { |
| @@ -792,11 +805,6 @@ static inline bool memcg_kmem_enabled(void) | |||
| 792 | return static_branch_unlikely(&memcg_kmem_enabled_key); | 805 | return static_branch_unlikely(&memcg_kmem_enabled_key); |
| 793 | } | 806 | } |
| 794 | 807 | ||
| 795 | static inline bool memcg_kmem_online(struct mem_cgroup *memcg) | ||
| 796 | { | ||
| 797 | return memcg->kmem_state == KMEM_ONLINE; | ||
| 798 | } | ||
| 799 | |||
| 800 | /* | 808 | /* |
| 801 | * In general, we'll do everything in our power to not incur in any overhead | 809 | * In general, we'll do everything in our power to not incur in any overhead |
| 802 | * for non-memcg users for the kmem functions. Not even a function call, if we | 810 | * for non-memcg users for the kmem functions. Not even a function call, if we |
| @@ -883,6 +891,20 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) | |||
| 883 | if (memcg_kmem_enabled()) | 891 | if (memcg_kmem_enabled()) |
| 884 | __memcg_kmem_put_cache(cachep); | 892 | __memcg_kmem_put_cache(cachep); |
| 885 | } | 893 | } |
| 894 | |||
| 895 | /** | ||
| 896 | * memcg_kmem_update_page_stat - update kmem page state statistics | ||
| 897 | * @page: the page | ||
| 898 | * @idx: page state item to account | ||
| 899 | * @val: number of pages (positive or negative) | ||
| 900 | */ | ||
| 901 | static inline void memcg_kmem_update_page_stat(struct page *page, | ||
| 902 | enum mem_cgroup_stat_index idx, int val) | ||
| 903 | { | ||
| 904 | if (memcg_kmem_enabled() && page->mem_cgroup) | ||
| 905 | this_cpu_add(page->mem_cgroup->stat->count[idx], val); | ||
| 906 | } | ||
| 907 | |||
| 886 | #else | 908 | #else |
| 887 | #define for_each_memcg_cache_index(_idx) \ | 909 | #define for_each_memcg_cache_index(_idx) \ |
| 888 | for (; NULL; ) | 910 | for (; NULL; ) |
| @@ -892,11 +914,6 @@ static inline bool memcg_kmem_enabled(void) | |||
| 892 | return false; | 914 | return false; |
| 893 | } | 915 | } |
| 894 | 916 | ||
| 895 | static inline bool memcg_kmem_online(struct mem_cgroup *memcg) | ||
| 896 | { | ||
| 897 | return false; | ||
| 898 | } | ||
| 899 | |||
| 900 | static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) | 917 | static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) |
| 901 | { | 918 | { |
| 902 | return 0; | 919 | return 0; |
| @@ -928,6 +945,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
| 928 | static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) | 945 | static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) |
| 929 | { | 946 | { |
| 930 | } | 947 | } |
| 948 | |||
| 949 | static inline void memcg_kmem_update_page_stat(struct page *page, | ||
| 950 | enum mem_cgroup_stat_index idx, int val) | ||
| 951 | { | ||
| 952 | } | ||
| 931 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ | 953 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ |
| 932 | 954 | ||
| 933 | #endif /* _LINUX_MEMCONTROL_H */ | 955 | #endif /* _LINUX_MEMCONTROL_H */ |
diff --git a/include/linux/mm.h b/include/linux/mm.h index dbf1eddab964..7d42501c8bb4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/resource.h> | 22 | #include <linux/resource.h> |
| 23 | #include <linux/page_ext.h> | 23 | #include <linux/page_ext.h> |
| 24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
| 25 | #include <linux/page_ref.h> | ||
| 25 | 26 | ||
| 26 | struct mempolicy; | 27 | struct mempolicy; |
| 27 | struct anon_vma; | 28 | struct anon_vma; |
| @@ -82,6 +83,27 @@ extern int mmap_rnd_compat_bits __read_mostly; | |||
| 82 | #define mm_forbids_zeropage(X) (0) | 83 | #define mm_forbids_zeropage(X) (0) |
| 83 | #endif | 84 | #endif |
| 84 | 85 | ||
| 86 | /* | ||
| 87 | * Default maximum number of active map areas, this limits the number of vmas | ||
| 88 | * per mm struct. Users can overwrite this number by sysctl but there is a | ||
| 89 | * problem. | ||
| 90 | * | ||
| 91 | * When a program's coredump is generated as ELF format, a section is created | ||
| 92 | * per a vma. In ELF, the number of sections is represented in unsigned short. | ||
| 93 | * This means the number of sections should be smaller than 65535 at coredump. | ||
| 94 | * Because the kernel adds some informative sections to a image of program at | ||
| 95 | * generating coredump, we need some margin. The number of extra sections is | ||
| 96 | * 1-3 now and depends on arch. We use "5" as safe margin, here. | ||
| 97 | * | ||
| 98 | * ELF extended numbering allows more than 65535 sections, so 16-bit bound is | ||
| 99 | * not a hard limit any more. Although some userspace tools can be surprised by | ||
| 100 | * that. | ||
| 101 | */ | ||
| 102 | #define MAPCOUNT_ELF_CORE_MARGIN (5) | ||
| 103 | #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) | ||
| 104 | |||
| 105 | extern int sysctl_max_map_count; | ||
| 106 | |||
| 85 | extern unsigned long sysctl_user_reserve_kbytes; | 107 | extern unsigned long sysctl_user_reserve_kbytes; |
| 86 | extern unsigned long sysctl_admin_reserve_kbytes; | 108 | extern unsigned long sysctl_admin_reserve_kbytes; |
| 87 | 109 | ||
| @@ -122,6 +144,7 @@ extern unsigned int kobjsize(const void *objp); | |||
| 122 | 144 | ||
| 123 | /* | 145 | /* |
| 124 | * vm_flags in vm_area_struct, see mm_types.h. | 146 | * vm_flags in vm_area_struct, see mm_types.h. |
| 147 | * When changing, update also include/trace/events/mmflags.h | ||
| 125 | */ | 148 | */ |
| 126 | #define VM_NONE 0x00000000 | 149 | #define VM_NONE 0x00000000 |
| 127 | 150 | ||
| @@ -364,8 +387,8 @@ static inline int pmd_devmap(pmd_t pmd) | |||
| 364 | */ | 387 | */ |
| 365 | static inline int put_page_testzero(struct page *page) | 388 | static inline int put_page_testzero(struct page *page) |
| 366 | { | 389 | { |
| 367 | VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page); | 390 | VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); |
| 368 | return atomic_dec_and_test(&page->_count); | 391 | return page_ref_dec_and_test(page); |
| 369 | } | 392 | } |
| 370 | 393 | ||
| 371 | /* | 394 | /* |
| @@ -376,7 +399,7 @@ static inline int put_page_testzero(struct page *page) | |||
| 376 | */ | 399 | */ |
| 377 | static inline int get_page_unless_zero(struct page *page) | 400 | static inline int get_page_unless_zero(struct page *page) |
| 378 | { | 401 | { |
| 379 | return atomic_inc_not_zero(&page->_count); | 402 | return page_ref_add_unless(page, 1, 0); |
| 380 | } | 403 | } |
| 381 | 404 | ||
| 382 | extern int page_is_ram(unsigned long pfn); | 405 | extern int page_is_ram(unsigned long pfn); |
| @@ -464,11 +487,6 @@ static inline int total_mapcount(struct page *page) | |||
| 464 | } | 487 | } |
| 465 | #endif | 488 | #endif |
| 466 | 489 | ||
| 467 | static inline int page_count(struct page *page) | ||
| 468 | { | ||
| 469 | return atomic_read(&compound_head(page)->_count); | ||
| 470 | } | ||
| 471 | |||
| 472 | static inline struct page *virt_to_head_page(const void *x) | 490 | static inline struct page *virt_to_head_page(const void *x) |
| 473 | { | 491 | { |
| 474 | struct page *page = virt_to_page(x); | 492 | struct page *page = virt_to_page(x); |
| @@ -476,15 +494,6 @@ static inline struct page *virt_to_head_page(const void *x) | |||
| 476 | return compound_head(page); | 494 | return compound_head(page); |
| 477 | } | 495 | } |
| 478 | 496 | ||
| 479 | /* | ||
| 480 | * Setup the page count before being freed into the page allocator for | ||
| 481 | * the first time (boot or memory hotplug) | ||
| 482 | */ | ||
| 483 | static inline void init_page_count(struct page *page) | ||
| 484 | { | ||
| 485 | atomic_set(&page->_count, 1); | ||
| 486 | } | ||
| 487 | |||
| 488 | void __put_page(struct page *page); | 497 | void __put_page(struct page *page); |
| 489 | 498 | ||
| 490 | void put_pages_list(struct list_head *pages); | 499 | void put_pages_list(struct list_head *pages); |
| @@ -694,8 +703,8 @@ static inline void get_page(struct page *page) | |||
| 694 | * Getting a normal page or the head of a compound page | 703 | * Getting a normal page or the head of a compound page |
| 695 | * requires to already have an elevated page->_count. | 704 | * requires to already have an elevated page->_count. |
| 696 | */ | 705 | */ |
| 697 | VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); | 706 | VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page); |
| 698 | atomic_inc(&page->_count); | 707 | page_ref_inc(page); |
| 699 | 708 | ||
| 700 | if (unlikely(is_zone_device_page(page))) | 709 | if (unlikely(is_zone_device_page(page))) |
| 701 | get_zone_device_page(page); | 710 | get_zone_device_page(page); |
| @@ -1043,8 +1052,6 @@ static inline void clear_page_pfmemalloc(struct page *page) | |||
| 1043 | * just gets major/minor fault counters bumped up. | 1052 | * just gets major/minor fault counters bumped up. |
| 1044 | */ | 1053 | */ |
| 1045 | 1054 | ||
| 1046 | #define VM_FAULT_MINOR 0 /* For backwards compat. Remove me quickly. */ | ||
| 1047 | |||
| 1048 | #define VM_FAULT_OOM 0x0001 | 1055 | #define VM_FAULT_OOM 0x0001 |
| 1049 | #define VM_FAULT_SIGBUS 0x0002 | 1056 | #define VM_FAULT_SIGBUS 0x0002 |
| 1050 | #define VM_FAULT_MAJOR 0x0004 | 1057 | #define VM_FAULT_MAJOR 0x0004 |
| @@ -1523,8 +1530,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) | |||
| 1523 | } | 1530 | } |
| 1524 | #endif | 1531 | #endif |
| 1525 | 1532 | ||
| 1526 | int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, | 1533 | int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address); |
| 1527 | pmd_t *pmd, unsigned long address); | ||
| 1528 | int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); | 1534 | int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); |
| 1529 | 1535 | ||
| 1530 | /* | 1536 | /* |
| @@ -1650,15 +1656,15 @@ static inline void pgtable_page_dtor(struct page *page) | |||
| 1650 | pte_unmap(pte); \ | 1656 | pte_unmap(pte); \ |
| 1651 | } while (0) | 1657 | } while (0) |
| 1652 | 1658 | ||
| 1653 | #define pte_alloc_map(mm, vma, pmd, address) \ | 1659 | #define pte_alloc(mm, pmd, address) \ |
| 1654 | ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \ | 1660 | (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address)) |
| 1655 | pmd, address))? \ | 1661 | |
| 1656 | NULL: pte_offset_map(pmd, address)) | 1662 | #define pte_alloc_map(mm, pmd, address) \ |
| 1663 | (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address)) | ||
| 1657 | 1664 | ||
| 1658 | #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ | 1665 | #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ |
| 1659 | ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL, \ | 1666 | (pte_alloc(mm, pmd, address) ? \ |
| 1660 | pmd, address))? \ | 1667 | NULL : pte_offset_map_lock(mm, pmd, address, ptlp)) |
| 1661 | NULL: pte_offset_map_lock(mm, pmd, address, ptlp)) | ||
| 1662 | 1668 | ||
| 1663 | #define pte_alloc_kernel(pmd, address) \ | 1669 | #define pte_alloc_kernel(pmd, address) \ |
| 1664 | ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \ | 1670 | ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \ |
| @@ -1853,6 +1859,7 @@ extern int __meminit init_per_zone_wmark_min(void); | |||
| 1853 | extern void mem_init(void); | 1859 | extern void mem_init(void); |
| 1854 | extern void __init mmap_init(void); | 1860 | extern void __init mmap_init(void); |
| 1855 | extern void show_mem(unsigned int flags); | 1861 | extern void show_mem(unsigned int flags); |
| 1862 | extern long si_mem_available(void); | ||
| 1856 | extern void si_meminfo(struct sysinfo * val); | 1863 | extern void si_meminfo(struct sysinfo * val); |
| 1857 | extern void si_meminfo_node(struct sysinfo *val, int nid); | 1864 | extern void si_meminfo_node(struct sysinfo *val, int nid); |
| 1858 | 1865 | ||
| @@ -1867,6 +1874,7 @@ extern void zone_pcp_reset(struct zone *zone); | |||
| 1867 | 1874 | ||
| 1868 | /* page_alloc.c */ | 1875 | /* page_alloc.c */ |
| 1869 | extern int min_free_kbytes; | 1876 | extern int min_free_kbytes; |
| 1877 | extern int watermark_scale_factor; | ||
| 1870 | 1878 | ||
| 1871 | /* nommu.c */ | 1879 | /* nommu.c */ |
| 1872 | extern atomic_long_t mmap_pages_allocated; | 1880 | extern atomic_long_t mmap_pages_allocated; |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 6de02ac378a0..c60df9257cc7 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -668,6 +668,12 @@ typedef struct pglist_data { | |||
| 668 | mem_hotplug_begin/end() */ | 668 | mem_hotplug_begin/end() */ |
| 669 | int kswapd_max_order; | 669 | int kswapd_max_order; |
| 670 | enum zone_type classzone_idx; | 670 | enum zone_type classzone_idx; |
| 671 | #ifdef CONFIG_COMPACTION | ||
| 672 | int kcompactd_max_order; | ||
| 673 | enum zone_type kcompactd_classzone_idx; | ||
| 674 | wait_queue_head_t kcompactd_wait; | ||
| 675 | struct task_struct *kcompactd; | ||
| 676 | #endif | ||
| 671 | #ifdef CONFIG_NUMA_BALANCING | 677 | #ifdef CONFIG_NUMA_BALANCING |
| 672 | /* Lock serializing the migrate rate limiting window */ | 678 | /* Lock serializing the migrate rate limiting window */ |
| 673 | spinlock_t numabalancing_migrate_lock; | 679 | spinlock_t numabalancing_migrate_lock; |
| @@ -835,6 +841,8 @@ static inline int is_highmem(struct zone *zone) | |||
| 835 | struct ctl_table; | 841 | struct ctl_table; |
| 836 | int min_free_kbytes_sysctl_handler(struct ctl_table *, int, | 842 | int min_free_kbytes_sysctl_handler(struct ctl_table *, int, |
| 837 | void __user *, size_t *, loff_t *); | 843 | void __user *, size_t *, loff_t *); |
| 844 | int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, | ||
| 845 | void __user *, size_t *, loff_t *); | ||
| 838 | extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; | 846 | extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; |
| 839 | int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, | 847 | int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, |
| 840 | void __user *, size_t *, loff_t *); | 848 | void __user *, size_t *, loff_t *); |
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index da523661500a..77b078c103b2 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h | |||
| @@ -17,6 +17,8 @@ | |||
| 17 | #define ZONES_SHIFT 1 | 17 | #define ZONES_SHIFT 1 |
| 18 | #elif MAX_NR_ZONES <= 4 | 18 | #elif MAX_NR_ZONES <= 4 |
| 19 | #define ZONES_SHIFT 2 | 19 | #define ZONES_SHIFT 2 |
| 20 | #elif MAX_NR_ZONES <= 8 | ||
| 21 | #define ZONES_SHIFT 3 | ||
| 20 | #else | 22 | #else |
| 21 | #error ZONES_SHIFT -- too many zones configured adjust calculation | 23 | #error ZONES_SHIFT -- too many zones configured adjust calculation |
| 22 | #endif | 24 | #endif |
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 19724e6ebd26..f4ed4f1b0c77 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
| @@ -144,12 +144,12 @@ static inline struct page *compound_head(struct page *page) | |||
| 144 | return page; | 144 | return page; |
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | static inline int PageTail(struct page *page) | 147 | static __always_inline int PageTail(struct page *page) |
| 148 | { | 148 | { |
| 149 | return READ_ONCE(page->compound_head) & 1; | 149 | return READ_ONCE(page->compound_head) & 1; |
| 150 | } | 150 | } |
| 151 | 151 | ||
| 152 | static inline int PageCompound(struct page *page) | 152 | static __always_inline int PageCompound(struct page *page) |
| 153 | { | 153 | { |
| 154 | return test_bit(PG_head, &page->flags) || PageTail(page); | 154 | return test_bit(PG_head, &page->flags) || PageTail(page); |
| 155 | } | 155 | } |
| @@ -184,31 +184,31 @@ static inline int PageCompound(struct page *page) | |||
| 184 | * Macros to create function definitions for page flags | 184 | * Macros to create function definitions for page flags |
| 185 | */ | 185 | */ |
| 186 | #define TESTPAGEFLAG(uname, lname, policy) \ | 186 | #define TESTPAGEFLAG(uname, lname, policy) \ |
| 187 | static inline int Page##uname(struct page *page) \ | 187 | static __always_inline int Page##uname(struct page *page) \ |
| 188 | { return test_bit(PG_##lname, &policy(page, 0)->flags); } | 188 | { return test_bit(PG_##lname, &policy(page, 0)->flags); } |
| 189 | 189 | ||
| 190 | #define SETPAGEFLAG(uname, lname, policy) \ | 190 | #define SETPAGEFLAG(uname, lname, policy) \ |
| 191 | static inline void SetPage##uname(struct page *page) \ | 191 | static __always_inline void SetPage##uname(struct page *page) \ |
| 192 | { set_bit(PG_##lname, &policy(page, 1)->flags); } | 192 | { set_bit(PG_##lname, &policy(page, 1)->flags); } |
| 193 | 193 | ||
| 194 | #define CLEARPAGEFLAG(uname, lname, policy) \ | 194 | #define CLEARPAGEFLAG(uname, lname, policy) \ |
| 195 | static inline void ClearPage##uname(struct page *page) \ | 195 | static __always_inline void ClearPage##uname(struct page *page) \ |
| 196 | { clear_bit(PG_##lname, &policy(page, 1)->flags); } | 196 | { clear_bit(PG_##lname, &policy(page, 1)->flags); } |
| 197 | 197 | ||
| 198 | #define __SETPAGEFLAG(uname, lname, policy) \ | 198 | #define __SETPAGEFLAG(uname, lname, policy) \ |
| 199 | static inline void __SetPage##uname(struct page *page) \ | 199 | static __always_inline void __SetPage##uname(struct page *page) \ |
| 200 | { __set_bit(PG_##lname, &policy(page, 1)->flags); } | 200 | { __set_bit(PG_##lname, &policy(page, 1)->flags); } |
| 201 | 201 | ||
| 202 | #define __CLEARPAGEFLAG(uname, lname, policy) \ | 202 | #define __CLEARPAGEFLAG(uname, lname, policy) \ |
| 203 | static inline void __ClearPage##uname(struct page *page) \ | 203 | static __always_inline void __ClearPage##uname(struct page *page) \ |
| 204 | { __clear_bit(PG_##lname, &policy(page, 1)->flags); } | 204 | { __clear_bit(PG_##lname, &policy(page, 1)->flags); } |
| 205 | 205 | ||
| 206 | #define TESTSETFLAG(uname, lname, policy) \ | 206 | #define TESTSETFLAG(uname, lname, policy) \ |
| 207 | static inline int TestSetPage##uname(struct page *page) \ | 207 | static __always_inline int TestSetPage##uname(struct page *page) \ |
| 208 | { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); } | 208 | { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); } |
| 209 | 209 | ||
| 210 | #define TESTCLEARFLAG(uname, lname, policy) \ | 210 | #define TESTCLEARFLAG(uname, lname, policy) \ |
| 211 | static inline int TestClearPage##uname(struct page *page) \ | 211 | static __always_inline int TestClearPage##uname(struct page *page) \ |
| 212 | { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); } | 212 | { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); } |
| 213 | 213 | ||
| 214 | #define PAGEFLAG(uname, lname, policy) \ | 214 | #define PAGEFLAG(uname, lname, policy) \ |
| @@ -371,7 +371,7 @@ PAGEFLAG(Idle, idle, PF_ANY) | |||
| 371 | #define PAGE_MAPPING_KSM 2 | 371 | #define PAGE_MAPPING_KSM 2 |
| 372 | #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) | 372 | #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) |
| 373 | 373 | ||
| 374 | static inline int PageAnon(struct page *page) | 374 | static __always_inline int PageAnon(struct page *page) |
| 375 | { | 375 | { |
| 376 | page = compound_head(page); | 376 | page = compound_head(page); |
| 377 | return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; | 377 | return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; |
| @@ -384,7 +384,7 @@ static inline int PageAnon(struct page *page) | |||
| 384 | * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any | 384 | * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any |
| 385 | * anon_vma, but to that page's node of the stable tree. | 385 | * anon_vma, but to that page's node of the stable tree. |
| 386 | */ | 386 | */ |
| 387 | static inline int PageKsm(struct page *page) | 387 | static __always_inline int PageKsm(struct page *page) |
| 388 | { | 388 | { |
| 389 | page = compound_head(page); | 389 | page = compound_head(page); |
| 390 | return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == | 390 | return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == |
| @@ -415,14 +415,14 @@ static inline int PageUptodate(struct page *page) | |||
| 415 | return ret; | 415 | return ret; |
| 416 | } | 416 | } |
| 417 | 417 | ||
| 418 | static inline void __SetPageUptodate(struct page *page) | 418 | static __always_inline void __SetPageUptodate(struct page *page) |
| 419 | { | 419 | { |
| 420 | VM_BUG_ON_PAGE(PageTail(page), page); | 420 | VM_BUG_ON_PAGE(PageTail(page), page); |
| 421 | smp_wmb(); | 421 | smp_wmb(); |
| 422 | __set_bit(PG_uptodate, &page->flags); | 422 | __set_bit(PG_uptodate, &page->flags); |
| 423 | } | 423 | } |
| 424 | 424 | ||
| 425 | static inline void SetPageUptodate(struct page *page) | 425 | static __always_inline void SetPageUptodate(struct page *page) |
| 426 | { | 426 | { |
| 427 | VM_BUG_ON_PAGE(PageTail(page), page); | 427 | VM_BUG_ON_PAGE(PageTail(page), page); |
| 428 | /* | 428 | /* |
| @@ -456,12 +456,12 @@ static inline void set_page_writeback_keepwrite(struct page *page) | |||
| 456 | 456 | ||
| 457 | __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY) | 457 | __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY) |
| 458 | 458 | ||
| 459 | static inline void set_compound_head(struct page *page, struct page *head) | 459 | static __always_inline void set_compound_head(struct page *page, struct page *head) |
| 460 | { | 460 | { |
| 461 | WRITE_ONCE(page->compound_head, (unsigned long)head + 1); | 461 | WRITE_ONCE(page->compound_head, (unsigned long)head + 1); |
| 462 | } | 462 | } |
| 463 | 463 | ||
| 464 | static inline void clear_compound_head(struct page *page) | 464 | static __always_inline void clear_compound_head(struct page *page) |
| 465 | { | 465 | { |
| 466 | WRITE_ONCE(page->compound_head, 0); | 466 | WRITE_ONCE(page->compound_head, 0); |
| 467 | } | 467 | } |
| @@ -593,6 +593,8 @@ static inline void __ClearPageBuddy(struct page *page) | |||
| 593 | atomic_set(&page->_mapcount, -1); | 593 | atomic_set(&page->_mapcount, -1); |
| 594 | } | 594 | } |
| 595 | 595 | ||
| 596 | extern bool is_free_buddy_page(struct page *page); | ||
| 597 | |||
| 596 | #define PAGE_BALLOON_MAPCOUNT_VALUE (-256) | 598 | #define PAGE_BALLOON_MAPCOUNT_VALUE (-256) |
| 597 | 599 | ||
| 598 | static inline int PageBalloon(struct page *page) | 600 | static inline int PageBalloon(struct page *page) |
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h new file mode 100644 index 000000000000..e596d5d9540e --- /dev/null +++ b/include/linux/page_ref.h | |||
| @@ -0,0 +1,173 @@ | |||
| 1 | #ifndef _LINUX_PAGE_REF_H | ||
| 2 | #define _LINUX_PAGE_REF_H | ||
| 3 | |||
| 4 | #include <linux/atomic.h> | ||
| 5 | #include <linux/mm_types.h> | ||
| 6 | #include <linux/page-flags.h> | ||
| 7 | #include <linux/tracepoint-defs.h> | ||
| 8 | |||
| 9 | extern struct tracepoint __tracepoint_page_ref_set; | ||
| 10 | extern struct tracepoint __tracepoint_page_ref_mod; | ||
| 11 | extern struct tracepoint __tracepoint_page_ref_mod_and_test; | ||
| 12 | extern struct tracepoint __tracepoint_page_ref_mod_and_return; | ||
| 13 | extern struct tracepoint __tracepoint_page_ref_mod_unless; | ||
| 14 | extern struct tracepoint __tracepoint_page_ref_freeze; | ||
| 15 | extern struct tracepoint __tracepoint_page_ref_unfreeze; | ||
| 16 | |||
| 17 | #ifdef CONFIG_DEBUG_PAGE_REF | ||
| 18 | |||
| 19 | /* | ||
| 20 | * Ideally we would want to use the trace_<tracepoint>_enabled() helper | ||
| 21 | * functions. But due to include header file issues, that is not | ||
| 22 | * feasible. Instead we have to open code the static key functions. | ||
| 23 | * | ||
| 24 | * See trace_##name##_enabled(void) in include/linux/tracepoint.h | ||
| 25 | */ | ||
| 26 | #define page_ref_tracepoint_active(t) static_key_false(&(t).key) | ||
| 27 | |||
| 28 | extern void __page_ref_set(struct page *page, int v); | ||
| 29 | extern void __page_ref_mod(struct page *page, int v); | ||
| 30 | extern void __page_ref_mod_and_test(struct page *page, int v, int ret); | ||
| 31 | extern void __page_ref_mod_and_return(struct page *page, int v, int ret); | ||
| 32 | extern void __page_ref_mod_unless(struct page *page, int v, int u); | ||
| 33 | extern void __page_ref_freeze(struct page *page, int v, int ret); | ||
| 34 | extern void __page_ref_unfreeze(struct page *page, int v); | ||
| 35 | |||
| 36 | #else | ||
| 37 | |||
| 38 | #define page_ref_tracepoint_active(t) false | ||
| 39 | |||
| 40 | static inline void __page_ref_set(struct page *page, int v) | ||
| 41 | { | ||
| 42 | } | ||
| 43 | static inline void __page_ref_mod(struct page *page, int v) | ||
| 44 | { | ||
| 45 | } | ||
| 46 | static inline void __page_ref_mod_and_test(struct page *page, int v, int ret) | ||
| 47 | { | ||
| 48 | } | ||
| 49 | static inline void __page_ref_mod_and_return(struct page *page, int v, int ret) | ||
| 50 | { | ||
| 51 | } | ||
| 52 | static inline void __page_ref_mod_unless(struct page *page, int v, int u) | ||
| 53 | { | ||
| 54 | } | ||
| 55 | static inline void __page_ref_freeze(struct page *page, int v, int ret) | ||
| 56 | { | ||
| 57 | } | ||
| 58 | static inline void __page_ref_unfreeze(struct page *page, int v) | ||
| 59 | { | ||
| 60 | } | ||
| 61 | |||
| 62 | #endif | ||
| 63 | |||
| 64 | static inline int page_ref_count(struct page *page) | ||
| 65 | { | ||
| 66 | return atomic_read(&page->_count); | ||
| 67 | } | ||
| 68 | |||
| 69 | static inline int page_count(struct page *page) | ||
| 70 | { | ||
| 71 | return atomic_read(&compound_head(page)->_count); | ||
| 72 | } | ||
| 73 | |||
| 74 | static inline void set_page_count(struct page *page, int v) | ||
| 75 | { | ||
| 76 | atomic_set(&page->_count, v); | ||
| 77 | if (page_ref_tracepoint_active(__tracepoint_page_ref_set)) | ||
| 78 | __page_ref_set(page, v); | ||
| 79 | } | ||
| 80 | |||
| 81 | /* | ||
| 82 | * Setup the page count before being freed into the page allocator for | ||
| 83 | * the first time (boot or memory hotplug) | ||
| 84 | */ | ||
| 85 | static inline void init_page_count(struct page *page) | ||
| 86 | { | ||
| 87 | set_page_count(page, 1); | ||
| 88 | } | ||
| 89 | |||
| 90 | static inline void page_ref_add(struct page *page, int nr) | ||
| 91 | { | ||
| 92 | atomic_add(nr, &page->_count); | ||
| 93 | if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) | ||
| 94 | __page_ref_mod(page, nr); | ||
| 95 | } | ||
| 96 | |||
| 97 | static inline void page_ref_sub(struct page *page, int nr) | ||
| 98 | { | ||
| 99 | atomic_sub(nr, &page->_count); | ||
| 100 | if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) | ||
| 101 | __page_ref_mod(page, -nr); | ||
| 102 | } | ||
| 103 | |||
| 104 | static inline void page_ref_inc(struct page *page) | ||
| 105 | { | ||
| 106 | atomic_inc(&page->_count); | ||
| 107 | if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) | ||
| 108 | __page_ref_mod(page, 1); | ||
| 109 | } | ||
| 110 | |||
| 111 | static inline void page_ref_dec(struct page *page) | ||
| 112 | { | ||
| 113 | atomic_dec(&page->_count); | ||
| 114 | if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) | ||
| 115 | __page_ref_mod(page, -1); | ||
| 116 | } | ||
| 117 | |||
| 118 | static inline int page_ref_sub_and_test(struct page *page, int nr) | ||
| 119 | { | ||
| 120 | int ret = atomic_sub_and_test(nr, &page->_count); | ||
| 121 | |||
| 122 | if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) | ||
| 123 | __page_ref_mod_and_test(page, -nr, ret); | ||
| 124 | return ret; | ||
| 125 | } | ||
| 126 | |||
| 127 | static inline int page_ref_dec_and_test(struct page *page) | ||
| 128 | { | ||
| 129 | int ret = atomic_dec_and_test(&page->_count); | ||
| 130 | |||
| 131 | if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) | ||
| 132 | __page_ref_mod_and_test(page, -1, ret); | ||
| 133 | return ret; | ||
| 134 | } | ||
| 135 | |||
| 136 | static inline int page_ref_dec_return(struct page *page) | ||
| 137 | { | ||
| 138 | int ret = atomic_dec_return(&page->_count); | ||
| 139 | |||
| 140 | if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return)) | ||
| 141 | __page_ref_mod_and_return(page, -1, ret); | ||
| 142 | return ret; | ||
| 143 | } | ||
| 144 | |||
| 145 | static inline int page_ref_add_unless(struct page *page, int nr, int u) | ||
| 146 | { | ||
| 147 | int ret = atomic_add_unless(&page->_count, nr, u); | ||
| 148 | |||
| 149 | if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless)) | ||
| 150 | __page_ref_mod_unless(page, nr, ret); | ||
| 151 | return ret; | ||
| 152 | } | ||
| 153 | |||
| 154 | static inline int page_ref_freeze(struct page *page, int count) | ||
| 155 | { | ||
| 156 | int ret = likely(atomic_cmpxchg(&page->_count, count, 0) == count); | ||
| 157 | |||
| 158 | if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze)) | ||
| 159 | __page_ref_freeze(page, count, ret); | ||
| 160 | return ret; | ||
| 161 | } | ||
| 162 | |||
| 163 | static inline void page_ref_unfreeze(struct page *page, int count) | ||
| 164 | { | ||
| 165 | VM_BUG_ON_PAGE(page_count(page) != 0, page); | ||
| 166 | VM_BUG_ON(count == 0); | ||
| 167 | |||
| 168 | atomic_set(&page->_count, count); | ||
| 169 | if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze)) | ||
| 170 | __page_ref_unfreeze(page, count); | ||
| 171 | } | ||
| 172 | |||
| 173 | #endif | ||
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 183b15ea052b..1ebd65c91422 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
| @@ -165,7 +165,7 @@ static inline int page_cache_get_speculative(struct page *page) | |||
| 165 | * SMP requires. | 165 | * SMP requires. |
| 166 | */ | 166 | */ |
| 167 | VM_BUG_ON_PAGE(page_count(page) == 0, page); | 167 | VM_BUG_ON_PAGE(page_count(page) == 0, page); |
| 168 | atomic_inc(&page->_count); | 168 | page_ref_inc(page); |
| 169 | 169 | ||
| 170 | #else | 170 | #else |
| 171 | if (unlikely(!get_page_unless_zero(page))) { | 171 | if (unlikely(!get_page_unless_zero(page))) { |
| @@ -194,10 +194,10 @@ static inline int page_cache_add_speculative(struct page *page, int count) | |||
| 194 | VM_BUG_ON(!in_atomic()); | 194 | VM_BUG_ON(!in_atomic()); |
| 195 | # endif | 195 | # endif |
| 196 | VM_BUG_ON_PAGE(page_count(page) == 0, page); | 196 | VM_BUG_ON_PAGE(page_count(page) == 0, page); |
| 197 | atomic_add(count, &page->_count); | 197 | page_ref_add(page, count); |
| 198 | 198 | ||
| 199 | #else | 199 | #else |
| 200 | if (unlikely(!atomic_add_unless(&page->_count, count, 0))) | 200 | if (unlikely(!page_ref_add_unless(page, count, 0))) |
| 201 | return 0; | 201 | return 0; |
| 202 | #endif | 202 | #endif |
| 203 | VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); | 203 | VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); |
| @@ -205,19 +205,6 @@ static inline int page_cache_add_speculative(struct page *page, int count) | |||
| 205 | return 1; | 205 | return 1; |
| 206 | } | 206 | } |
| 207 | 207 | ||
| 208 | static inline int page_freeze_refs(struct page *page, int count) | ||
| 209 | { | ||
| 210 | return likely(atomic_cmpxchg(&page->_count, count, 0) == count); | ||
| 211 | } | ||
| 212 | |||
| 213 | static inline void page_unfreeze_refs(struct page *page, int count) | ||
| 214 | { | ||
| 215 | VM_BUG_ON_PAGE(page_count(page) != 0, page); | ||
| 216 | VM_BUG_ON(count == 0); | ||
| 217 | |||
| 218 | atomic_set(&page->_count, count); | ||
| 219 | } | ||
| 220 | |||
| 221 | #ifdef CONFIG_NUMA | 208 | #ifdef CONFIG_NUMA |
| 222 | extern struct page *__page_cache_alloc(gfp_t gfp); | 209 | extern struct page *__page_cache_alloc(gfp_t gfp); |
| 223 | #else | 210 | #else |
diff --git a/include/linux/poll.h b/include/linux/poll.h index c08386fb3e08..9fb4f40d9a26 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h | |||
| @@ -96,7 +96,7 @@ extern void poll_initwait(struct poll_wqueues *pwq); | |||
| 96 | extern void poll_freewait(struct poll_wqueues *pwq); | 96 | extern void poll_freewait(struct poll_wqueues *pwq); |
| 97 | extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state, | 97 | extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state, |
| 98 | ktime_t *expires, unsigned long slack); | 98 | ktime_t *expires, unsigned long slack); |
| 99 | extern long select_estimate_accuracy(struct timespec *tv); | 99 | extern u64 select_estimate_accuracy(struct timespec *tv); |
| 100 | 100 | ||
| 101 | 101 | ||
| 102 | static inline int poll_schedule(struct poll_wqueues *pwq, int state) | 102 | static inline int poll_schedule(struct poll_wqueues *pwq, int state) |
diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h index bd466439c588..3bdfa70bc642 100644 --- a/include/linux/quicklist.h +++ b/include/linux/quicklist.h | |||
| @@ -5,7 +5,7 @@ | |||
| 5 | * as needed after allocation when they are freed. Per cpu lists of pages | 5 | * as needed after allocation when they are freed. Per cpu lists of pages |
| 6 | * are kept that only contain node local pages. | 6 | * are kept that only contain node local pages. |
| 7 | * | 7 | * |
| 8 | * (C) 2007, SGI. Christoph Lameter <clameter@sgi.com> | 8 | * (C) 2007, SGI. Christoph Lameter <cl@linux.com> |
| 9 | */ | 9 | */ |
| 10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
| 11 | #include <linux/gfp.h> | 11 | #include <linux/gfp.h> |
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index f54be7082207..51a97ac8bfbf 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #ifndef _LINUX_RADIX_TREE_H | 21 | #ifndef _LINUX_RADIX_TREE_H |
| 22 | #define _LINUX_RADIX_TREE_H | 22 | #define _LINUX_RADIX_TREE_H |
| 23 | 23 | ||
| 24 | #include <linux/bitops.h> | ||
| 24 | #include <linux/preempt.h> | 25 | #include <linux/preempt.h> |
| 25 | #include <linux/types.h> | 26 | #include <linux/types.h> |
| 26 | #include <linux/bug.h> | 27 | #include <linux/bug.h> |
| @@ -270,8 +271,15 @@ static inline void radix_tree_replace_slot(void **pslot, void *item) | |||
| 270 | } | 271 | } |
| 271 | 272 | ||
| 272 | int __radix_tree_create(struct radix_tree_root *root, unsigned long index, | 273 | int __radix_tree_create(struct radix_tree_root *root, unsigned long index, |
| 273 | struct radix_tree_node **nodep, void ***slotp); | 274 | unsigned order, struct radix_tree_node **nodep, |
| 274 | int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); | 275 | void ***slotp); |
| 276 | int __radix_tree_insert(struct radix_tree_root *, unsigned long index, | ||
| 277 | unsigned order, void *); | ||
| 278 | static inline int radix_tree_insert(struct radix_tree_root *root, | ||
| 279 | unsigned long index, void *entry) | ||
| 280 | { | ||
| 281 | return __radix_tree_insert(root, index, 0, entry); | ||
| 282 | } | ||
| 275 | void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, | 283 | void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, |
| 276 | struct radix_tree_node **nodep, void ***slotp); | 284 | struct radix_tree_node **nodep, void ***slotp); |
| 277 | void *radix_tree_lookup(struct radix_tree_root *, unsigned long); | 285 | void *radix_tree_lookup(struct radix_tree_root *, unsigned long); |
| @@ -395,6 +403,22 @@ void **radix_tree_iter_retry(struct radix_tree_iter *iter) | |||
| 395 | } | 403 | } |
| 396 | 404 | ||
| 397 | /** | 405 | /** |
| 406 | * radix_tree_iter_next - resume iterating when the chunk may be invalid | ||
| 407 | * @iter: iterator state | ||
| 408 | * | ||
| 409 | * If the iterator needs to release then reacquire a lock, the chunk may | ||
| 410 | * have been invalidated by an insertion or deletion. Call this function | ||
| 411 | * to continue the iteration from the next index. | ||
| 412 | */ | ||
| 413 | static inline __must_check | ||
| 414 | void **radix_tree_iter_next(struct radix_tree_iter *iter) | ||
| 415 | { | ||
| 416 | iter->next_index = iter->index + 1; | ||
| 417 | iter->tags = 0; | ||
| 418 | return NULL; | ||
| 419 | } | ||
| 420 | |||
| 421 | /** | ||
| 398 | * radix_tree_chunk_size - get current chunk size | 422 | * radix_tree_chunk_size - get current chunk size |
| 399 | * | 423 | * |
| 400 | * @iter: pointer to radix tree iterator | 424 | * @iter: pointer to radix tree iterator |
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index a07f42bedda3..49eb4f8ebac9 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
| @@ -86,6 +86,7 @@ enum ttu_flags { | |||
| 86 | TTU_MIGRATION = 2, /* migration mode */ | 86 | TTU_MIGRATION = 2, /* migration mode */ |
| 87 | TTU_MUNLOCK = 4, /* munlock mode */ | 87 | TTU_MUNLOCK = 4, /* munlock mode */ |
| 88 | TTU_LZFREE = 8, /* lazy free mode */ | 88 | TTU_LZFREE = 8, /* lazy free mode */ |
| 89 | TTU_SPLIT_HUGE_PMD = 16, /* split huge PMD if any */ | ||
| 89 | 90 | ||
| 90 | TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ | 91 | TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ |
| 91 | TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ | 92 | TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ |
| @@ -93,6 +94,8 @@ enum ttu_flags { | |||
| 93 | TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible | 94 | TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible |
| 94 | * and caller guarantees they will | 95 | * and caller guarantees they will |
| 95 | * do a final flush if necessary */ | 96 | * do a final flush if necessary */ |
| 97 | TTU_RMAP_LOCKED = (1 << 12) /* do not grab rmap lock: | ||
| 98 | * caller holds it */ | ||
| 96 | }; | 99 | }; |
| 97 | 100 | ||
| 98 | #ifdef CONFIG_MMU | 101 | #ifdef CONFIG_MMU |
| @@ -240,6 +243,8 @@ int page_mkclean(struct page *); | |||
| 240 | */ | 243 | */ |
| 241 | int try_to_munlock(struct page *); | 244 | int try_to_munlock(struct page *); |
| 242 | 245 | ||
| 246 | void remove_migration_ptes(struct page *old, struct page *new, bool locked); | ||
| 247 | |||
| 243 | /* | 248 | /* |
| 244 | * Called by memory-failure.c to kill processes. | 249 | * Called by memory-failure.c to kill processes. |
| 245 | */ | 250 | */ |
| @@ -266,6 +271,7 @@ struct rmap_walk_control { | |||
| 266 | }; | 271 | }; |
| 267 | 272 | ||
| 268 | int rmap_walk(struct page *page, struct rmap_walk_control *rwc); | 273 | int rmap_walk(struct page *page, struct rmap_walk_control *rwc); |
| 274 | int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc); | ||
| 269 | 275 | ||
| 270 | #else /* !CONFIG_MMU */ | 276 | #else /* !CONFIG_MMU */ |
| 271 | 277 | ||
diff --git a/include/linux/sched.h b/include/linux/sched.h index bd242bed4abb..084ed9fba620 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -1791,8 +1791,8 @@ struct task_struct { | |||
| 1791 | * time slack values; these are used to round up poll() and | 1791 | * time slack values; these are used to round up poll() and |
| 1792 | * select() etc timeout values. These are in nanoseconds. | 1792 | * select() etc timeout values. These are in nanoseconds. |
| 1793 | */ | 1793 | */ |
| 1794 | unsigned long timer_slack_ns; | 1794 | u64 timer_slack_ns; |
| 1795 | unsigned long default_timer_slack_ns; | 1795 | u64 default_timer_slack_ns; |
| 1796 | 1796 | ||
| 1797 | #ifdef CONFIG_KASAN | 1797 | #ifdef CONFIG_KASAN |
| 1798 | unsigned int kasan_depth; | 1798 | unsigned int kasan_depth; |
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 4f080ab4f2cd..22db1e63707e 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h | |||
| @@ -14,27 +14,6 @@ extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, | |||
| 14 | enum { sysctl_hung_task_timeout_secs = 0 }; | 14 | enum { sysctl_hung_task_timeout_secs = 0 }; |
| 15 | #endif | 15 | #endif |
| 16 | 16 | ||
| 17 | /* | ||
| 18 | * Default maximum number of active map areas, this limits the number of vmas | ||
| 19 | * per mm struct. Users can overwrite this number by sysctl but there is a | ||
| 20 | * problem. | ||
| 21 | * | ||
| 22 | * When a program's coredump is generated as ELF format, a section is created | ||
| 23 | * per a vma. In ELF, the number of sections is represented in unsigned short. | ||
| 24 | * This means the number of sections should be smaller than 65535 at coredump. | ||
| 25 | * Because the kernel adds some informative sections to a image of program at | ||
| 26 | * generating coredump, we need some margin. The number of extra sections is | ||
| 27 | * 1-3 now and depends on arch. We use "5" as safe margin, here. | ||
| 28 | * | ||
| 29 | * ELF extended numbering allows more than 65535 sections, so 16-bit bound is | ||
| 30 | * not a hard limit any more. Although some userspace tools can be surprised by | ||
| 31 | * that. | ||
| 32 | */ | ||
| 33 | #define MAPCOUNT_ELF_CORE_MARGIN (5) | ||
| 34 | #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) | ||
| 35 | |||
| 36 | extern int sysctl_max_map_count; | ||
| 37 | |||
| 38 | extern unsigned int sysctl_sched_latency; | 17 | extern unsigned int sysctl_sched_latency; |
| 39 | extern unsigned int sysctl_sched_min_granularity; | 18 | extern unsigned int sysctl_sched_min_granularity; |
| 40 | extern unsigned int sysctl_sched_wakeup_granularity; | 19 | extern unsigned int sysctl_sched_wakeup_granularity; |
diff --git a/include/linux/string.h b/include/linux/string.h index 9eebc66d957a..d3993a79a325 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
| @@ -128,7 +128,13 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp); | |||
| 128 | extern void argv_free(char **argv); | 128 | extern void argv_free(char **argv); |
| 129 | 129 | ||
| 130 | extern bool sysfs_streq(const char *s1, const char *s2); | 130 | extern bool sysfs_streq(const char *s1, const char *s2); |
| 131 | extern int strtobool(const char *s, bool *res); | 131 | extern int kstrtobool(const char *s, bool *res); |
| 132 | static inline int strtobool(const char *s, bool *res) | ||
| 133 | { | ||
| 134 | return kstrtobool(s, res); | ||
| 135 | } | ||
| 136 | |||
| 137 | int match_string(const char * const *array, size_t n, const char *string); | ||
| 132 | 138 | ||
| 133 | #ifdef CONFIG_BINARY_PRINTF | 139 | #ifdef CONFIG_BINARY_PRINTF |
| 134 | int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); | 140 | int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); |
diff --git a/include/linux/tick.h b/include/linux/tick.h index 21f73649a4dc..62be0786d6d0 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
| @@ -111,7 +111,7 @@ enum tick_dep_bits { | |||
| 111 | #define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE) | 111 | #define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE) |
| 112 | 112 | ||
| 113 | #ifdef CONFIG_NO_HZ_COMMON | 113 | #ifdef CONFIG_NO_HZ_COMMON |
| 114 | extern int tick_nohz_enabled; | 114 | extern bool tick_nohz_enabled; |
| 115 | extern int tick_nohz_tick_stopped(void); | 115 | extern int tick_nohz_tick_stopped(void); |
| 116 | extern void tick_nohz_idle_enter(void); | 116 | extern void tick_nohz_idle_enter(void); |
| 117 | extern void tick_nohz_idle_exit(void); | 117 | extern void tick_nohz_idle_exit(void); |
diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h index 99c1b4d20b0f..33383ca23837 100644 --- a/include/linux/unaligned/access_ok.h +++ b/include/linux/unaligned/access_ok.h | |||
| @@ -4,62 +4,62 @@ | |||
| 4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
| 5 | #include <asm/byteorder.h> | 5 | #include <asm/byteorder.h> |
| 6 | 6 | ||
| 7 | static inline u16 get_unaligned_le16(const void *p) | 7 | static __always_inline u16 get_unaligned_le16(const void *p) |
| 8 | { | 8 | { |
| 9 | return le16_to_cpup((__le16 *)p); | 9 | return le16_to_cpup((__le16 *)p); |
| 10 | } | 10 | } |
| 11 | 11 | ||
| 12 | static inline u32 get_unaligned_le32(const void *p) | 12 | static __always_inline u32 get_unaligned_le32(const void *p) |
| 13 | { | 13 | { |
| 14 | return le32_to_cpup((__le32 *)p); | 14 | return le32_to_cpup((__le32 *)p); |
| 15 | } | 15 | } |
| 16 | 16 | ||
| 17 | static inline u64 get_unaligned_le64(const void *p) | 17 | static __always_inline u64 get_unaligned_le64(const void *p) |
| 18 | { | 18 | { |
| 19 | return le64_to_cpup((__le64 *)p); | 19 | return le64_to_cpup((__le64 *)p); |
| 20 | } | 20 | } |
| 21 | 21 | ||
| 22 | static inline u16 get_unaligned_be16(const void *p) | 22 | static __always_inline u16 get_unaligned_be16(const void *p) |
| 23 | { | 23 | { |
| 24 | return be16_to_cpup((__be16 *)p); | 24 | return be16_to_cpup((__be16 *)p); |
| 25 | } | 25 | } |
| 26 | 26 | ||
| 27 | static inline u32 get_unaligned_be32(const void *p) | 27 | static __always_inline u32 get_unaligned_be32(const void *p) |
| 28 | { | 28 | { |
| 29 | return be32_to_cpup((__be32 *)p); | 29 | return be32_to_cpup((__be32 *)p); |
| 30 | } | 30 | } |
| 31 | 31 | ||
| 32 | static inline u64 get_unaligned_be64(const void *p) | 32 | static __always_inline u64 get_unaligned_be64(const void *p) |
| 33 | { | 33 | { |
| 34 | return be64_to_cpup((__be64 *)p); | 34 | return be64_to_cpup((__be64 *)p); |
| 35 | } | 35 | } |
| 36 | 36 | ||
| 37 | static inline void put_unaligned_le16(u16 val, void *p) | 37 | static __always_inline void put_unaligned_le16(u16 val, void *p) |
| 38 | { | 38 | { |
| 39 | *((__le16 *)p) = cpu_to_le16(val); | 39 | *((__le16 *)p) = cpu_to_le16(val); |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | static inline void put_unaligned_le32(u32 val, void *p) | 42 | static __always_inline void put_unaligned_le32(u32 val, void *p) |
| 43 | { | 43 | { |
| 44 | *((__le32 *)p) = cpu_to_le32(val); | 44 | *((__le32 *)p) = cpu_to_le32(val); |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | static inline void put_unaligned_le64(u64 val, void *p) | 47 | static __always_inline void put_unaligned_le64(u64 val, void *p) |
| 48 | { | 48 | { |
| 49 | *((__le64 *)p) = cpu_to_le64(val); | 49 | *((__le64 *)p) = cpu_to_le64(val); |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | static inline void put_unaligned_be16(u16 val, void *p) | 52 | static __always_inline void put_unaligned_be16(u16 val, void *p) |
| 53 | { | 53 | { |
| 54 | *((__be16 *)p) = cpu_to_be16(val); | 54 | *((__be16 *)p) = cpu_to_be16(val); |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | static inline void put_unaligned_be32(u32 val, void *p) | 57 | static __always_inline void put_unaligned_be32(u32 val, void *p) |
| 58 | { | 58 | { |
| 59 | *((__be32 *)p) = cpu_to_be32(val); | 59 | *((__be32 *)p) = cpu_to_be32(val); |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | static inline void put_unaligned_be64(u64 val, void *p) | 62 | static __always_inline void put_unaligned_be64(u64 val, void *p) |
| 63 | { | 63 | { |
| 64 | *((__be64 *)p) = cpu_to_be64(val); | 64 | *((__be64 *)p) = cpu_to_be64(val); |
| 65 | } | 65 | } |
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 67c1dbd19c6d..ec084321fe09 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h | |||
| @@ -53,6 +53,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |||
| 53 | COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED, | 53 | COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED, |
| 54 | COMPACTISOLATED, | 54 | COMPACTISOLATED, |
| 55 | COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS, | 55 | COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS, |
| 56 | KCOMPACTD_WAKE, | ||
| 56 | #endif | 57 | #endif |
| 57 | #ifdef CONFIG_HUGETLB_PAGE | 58 | #ifdef CONFIG_HUGETLB_PAGE |
| 58 | HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, | 59 | HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, |
| @@ -71,6 +72,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |||
| 71 | THP_COLLAPSE_ALLOC_FAILED, | 72 | THP_COLLAPSE_ALLOC_FAILED, |
| 72 | THP_SPLIT_PAGE, | 73 | THP_SPLIT_PAGE, |
| 73 | THP_SPLIT_PAGE_FAILED, | 74 | THP_SPLIT_PAGE_FAILED, |
| 75 | THP_DEFERRED_SPLIT_PAGE, | ||
| 74 | THP_SPLIT_PMD, | 76 | THP_SPLIT_PMD, |
| 75 | THP_ZERO_PAGE_ALLOC, | 77 | THP_ZERO_PAGE_ALLOC, |
| 76 | THP_ZERO_PAGE_ALLOC_FAILED, | 78 | THP_ZERO_PAGE_ALLOC_FAILED, |
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h index 111e5666e5eb..e215bf68f521 100644 --- a/include/trace/events/compaction.h +++ b/include/trace/events/compaction.h | |||
| @@ -350,6 +350,61 @@ DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_reset, | |||
| 350 | ); | 350 | ); |
| 351 | #endif | 351 | #endif |
| 352 | 352 | ||
| 353 | TRACE_EVENT(mm_compaction_kcompactd_sleep, | ||
| 354 | |||
| 355 | TP_PROTO(int nid), | ||
| 356 | |||
| 357 | TP_ARGS(nid), | ||
| 358 | |||
| 359 | TP_STRUCT__entry( | ||
| 360 | __field(int, nid) | ||
| 361 | ), | ||
| 362 | |||
| 363 | TP_fast_assign( | ||
| 364 | __entry->nid = nid; | ||
| 365 | ), | ||
| 366 | |||
| 367 | TP_printk("nid=%d", __entry->nid) | ||
| 368 | ); | ||
| 369 | |||
| 370 | DECLARE_EVENT_CLASS(kcompactd_wake_template, | ||
| 371 | |||
| 372 | TP_PROTO(int nid, int order, enum zone_type classzone_idx), | ||
| 373 | |||
| 374 | TP_ARGS(nid, order, classzone_idx), | ||
| 375 | |||
| 376 | TP_STRUCT__entry( | ||
| 377 | __field(int, nid) | ||
| 378 | __field(int, order) | ||
| 379 | __field(enum zone_type, classzone_idx) | ||
| 380 | ), | ||
| 381 | |||
| 382 | TP_fast_assign( | ||
| 383 | __entry->nid = nid; | ||
| 384 | __entry->order = order; | ||
| 385 | __entry->classzone_idx = classzone_idx; | ||
| 386 | ), | ||
| 387 | |||
| 388 | TP_printk("nid=%d order=%d classzone_idx=%-8s", | ||
| 389 | __entry->nid, | ||
| 390 | __entry->order, | ||
| 391 | __print_symbolic(__entry->classzone_idx, ZONE_TYPE)) | ||
| 392 | ); | ||
| 393 | |||
| 394 | DEFINE_EVENT(kcompactd_wake_template, mm_compaction_wakeup_kcompactd, | ||
| 395 | |||
| 396 | TP_PROTO(int nid, int order, enum zone_type classzone_idx), | ||
| 397 | |||
| 398 | TP_ARGS(nid, order, classzone_idx) | ||
| 399 | ); | ||
| 400 | |||
| 401 | DEFINE_EVENT(kcompactd_wake_template, mm_compaction_kcompactd_wake, | ||
| 402 | |||
| 403 | TP_PROTO(int nid, int order, enum zone_type classzone_idx), | ||
| 404 | |||
| 405 | TP_ARGS(nid, order, classzone_idx) | ||
| 406 | ); | ||
| 407 | |||
| 353 | #endif /* _TRACE_COMPACTION_H */ | 408 | #endif /* _TRACE_COMPACTION_H */ |
| 354 | 409 | ||
| 355 | /* This part must be outside protection */ | 410 | /* This part must be outside protection */ |
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index a849185c82f0..43cedbf0c759 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h | |||
| @@ -111,15 +111,21 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" ) | |||
| 111 | ) : "none" | 111 | ) : "none" |
| 112 | 112 | ||
| 113 | #if defined(CONFIG_X86) | 113 | #if defined(CONFIG_X86) |
| 114 | #define __VM_ARCH_SPECIFIC {VM_PAT, "pat" } | 114 | #define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" } |
| 115 | #elif defined(CONFIG_PPC) | 115 | #elif defined(CONFIG_PPC) |
| 116 | #define __VM_ARCH_SPECIFIC {VM_SAO, "sao" } | 116 | #define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" } |
| 117 | #elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64) | 117 | #elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64) |
| 118 | #define __VM_ARCH_SPECIFIC {VM_GROWSUP, "growsup" } | 118 | #define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" } |
| 119 | #elif !defined(CONFIG_MMU) | 119 | #elif !defined(CONFIG_MMU) |
| 120 | #define __VM_ARCH_SPECIFIC {VM_MAPPED_COPY,"mappedcopy" } | 120 | #define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy" } |
| 121 | #else | 121 | #else |
| 122 | #define __VM_ARCH_SPECIFIC {VM_ARCH_1, "arch_1" } | 122 | #define __VM_ARCH_SPECIFIC_1 {VM_ARCH_1, "arch_1" } |
| 123 | #endif | ||
| 124 | |||
| 125 | #if defined(CONFIG_X86) | ||
| 126 | #define __VM_ARCH_SPECIFIC_2 {VM_MPX, "mpx" } | ||
| 127 | #else | ||
| 128 | #define __VM_ARCH_SPECIFIC_2 {VM_ARCH_2, "arch_2" } | ||
| 123 | #endif | 129 | #endif |
| 124 | 130 | ||
| 125 | #ifdef CONFIG_MEM_SOFT_DIRTY | 131 | #ifdef CONFIG_MEM_SOFT_DIRTY |
| @@ -138,19 +144,22 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" ) | |||
| 138 | {VM_MAYEXEC, "mayexec" }, \ | 144 | {VM_MAYEXEC, "mayexec" }, \ |
| 139 | {VM_MAYSHARE, "mayshare" }, \ | 145 | {VM_MAYSHARE, "mayshare" }, \ |
| 140 | {VM_GROWSDOWN, "growsdown" }, \ | 146 | {VM_GROWSDOWN, "growsdown" }, \ |
| 147 | {VM_UFFD_MISSING, "uffd_missing" }, \ | ||
| 141 | {VM_PFNMAP, "pfnmap" }, \ | 148 | {VM_PFNMAP, "pfnmap" }, \ |
| 142 | {VM_DENYWRITE, "denywrite" }, \ | 149 | {VM_DENYWRITE, "denywrite" }, \ |
| 143 | {VM_LOCKONFAULT, "lockonfault" }, \ | 150 | {VM_UFFD_WP, "uffd_wp" }, \ |
| 144 | {VM_LOCKED, "locked" }, \ | 151 | {VM_LOCKED, "locked" }, \ |
| 145 | {VM_IO, "io" }, \ | 152 | {VM_IO, "io" }, \ |
| 146 | {VM_SEQ_READ, "seqread" }, \ | 153 | {VM_SEQ_READ, "seqread" }, \ |
| 147 | {VM_RAND_READ, "randread" }, \ | 154 | {VM_RAND_READ, "randread" }, \ |
| 148 | {VM_DONTCOPY, "dontcopy" }, \ | 155 | {VM_DONTCOPY, "dontcopy" }, \ |
| 149 | {VM_DONTEXPAND, "dontexpand" }, \ | 156 | {VM_DONTEXPAND, "dontexpand" }, \ |
| 157 | {VM_LOCKONFAULT, "lockonfault" }, \ | ||
| 150 | {VM_ACCOUNT, "account" }, \ | 158 | {VM_ACCOUNT, "account" }, \ |
| 151 | {VM_NORESERVE, "noreserve" }, \ | 159 | {VM_NORESERVE, "noreserve" }, \ |
| 152 | {VM_HUGETLB, "hugetlb" }, \ | 160 | {VM_HUGETLB, "hugetlb" }, \ |
| 153 | __VM_ARCH_SPECIFIC , \ | 161 | __VM_ARCH_SPECIFIC_1 , \ |
| 162 | __VM_ARCH_SPECIFIC_2 , \ | ||
| 154 | {VM_DONTDUMP, "dontdump" }, \ | 163 | {VM_DONTDUMP, "dontdump" }, \ |
| 155 | IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \ | 164 | IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \ |
| 156 | {VM_MIXEDMAP, "mixedmap" }, \ | 165 | {VM_MIXEDMAP, "mixedmap" }, \ |
diff --git a/include/trace/events/page_ref.h b/include/trace/events/page_ref.h new file mode 100644 index 000000000000..81001f8b0db4 --- /dev/null +++ b/include/trace/events/page_ref.h | |||
| @@ -0,0 +1,134 @@ | |||
| 1 | #undef TRACE_SYSTEM | ||
| 2 | #define TRACE_SYSTEM page_ref | ||
| 3 | |||
| 4 | #if !defined(_TRACE_PAGE_REF_H) || defined(TRACE_HEADER_MULTI_READ) | ||
| 5 | #define _TRACE_PAGE_REF_H | ||
| 6 | |||
| 7 | #include <linux/types.h> | ||
| 8 | #include <linux/page_ref.h> | ||
| 9 | #include <linux/tracepoint.h> | ||
| 10 | #include <trace/events/mmflags.h> | ||
| 11 | |||
| 12 | DECLARE_EVENT_CLASS(page_ref_mod_template, | ||
| 13 | |||
| 14 | TP_PROTO(struct page *page, int v), | ||
| 15 | |||
| 16 | TP_ARGS(page, v), | ||
| 17 | |||
| 18 | TP_STRUCT__entry( | ||
| 19 | __field(unsigned long, pfn) | ||
| 20 | __field(unsigned long, flags) | ||
| 21 | __field(int, count) | ||
| 22 | __field(int, mapcount) | ||
| 23 | __field(void *, mapping) | ||
| 24 | __field(int, mt) | ||
| 25 | __field(int, val) | ||
| 26 | ), | ||
| 27 | |||
| 28 | TP_fast_assign( | ||
| 29 | __entry->pfn = page_to_pfn(page); | ||
| 30 | __entry->flags = page->flags; | ||
| 31 | __entry->count = page_ref_count(page); | ||
| 32 | __entry->mapcount = page_mapcount(page); | ||
| 33 | __entry->mapping = page->mapping; | ||
| 34 | __entry->mt = get_pageblock_migratetype(page); | ||
| 35 | __entry->val = v; | ||
| 36 | ), | ||
| 37 | |||
| 38 | TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d", | ||
| 39 | __entry->pfn, | ||
| 40 | show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)), | ||
| 41 | __entry->count, | ||
| 42 | __entry->mapcount, __entry->mapping, __entry->mt, | ||
| 43 | __entry->val) | ||
| 44 | ); | ||
| 45 | |||
| 46 | DEFINE_EVENT(page_ref_mod_template, page_ref_set, | ||
| 47 | |||
| 48 | TP_PROTO(struct page *page, int v), | ||
| 49 | |||
| 50 | TP_ARGS(page, v) | ||
| 51 | ); | ||
| 52 | |||
| 53 | DEFINE_EVENT(page_ref_mod_template, page_ref_mod, | ||
| 54 | |||
| 55 | TP_PROTO(struct page *page, int v), | ||
| 56 | |||
| 57 | TP_ARGS(page, v) | ||
| 58 | ); | ||
| 59 | |||
| 60 | DECLARE_EVENT_CLASS(page_ref_mod_and_test_template, | ||
| 61 | |||
| 62 | TP_PROTO(struct page *page, int v, int ret), | ||
| 63 | |||
| 64 | TP_ARGS(page, v, ret), | ||
| 65 | |||
| 66 | TP_STRUCT__entry( | ||
| 67 | __field(unsigned long, pfn) | ||
| 68 | __field(unsigned long, flags) | ||
| 69 | __field(int, count) | ||
| 70 | __field(int, mapcount) | ||
| 71 | __field(void *, mapping) | ||
| 72 | __field(int, mt) | ||
| 73 | __field(int, val) | ||
| 74 | __field(int, ret) | ||
| 75 | ), | ||
| 76 | |||
| 77 | TP_fast_assign( | ||
| 78 | __entry->pfn = page_to_pfn(page); | ||
| 79 | __entry->flags = page->flags; | ||
| 80 | __entry->count = page_ref_count(page); | ||
| 81 | __entry->mapcount = page_mapcount(page); | ||
| 82 | __entry->mapping = page->mapping; | ||
| 83 | __entry->mt = get_pageblock_migratetype(page); | ||
| 84 | __entry->val = v; | ||
| 85 | __entry->ret = ret; | ||
| 86 | ), | ||
| 87 | |||
| 88 | TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d ret=%d", | ||
| 89 | __entry->pfn, | ||
| 90 | show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)), | ||
| 91 | __entry->count, | ||
| 92 | __entry->mapcount, __entry->mapping, __entry->mt, | ||
| 93 | __entry->val, __entry->ret) | ||
| 94 | ); | ||
| 95 | |||
| 96 | DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_mod_and_test, | ||
| 97 | |||
| 98 | TP_PROTO(struct page *page, int v, int ret), | ||
| 99 | |||
| 100 | TP_ARGS(page, v, ret) | ||
| 101 | ); | ||
| 102 | |||
| 103 | DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_mod_and_return, | ||
| 104 | |||
| 105 | TP_PROTO(struct page *page, int v, int ret), | ||
| 106 | |||
| 107 | TP_ARGS(page, v, ret) | ||
| 108 | ); | ||
| 109 | |||
| 110 | DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_mod_unless, | ||
| 111 | |||
| 112 | TP_PROTO(struct page *page, int v, int ret), | ||
| 113 | |||
| 114 | TP_ARGS(page, v, ret) | ||
| 115 | ); | ||
| 116 | |||
| 117 | DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_freeze, | ||
| 118 | |||
| 119 | TP_PROTO(struct page *page, int v, int ret), | ||
| 120 | |||
| 121 | TP_ARGS(page, v, ret) | ||
| 122 | ); | ||
| 123 | |||
| 124 | DEFINE_EVENT(page_ref_mod_template, page_ref_unfreeze, | ||
| 125 | |||
| 126 | TP_PROTO(struct page *page, int v), | ||
| 127 | |||
| 128 | TP_ARGS(page, v) | ||
| 129 | ); | ||
| 130 | |||
| 131 | #endif /* _TRACE_PAGE_COUNT_H */ | ||
| 132 | |||
| 133 | /* This part must be outside protection */ | ||
| 134 | #include <trace/define_trace.h> | ||
diff --git a/include/uapi/linux/byteorder/big_endian.h b/include/uapi/linux/byteorder/big_endian.h index 672374450095..cdab17ab907c 100644 --- a/include/uapi/linux/byteorder/big_endian.h +++ b/include/uapi/linux/byteorder/big_endian.h | |||
| @@ -40,51 +40,51 @@ | |||
| 40 | #define __cpu_to_be16(x) ((__force __be16)(__u16)(x)) | 40 | #define __cpu_to_be16(x) ((__force __be16)(__u16)(x)) |
| 41 | #define __be16_to_cpu(x) ((__force __u16)(__be16)(x)) | 41 | #define __be16_to_cpu(x) ((__force __u16)(__be16)(x)) |
| 42 | 42 | ||
| 43 | static inline __le64 __cpu_to_le64p(const __u64 *p) | 43 | static __always_inline __le64 __cpu_to_le64p(const __u64 *p) |
| 44 | { | 44 | { |
| 45 | return (__force __le64)__swab64p(p); | 45 | return (__force __le64)__swab64p(p); |
| 46 | } | 46 | } |
| 47 | static inline __u64 __le64_to_cpup(const __le64 *p) | 47 | static __always_inline __u64 __le64_to_cpup(const __le64 *p) |
| 48 | { | 48 | { |
| 49 | return __swab64p((__u64 *)p); | 49 | return __swab64p((__u64 *)p); |
| 50 | } | 50 | } |
| 51 | static inline __le32 __cpu_to_le32p(const __u32 *p) | 51 | static __always_inline __le32 __cpu_to_le32p(const __u32 *p) |
| 52 | { | 52 | { |
| 53 | return (__force __le32)__swab32p(p); | 53 | return (__force __le32)__swab32p(p); |
| 54 | } | 54 | } |
| 55 | static inline __u32 __le32_to_cpup(const __le32 *p) | 55 | static __always_inline __u32 __le32_to_cpup(const __le32 *p) |
| 56 | { | 56 | { |
| 57 | return __swab32p((__u32 *)p); | 57 | return __swab32p((__u32 *)p); |
| 58 | } | 58 | } |
| 59 | static inline __le16 __cpu_to_le16p(const __u16 *p) | 59 | static __always_inline __le16 __cpu_to_le16p(const __u16 *p) |
| 60 | { | 60 | { |
| 61 | return (__force __le16)__swab16p(p); | 61 | return (__force __le16)__swab16p(p); |
| 62 | } | 62 | } |
| 63 | static inline __u16 __le16_to_cpup(const __le16 *p) | 63 | static __always_inline __u16 __le16_to_cpup(const __le16 *p) |
| 64 | { | 64 | { |
| 65 | return __swab16p((__u16 *)p); | 65 | return __swab16p((__u16 *)p); |
| 66 | } | 66 | } |
| 67 | static inline __be64 __cpu_to_be64p(const __u64 *p) | 67 | static __always_inline __be64 __cpu_to_be64p(const __u64 *p) |
| 68 | { | 68 | { |
| 69 | return (__force __be64)*p; | 69 | return (__force __be64)*p; |
| 70 | } | 70 | } |
| 71 | static inline __u64 __be64_to_cpup(const __be64 *p) | 71 | static __always_inline __u64 __be64_to_cpup(const __be64 *p) |
| 72 | { | 72 | { |
| 73 | return (__force __u64)*p; | 73 | return (__force __u64)*p; |
| 74 | } | 74 | } |
| 75 | static inline __be32 __cpu_to_be32p(const __u32 *p) | 75 | static __always_inline __be32 __cpu_to_be32p(const __u32 *p) |
| 76 | { | 76 | { |
| 77 | return (__force __be32)*p; | 77 | return (__force __be32)*p; |
| 78 | } | 78 | } |
| 79 | static inline __u32 __be32_to_cpup(const __be32 *p) | 79 | static __always_inline __u32 __be32_to_cpup(const __be32 *p) |
| 80 | { | 80 | { |
| 81 | return (__force __u32)*p; | 81 | return (__force __u32)*p; |
| 82 | } | 82 | } |
| 83 | static inline __be16 __cpu_to_be16p(const __u16 *p) | 83 | static __always_inline __be16 __cpu_to_be16p(const __u16 *p) |
| 84 | { | 84 | { |
| 85 | return (__force __be16)*p; | 85 | return (__force __be16)*p; |
| 86 | } | 86 | } |
| 87 | static inline __u16 __be16_to_cpup(const __be16 *p) | 87 | static __always_inline __u16 __be16_to_cpup(const __be16 *p) |
| 88 | { | 88 | { |
| 89 | return (__force __u16)*p; | 89 | return (__force __u16)*p; |
| 90 | } | 90 | } |
diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h index d876736a0017..4b93f2b260dd 100644 --- a/include/uapi/linux/byteorder/little_endian.h +++ b/include/uapi/linux/byteorder/little_endian.h | |||
| @@ -40,51 +40,51 @@ | |||
| 40 | #define __cpu_to_be16(x) ((__force __be16)__swab16((x))) | 40 | #define __cpu_to_be16(x) ((__force __be16)__swab16((x))) |
| 41 | #define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) | 41 | #define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) |
| 42 | 42 | ||
| 43 | static inline __le64 __cpu_to_le64p(const __u64 *p) | 43 | static __always_inline __le64 __cpu_to_le64p(const __u64 *p) |
| 44 | { | 44 | { |
| 45 | return (__force __le64)*p; | 45 | return (__force __le64)*p; |
| 46 | } | 46 | } |
| 47 | static inline __u64 __le64_to_cpup(const __le64 *p) | 47 | static __always_inline __u64 __le64_to_cpup(const __le64 *p) |
| 48 | { | 48 | { |
| 49 | return (__force __u64)*p; | 49 | return (__force __u64)*p; |
| 50 | } | 50 | } |
| 51 | static inline __le32 __cpu_to_le32p(const __u32 *p) | 51 | static __always_inline __le32 __cpu_to_le32p(const __u32 *p) |
| 52 | { | 52 | { |
| 53 | return (__force __le32)*p; | 53 | return (__force __le32)*p; |
| 54 | } | 54 | } |
| 55 | static inline __u32 __le32_to_cpup(const __le32 *p) | 55 | static __always_inline __u32 __le32_to_cpup(const __le32 *p) |
| 56 | { | 56 | { |
| 57 | return (__force __u32)*p; | 57 | return (__force __u32)*p; |
| 58 | } | 58 | } |
| 59 | static inline __le16 __cpu_to_le16p(const __u16 *p) | 59 | static __always_inline __le16 __cpu_to_le16p(const __u16 *p) |
| 60 | { | 60 | { |
| 61 | return (__force __le16)*p; | 61 | return (__force __le16)*p; |
| 62 | } | 62 | } |
| 63 | static inline __u16 __le16_to_cpup(const __le16 *p) | 63 | static __always_inline __u16 __le16_to_cpup(const __le16 *p) |
| 64 | { | 64 | { |
| 65 | return (__force __u16)*p; | 65 | return (__force __u16)*p; |
| 66 | } | 66 | } |
| 67 | static inline __be64 __cpu_to_be64p(const __u64 *p) | 67 | static __always_inline __be64 __cpu_to_be64p(const __u64 *p) |
| 68 | { | 68 | { |
| 69 | return (__force __be64)__swab64p(p); | 69 | return (__force __be64)__swab64p(p); |
| 70 | } | 70 | } |
| 71 | static inline __u64 __be64_to_cpup(const __be64 *p) | 71 | static __always_inline __u64 __be64_to_cpup(const __be64 *p) |
| 72 | { | 72 | { |
| 73 | return __swab64p((__u64 *)p); | 73 | return __swab64p((__u64 *)p); |
| 74 | } | 74 | } |
| 75 | static inline __be32 __cpu_to_be32p(const __u32 *p) | 75 | static __always_inline __be32 __cpu_to_be32p(const __u32 *p) |
| 76 | { | 76 | { |
| 77 | return (__force __be32)__swab32p(p); | 77 | return (__force __be32)__swab32p(p); |
| 78 | } | 78 | } |
| 79 | static inline __u32 __be32_to_cpup(const __be32 *p) | 79 | static __always_inline __u32 __be32_to_cpup(const __be32 *p) |
| 80 | { | 80 | { |
| 81 | return __swab32p((__u32 *)p); | 81 | return __swab32p((__u32 *)p); |
| 82 | } | 82 | } |
| 83 | static inline __be16 __cpu_to_be16p(const __u16 *p) | 83 | static __always_inline __be16 __cpu_to_be16p(const __u16 *p) |
| 84 | { | 84 | { |
| 85 | return (__force __be16)__swab16p(p); | 85 | return (__force __be16)__swab16p(p); |
| 86 | } | 86 | } |
| 87 | static inline __u16 __be16_to_cpup(const __be16 *p) | 87 | static __always_inline __u16 __be16_to_cpup(const __be16 *p) |
| 88 | { | 88 | { |
| 89 | return __swab16p((__u16 *)p); | 89 | return __swab16p((__u16 *)p); |
| 90 | } | 90 | } |
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h index b56dfcfe922a..c3fdfe79e5cc 100644 --- a/include/uapi/linux/elf-em.h +++ b/include/uapi/linux/elf-em.h | |||
| @@ -30,7 +30,6 @@ | |||
| 30 | #define EM_X86_64 62 /* AMD x86-64 */ | 30 | #define EM_X86_64 62 /* AMD x86-64 */ |
| 31 | #define EM_S390 22 /* IBM S/390 */ | 31 | #define EM_S390 22 /* IBM S/390 */ |
| 32 | #define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */ | 32 | #define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */ |
| 33 | #define EM_V850 87 /* NEC v850 */ | ||
| 34 | #define EM_M32R 88 /* Renesas M32R */ | 33 | #define EM_M32R 88 /* Renesas M32R */ |
| 35 | #define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */ | 34 | #define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */ |
| 36 | #define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */ | 35 | #define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */ |
| @@ -50,8 +49,6 @@ | |||
| 50 | */ | 49 | */ |
| 51 | #define EM_ALPHA 0x9026 | 50 | #define EM_ALPHA 0x9026 |
| 52 | 51 | ||
| 53 | /* Bogus old v850 magic number, used by old tools. */ | ||
| 54 | #define EM_CYGNUS_V850 0x9080 | ||
| 55 | /* Bogus old m32r magic number, used by old tools. */ | 52 | /* Bogus old m32r magic number, used by old tools. */ |
| 56 | #define EM_CYGNUS_M32R 0x9041 | 53 | #define EM_CYGNUS_M32R 0x9041 |
| 57 | /* This is the old interim value for S/390 architecture */ | 54 | /* This is the old interim value for S/390 architecture */ |
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h index 0e011eb91b5d..3f10e5317b46 100644 --- a/include/uapi/linux/swab.h +++ b/include/uapi/linux/swab.h | |||
| @@ -151,7 +151,7 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val) | |||
| 151 | * __swab16p - return a byteswapped 16-bit value from a pointer | 151 | * __swab16p - return a byteswapped 16-bit value from a pointer |
| 152 | * @p: pointer to a naturally-aligned 16-bit value | 152 | * @p: pointer to a naturally-aligned 16-bit value |
| 153 | */ | 153 | */ |
| 154 | static inline __u16 __swab16p(const __u16 *p) | 154 | static __always_inline __u16 __swab16p(const __u16 *p) |
| 155 | { | 155 | { |
| 156 | #ifdef __arch_swab16p | 156 | #ifdef __arch_swab16p |
| 157 | return __arch_swab16p(p); | 157 | return __arch_swab16p(p); |
| @@ -164,7 +164,7 @@ static inline __u16 __swab16p(const __u16 *p) | |||
| 164 | * __swab32p - return a byteswapped 32-bit value from a pointer | 164 | * __swab32p - return a byteswapped 32-bit value from a pointer |
| 165 | * @p: pointer to a naturally-aligned 32-bit value | 165 | * @p: pointer to a naturally-aligned 32-bit value |
| 166 | */ | 166 | */ |
| 167 | static inline __u32 __swab32p(const __u32 *p) | 167 | static __always_inline __u32 __swab32p(const __u32 *p) |
| 168 | { | 168 | { |
| 169 | #ifdef __arch_swab32p | 169 | #ifdef __arch_swab32p |
| 170 | return __arch_swab32p(p); | 170 | return __arch_swab32p(p); |
| @@ -177,7 +177,7 @@ static inline __u32 __swab32p(const __u32 *p) | |||
| 177 | * __swab64p - return a byteswapped 64-bit value from a pointer | 177 | * __swab64p - return a byteswapped 64-bit value from a pointer |
| 178 | * @p: pointer to a naturally-aligned 64-bit value | 178 | * @p: pointer to a naturally-aligned 64-bit value |
| 179 | */ | 179 | */ |
| 180 | static inline __u64 __swab64p(const __u64 *p) | 180 | static __always_inline __u64 __swab64p(const __u64 *p) |
| 181 | { | 181 | { |
| 182 | #ifdef __arch_swab64p | 182 | #ifdef __arch_swab64p |
| 183 | return __arch_swab64p(p); | 183 | return __arch_swab64p(p); |
| @@ -232,7 +232,7 @@ static inline void __swab16s(__u16 *p) | |||
| 232 | * __swab32s - byteswap a 32-bit value in-place | 232 | * __swab32s - byteswap a 32-bit value in-place |
| 233 | * @p: pointer to a naturally-aligned 32-bit value | 233 | * @p: pointer to a naturally-aligned 32-bit value |
| 234 | */ | 234 | */ |
| 235 | static inline void __swab32s(__u32 *p) | 235 | static __always_inline void __swab32s(__u32 *p) |
| 236 | { | 236 | { |
| 237 | #ifdef __arch_swab32s | 237 | #ifdef __arch_swab32s |
| 238 | __arch_swab32s(p); | 238 | __arch_swab32s(p); |
| @@ -245,7 +245,7 @@ static inline void __swab32s(__u32 *p) | |||
| 245 | * __swab64s - byteswap a 64-bit value in-place | 245 | * __swab64s - byteswap a 64-bit value in-place |
| 246 | * @p: pointer to a naturally-aligned 64-bit value | 246 | * @p: pointer to a naturally-aligned 64-bit value |
| 247 | */ | 247 | */ |
| 248 | static inline void __swab64s(__u64 *p) | 248 | static __always_inline void __swab64s(__u64 *p) |
| 249 | { | 249 | { |
| 250 | #ifdef __arch_swab64s | 250 | #ifdef __arch_swab64s |
| 251 | __arch_swab64s(p); | 251 | __arch_swab64s(p); |
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h index d7f1cbc3766c..343d7ddefe04 100644 --- a/include/uapi/linux/virtio_balloon.h +++ b/include/uapi/linux/virtio_balloon.h | |||
| @@ -51,7 +51,8 @@ struct virtio_balloon_config { | |||
| 51 | #define VIRTIO_BALLOON_S_MINFLT 3 /* Number of minor faults */ | 51 | #define VIRTIO_BALLOON_S_MINFLT 3 /* Number of minor faults */ |
| 52 | #define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */ | 52 | #define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */ |
| 53 | #define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */ | 53 | #define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */ |
| 54 | #define VIRTIO_BALLOON_S_NR 6 | 54 | #define VIRTIO_BALLOON_S_AVAIL 6 /* Available memory as in /proc */ |
| 55 | #define VIRTIO_BALLOON_S_NR 7 | ||
| 55 | 56 | ||
| 56 | /* | 57 | /* |
| 57 | * Memory statistics structure. | 58 | * Memory statistics structure. |
