Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/bug.h			60
-rw-r--r--	include/asm-generic/early_ioremap.h		42
-rw-r--r--	include/asm-generic/io.h			4
-rw-r--r--	include/asm-generic/iomap.h			2
-rw-r--r--	include/asm-generic/percpu.h			13
-rw-r--r--	include/linux/binfmts.h				1
-rw-r--r--	include/linux/crash_dump.h			1
-rw-r--r--	include/linux/idr.h				63
-rw-r--r--	include/linux/io.h				2
-rw-r--r--	include/linux/lglock.h				16
-rw-r--r--	include/linux/memcontrol.h			23
-rw-r--r--	include/linux/mempolicy.h			3
-rw-r--r--	include/linux/mm.h				17
-rw-r--r--	include/linux/mm_types.h			4
-rw-r--r--	include/linux/mmdebug.h				4
-rw-r--r--	include/linux/percpu.h				350
-rw-r--r--	include/linux/res_counter.h			6
-rw-r--r--	include/linux/rio.h				5
-rw-r--r--	include/linux/sched.h				15
-rw-r--r--	include/linux/slab.h				6
-rw-r--r--	include/linux/slub_def.h			3
-rw-r--r--	include/linux/topology.h			4
-rw-r--r--	include/linux/vmacache.h			38
-rw-r--r--	include/linux/vmstat.h				8
-rw-r--r--	include/linux/writeback.h			2
-rw-r--r--	include/trace/events/task.h			2
-rw-r--r--	include/uapi/asm-generic/mman-common.h		2
-rw-r--r--	include/uapi/linux/prctl.h			3
28 files changed, 417 insertions, 282 deletions
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 7d10f962aa13..630dd2372238 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -52,7 +52,7 @@ struct bug_entry {
 #endif
 
 #ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
 #endif
 
 /*
@@ -106,33 +106,6 @@ extern void warn_slowpath_null(const char *file, const int line);
 	unlikely(__ret_warn_on); \
 })
 
-#else /* !CONFIG_BUG */
-#ifndef HAVE_ARCH_BUG
-#define BUG() do {} while(0)
-#endif
-
-#ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (condition) ; } while(0)
-#endif
-
-#ifndef HAVE_ARCH_WARN_ON
-#define WARN_ON(condition) ({ \
-	int __ret_warn_on = !!(condition); \
-	unlikely(__ret_warn_on); \
-})
-#endif
-
-#ifndef WARN
-#define WARN(condition, format...) ({ \
-	int __ret_warn_on = !!(condition); \
-	unlikely(__ret_warn_on); \
-})
-#endif
-
-#define WARN_TAINT(condition, taint, format...) WARN_ON(condition)
-
-#endif
-
 #define WARN_ON_ONCE(condition) ({ \
 	static bool __section(.data.unlikely) __warned; \
 	int __ret_warn_once = !!(condition); \
@@ -163,6 +136,37 @@ extern void warn_slowpath_null(const char *file, const int line);
 	unlikely(__ret_warn_once); \
 })
 
+#else /* !CONFIG_BUG */
+#ifndef HAVE_ARCH_BUG
+#define BUG() do {} while (1)
+#endif
+
+#ifndef HAVE_ARCH_BUG_ON
+#define BUG_ON(condition) do { if (condition) ; } while (0)
+#endif
+
+#ifndef HAVE_ARCH_WARN_ON
+#define WARN_ON(condition) ({ \
+	int __ret_warn_on = !!(condition); \
+	unlikely(__ret_warn_on); \
+})
+#endif
+
+#ifndef WARN
+#define WARN(condition, format...) ({ \
+	int __ret_warn_on = !!(condition); \
+	no_printk(format); \
+	unlikely(__ret_warn_on); \
+})
+#endif
+
+#define WARN_ON_ONCE(condition) WARN_ON(condition)
+#define WARN_ONCE(condition, format...) WARN(condition, format)
+#define WARN_TAINT(condition, taint, format...) WARN(condition, format)
+#define WARN_TAINT_ONCE(condition, taint, format...) WARN(condition, format)
+
+#endif
+
 /*
  * WARN_ON_SMP() is for cases that the warning is either
  * meaningless for !SMP or may even cause failures.
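The point of the hunks above: the !CONFIG_BUG stubs now route WARN()'s arguments through no_printk(), so the format string is still type-checked by the compiler even when the warning code is compiled out, and WARN_ON_ONCE()/WARN_ONCE()/WARN_TAINT*() collapse onto the remaining stubs instead of each carrying its own copy. A minimal caller-side sketch (the variable name here is invented for illustration):

	/* Illustrative only; 'nr_free' is a made-up counter. */
	static long nr_free;

	static void check_counters(void)
	{
		/*
		 * With CONFIG_BUG=n this expands to the stub above:
		 * no_printk() generates no code at run time but still lets
		 * the compiler match "%ld" against nr_free, so the format
		 * string cannot silently bit-rot in !CONFIG_BUG builds.
		 */
		WARN(nr_free < 0, "nr_free underflow: %ld\n", nr_free);
	}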
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
new file mode 100644
index 000000000000..a5de55c04fb2
--- /dev/null
+++ b/include/asm-generic/early_ioremap.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_EARLY_IOREMAP_H_
+#define _ASM_EARLY_IOREMAP_H_
+
+#include <linux/types.h>
+
+/*
+ * early_ioremap() and early_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ */
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+				   unsigned long size);
+extern void *early_memremap(resource_size_t phys_addr,
+			    unsigned long size);
+extern void early_iounmap(void __iomem *addr, unsigned long size);
+extern void early_memunmap(void *addr, unsigned long size);
+
+/*
+ * Weak function called by early_ioremap_reset(). It does nothing, but
+ * architectures may provide their own version to do any needed cleanups.
+ */
+extern void early_ioremap_shutdown(void);
+
+#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
+/* Arch-specific initialization */
+extern void early_ioremap_init(void);
+
+/* Generic initialization called by architecture code */
+extern void early_ioremap_setup(void);
+
+/*
+ * Called as last step in paging_init() so library can act
+ * accordingly for subsequent map/unmap requests.
+ */
+extern void early_ioremap_reset(void);
+
+#else
+static inline void early_ioremap_init(void) { }
+static inline void early_ioremap_setup(void) { }
+static inline void early_ioremap_reset(void) { }
+#endif
+
+#endif /* _ASM_EARLY_IOREMAP_H_ */
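For context, a rough sketch of how early boot code would use the interface declared in this new header; the function name, physical address argument, and mapping size below are invented for illustration:

	/* Illustrative early-boot caller; names and sizes are made up. */
	static void __init probe_boot_table(resource_size_t table_phys)
	{
		void __iomem *p;

		/* Temporary mapping, usable before the real ioremap() works. */
		p = early_ioremap(table_phys, PAGE_SIZE);
		if (!p)
			return;
		/* ... read the firmware table through p ... */
		early_iounmap(p, PAGE_SIZE);	/* early mappings must be torn down */
	}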
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index d5afe96adba6..975e1cc75edb 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -327,7 +327,7 @@ static inline void iounmap(void __iomem *addr)
 }
 #endif /* CONFIG_MMU */
 
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
 #ifndef CONFIG_GENERIC_IOMAP
 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
@@ -341,7 +341,7 @@ static inline void ioport_unmap(void __iomem *p)
 extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
 extern void ioport_unmap(void __iomem *p);
 #endif /* CONFIG_GENERIC_IOMAP */
-#endif /* CONFIG_HAS_IOPORT */
+#endif /* CONFIG_HAS_IOPORT_MAP */
 
 #ifndef xlate_dev_kmem_ptr
 #define xlate_dev_kmem_ptr(p)	p
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 6afd7d6a9899..1b41011643a5 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -56,7 +56,7 @@ extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long coun
 extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
 extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
 
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
 /* Create a virtual mapping cookie for an IO port range */
 extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
 extern void ioport_unmap(void __iomem *);
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d17784ea37ff..0703aa75b5e8 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -56,17 +56,17 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define per_cpu(var, cpu) \
 	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
 
-#ifndef __this_cpu_ptr
-#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+#ifndef raw_cpu_ptr
+#define raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
 #endif
 #ifdef CONFIG_DEBUG_PREEMPT
 #define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
 #else
-#define this_cpu_ptr(ptr) __this_cpu_ptr(ptr)
+#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
 #endif
 
 #define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
-#define __raw_get_cpu_var(var) (*__this_cpu_ptr(&(var)))
+#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
 
 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void setup_per_cpu_areas(void);
@@ -83,7 +83,7 @@ extern void setup_per_cpu_areas(void);
 #define __get_cpu_var(var)	(*VERIFY_PERCPU_PTR(&(var)))
 #define __raw_get_cpu_var(var)	(*VERIFY_PERCPU_PTR(&(var)))
 #define this_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
-#define __this_cpu_ptr(ptr)	this_cpu_ptr(ptr)
+#define raw_cpu_ptr(ptr)	this_cpu_ptr(ptr)
 
 #endif	/* SMP */
 
@@ -122,4 +122,7 @@ extern void setup_per_cpu_areas(void);
 #define PER_CPU_DEF_ATTRIBUTES
 #endif
 
+/* Keep until we have removed all uses of __this_cpu_ptr */
+#define __this_cpu_ptr raw_cpu_ptr
+
 #endif /* _ASM_GENERIC_PERCPU_H_ */
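The intent of the rename, sketched below: raw_cpu_ptr() is the unchecked accessor, while this_cpu_ptr() can verify under CONFIG_DEBUG_PREEMPT that the caller cannot migrate mid-access. The per-cpu counter in this sketch is invented for illustration:

	/* Illustrative per-cpu counter; the name is made up. */
	DEFINE_PER_CPU(int, stat_count);

	static void stat_inc(void)
	{
		preempt_disable();
		/* Checked accessor: may verify preemption is disabled. */
		(*this_cpu_ptr(&stat_count))++;
		preempt_enable();
	}

	static void stat_inc_unchecked(void)
	{
		/* Caller guarantees no migration; skip the debug check. */
		(*raw_cpu_ptr(&stat_count))++;
	}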
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index b4a745d7d9a9..61f29e5ea840 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -44,7 +44,6 @@ struct linux_binprm {
 	unsigned interp_flags;
 	unsigned interp_data;
 	unsigned long loader, exec;
-	char tcomm[TASK_COMM_LEN];
 };
 
 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 7032518f8542..72ab536ad3de 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -25,6 +25,7 @@ extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
 
 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
 						unsigned long, int);
+void vmcore_cleanup(void);
 
 /* Architecture code defines this if there are other possible ELF
  * machine types, e.g. on bi-arch capable hardware. */
diff --git a/include/linux/idr.h b/include/linux/idr.h
index f669585c4fc5..6af3400b9b2f 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -133,69 +133,6 @@ static inline void *idr_find(struct idr *idr, int id)
 	for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
 
 /*
- * Don't use the following functions. These exist only to suppress
- * deprecated warnings on EXPORT_SYMBOL()s.
- */
-int __idr_pre_get(struct idr *idp, gfp_t gfp_mask);
-int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
-void __idr_remove_all(struct idr *idp);
-
-/**
- * idr_pre_get - reserve resources for idr allocation
- * @idp:	idr handle
- * @gfp_mask:	memory allocation flags
- *
- * Part of old alloc interface. This is going away. Use
- * idr_preload[_end]() and idr_alloc() instead.
- */
-static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask)
-{
-	return __idr_pre_get(idp, gfp_mask);
-}
-
-/**
- * idr_get_new_above - allocate new idr entry above or equal to a start id
- * @idp:	idr handle
- * @ptr:	pointer you want associated with the id
- * @starting_id: id to start search at
- * @id:		pointer to the allocated handle
- *
- * Part of old alloc interface. This is going away. Use
- * idr_preload[_end]() and idr_alloc() instead.
- */
-static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr,
-						 int starting_id, int *id)
-{
-	return __idr_get_new_above(idp, ptr, starting_id, id);
-}
-
-/**
- * idr_get_new - allocate new idr entry
- * @idp:	idr handle
- * @ptr:	pointer you want associated with the id
- * @id:		pointer to the allocated handle
- *
- * Part of old alloc interface. This is going away. Use
- * idr_preload[_end]() and idr_alloc() instead.
- */
-static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id)
-{
-	return __idr_get_new_above(idp, ptr, 0, id);
-}
-
-/**
- * idr_remove_all - remove all ids from the given idr tree
- * @idp:	idr handle
- *
- * If you're trying to destroy @idp, calling idr_destroy() is enough.
- * This is going away. Don't use.
- */
-static inline void __deprecated idr_remove_all(struct idr *idp)
-{
-	__idr_remove_all(idp);
-}
-
-/*
  * IDA - IDR based id allocator, use when translation from id to
  * pointer isn't necessary.
  *
diff --git a/include/linux/io.h b/include/linux/io.h
index 8a18e75600cc..b76e6e545806 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -41,7 +41,7 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
 /*
  * Managed iomap interface
  */
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
 void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
 			       unsigned int nr);
 void devm_ioport_unmap(struct device *dev, void __iomem *addr);
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 96549abe8842..0081f000e34b 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -25,6 +25,8 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 
+#ifdef CONFIG_SMP
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define LOCKDEP_INIT_MAP lockdep_init_map
 #else
@@ -57,4 +59,18 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu);
 void lg_global_lock(struct lglock *lg);
 void lg_global_unlock(struct lglock *lg);
 
+#else
+/* When !CONFIG_SMP, map lglock to spinlock */
+#define lglock spinlock
+#define DEFINE_LGLOCK(name) DEFINE_SPINLOCK(name)
+#define DEFINE_STATIC_LGLOCK(name) static DEFINE_SPINLOCK(name)
+#define lg_lock_init(lg, name) spin_lock_init(lg)
+#define lg_local_lock spin_lock
+#define lg_local_unlock spin_unlock
+#define lg_local_lock_cpu(lg, cpu) spin_lock(lg)
+#define lg_local_unlock_cpu(lg, cpu) spin_unlock(lg)
+#define lg_global_lock spin_lock
+#define lg_global_unlock spin_unlock
+#endif
+
 #endif
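With the !SMP fallback above, callers keep a single code path: on uniprocessor builds every lg_* call degenerates to a plain spinlock operation. A hedged sketch of a caller (the lock and list names below are invented):

	/* Illustrative; 'files_lglock' and the list head are made up. */
	DEFINE_STATIC_LGLOCK(files_lglock);

	static void add_entry(struct list_head *entry, struct list_head *head)
	{
		lg_local_lock(&files_lglock);	/* per-cpu lock on SMP, spinlock on UP */
		list_add(entry, head);
		lg_local_unlock(&files_lglock);
	}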
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index eccfb4a4b379..b569b8be5c5a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -65,7 +65,7 @@ struct mem_cgroup_reclaim_cookie {
  * (Of course, if memcg does memory allocation in future, GFP_KERNEL is sane.)
  */
 
-extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
+extern int mem_cgroup_charge_anon(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
 /* for swap handling */
 extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
@@ -74,7 +74,7 @@ extern void mem_cgroup_commit_charge_swapin(struct page *page,
 					struct mem_cgroup *memcg);
 extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
 
-extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
+extern int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
 
 struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
@@ -94,7 +94,6 @@ bool task_in_mem_cgroup(struct task_struct *task,
 
 extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
-extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
 
 extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
 extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
@@ -234,13 +233,13 @@ void mem_cgroup_print_bad_page(struct page *page);
 #else /* CONFIG_MEMCG */
 struct mem_cgroup;
 
-static inline int mem_cgroup_newpage_charge(struct page *page,
+static inline int mem_cgroup_charge_anon(struct page *page,
 					struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return 0;
 }
 
-static inline int mem_cgroup_cache_charge(struct page *page,
+static inline int mem_cgroup_charge_file(struct page *page,
 					struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return 0;
@@ -294,11 +293,6 @@ static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 	return NULL;
 }
 
-static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
-{
-	return NULL;
-}
-
 static inline bool mm_match_cgroup(struct mm_struct *mm,
 		struct mem_cgroup *memcg)
 {
@@ -497,6 +491,9 @@ void __memcg_kmem_commit_charge(struct page *page,
 void __memcg_kmem_uncharge_pages(struct page *page, int order);
 
 int memcg_cache_id(struct mem_cgroup *memcg);
+
+char *memcg_create_cache_name(struct mem_cgroup *memcg,
+			      struct kmem_cache *root_cache);
 int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
 			     struct kmem_cache *root_cache);
 void memcg_free_cache_params(struct kmem_cache *s);
@@ -510,7 +507,7 @@ struct kmem_cache *
 __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
 
 void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
-void kmem_cache_destroy_memcg_children(struct kmem_cache *s);
+int __kmem_cache_destroy_memcg_children(struct kmem_cache *s);
 
 /**
  * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
@@ -664,10 +661,6 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	return cachep;
 }
-
-static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
-{
-}
 #endif /* CONFIG_MEMCG_KMEM */
 #endif /* _LINUX_MEMCONTROL_H */
 
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5f1ea756aace..3c1b968da0ca 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -143,7 +143,6 @@ extern void numa_policy_init(void);
 extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
 				enum mpol_rebind_step step);
 extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
-extern void mpol_fix_fork_child_flag(struct task_struct *p);
 
 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
 				unsigned long addr, gfp_t gfp_flags,
@@ -151,7 +150,7 @@ extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
 extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
 extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
 				const nodemask_t *mask);
-extern unsigned slab_node(void);
+extern unsigned int mempolicy_slab_node(void);
 
 extern enum zone_type policy_zone;
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 35300f390eb6..abc848412e3c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -177,6 +177,9 @@ extern unsigned int kobjsize(const void *objp);
  */
 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
 
+/* This mask defines which mm->def_flags a process can inherit its parent */
+#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE
+
 /*
  * mapping from the currently active vm_flags protection bits (the
  * low four bits) to a page protection mask..
@@ -210,6 +213,10 @@ struct vm_fault {
 					 * is set (which is also implied by
 					 * VM_FAULT_ERROR).
 					 */
+	/* for ->map_pages() only */
+	pgoff_t max_pgoff;		/* map pages for offset from pgoff till
+					 * max_pgoff inclusive */
+	pte_t *pte;			/* pte entry associated with ->pgoff */
 };
 
 /*
@@ -221,6 +228,7 @@ struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 	/* notification that a previously read-only page is about to become
 	 * writable, if an error is returned it will cause a SIGBUS */
@@ -581,6 +589,9 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 		pte = pte_mkwrite(pte);
 	return pte;
 }
+
+void do_set_pte(struct vm_area_struct *vma, unsigned long address,
+		struct page *page, pte_t *pte, bool write, bool anon);
 #endif
 
 /*
@@ -684,7 +695,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 #define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
 #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
-#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_WIDTH) - 1)
+#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
 #define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
 
 static inline enum zone_type page_zonenum(const struct page *page)
@@ -1836,6 +1847,7 @@ extern void truncate_inode_pages_final(struct address_space *);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
+extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
 extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 /* mm/page-writeback.c */
@@ -1863,9 +1875,6 @@ void page_cache_async_readahead(struct address_space *mapping,
 			   unsigned long size);
 
 unsigned long max_sane_readahead(unsigned long nr);
-unsigned long ra_submit(struct file_ra_state *ra,
-			struct address_space *mapping,
-			struct file *filp);
 
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
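The new ->map_pages() hook added above lets a fault handler opportunistically map already-cached pages around the faulting one; struct vm_fault gains max_pgoff and pte to describe that range. A filesystem opts in by wiring up filemap_map_pages() in its vm_operations, roughly as below ('myfs' is a placeholder name, not from this diff):

	/* Sketch of a filesystem opting in; 'myfs' is a placeholder. */
	static const struct vm_operations_struct myfs_file_vm_ops = {
		.fault		= filemap_fault,
		.map_pages	= filemap_map_pages,	/* fault-around on cached pages */
		.page_mkwrite	= filemap_page_mkwrite,
	};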
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 290901a8c1de..2b58d192ea24 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -342,9 +342,9 @@ struct mm_rss_stat {
 
 struct kioctx_table;
 struct mm_struct {
-	struct vm_area_struct * mmap;		/* list of VMAs */
+	struct vm_area_struct *mmap;		/* list of VMAs */
 	struct rb_root mm_rb;
-	struct vm_area_struct * mmap_cache;	/* last find_vma result */
+	u32 vmacache_seqnum;			/* per-thread vmacache */
 #ifdef CONFIG_MMU
 	unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 5042c036dda9..2d57efa64cc1 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -3,8 +3,8 @@
 
 struct page;
 
-extern void dump_page(struct page *page, char *reason);
-extern void dump_page_badflags(struct page *page, char *reason,
+extern void dump_page(struct page *page, const char *reason);
+extern void dump_page_badflags(struct page *page, const char *reason,
 			       unsigned long badflags);
 
 #ifdef CONFIG_DEBUG_VM
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index e3817d2441b6..e7a0b95ed527 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -173,6 +173,12 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 
 extern void __bad_size_call_parameter(void);
 
+#ifdef CONFIG_DEBUG_PREEMPT
+extern void __this_cpu_preempt_check(const char *op);
+#else
+static inline void __this_cpu_preempt_check(const char *op) { }
+#endif
+
 #define __pcpu_size_call_return(stem, variable) \
 ({	typeof(variable) pscr_ret__; \
 	__verify_pcpu_ptr(&(variable)); \
@@ -243,6 +249,8 @@ do { \
 } while (0)
 
 /*
+ * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
+ *
  * Optimized manipulation for memory allocated through the per cpu
  * allocator or for addresses of per cpu variables.
  *
@@ -296,7 +304,7 @@ do { \
 do { \
 	unsigned long flags; \
 	raw_local_irq_save(flags); \
-	*__this_cpu_ptr(&(pcp)) op val; \
+	*raw_cpu_ptr(&(pcp)) op val; \
 	raw_local_irq_restore(flags); \
 } while (0)
 
@@ -381,8 +389,8 @@ do { \
 	typeof(pcp) ret__; \
 	unsigned long flags; \
 	raw_local_irq_save(flags); \
-	__this_cpu_add(pcp, val); \
-	ret__ = __this_cpu_read(pcp); \
+	raw_cpu_add(pcp, val); \
+	ret__ = raw_cpu_read(pcp); \
 	raw_local_irq_restore(flags); \
 	ret__; \
 })
@@ -411,8 +419,8 @@ do { \
 ({	typeof(pcp) ret__; \
 	unsigned long flags; \
 	raw_local_irq_save(flags); \
-	ret__ = __this_cpu_read(pcp); \
-	__this_cpu_write(pcp, nval); \
+	ret__ = raw_cpu_read(pcp); \
+	raw_cpu_write(pcp, nval); \
 	raw_local_irq_restore(flags); \
 	ret__; \
 })
@@ -439,9 +447,9 @@ do { \
 	typeof(pcp) ret__; \
 	unsigned long flags; \
 	raw_local_irq_save(flags); \
-	ret__ = __this_cpu_read(pcp); \
+	ret__ = raw_cpu_read(pcp); \
 	if (ret__ == (oval)) \
-		__this_cpu_write(pcp, nval); \
+		raw_cpu_write(pcp, nval); \
 	raw_local_irq_restore(flags); \
 	ret__; \
 })
@@ -476,7 +484,7 @@ do { \
 	int ret__; \
 	unsigned long flags; \
 	raw_local_irq_save(flags); \
-	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
+	ret__ = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \
 			oval1, oval2, nval1, nval2); \
 	raw_local_irq_restore(flags); \
 	ret__; \
@@ -504,12 +512,8 @@ do { \
 #endif
 
 /*
- * Generic percpu operations for context that are safe from preemption/interrupts.
- * Either we do not care about races or the caller has the
- * responsibility of handling preemption/interrupt issues. Arch code can still
- * override these instructions since the arch per cpu code may be more
- * efficient and may actually get race freeness for free (that is the
- * case for x86 for example).
+ * Generic percpu operations for contexts where we do not want to do
+ * any checks for preemptions.
  *
  * If there is no other protection through preempt disable and/or
  * disabling interupts then one of these RMW operations can show unexpected
@@ -517,211 +521,285 @@ do { \
  * or an interrupt occurred and the same percpu variable was modified from
  * the interrupt context.
  */
-#ifndef __this_cpu_read
-# ifndef __this_cpu_read_1
-# define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
+#ifndef raw_cpu_read
+# ifndef raw_cpu_read_1
+# define raw_cpu_read_1(pcp)	(*raw_cpu_ptr(&(pcp)))
 # endif
-# ifndef __this_cpu_read_2
-# define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
+# ifndef raw_cpu_read_2
+# define raw_cpu_read_2(pcp)	(*raw_cpu_ptr(&(pcp)))
 # endif
-# ifndef __this_cpu_read_4
-# define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
+# ifndef raw_cpu_read_4
+# define raw_cpu_read_4(pcp)	(*raw_cpu_ptr(&(pcp)))
 # endif
-# ifndef __this_cpu_read_8
-# define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
+# ifndef raw_cpu_read_8
+# define raw_cpu_read_8(pcp)	(*raw_cpu_ptr(&(pcp)))
 # endif
-# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
+# define raw_cpu_read(pcp)	__pcpu_size_call_return(raw_cpu_read_, (pcp))
 #endif
 
-#define __this_cpu_generic_to_op(pcp, val, op) \
+#define raw_cpu_generic_to_op(pcp, val, op) \
 do { \
-	*__this_cpu_ptr(&(pcp)) op val; \
+	*raw_cpu_ptr(&(pcp)) op val; \
 } while (0)
 
-#ifndef __this_cpu_write
-# ifndef __this_cpu_write_1
-# define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+
+#ifndef raw_cpu_write
+# ifndef raw_cpu_write_1
+# define raw_cpu_write_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
 # endif
-# ifndef __this_cpu_write_2
-# define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+# ifndef raw_cpu_write_2
+# define raw_cpu_write_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
 # endif
-# ifndef __this_cpu_write_4
-# define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+# ifndef raw_cpu_write_4
+# define raw_cpu_write_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
 # endif
-# ifndef __this_cpu_write_8
-# define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+# ifndef raw_cpu_write_8
+# define raw_cpu_write_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
 # endif
-# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
+# define raw_cpu_write(pcp, val)	__pcpu_size_call(raw_cpu_write_, (pcp), (val))
 #endif
 
-#ifndef __this_cpu_add
-# ifndef __this_cpu_add_1
-# define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+#ifndef raw_cpu_add
+# ifndef raw_cpu_add_1
+# define raw_cpu_add_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
 # endif
-# ifndef __this_cpu_add_2
-# define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+# ifndef raw_cpu_add_2
+# define raw_cpu_add_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
 # endif
-# ifndef __this_cpu_add_4
-# define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+# ifndef raw_cpu_add_4
+# define raw_cpu_add_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
 # endif
-# ifndef __this_cpu_add_8
-# define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+# ifndef raw_cpu_add_8
+# define raw_cpu_add_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
 # endif
-# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
+# define raw_cpu_add(pcp, val)	__pcpu_size_call(raw_cpu_add_, (pcp), (val))
 #endif
 
-#ifndef __this_cpu_sub
-# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(typeof(pcp))(val))
+#ifndef raw_cpu_sub
+# define raw_cpu_sub(pcp, val)	raw_cpu_add((pcp), -(val))
 #endif
 
-#ifndef __this_cpu_inc
-# define __this_cpu_inc(pcp)	__this_cpu_add((pcp), 1)
+#ifndef raw_cpu_inc
+# define raw_cpu_inc(pcp)	raw_cpu_add((pcp), 1)
 #endif
 
-#ifndef __this_cpu_dec
-# define __this_cpu_dec(pcp)	__this_cpu_sub((pcp), 1)
+#ifndef raw_cpu_dec
+# define raw_cpu_dec(pcp)	raw_cpu_sub((pcp), 1)
 #endif
 
-#ifndef __this_cpu_and
-# ifndef __this_cpu_and_1
-# define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+#ifndef raw_cpu_and
+# ifndef raw_cpu_and_1
+# define raw_cpu_and_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
 # endif
-# ifndef __this_cpu_and_2
-# define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+# ifndef raw_cpu_and_2
+# define raw_cpu_and_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
 # endif
-# ifndef __this_cpu_and_4
-# define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+# ifndef raw_cpu_and_4
+# define raw_cpu_and_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
 # endif
-# ifndef __this_cpu_and_8
-# define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+# ifndef raw_cpu_and_8
+# define raw_cpu_and_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
 # endif
-# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
+# define raw_cpu_and(pcp, val)	__pcpu_size_call(raw_cpu_and_, (pcp), (val))
 #endif
 
-#ifndef __this_cpu_or
-# ifndef __this_cpu_or_1
-# define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+#ifndef raw_cpu_or
+# ifndef raw_cpu_or_1
+# define raw_cpu_or_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
 # endif
-# ifndef __this_cpu_or_2
-# define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+# ifndef raw_cpu_or_2
+# define raw_cpu_or_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
 # endif
-# ifndef __this_cpu_or_4
-# define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+# ifndef raw_cpu_or_4
+# define raw_cpu_or_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
 # endif
-# ifndef __this_cpu_or_8
-# define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+# ifndef raw_cpu_or_8
+# define raw_cpu_or_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
 # endif
-# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
+# define raw_cpu_or(pcp, val)	__pcpu_size_call(raw_cpu_or_, (pcp), (val))
 #endif
 
-#define __this_cpu_generic_add_return(pcp, val) \
+#define raw_cpu_generic_add_return(pcp, val) \
 ({ \
-	__this_cpu_add(pcp, val); \
-	__this_cpu_read(pcp); \
+	raw_cpu_add(pcp, val); \
+	raw_cpu_read(pcp); \
 })
 
-#ifndef __this_cpu_add_return
-# ifndef __this_cpu_add_return_1
-# define __this_cpu_add_return_1(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+#ifndef raw_cpu_add_return
+# ifndef raw_cpu_add_return_1
+# define raw_cpu_add_return_1(pcp, val)	raw_cpu_generic_add_return(pcp, val)
 # endif
-# ifndef __this_cpu_add_return_2
-# define __this_cpu_add_return_2(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# ifndef raw_cpu_add_return_2
+# define raw_cpu_add_return_2(pcp, val)	raw_cpu_generic_add_return(pcp, val)
 # endif
-# ifndef __this_cpu_add_return_4
-# define __this_cpu_add_return_4(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# ifndef raw_cpu_add_return_4
+# define raw_cpu_add_return_4(pcp, val)	raw_cpu_generic_add_return(pcp, val)
 # endif
-# ifndef __this_cpu_add_return_8
-# define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# ifndef raw_cpu_add_return_8
+# define raw_cpu_add_return_8(pcp, val)	raw_cpu_generic_add_return(pcp, val)
 # endif
-# define __this_cpu_add_return(pcp, val) \
-	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
+# define raw_cpu_add_return(pcp, val) \
+	__pcpu_size_call_return2(raw_add_return_, pcp, val)
 #endif
 
-#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
-#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
-#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
+#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define raw_cpu_inc_return(pcp)	raw_cpu_add_return(pcp, 1)
+#define raw_cpu_dec_return(pcp)	raw_cpu_add_return(pcp, -1)
 
-#define __this_cpu_generic_xchg(pcp, nval) \
+#define raw_cpu_generic_xchg(pcp, nval) \
 ({	typeof(pcp) ret__; \
-	ret__ = __this_cpu_read(pcp); \
-	__this_cpu_write(pcp, nval); \
+	ret__ = raw_cpu_read(pcp); \
+	raw_cpu_write(pcp, nval); \
 	ret__; \
 })
 
-#ifndef __this_cpu_xchg
-# ifndef __this_cpu_xchg_1
-# define __this_cpu_xchg_1(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+#ifndef raw_cpu_xchg
+# ifndef raw_cpu_xchg_1
+# define raw_cpu_xchg_1(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
 # endif
-# ifndef __this_cpu_xchg_2
-# define __this_cpu_xchg_2(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# ifndef raw_cpu_xchg_2
+# define raw_cpu_xchg_2(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
 # endif
-# ifndef __this_cpu_xchg_4
-# define __this_cpu_xchg_4(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# ifndef raw_cpu_xchg_4
+# define raw_cpu_xchg_4(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
 # endif
-# ifndef __this_cpu_xchg_8
-# define __this_cpu_xchg_8(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# ifndef raw_cpu_xchg_8
+# define raw_cpu_xchg_8(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
 # endif
-# define __this_cpu_xchg(pcp, nval) \
-	__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
+# define raw_cpu_xchg(pcp, nval) \
+	__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)
 #endif
 
-#define __this_cpu_generic_cmpxchg(pcp, oval, nval) \
+#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
 ({ \
 	typeof(pcp) ret__; \
-	ret__ = __this_cpu_read(pcp); \
+	ret__ = raw_cpu_read(pcp); \
 	if (ret__ == (oval)) \
-		__this_cpu_write(pcp, nval); \
+		raw_cpu_write(pcp, nval); \
 	ret__; \
 })
 
-#ifndef __this_cpu_cmpxchg
-# ifndef __this_cpu_cmpxchg_1
-# define __this_cpu_cmpxchg_1(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+#ifndef raw_cpu_cmpxchg
+# ifndef raw_cpu_cmpxchg_1
+# define raw_cpu_cmpxchg_1(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
 # endif
-# ifndef __this_cpu_cmpxchg_2
-# define __this_cpu_cmpxchg_2(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# ifndef raw_cpu_cmpxchg_2
+# define raw_cpu_cmpxchg_2(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
 # endif
-# ifndef __this_cpu_cmpxchg_4
-# define __this_cpu_cmpxchg_4(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# ifndef raw_cpu_cmpxchg_4
+# define raw_cpu_cmpxchg_4(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
 # endif
-# ifndef __this_cpu_cmpxchg_8
-# define __this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# ifndef raw_cpu_cmpxchg_8
+# define raw_cpu_cmpxchg_8(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
 # endif
-# define __this_cpu_cmpxchg(pcp, oval, nval) \
-	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
+# define raw_cpu_cmpxchg(pcp, oval, nval) \
+	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
 #endif
 
-#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
 ({ \
 	int __ret = 0; \
-	if (__this_cpu_read(pcp1) == (oval1) && \
-			__this_cpu_read(pcp2) == (oval2)) { \
-		__this_cpu_write(pcp1, (nval1)); \
-		__this_cpu_write(pcp2, (nval2)); \
+	if (raw_cpu_read(pcp1) == (oval1) && \
+			raw_cpu_read(pcp2) == (oval2)) { \
+		raw_cpu_write(pcp1, (nval1)); \
+		raw_cpu_write(pcp2, (nval2)); \
 		__ret = 1; \
 	} \
 	(__ret); \
 })
 
-#ifndef __this_cpu_cmpxchg_double
-# ifndef __this_cpu_cmpxchg_double_1
-# define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#ifndef raw_cpu_cmpxchg_double
+# ifndef raw_cpu_cmpxchg_double_1
+# define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
 # endif
-# ifndef __this_cpu_cmpxchg_double_2
-# define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# ifndef raw_cpu_cmpxchg_double_2
+# define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
 # endif
-# ifndef __this_cpu_cmpxchg_double_4
-# define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# ifndef raw_cpu_cmpxchg_double_4
+# define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
 # endif
-# ifndef __this_cpu_cmpxchg_double_8
-# define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# ifndef raw_cpu_cmpxchg_double_8
+# define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
 # endif
+# define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
+/*
+ * Generic percpu operations for context that are safe from preemption/interrupts.
+ */
+#ifndef __this_cpu_read
+# define __this_cpu_read(pcp) \
+	(__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp)))
+#endif
+
+#ifndef __this_cpu_write
+# define __this_cpu_write(pcp, val) \
+do { __this_cpu_preempt_check("write"); \
+	__pcpu_size_call(raw_cpu_write_, (pcp), (val)); \
+} while (0)
+#endif
+
+#ifndef __this_cpu_add
+# define __this_cpu_add(pcp, val) \
+do { __this_cpu_preempt_check("add"); \
+	__pcpu_size_call(raw_cpu_add_, (pcp), (val)); \
+} while (0)
+#endif
+
+#ifndef __this_cpu_sub
+# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(typeof(pcp))(val))
+#endif
+
+#ifndef __this_cpu_inc
+# define __this_cpu_inc(pcp)	__this_cpu_add((pcp), 1)
+#endif
+
+#ifndef __this_cpu_dec
+# define __this_cpu_dec(pcp)	__this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef __this_cpu_and
+# define __this_cpu_and(pcp, val) \
+do { __this_cpu_preempt_check("and"); \
+	__pcpu_size_call(raw_cpu_and_, (pcp), (val)); \
+} while (0)
+
+#endif
+
+#ifndef __this_cpu_or
+# define __this_cpu_or(pcp, val) \
+do { __this_cpu_preempt_check("or"); \
+	__pcpu_size_call(raw_cpu_or_, (pcp), (val)); \
+} while (0)
+#endif
+
+#ifndef __this_cpu_add_return
+# define __this_cpu_add_return(pcp, val) \
+	(__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val))
+#endif
+
+#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
+
+#ifndef __this_cpu_xchg
+# define __this_cpu_xchg(pcp, nval) \
+	(__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval))
+#endif
+
+#ifndef __this_cpu_cmpxchg
+# define __this_cpu_cmpxchg(pcp, oval, nval) \
+	(__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval))
+#endif
+
+#ifndef __this_cpu_cmpxchg_double
 # define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+	(__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)))
 #endif
 
 #endif /* __LINUX_PERCPU_H */
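The split above yields two families with identical generic fallbacks: raw_cpu_*() never checks anything, while __this_cpu_*() calls __this_cpu_preempt_check() first, so CONFIG_DEBUG_PREEMPT can flag a non-atomic read-modify-write done from preemptible context. A hedged sketch of the distinction (the per-cpu counter is made up):

	/* Illustrative per-cpu statistic; the name is made up. */
	DEFINE_PER_CPU(unsigned long, nr_events);

	static void count_event(void)
	{
		/*
		 * Checked: with CONFIG_DEBUG_PREEMPT this can warn if the
		 * task may be rescheduled mid-update.
		 */
		__this_cpu_inc(nr_events);
	}

	static void count_event_nocheck(void)
	{
		/* Unchecked: caller accepts a possibly racy update. */
		raw_cpu_inc(nr_events);
	}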
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 201a69749659..56b7bc32db4f 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -104,15 +104,13 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
  * units, e.g. numbers, bytes, Kbytes, etc
  *
  * returns 0 on success and <0 if the counter->usage will exceed the
- * counter->limit _locked call expects the counter->lock to be taken
+ * counter->limit
  *
  * charge_nofail works the same, except that it charges the resource
  * counter unconditionally, and returns < 0 if the after the current
  * charge we are over limit.
  */
 
-int __must_check res_counter_charge_locked(struct res_counter *counter,
-		unsigned long val, bool force);
 int __must_check res_counter_charge(struct res_counter *counter,
 		unsigned long val, struct res_counter **limit_fail_at);
 int res_counter_charge_nofail(struct res_counter *counter,
@@ -125,12 +123,10 @@ int res_counter_charge_nofail(struct res_counter *counter,
  * @val: the amount of the resource
  *
  * these calls check for usage underflow and show a warning on the console
- * _locked call expects the counter->lock to be taken
  *
  * returns the total charges still present in @counter.
  */
 
-u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
 u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);
 
 u64 res_counter_uncharge_until(struct res_counter *counter,
diff --git a/include/linux/rio.h b/include/linux/rio.h
index b71d5738e683..6bda06f21930 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -83,7 +83,7 @@
 #define RIO_CTAG_UDEVID	0x0001ffff /* Unique device identifier */
 
 extern struct bus_type rio_bus_type;
-extern struct device rio_bus;
+extern struct class rio_mport_class;
 
 struct rio_mport;
 struct rio_dev;
@@ -201,6 +201,7 @@ struct rio_dev {
 #define rio_dev_f(n) list_entry(n, struct rio_dev, net_list)
 #define to_rio_dev(n) container_of(n, struct rio_dev, dev)
 #define sw_to_rio_dev(n) container_of(n, struct rio_dev, rswitch[0])
+#define to_rio_mport(n) container_of(n, struct rio_mport, dev)
 
 /**
  * struct rio_msg - RIO message event
@@ -248,6 +249,7 @@ enum rio_phy_type {
  * @phy_type: RapidIO phy type
  * @phys_efptr: RIO port extended features pointer
  * @name: Port name string
+ * @dev: device structure associated with an mport
  * @priv: Master port private data
  * @dma: DMA device associated with mport
  * @nscan: RapidIO network enumeration/discovery operations
@@ -272,6 +274,7 @@ struct rio_mport {
 	enum rio_phy_type phy_type;	/* RapidIO phy type */
 	u32 phys_efptr;
 	unsigned char name[RIO_MAX_MPORT_NAME];
+	struct device dev;
 	void *priv;		/* Master port private data */
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 	struct dma_device	dma;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7cb07fd26680..075b3056c0c0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -132,6 +132,10 @@ struct perf_event_context;
 struct blk_plug;
 struct filename;
 
+#define VMACACHE_BITS 2
+#define VMACACHE_SIZE (1U << VMACACHE_BITS)
+#define VMACACHE_MASK (VMACACHE_SIZE - 1)
+
 /*
  * List of flags we want to share for kernel threads,
  * if only because they are not used by them anyway.
@@ -206,8 +210,9 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
 #define __TASK_STOPPED		4
 #define __TASK_TRACED		8
 /* in tsk->exit_state */
-#define EXIT_ZOMBIE		16
-#define EXIT_DEAD		32
+#define EXIT_DEAD		16
+#define EXIT_ZOMBIE		32
+#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
 /* in tsk->state again */
 #define TASK_DEAD		64
 #define TASK_WAKEKILL		128
@@ -1235,6 +1240,9 @@ struct task_struct {
 #ifdef CONFIG_COMPAT_BRK
 	unsigned brk_randomized:1;
 #endif
+	/* per-thread vma caching */
+	u32 vmacache_seqnum;
+	struct vm_area_struct *vmacache[VMACACHE_SIZE];
 #if defined(SPLIT_RSS_COUNTING)
 	struct task_rss_stat	rss_stat;
 #endif
@@ -1844,7 +1852,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
 #define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
 #define PF_MCE_EARLY	0x08000000	/* Early kill for mce process policy */
-#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
 #define PF_SUSPEND_TASK 0x80000000	/* this thread called freeze_processes and should not be frozen */
@@ -2351,7 +2358,7 @@ extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, i
2351struct task_struct *fork_idle(int); 2358struct task_struct *fork_idle(int);
2352extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 2359extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2353 2360
2354extern void set_task_comm(struct task_struct *tsk, char *from); 2361extern void set_task_comm(struct task_struct *tsk, const char *from);
2355extern char *get_task_comm(char *to, struct task_struct *tsk); 2362extern char *get_task_comm(char *to, struct task_struct *tsk);
2356 2363
2357#ifdef CONFIG_SMP 2364#ifdef CONFIG_SMP
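
With the source string const-qualified, callers can pass string literals directly. An illustrative caller (the thread name is made up):

    #include <linux/sched.h>

    static void example_rename_self(void)
    {
            /* A literal now matches the const char * parameter
             * without a cast or an intermediate buffer. */
            set_task_comm(current, "ex-worker");
    }
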
diff --git a/include/linux/slab.h b/include/linux/slab.h
index b5b2df60299e..3dd389aa91c7 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -115,9 +115,9 @@ int slab_is_available(void);
115struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, 115struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
116 unsigned long, 116 unsigned long,
117 void (*)(void *)); 117 void (*)(void *));
118struct kmem_cache * 118#ifdef CONFIG_MEMCG_KMEM
119kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t, 119void kmem_cache_create_memcg(struct mem_cgroup *, struct kmem_cache *);
120 unsigned long, void (*)(void *), struct kmem_cache *); 120#endif
121void kmem_cache_destroy(struct kmem_cache *); 121void kmem_cache_destroy(struct kmem_cache *);
122int kmem_cache_shrink(struct kmem_cache *); 122int kmem_cache_shrink(struct kmem_cache *);
123void kmem_cache_free(struct kmem_cache *, void *); 123void kmem_cache_free(struct kmem_cache *, void *);
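
The memcg variant no longer re-states size, alignment, flags and constructor; it derives them from the root cache it is handed. A sketch of the new call shape, with both arguments assumed to come from the memcg charging path:

    #ifdef CONFIG_MEMCG_KMEM
    /* Sketch: create the per-memcg child of an existing root
     * cache; all creation parameters are inherited from it. */
    static void example_create_child(struct mem_cgroup *memcg,
                                     struct kmem_cache *root_cache)
    {
            kmem_cache_create_memcg(memcg, root_cache);
    }
    #endif
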
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f56bfa9e4526..f2f7398848cf 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -87,6 +87,9 @@ struct kmem_cache {
87#ifdef CONFIG_MEMCG_KMEM 87#ifdef CONFIG_MEMCG_KMEM
88 struct memcg_cache_params *memcg_params; 88 struct memcg_cache_params *memcg_params;
89 int max_attr_size; /* for propagation, maximum size of a stored attr */ 89 int max_attr_size; /* for propagation, maximum size of a stored attr */
90#ifdef CONFIG_SYSFS
91 struct kset *memcg_kset;
92#endif
90#endif 93#endif
91 94
92#ifdef CONFIG_NUMA 95#ifdef CONFIG_NUMA
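
A kset is the natural container for the per-memcg children's kobjects under the root cache's sysfs directory. A sketch of how it is plausibly set up; the "cgroup" directory name is an assumption here:

    #ifdef CONFIG_MEMCG_KMEM
    #ifdef CONFIG_SYSFS
    /* Assumption: children are grouped in a "cgroup" subdirectory
     * of the root cache's /sys/kernel/slab/<cache>/ entry. */
    static int example_memcg_kset_init(struct kmem_cache *s)
    {
            s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
            return s->memcg_kset ? 0 : -ENOMEM;
    }
    #endif
    #endif
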
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 12ae6ce997d6..7062330a1329 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -188,7 +188,7 @@ DECLARE_PER_CPU(int, numa_node);
188/* Returns the number of the current Node. */ 188/* Returns the number of the current Node. */
189static inline int numa_node_id(void) 189static inline int numa_node_id(void)
190{ 190{
191 return __this_cpu_read(numa_node); 191 return raw_cpu_read(numa_node);
192} 192}
193#endif 193#endif
194 194
@@ -245,7 +245,7 @@ static inline void set_numa_mem(int node)
245/* Returns the number of the nearest Node with memory */ 245/* Returns the number of the nearest Node with memory */
246static inline int numa_mem_id(void) 246static inline int numa_mem_id(void)
247{ 247{
248 return __this_cpu_read(_numa_mem_); 248 return raw_cpu_read(_numa_mem_);
249} 249}
250#endif 250#endif
251 251
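
raw_cpu_read() is the unchecked sibling of __this_cpu_read(): it skips the preemption diagnostics, which is fine here because a node id that goes stale across a migration is still a valid hint. Callers are untouched, e.g. (illustrative):

    #include <linux/gfp.h>
    #include <linux/topology.h>

    /* May run preemptible; a momentarily stale node id only
     * costs allocation locality, never correctness. */
    static struct page *example_local_page(gfp_t gfp)
    {
            return alloc_pages_node(numa_mem_id(), gfp, 0);
    }
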
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
new file mode 100644
index 000000000000..c3fa0fd43949
--- /dev/null
+++ b/include/linux/vmacache.h
@@ -0,0 +1,38 @@
1#ifndef __LINUX_VMACACHE_H
2#define __LINUX_VMACACHE_H
3
4#include <linux/sched.h>
5#include <linux/mm.h>
6
7/*
8 * Hash based on the page number. Provides a good hit rate for
9 * workloads with good locality and those with random accesses as well.
10 */
11#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
12
13static inline void vmacache_flush(struct task_struct *tsk)
14{
15 memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
16}
17
18extern void vmacache_flush_all(struct mm_struct *mm);
19extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
20extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
21 unsigned long addr);
22
23#ifndef CONFIG_MMU
24extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
25 unsigned long start,
26 unsigned long end);
27#endif
28
29static inline void vmacache_invalidate(struct mm_struct *mm)
30{
31 mm->vmacache_seqnum++;
32
33 /* deal with overflows */
34 if (unlikely(mm->vmacache_seqnum == 0))
35 vmacache_flush_all(mm);
36}
37
38#endif /* __LINUX_VMACACHE_H */
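
The intended pattern is cache-first lookup with repopulation on a miss; a hedged sketch of a find_vma()-style path, with the rbtree walk elided behind a hypothetical helper:

    #include <linux/vmacache.h>

    static struct vm_area_struct *example_find_vma(struct mm_struct *mm,
                                                   unsigned long addr)
    {
            struct vm_area_struct *vma = vmacache_find(mm, addr);

            if (!vma) {
                    vma = example_rb_walk(mm, addr);  /* hypothetical */
                    if (vma)
                            vmacache_update(addr, vma);
            }
            return vma;
    }

Whenever the mm's VMA set changes, vmacache_invalidate() bumps mm->vmacache_seqnum so every thread's per-task cache goes cold at once, falling back to a full flush only on the rare seqnum overflow.
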
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index ea4476157e00..45c9cd1daf7a 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -27,9 +27,13 @@ struct vm_event_state {
27 27
28DECLARE_PER_CPU(struct vm_event_state, vm_event_states); 28DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
29 29
30/*
31 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
32 * local_irq_disable overhead.
33 */
30static inline void __count_vm_event(enum vm_event_item item) 34static inline void __count_vm_event(enum vm_event_item item)
31{ 35{
32 __this_cpu_inc(vm_event_states.event[item]); 36 raw_cpu_inc(vm_event_states.event[item]);
33} 37}
34 38
35static inline void count_vm_event(enum vm_event_item item) 39static inline void count_vm_event(enum vm_event_item item)
@@ -39,7 +43,7 @@ static inline void count_vm_event(enum vm_event_item item)
39 43
40static inline void __count_vm_events(enum vm_event_item item, long delta) 44static inline void __count_vm_events(enum vm_event_item item, long delta)
41{ 45{
42 __this_cpu_add(vm_event_states.event[item], delta); 46 raw_cpu_add(vm_event_states.event[item], delta);
43} 47}
44 48
45static inline void count_vm_events(enum vm_event_item item, long delta) 49static inline void count_vm_events(enum vm_event_item item, long delta)
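
Since the counters are explicitly allowed to be racy, call sites need no protection at all; a typical use (PGFAULT is one of the real vm_event_item values):

    #include <linux/vmstat.h>

    static void example_account_fault(void)
    {
            /* A rare lost increment is acceptable for statistics,
             * so no IRQ-off or preemption-off section is needed. */
            count_vm_event(PGFAULT);
    }
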
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 021b8a319b9e..5777c13849ba 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -178,7 +178,7 @@ int write_cache_pages(struct address_space *mapping,
178 struct writeback_control *wbc, writepage_t writepage, 178 struct writeback_control *wbc, writepage_t writepage,
179 void *data); 179 void *data);
180int do_writepages(struct address_space *mapping, struct writeback_control *wbc); 180int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
181void set_page_dirty_balance(struct page *page, int page_mkwrite); 181void set_page_dirty_balance(struct page *page);
182void writeback_set_ratelimit(void); 182void writeback_set_ratelimit(void);
183void tag_pages_for_writeback(struct address_space *mapping, 183void tag_pages_for_writeback(struct address_space *mapping,
184 pgoff_t start, pgoff_t end); 184 pgoff_t start, pgoff_t end);
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
index 102a646e1996..dee3bb1d5a6b 100644
--- a/include/trace/events/task.h
+++ b/include/trace/events/task.h
@@ -32,7 +32,7 @@ TRACE_EVENT(task_newtask,
32 32
33TRACE_EVENT(task_rename, 33TRACE_EVENT(task_rename,
34 34
35 TP_PROTO(struct task_struct *task, char *comm), 35 TP_PROTO(struct task_struct *task, const char *comm),
36 36
37 TP_ARGS(task, comm), 37 TP_ARGS(task, comm),
38 38
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index 4164529a94f9..ddc3b36f1046 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -50,7 +50,7 @@
50 50
51#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump, 51#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump,
52 overrides the coredump filter bits */ 52 overrides the coredump filter bits */
53#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */ 53#define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */
54 54
55/* compatibility flags */ 55/* compatibility flags */
56#define MAP_FILE 0 56#define MAP_FILE 0
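
From userspace the pair carves a mapping out of core dumps and later opts it back in. A minimal illustration, with error handling kept to the essentials:

    #include <stddef.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 16 * 4096;
            void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (buf == MAP_FAILED)
                    return 1;
            /* Keep secrets out of any core dump... */
            madvise(buf, len, MADV_DONTDUMP);
            /* ...and restore normal dumping when done. */
            madvise(buf, len, MADV_DODUMP);
            return 0;
    }
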
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 289760f424aa..58afc04c107e 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -149,4 +149,7 @@
149 149
150#define PR_GET_TID_ADDRESS 40 150#define PR_GET_TID_ADDRESS 40
151 151
152#define PR_SET_THP_DISABLE 41
153#define PR_GET_THP_DISABLE 42
154
152#endif /* _LINUX_PRCTL_H */ 155#endif /* _LINUX_PRCTL_H */
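
The new pair gives a process a per-task THP opt-out, settable without touching the system-wide sysfs knob. A small userspace illustration; as usual for prctl, the unused arguments must be zero:

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
            /* arg2 == 1 disables THP for this process, 0 re-enables. */
            if (prctl(PR_SET_THP_DISABLE, 1, 0, 0, 0))
                    perror("PR_SET_THP_DISABLE");

            /* Returns the current setting (0 or 1) directly. */
            printf("THP disabled: %d\n",
                   prctl(PR_GET_THP_DISABLE, 0, 0, 0, 0));
            return 0;
    }
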