aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/amba/mmci.h18
-rw-r--r--include/linux/amba/pl022.h8
-rw-r--r--include/linux/async_tx.h129
-rw-r--r--include/linux/binfmts.h2
-rw-r--r--include/linux/cgroup.h53
-rw-r--r--include/linux/configfs.h4
-rw-r--r--include/linux/dca.h11
-rw-r--r--include/linux/debugfs.h2
-rw-r--r--include/linux/dmaengine.h179
-rw-r--r--include/linux/fb.h1
-rw-r--r--include/linux/fs.h11
-rw-r--r--include/linux/ftrace.h4
-rw-r--r--include/linux/futex.h10
-rw-r--r--include/linux/hugetlb.h18
-rw-r--r--include/linux/memcontrol.h10
-rw-r--r--include/linux/mm.h24
-rw-r--r--include/linux/mm_types.h7
-rw-r--r--include/linux/mmzone.h13
-rw-r--r--include/linux/module.h17
-rw-r--r--include/linux/page-flags.h17
-rw-r--r--include/linux/page_cgroup.h13
-rw-r--r--include/linux/pci_ids.h10
-rw-r--r--include/linux/prctl.h2
-rw-r--r--include/linux/relay.h2
-rw-r--r--include/linux/res_counter.h64
-rw-r--r--include/linux/rmap.h21
-rw-r--r--include/linux/sched.h17
-rw-r--r--include/linux/security.h2
-rw-r--r--include/linux/seq_file.h38
-rw-r--r--include/linux/signal.h2
-rw-r--r--include/linux/swap.h41
-rw-r--r--include/linux/swapops.h38
-rw-r--r--include/linux/sysctl.h19
-rw-r--r--include/linux/time.h28
-rw-r--r--include/linux/tracehook.h34
-rw-r--r--include/linux/tracepoint.h2
-rw-r--r--include/linux/unaligned/be_byteshift.h2
-rw-r--r--include/linux/unaligned/le_byteshift.h2
-rw-r--r--include/linux/vgaarb.h3
-rw-r--r--include/linux/writeback.h11
40 files changed, 736 insertions, 153 deletions
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
new file mode 100644
index 000000000000..6b4241748dda
--- /dev/null
+++ b/include/linux/amba/mmci.h
@@ -0,0 +1,18 @@
1/*
2 * include/linux/amba/mmci.h
3 */
4#ifndef AMBA_MMCI_H
5#define AMBA_MMCI_H
6
7#include <linux/mmc/host.h>
8
9struct mmci_platform_data {
10 unsigned int ocr_mask; /* available voltages */
11 u32 (*translate_vdd)(struct device *, unsigned int);
12 unsigned int (*status)(struct device *);
13 int gpio_wp;
14 int gpio_cd;
15 unsigned long capabilities;
16};
17
18#endif
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index dcad0ffd1755..e4836c6b3dd7 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -136,12 +136,12 @@ enum ssp_tx_level_trig {
136 136
137/** 137/**
138 * enum SPI Clock Phase - clock phase (Motorola SPI interface only) 138 * enum SPI Clock Phase - clock phase (Motorola SPI interface only)
139 * @SSP_CLK_RISING_EDGE: Receive data on rising edge 139 * @SSP_CLK_FIRST_EDGE: Receive data on first edge transition (actual direction depends on polarity)
140 * @SSP_CLK_FALLING_EDGE: Receive data on falling edge 140 * @SSP_CLK_SECOND_EDGE: Receive data on second edge transition (actual direction depends on polarity)
141 */ 141 */
142enum ssp_spi_clk_phase { 142enum ssp_spi_clk_phase {
143 SSP_CLK_RISING_EDGE, 143 SSP_CLK_FIRST_EDGE,
144 SSP_CLK_FALLING_EDGE 144 SSP_CLK_SECOND_EDGE
145}; 145};
146 146
147/** 147/**
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 5fc2ef8d97fa..a1c486a88e88 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -58,25 +58,60 @@ struct dma_chan_ref {
58 * array. 58 * array.
59 * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a 59 * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
60 * dependency chain 60 * dependency chain
61 * @ASYNC_TX_DEP_ACK: ack the dependency descriptor. Useful for chaining. 61 * @ASYNC_TX_FENCE: specify that the next operation in the dependency
62 * chain uses this operation's result as an input
62 */ 63 */
63enum async_tx_flags { 64enum async_tx_flags {
64 ASYNC_TX_XOR_ZERO_DST = (1 << 0), 65 ASYNC_TX_XOR_ZERO_DST = (1 << 0),
65 ASYNC_TX_XOR_DROP_DST = (1 << 1), 66 ASYNC_TX_XOR_DROP_DST = (1 << 1),
66 ASYNC_TX_ACK = (1 << 3), 67 ASYNC_TX_ACK = (1 << 2),
67 ASYNC_TX_DEP_ACK = (1 << 4), 68 ASYNC_TX_FENCE = (1 << 3),
69};
70
71/**
72 * struct async_submit_ctl - async_tx submission/completion modifiers
73 * @flags: submission modifiers
74 * @depend_tx: parent dependency of the current operation being submitted
75 * @cb_fn: callback routine to run at operation completion
76 * @cb_param: parameter for the callback routine
77 * @scribble: caller provided space for dma/page address conversions
78 */
79struct async_submit_ctl {
80 enum async_tx_flags flags;
81 struct dma_async_tx_descriptor *depend_tx;
82 dma_async_tx_callback cb_fn;
83 void *cb_param;
84 void *scribble;
68}; 85};
69 86
70#ifdef CONFIG_DMA_ENGINE 87#ifdef CONFIG_DMA_ENGINE
71#define async_tx_issue_pending_all dma_issue_pending_all 88#define async_tx_issue_pending_all dma_issue_pending_all
89
90/**
91 * async_tx_issue_pending - send pending descriptor to the hardware channel
92 * @tx: descriptor handle to retrieve hardware context
93 *
94 * Note: any dependent operations will have already been issued by
95 * async_tx_channel_switch, or (in the case of no channel switch) will
96 * be already pending on this channel.
97 */
98static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
99{
100 if (likely(tx)) {
101 struct dma_chan *chan = tx->chan;
102 struct dma_device *dma = chan->device;
103
104 dma->device_issue_pending(chan);
105 }
106}
72#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL 107#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
73#include <asm/async_tx.h> 108#include <asm/async_tx.h>
74#else 109#else
75#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \ 110#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
76 __async_tx_find_channel(dep, type) 111 __async_tx_find_channel(dep, type)
77struct dma_chan * 112struct dma_chan *
78__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, 113__async_tx_find_channel(struct async_submit_ctl *submit,
79 enum dma_transaction_type tx_type); 114 enum dma_transaction_type tx_type);
80#endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */ 115#endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
81#else 116#else
82static inline void async_tx_issue_pending_all(void) 117static inline void async_tx_issue_pending_all(void)
@@ -84,10 +119,16 @@ static inline void async_tx_issue_pending_all(void)
84 do { } while (0); 119 do { } while (0);
85} 120}
86 121
122static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
123{
124 do { } while (0);
125}
126
87static inline struct dma_chan * 127static inline struct dma_chan *
88async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, 128async_tx_find_channel(struct async_submit_ctl *submit,
89 enum dma_transaction_type tx_type, struct page **dst, int dst_count, 129 enum dma_transaction_type tx_type, struct page **dst,
90 struct page **src, int src_count, size_t len) 130 int dst_count, struct page **src, int src_count,
131 size_t len)
91{ 132{
92 return NULL; 133 return NULL;
93} 134}
@@ -99,46 +140,70 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
99 * @cb_fn_param: parameter to pass to the callback routine 140 * @cb_fn_param: parameter to pass to the callback routine
100 */ 141 */
101static inline void 142static inline void
102async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param) 143async_tx_sync_epilog(struct async_submit_ctl *submit)
103{ 144{
104 if (cb_fn) 145 if (submit->cb_fn)
105 cb_fn(cb_fn_param); 146 submit->cb_fn(submit->cb_param);
106} 147}
107 148
108void 149typedef union {
109async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, 150 unsigned long addr;
110 enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, 151 struct page *page;
111 dma_async_tx_callback cb_fn, void *cb_fn_param); 152 dma_addr_t dma;
153} addr_conv_t;
154
155static inline void
156init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
157 struct dma_async_tx_descriptor *tx,
158 dma_async_tx_callback cb_fn, void *cb_param,
159 addr_conv_t *scribble)
160{
161 args->flags = flags;
162 args->depend_tx = tx;
163 args->cb_fn = cb_fn;
164 args->cb_param = cb_param;
165 args->scribble = scribble;
166}
167
168void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
169 struct async_submit_ctl *submit);
112 170
113struct dma_async_tx_descriptor * 171struct dma_async_tx_descriptor *
114async_xor(struct page *dest, struct page **src_list, unsigned int offset, 172async_xor(struct page *dest, struct page **src_list, unsigned int offset,
115 int src_cnt, size_t len, enum async_tx_flags flags, 173 int src_cnt, size_t len, struct async_submit_ctl *submit);
116 struct dma_async_tx_descriptor *depend_tx,
117 dma_async_tx_callback cb_fn, void *cb_fn_param);
118 174
119struct dma_async_tx_descriptor * 175struct dma_async_tx_descriptor *
120async_xor_zero_sum(struct page *dest, struct page **src_list, 176async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
121 unsigned int offset, int src_cnt, size_t len, 177 int src_cnt, size_t len, enum sum_check_flags *result,
122 u32 *result, enum async_tx_flags flags, 178 struct async_submit_ctl *submit);
123 struct dma_async_tx_descriptor *depend_tx,
124 dma_async_tx_callback cb_fn, void *cb_fn_param);
125 179
126struct dma_async_tx_descriptor * 180struct dma_async_tx_descriptor *
127async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, 181async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
128 unsigned int src_offset, size_t len, enum async_tx_flags flags, 182 unsigned int src_offset, size_t len,
129 struct dma_async_tx_descriptor *depend_tx, 183 struct async_submit_ctl *submit);
130 dma_async_tx_callback cb_fn, void *cb_fn_param);
131 184
132struct dma_async_tx_descriptor * 185struct dma_async_tx_descriptor *
133async_memset(struct page *dest, int val, unsigned int offset, 186async_memset(struct page *dest, int val, unsigned int offset,
134 size_t len, enum async_tx_flags flags, 187 size_t len, struct async_submit_ctl *submit);
135 struct dma_async_tx_descriptor *depend_tx, 188
136 dma_async_tx_callback cb_fn, void *cb_fn_param); 189struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
190
191struct dma_async_tx_descriptor *
192async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
193 size_t len, struct async_submit_ctl *submit);
194
195struct dma_async_tx_descriptor *
196async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
197 size_t len, enum sum_check_flags *pqres, struct page *spare,
198 struct async_submit_ctl *submit);
199
200struct dma_async_tx_descriptor *
201async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
202 struct page **ptrs, struct async_submit_ctl *submit);
137 203
138struct dma_async_tx_descriptor * 204struct dma_async_tx_descriptor *
139async_trigger_callback(enum async_tx_flags flags, 205async_raid6_datap_recov(int src_num, size_t bytes, int faila,
140 struct dma_async_tx_descriptor *depend_tx, 206 struct page **ptrs, struct async_submit_ctl *submit);
141 dma_async_tx_callback cb_fn, void *cb_fn_param);
142 207
143void async_tx_quiesce(struct dma_async_tx_descriptor **tx); 208void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
144#endif /* _ASYNC_TX_H_ */ 209#endif /* _ASYNC_TX_H_ */
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 2046b5b8af48..aece486ac734 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -120,7 +120,7 @@ extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
120extern int prepare_bprm_creds(struct linux_binprm *bprm); 120extern int prepare_bprm_creds(struct linux_binprm *bprm);
121extern void install_exec_creds(struct linux_binprm *bprm); 121extern void install_exec_creds(struct linux_binprm *bprm);
122extern void do_coredump(long signr, int exit_code, struct pt_regs *regs); 122extern void do_coredump(long signr, int exit_code, struct pt_regs *regs);
123extern int set_binfmt(struct linux_binfmt *new); 123extern void set_binfmt(struct linux_binfmt *new);
124extern void free_bprm(struct linux_binprm *); 124extern void free_bprm(struct linux_binprm *);
125 125
126#endif /* __KERNEL__ */ 126#endif /* __KERNEL__ */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 90bba9e62286..b62bb9294d0c 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -141,6 +141,38 @@ enum {
141 CGRP_WAIT_ON_RMDIR, 141 CGRP_WAIT_ON_RMDIR,
142}; 142};
143 143
144/* which pidlist file are we talking about? */
145enum cgroup_filetype {
146 CGROUP_FILE_PROCS,
147 CGROUP_FILE_TASKS,
148};
149
150/*
151 * A pidlist is a list of pids that virtually represents the contents of one
152 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
153 * a pair (one each for procs, tasks) for each pid namespace that's relevant
154 * to the cgroup.
155 */
156struct cgroup_pidlist {
157 /*
158 * used to find which pidlist is wanted. doesn't change as long as
159 * this particular list stays in the list.
160 */
161 struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
162 /* array of xids */
163 pid_t *list;
164 /* how many elements the above list has */
165 int length;
166 /* how many files are using the current array */
167 int use_count;
168 /* each of these stored in a list by its cgroup */
169 struct list_head links;
170 /* pointer to the cgroup we belong to, for list removal purposes */
171 struct cgroup *owner;
172 /* protects the other fields */
173 struct rw_semaphore mutex;
174};
175
144struct cgroup { 176struct cgroup {
145 unsigned long flags; /* "unsigned long" so bitops work */ 177 unsigned long flags; /* "unsigned long" so bitops work */
146 178
@@ -179,11 +211,12 @@ struct cgroup {
179 */ 211 */
180 struct list_head release_list; 212 struct list_head release_list;
181 213
182 /* pids_mutex protects pids_list and cached pid arrays. */ 214 /*
183 struct rw_semaphore pids_mutex; 215 * list of pidlists, up to two for each namespace (one for procs, one
184 216 * for tasks); created on demand.
185 /* Linked list of struct cgroup_pids */ 217 */
186 struct list_head pids_list; 218 struct list_head pidlists;
219 struct mutex pidlist_mutex;
187 220
188 /* For RCU-protected deletion */ 221 /* For RCU-protected deletion */
189 struct rcu_head rcu_head; 222 struct rcu_head rcu_head;
@@ -227,6 +260,9 @@ struct css_set {
227 * during subsystem registration (at boot time). 260 * during subsystem registration (at boot time).
228 */ 261 */
229 struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; 262 struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
263
264 /* For RCU-protected deletion */
265 struct rcu_head rcu_head;
230}; 266};
231 267
232/* 268/*
@@ -389,10 +425,11 @@ struct cgroup_subsys {
389 struct cgroup *cgrp); 425 struct cgroup *cgrp);
390 int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); 426 int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
391 void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); 427 void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
392 int (*can_attach)(struct cgroup_subsys *ss, 428 int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
393 struct cgroup *cgrp, struct task_struct *tsk); 429 struct task_struct *tsk, bool threadgroup);
394 void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, 430 void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
395 struct cgroup *old_cgrp, struct task_struct *tsk); 431 struct cgroup *old_cgrp, struct task_struct *tsk,
432 bool threadgroup);
396 void (*fork)(struct cgroup_subsys *ss, struct task_struct *task); 433 void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
397 void (*exit)(struct cgroup_subsys *ss, struct task_struct *task); 434 void (*exit)(struct cgroup_subsys *ss, struct task_struct *task);
398 int (*populate)(struct cgroup_subsys *ss, 435 int (*populate)(struct cgroup_subsys *ss,
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 7f627775c947..ddb7a97c78c2 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -27,8 +27,8 @@
27 * 27 *
28 * configfs Copyright (C) 2005 Oracle. All rights reserved. 28 * configfs Copyright (C) 2005 Oracle. All rights reserved.
29 * 29 *
30 * Please read Documentation/filesystems/configfs.txt before using the 30 * Please read Documentation/filesystems/configfs/configfs.txt before using
31 * configfs interface, ESPECIALLY the parts about reference counts and 31 * the configfs interface, ESPECIALLY the parts about reference counts and
32 * item destructors. 32 * item destructors.
33 */ 33 */
34 34
diff --git a/include/linux/dca.h b/include/linux/dca.h
index 9c20c7e87d0a..d27a7a05718d 100644
--- a/include/linux/dca.h
+++ b/include/linux/dca.h
@@ -20,6 +20,9 @@
20 */ 20 */
21#ifndef DCA_H 21#ifndef DCA_H
22#define DCA_H 22#define DCA_H
23
24#include <linux/pci.h>
25
23/* DCA Provider API */ 26/* DCA Provider API */
24 27
25/* DCA Notifier Interface */ 28/* DCA Notifier Interface */
@@ -36,6 +39,12 @@ struct dca_provider {
36 int id; 39 int id;
37}; 40};
38 41
42struct dca_domain {
43 struct list_head node;
44 struct list_head dca_providers;
45 struct pci_bus *pci_rc;
46};
47
39struct dca_ops { 48struct dca_ops {
40 int (*add_requester) (struct dca_provider *, struct device *); 49 int (*add_requester) (struct dca_provider *, struct device *);
41 int (*remove_requester) (struct dca_provider *, struct device *); 50 int (*remove_requester) (struct dca_provider *, struct device *);
@@ -47,7 +56,7 @@ struct dca_ops {
47struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); 56struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size);
48void free_dca_provider(struct dca_provider *dca); 57void free_dca_provider(struct dca_provider *dca);
49int register_dca_provider(struct dca_provider *dca, struct device *dev); 58int register_dca_provider(struct dca_provider *dca, struct device *dev);
50void unregister_dca_provider(struct dca_provider *dca); 59void unregister_dca_provider(struct dca_provider *dca, struct device *dev);
51 60
52static inline void *dca_priv(struct dca_provider *dca) 61static inline void *dca_priv(struct dca_provider *dca)
53{ 62{
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index eb5c2ba2f81a..fc1b930f246c 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -9,7 +9,7 @@
9 * 2 as published by the Free Software Foundation. 9 * 2 as published by the Free Software Foundation.
10 * 10 *
11 * debugfs is for people to use instead of /proc or /sys. 11 * debugfs is for people to use instead of /proc or /sys.
12 * See Documentation/DocBook/kernel-api for more details. 12 * See Documentation/DocBook/filesystems for more details.
13 */ 13 */
14 14
15#ifndef _DEBUGFS_H_ 15#ifndef _DEBUGFS_H_
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index ffefba81c818..2b9f2ac7ed60 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -48,19 +48,20 @@ enum dma_status {
48 48
49/** 49/**
50 * enum dma_transaction_type - DMA transaction types/indexes 50 * enum dma_transaction_type - DMA transaction types/indexes
51 *
52 * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is
53 * automatically set as dma devices are registered.
51 */ 54 */
52enum dma_transaction_type { 55enum dma_transaction_type {
53 DMA_MEMCPY, 56 DMA_MEMCPY,
54 DMA_XOR, 57 DMA_XOR,
55 DMA_PQ_XOR, 58 DMA_PQ,
56 DMA_DUAL_XOR, 59 DMA_XOR_VAL,
57 DMA_PQ_UPDATE, 60 DMA_PQ_VAL,
58 DMA_ZERO_SUM,
59 DMA_PQ_ZERO_SUM,
60 DMA_MEMSET, 61 DMA_MEMSET,
61 DMA_MEMCPY_CRC32C,
62 DMA_INTERRUPT, 62 DMA_INTERRUPT,
63 DMA_PRIVATE, 63 DMA_PRIVATE,
64 DMA_ASYNC_TX,
64 DMA_SLAVE, 65 DMA_SLAVE,
65}; 66};
66 67
@@ -70,18 +71,25 @@ enum dma_transaction_type {
70 71
71/** 72/**
72 * enum dma_ctrl_flags - DMA flags to augment operation preparation, 73 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
73 * control completion, and communicate status. 74 * control completion, and communicate status.
74 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of 75 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
75 * this transaction 76 * this transaction
76 * @DMA_CTRL_ACK - the descriptor cannot be reused until the client 77 * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
77 * acknowledges receipt, i.e. has has a chance to establish any 78 * acknowledges receipt, i.e. has has a chance to establish any dependency
78 * dependency chains 79 * chains
79 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) 80 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
80 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) 81 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
81 * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single 82 * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
82 * (if not set, do the source dma-unmapping as page) 83 * (if not set, do the source dma-unmapping as page)
83 * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single 84 * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
84 * (if not set, do the destination dma-unmapping as page) 85 * (if not set, do the destination dma-unmapping as page)
86 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
87 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
88 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
89 * sources that were the result of a previous operation, in the case of a PQ
90 * operation it continues the calculation with new sources
91 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
92 * on the result of this operation
85 */ 93 */
86enum dma_ctrl_flags { 94enum dma_ctrl_flags {
87 DMA_PREP_INTERRUPT = (1 << 0), 95 DMA_PREP_INTERRUPT = (1 << 0),
@@ -90,9 +98,32 @@ enum dma_ctrl_flags {
90 DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), 98 DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
91 DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4), 99 DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
92 DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5), 100 DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
101 DMA_PREP_PQ_DISABLE_P = (1 << 6),
102 DMA_PREP_PQ_DISABLE_Q = (1 << 7),
103 DMA_PREP_CONTINUE = (1 << 8),
104 DMA_PREP_FENCE = (1 << 9),
93}; 105};
94 106
95/** 107/**
108 * enum sum_check_bits - bit position of pq_check_flags
109 */
110enum sum_check_bits {
111 SUM_CHECK_P = 0,
112 SUM_CHECK_Q = 1,
113};
114
115/**
116 * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
117 * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
118 * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
119 */
120enum sum_check_flags {
121 SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
122 SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
123};
124
125
126/**
96 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t. 127 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
97 * See linux/cpumask.h 128 * See linux/cpumask.h
98 */ 129 */
@@ -180,8 +211,6 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
180 * @flags: flags to augment operation preparation, control completion, and 211 * @flags: flags to augment operation preparation, control completion, and
181 * communicate status 212 * communicate status
182 * @phys: physical address of the descriptor 213 * @phys: physical address of the descriptor
183 * @tx_list: driver common field for operations that require multiple
184 * descriptors
185 * @chan: target channel for this operation 214 * @chan: target channel for this operation
186 * @tx_submit: set the prepared descriptor(s) to be executed by the engine 215 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
187 * @callback: routine to call after this operation is complete 216 * @callback: routine to call after this operation is complete
@@ -195,7 +224,6 @@ struct dma_async_tx_descriptor {
195 dma_cookie_t cookie; 224 dma_cookie_t cookie;
196 enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */ 225 enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
197 dma_addr_t phys; 226 dma_addr_t phys;
198 struct list_head tx_list;
199 struct dma_chan *chan; 227 struct dma_chan *chan;
200 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); 228 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
201 dma_async_tx_callback callback; 229 dma_async_tx_callback callback;
@@ -213,6 +241,11 @@ struct dma_async_tx_descriptor {
213 * @global_node: list_head for global dma_device_list 241 * @global_node: list_head for global dma_device_list
214 * @cap_mask: one or more dma_capability flags 242 * @cap_mask: one or more dma_capability flags
215 * @max_xor: maximum number of xor sources, 0 if no capability 243 * @max_xor: maximum number of xor sources, 0 if no capability
244 * @max_pq: maximum number of PQ sources and PQ-continue capability
245 * @copy_align: alignment shift for memcpy operations
246 * @xor_align: alignment shift for xor operations
247 * @pq_align: alignment shift for pq operations
248 * @fill_align: alignment shift for memset operations
216 * @dev_id: unique device ID 249 * @dev_id: unique device ID
217 * @dev: struct device reference for dma mapping api 250 * @dev: struct device reference for dma mapping api
218 * @device_alloc_chan_resources: allocate resources and return the 251 * @device_alloc_chan_resources: allocate resources and return the
@@ -220,7 +253,9 @@ struct dma_async_tx_descriptor {
220 * @device_free_chan_resources: release DMA channel's resources 253 * @device_free_chan_resources: release DMA channel's resources
221 * @device_prep_dma_memcpy: prepares a memcpy operation 254 * @device_prep_dma_memcpy: prepares a memcpy operation
222 * @device_prep_dma_xor: prepares a xor operation 255 * @device_prep_dma_xor: prepares a xor operation
223 * @device_prep_dma_zero_sum: prepares a zero_sum operation 256 * @device_prep_dma_xor_val: prepares a xor validation operation
257 * @device_prep_dma_pq: prepares a pq operation
258 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
224 * @device_prep_dma_memset: prepares a memset operation 259 * @device_prep_dma_memset: prepares a memset operation
225 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation 260 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
226 * @device_prep_slave_sg: prepares a slave dma operation 261 * @device_prep_slave_sg: prepares a slave dma operation
@@ -235,7 +270,13 @@ struct dma_device {
235 struct list_head channels; 270 struct list_head channels;
236 struct list_head global_node; 271 struct list_head global_node;
237 dma_cap_mask_t cap_mask; 272 dma_cap_mask_t cap_mask;
238 int max_xor; 273 unsigned short max_xor;
274 unsigned short max_pq;
275 u8 copy_align;
276 u8 xor_align;
277 u8 pq_align;
278 u8 fill_align;
279 #define DMA_HAS_PQ_CONTINUE (1 << 15)
239 280
240 int dev_id; 281 int dev_id;
241 struct device *dev; 282 struct device *dev;
@@ -249,9 +290,17 @@ struct dma_device {
249 struct dma_async_tx_descriptor *(*device_prep_dma_xor)( 290 struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
250 struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, 291 struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
251 unsigned int src_cnt, size_t len, unsigned long flags); 292 unsigned int src_cnt, size_t len, unsigned long flags);
252 struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)( 293 struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
253 struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, 294 struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
254 size_t len, u32 *result, unsigned long flags); 295 size_t len, enum sum_check_flags *result, unsigned long flags);
296 struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
297 struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
298 unsigned int src_cnt, const unsigned char *scf,
299 size_t len, unsigned long flags);
300 struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
301 struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
302 unsigned int src_cnt, const unsigned char *scf, size_t len,
303 enum sum_check_flags *pqres, unsigned long flags);
255 struct dma_async_tx_descriptor *(*device_prep_dma_memset)( 304 struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
256 struct dma_chan *chan, dma_addr_t dest, int value, size_t len, 305 struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
257 unsigned long flags); 306 unsigned long flags);
@@ -270,6 +319,96 @@ struct dma_device {
270 void (*device_issue_pending)(struct dma_chan *chan); 319 void (*device_issue_pending)(struct dma_chan *chan);
271}; 320};
272 321
322static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
323{
324 size_t mask;
325
326 if (!align)
327 return true;
328 mask = (1 << align) - 1;
329 if (mask & (off1 | off2 | len))
330 return false;
331 return true;
332}
333
334static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
335 size_t off2, size_t len)
336{
337 return dmaengine_check_align(dev->copy_align, off1, off2, len);
338}
339
340static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
341 size_t off2, size_t len)
342{
343 return dmaengine_check_align(dev->xor_align, off1, off2, len);
344}
345
346static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
347 size_t off2, size_t len)
348{
349 return dmaengine_check_align(dev->pq_align, off1, off2, len);
350}
351
352static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
353 size_t off2, size_t len)
354{
355 return dmaengine_check_align(dev->fill_align, off1, off2, len);
356}
357
358static inline void
359dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
360{
361 dma->max_pq = maxpq;
362 if (has_pq_continue)
363 dma->max_pq |= DMA_HAS_PQ_CONTINUE;
364}
365
366static inline bool dmaf_continue(enum dma_ctrl_flags flags)
367{
368 return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
369}
370
371static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
372{
373 enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
374
375 return (flags & mask) == mask;
376}
377
378static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
379{
380 return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
381}
382
383static unsigned short dma_dev_to_maxpq(struct dma_device *dma)
384{
385 return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
386}
387
388/* dma_maxpq - reduce maxpq in the face of continued operations
389 * @dma - dma device with PQ capability
390 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
391 *
392 * When an engine does not support native continuation we need 3 extra
393 * source slots to reuse P and Q with the following coefficients:
394 * 1/ {00} * P : remove P from Q', but use it as a source for P'
395 * 2/ {01} * Q : use Q to continue Q' calculation
396 * 3/ {00} * Q : subtract Q from P' to cancel (2)
397 *
398 * In the case where P is disabled we only need 1 extra source:
399 * 1/ {01} * Q : use Q to continue Q' calculation
400 */
401static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
402{
403 if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
404 return dma_dev_to_maxpq(dma);
405 else if (dmaf_p_disabled_continue(flags))
406 return dma_dev_to_maxpq(dma) - 1;
407 else if (dmaf_continue(flags))
408 return dma_dev_to_maxpq(dma) - 3;
409 BUG();
410}
411
273/* --- public DMA engine API --- */ 412/* --- public DMA engine API --- */
274 413
275#ifdef CONFIG_DMA_ENGINE 414#ifdef CONFIG_DMA_ENGINE
@@ -299,7 +438,11 @@ static inline void net_dmaengine_put(void)
299#ifdef CONFIG_ASYNC_TX_DMA 438#ifdef CONFIG_ASYNC_TX_DMA
300#define async_dmaengine_get() dmaengine_get() 439#define async_dmaengine_get() dmaengine_get()
301#define async_dmaengine_put() dmaengine_put() 440#define async_dmaengine_put() dmaengine_put()
441#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
442#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
443#else
302#define async_dma_find_channel(type) dma_find_channel(type) 444#define async_dma_find_channel(type) dma_find_channel(type)
445#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
303#else 446#else
304static inline void async_dmaengine_get(void) 447static inline void async_dmaengine_get(void)
305{ 448{
@@ -312,7 +455,7 @@ async_dma_find_channel(enum dma_transaction_type type)
312{ 455{
313 return NULL; 456 return NULL;
314} 457}
315#endif 458#endif /* CONFIG_ASYNC_TX_DMA */
316 459
317dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, 460dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
318 void *dest, void *src, size_t len); 461 void *dest, void *src, size_t len);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index f847df9e99b6..a34bdf5a9d23 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -133,6 +133,7 @@ struct dentry;
133#define FB_ACCEL_NEOMAGIC_NM2230 96 /* NeoMagic NM2230 */ 133#define FB_ACCEL_NEOMAGIC_NM2230 96 /* NeoMagic NM2230 */
134#define FB_ACCEL_NEOMAGIC_NM2360 97 /* NeoMagic NM2360 */ 134#define FB_ACCEL_NEOMAGIC_NM2360 97 /* NeoMagic NM2360 */
135#define FB_ACCEL_NEOMAGIC_NM2380 98 /* NeoMagic NM2380 */ 135#define FB_ACCEL_NEOMAGIC_NM2380 98 /* NeoMagic NM2380 */
136#define FB_ACCEL_PXA3XX 99 /* PXA3xx */
136 137
137#define FB_ACCEL_SAVAGE4 0x80 /* S3 Savage4 */ 138#define FB_ACCEL_SAVAGE4 0x80 /* S3 Savage4 */
138#define FB_ACCEL_SAVAGE3D 0x81 /* S3 Savage3D */ 139#define FB_ACCEL_SAVAGE3D 0x81 /* S3 Savage3D */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 51803528b095..2adaa2529f18 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -595,6 +595,7 @@ struct address_space_operations {
595 int (*launder_page) (struct page *); 595 int (*launder_page) (struct page *);
596 int (*is_partially_uptodate) (struct page *, read_descriptor_t *, 596 int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
597 unsigned long); 597 unsigned long);
598 int (*error_remove_page)(struct address_space *, struct page *);
598}; 599};
599 600
600/* 601/*
@@ -640,7 +641,6 @@ struct block_device {
640 struct super_block * bd_super; 641 struct super_block * bd_super;
641 int bd_openers; 642 int bd_openers;
642 struct mutex bd_mutex; /* open/close mutex */ 643 struct mutex bd_mutex; /* open/close mutex */
643 struct semaphore bd_mount_sem;
644 struct list_head bd_inodes; 644 struct list_head bd_inodes;
645 void * bd_holder; 645 void * bd_holder;
646 int bd_holders; 646 int bd_holders;
@@ -1315,7 +1315,7 @@ struct super_block {
1315 unsigned long s_blocksize; 1315 unsigned long s_blocksize;
1316 unsigned char s_blocksize_bits; 1316 unsigned char s_blocksize_bits;
1317 unsigned char s_dirt; 1317 unsigned char s_dirt;
1318 unsigned long long s_maxbytes; /* Max file size */ 1318 loff_t s_maxbytes; /* Max file size */
1319 struct file_system_type *s_type; 1319 struct file_system_type *s_type;
1320 const struct super_operations *s_op; 1320 const struct super_operations *s_op;
1321 const struct dquot_operations *dq_op; 1321 const struct dquot_operations *dq_op;
@@ -2156,6 +2156,7 @@ extern ino_t iunique(struct super_block *, ino_t);
2156extern int inode_needs_sync(struct inode *inode); 2156extern int inode_needs_sync(struct inode *inode);
2157extern void generic_delete_inode(struct inode *inode); 2157extern void generic_delete_inode(struct inode *inode);
2158extern void generic_drop_inode(struct inode *inode); 2158extern void generic_drop_inode(struct inode *inode);
2159extern int generic_detach_inode(struct inode *inode);
2159 2160
2160extern struct inode *ilookup5_nowait(struct super_block *sb, 2161extern struct inode *ilookup5_nowait(struct super_block *sb,
2161 unsigned long hashval, int (*test)(struct inode *, void *), 2162 unsigned long hashval, int (*test)(struct inode *, void *),
@@ -2334,6 +2335,7 @@ extern void get_filesystem(struct file_system_type *fs);
2334extern void put_filesystem(struct file_system_type *fs); 2335extern void put_filesystem(struct file_system_type *fs);
2335extern struct file_system_type *get_fs_type(const char *name); 2336extern struct file_system_type *get_fs_type(const char *name);
2336extern struct super_block *get_super(struct block_device *); 2337extern struct super_block *get_super(struct block_device *);
2338extern struct super_block *get_active_super(struct block_device *bdev);
2337extern struct super_block *user_get_super(dev_t); 2339extern struct super_block *user_get_super(dev_t);
2338extern void drop_super(struct super_block *sb); 2340extern void drop_super(struct super_block *sb);
2339 2341
@@ -2381,7 +2383,8 @@ extern int buffer_migrate_page(struct address_space *,
2381#define buffer_migrate_page NULL 2383#define buffer_migrate_page NULL
2382#endif 2384#endif
2383 2385
2384extern int inode_change_ok(struct inode *, struct iattr *); 2386extern int inode_change_ok(const struct inode *, struct iattr *);
2387extern int inode_newsize_ok(const struct inode *, loff_t offset);
2385extern int __must_check inode_setattr(struct inode *, struct iattr *); 2388extern int __must_check inode_setattr(struct inode *, struct iattr *);
2386 2389
2387extern void file_update_time(struct file *file); 2390extern void file_update_time(struct file *file);
@@ -2467,7 +2470,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
2467 size_t len, loff_t *ppos); 2470 size_t len, loff_t *ppos);
2468 2471
2469struct ctl_table; 2472struct ctl_table;
2470int proc_nr_files(struct ctl_table *table, int write, struct file *filp, 2473int proc_nr_files(struct ctl_table *table, int write,
2471 void __user *buffer, size_t *lenp, loff_t *ppos); 2474 void __user *buffer, size_t *lenp, loff_t *ppos);
2472 2475
2473int __init get_filesystem_list(char *buf); 2476int __init get_filesystem_list(char *buf);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 3c0924a18daf..cd3d2abaf30a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -19,7 +19,7 @@
19extern int ftrace_enabled; 19extern int ftrace_enabled;
20extern int 20extern int
21ftrace_enable_sysctl(struct ctl_table *table, int write, 21ftrace_enable_sysctl(struct ctl_table *table, int write,
22 struct file *filp, void __user *buffer, size_t *lenp, 22 void __user *buffer, size_t *lenp,
23 loff_t *ppos); 23 loff_t *ppos);
24 24
25typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); 25typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
@@ -94,7 +94,7 @@ static inline void ftrace_start(void) { }
94extern int stack_tracer_enabled; 94extern int stack_tracer_enabled;
95int 95int
96stack_trace_sysctl(struct ctl_table *table, int write, 96stack_trace_sysctl(struct ctl_table *table, int write,
97 struct file *file, void __user *buffer, size_t *lenp, 97 void __user *buffer, size_t *lenp,
98 loff_t *ppos); 98 loff_t *ppos);
99#endif 99#endif
100 100
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 34956c8fdebf..8ec17997d94f 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -4,11 +4,6 @@
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <linux/types.h> 5#include <linux/types.h>
6 6
7struct inode;
8struct mm_struct;
9struct task_struct;
10union ktime;
11
12/* Second argument to futex syscall */ 7/* Second argument to futex syscall */
13 8
14 9
@@ -129,6 +124,11 @@ struct robust_list_head {
129#define FUTEX_BITSET_MATCH_ANY 0xffffffff 124#define FUTEX_BITSET_MATCH_ANY 0xffffffff
130 125
131#ifdef __KERNEL__ 126#ifdef __KERNEL__
127struct inode;
128struct mm_struct;
129struct task_struct;
130union ktime;
131
132long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout, 132long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout,
133 u32 __user *uaddr2, u32 val2, u32 val3); 133 u32 __user *uaddr2, u32 val2, u32 val3);
134 134
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 176e7ee73eff..16937995abd4 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -3,15 +3,15 @@
3 3
4#include <linux/fs.h> 4#include <linux/fs.h>
5 5
6struct ctl_table;
7struct user_struct;
8
6#ifdef CONFIG_HUGETLB_PAGE 9#ifdef CONFIG_HUGETLB_PAGE
7 10
8#include <linux/mempolicy.h> 11#include <linux/mempolicy.h>
9#include <linux/shm.h> 12#include <linux/shm.h>
10#include <asm/tlbflush.h> 13#include <asm/tlbflush.h>
11 14
12struct ctl_table;
13struct user_struct;
14
15int PageHuge(struct page *page); 15int PageHuge(struct page *page);
16 16
17static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) 17static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
@@ -20,9 +20,9 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
20} 20}
21 21
22void reset_vma_resv_huge_pages(struct vm_area_struct *vma); 22void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
23int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); 23int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
24int hugetlb_overcommit_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); 24int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
25int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); 25int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
26int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); 26int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
27int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, 27int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
28 struct page **, struct vm_area_struct **, 28 struct page **, struct vm_area_struct **,
@@ -187,7 +187,11 @@ static inline void set_file_hugepages(struct file *file)
187 187
188#define is_file_hugepages(file) 0 188#define is_file_hugepages(file) 0
189#define set_file_hugepages(file) BUG() 189#define set_file_hugepages(file) BUG()
190#define hugetlb_file_setup(name,size,acct,user,creat) ERR_PTR(-ENOSYS) 190static inline struct file *hugetlb_file_setup(const char *name, size_t size,
191 int acctflag, struct user_struct **user, int creat_flags)
192{
193 return ERR_PTR(-ENOSYS);
194}
191 195
192#endif /* !CONFIG_HUGETLBFS */ 196#endif /* !CONFIG_HUGETLBFS */
193 197
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e46a0734ab6e..bf9213b2db8f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -118,6 +118,9 @@ static inline bool mem_cgroup_disabled(void)
118 118
119extern bool mem_cgroup_oom_called(struct task_struct *task); 119extern bool mem_cgroup_oom_called(struct task_struct *task);
120void mem_cgroup_update_mapped_file_stat(struct page *page, int val); 120void mem_cgroup_update_mapped_file_stat(struct page *page, int val);
121unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
122 gfp_t gfp_mask, int nid,
123 int zid);
121#else /* CONFIG_CGROUP_MEM_RES_CTLR */ 124#else /* CONFIG_CGROUP_MEM_RES_CTLR */
122struct mem_cgroup; 125struct mem_cgroup;
123 126
@@ -276,6 +279,13 @@ static inline void mem_cgroup_update_mapped_file_stat(struct page *page,
276{ 279{
277} 280}
278 281
282static inline
283unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
284 gfp_t gfp_mask, int nid, int zid)
285{
286 return 0;
287}
288
279#endif /* CONFIG_CGROUP_MEM_CONT */ 289#endif /* CONFIG_CGROUP_MEM_CONT */
280 290
281#endif /* _LINUX_MEMCONTROL_H */ 291#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b6eae5e3144b..24c395694f4d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -288,7 +288,7 @@ static inline int is_vmalloc_addr(const void *x)
288#ifdef CONFIG_MMU 288#ifdef CONFIG_MMU
289extern int is_vmalloc_or_module_addr(const void *x); 289extern int is_vmalloc_or_module_addr(const void *x);
290#else 290#else
291static int is_vmalloc_or_module_addr(const void *x) 291static inline int is_vmalloc_or_module_addr(const void *x)
292{ 292{
293 return 0; 293 return 0;
294} 294}
@@ -695,11 +695,12 @@ static inline int page_mapped(struct page *page)
695#define VM_FAULT_SIGBUS 0x0002 695#define VM_FAULT_SIGBUS 0x0002
696#define VM_FAULT_MAJOR 0x0004 696#define VM_FAULT_MAJOR 0x0004
697#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ 697#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
698#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned page */
698 699
699#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ 700#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
700#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ 701#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
701 702
702#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS) 703#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)
703 704
704/* 705/*
705 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. 706 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
@@ -791,8 +792,14 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
791 unmap_mapping_range(mapping, holebegin, holelen, 0); 792 unmap_mapping_range(mapping, holebegin, holelen, 0);
792} 793}
793 794
794extern int vmtruncate(struct inode * inode, loff_t offset); 795extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
795extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); 796extern int vmtruncate(struct inode *inode, loff_t offset);
797extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
798
799int truncate_inode_page(struct address_space *mapping, struct page *page);
800int generic_error_remove_page(struct address_space *mapping, struct page *page);
801
802int invalidate_inode_page(struct page *page);
796 803
797#ifdef CONFIG_MMU 804#ifdef CONFIG_MMU
798extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, 805extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -1279,7 +1286,7 @@ int in_gate_area_no_task(unsigned long addr);
1279#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);}) 1286#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
1280#endif /* __HAVE_ARCH_GATE_AREA */ 1287#endif /* __HAVE_ARCH_GATE_AREA */
1281 1288
1282int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *, 1289int drop_caches_sysctl_handler(struct ctl_table *, int,
1283 void __user *, size_t *, loff_t *); 1290 void __user *, size_t *, loff_t *);
1284unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, 1291unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
1285 unsigned long lru_pages); 1292 unsigned long lru_pages);
@@ -1308,5 +1315,12 @@ void vmemmap_populate_print_last(void);
1308extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim, 1315extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
1309 size_t size); 1316 size_t size);
1310extern void refund_locked_memory(struct mm_struct *mm, size_t size); 1317extern void refund_locked_memory(struct mm_struct *mm, size_t size);
1318
1319extern void memory_failure(unsigned long pfn, int trapno);
1320extern int __memory_failure(unsigned long pfn, int trapno, int ref);
1321extern int sysctl_memory_failure_early_kill;
1322extern int sysctl_memory_failure_recovery;
1323extern atomic_long_t mce_bad_pages;
1324
1311#endif /* __KERNEL__ */ 1325#endif /* __KERNEL__ */
1312#endif /* _LINUX_MM_H */ 1326#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0042090a4d70..21d6aa45206a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -240,6 +240,8 @@ struct mm_struct {
240 240
241 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ 241 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
242 242
243 struct linux_binfmt *binfmt;
244
243 cpumask_t cpu_vm_mask; 245 cpumask_t cpu_vm_mask;
244 246
245 /* Architecture-specific MM context */ 247 /* Architecture-specific MM context */
@@ -259,11 +261,10 @@ struct mm_struct {
259 unsigned long flags; /* Must use atomic bitops to access the bits */ 261 unsigned long flags; /* Must use atomic bitops to access the bits */
260 262
261 struct core_state *core_state; /* coredumping support */ 263 struct core_state *core_state; /* coredumping support */
262 264#ifdef CONFIG_AIO
263 /* aio bits */
264 spinlock_t ioctx_lock; 265 spinlock_t ioctx_lock;
265 struct hlist_head ioctx_list; 266 struct hlist_head ioctx_list;
266 267#endif
267#ifdef CONFIG_MM_OWNER 268#ifdef CONFIG_MM_OWNER
268 /* 269 /*
269 * "owner" points to a task that is regarded as the canonical 270 * "owner" points to a task that is regarded as the canonical
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 652ef01be582..6f7561730d88 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -755,21 +755,20 @@ static inline int is_dma(struct zone *zone)
755 755
756/* These two functions are used to setup the per zone pages min values */ 756/* These two functions are used to setup the per zone pages min values */
757struct ctl_table; 757struct ctl_table;
758struct file; 758int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
759int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
760 void __user *, size_t *, loff_t *); 759 void __user *, size_t *, loff_t *);
761extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; 760extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
762int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *, 761int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
763 void __user *, size_t *, loff_t *); 762 void __user *, size_t *, loff_t *);
764int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *, 763int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
765 void __user *, size_t *, loff_t *); 764 void __user *, size_t *, loff_t *);
766int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, 765int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
767 struct file *, void __user *, size_t *, loff_t *); 766 void __user *, size_t *, loff_t *);
768int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, 767int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
769 struct file *, void __user *, size_t *, loff_t *); 768 void __user *, size_t *, loff_t *);
770 769
771extern int numa_zonelist_order_handler(struct ctl_table *, int, 770extern int numa_zonelist_order_handler(struct ctl_table *, int,
772 struct file *, void __user *, size_t *, loff_t *); 771 void __user *, size_t *, loff_t *);
773extern char numa_zonelist_order[]; 772extern char numa_zonelist_order[];
774#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ 773#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */
775 774
diff --git a/include/linux/module.h b/include/linux/module.h
index 1c755b2f937d..482efc865acf 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -128,7 +128,10 @@ extern struct module __this_module;
128 */ 128 */
129#define MODULE_LICENSE(_license) MODULE_INFO(license, _license) 129#define MODULE_LICENSE(_license) MODULE_INFO(license, _license)
130 130
131/* Author, ideally of form NAME[, NAME]*[ and NAME] */ 131/*
132 * Author(s), use "Name <email>" or just "Name", for multiple
133 * authors use multiple MODULE_AUTHOR() statements/lines.
134 */
132#define MODULE_AUTHOR(_author) MODULE_INFO(author, _author) 135#define MODULE_AUTHOR(_author) MODULE_INFO(author, _author)
133 136
134/* What your module does. */ 137/* What your module does. */
@@ -308,10 +311,14 @@ struct module
308#endif 311#endif
309 312
310#ifdef CONFIG_KALLSYMS 313#ifdef CONFIG_KALLSYMS
311 /* We keep the symbol and string tables for kallsyms. */ 314 /*
312 Elf_Sym *symtab; 315 * We keep the symbol and string tables for kallsyms.
313 unsigned int num_symtab; 316 * The core_* fields below are temporary, loader-only (they
314 char *strtab; 317 * could really be discarded after module init).
318 */
319 Elf_Sym *symtab, *core_symtab;
320 unsigned int num_symtab, core_num_syms;
321 char *strtab, *core_strtab;
315 322
316 /* Section attributes */ 323 /* Section attributes */
317 struct module_sect_attrs *sect_attrs; 324 struct module_sect_attrs *sect_attrs;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 13de789f0a5c..6b202b173955 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -51,6 +51,9 @@
51 * PG_buddy is set to indicate that the page is free and in the buddy system 51 * PG_buddy is set to indicate that the page is free and in the buddy system
52 * (see mm/page_alloc.c). 52 * (see mm/page_alloc.c).
53 * 53 *
54 * PG_hwpoison indicates that a page got corrupted in hardware and contains
55 * data with incorrect ECC bits that triggered a machine check. Accessing is
56 * not safe since it may cause another machine check. Don't touch!
54 */ 57 */
55 58
56/* 59/*
@@ -102,6 +105,9 @@ enum pageflags {
102#ifdef CONFIG_ARCH_USES_PG_UNCACHED 105#ifdef CONFIG_ARCH_USES_PG_UNCACHED
103 PG_uncached, /* Page has been mapped as uncached */ 106 PG_uncached, /* Page has been mapped as uncached */
104#endif 107#endif
108#ifdef CONFIG_MEMORY_FAILURE
109 PG_hwpoison, /* hardware poisoned page. Don't touch */
110#endif
105 __NR_PAGEFLAGS, 111 __NR_PAGEFLAGS,
106 112
107 /* Filesystems */ 113 /* Filesystems */
@@ -269,6 +275,15 @@ PAGEFLAG(Uncached, uncached)
269PAGEFLAG_FALSE(Uncached) 275PAGEFLAG_FALSE(Uncached)
270#endif 276#endif
271 277
278#ifdef CONFIG_MEMORY_FAILURE
279PAGEFLAG(HWPoison, hwpoison)
280TESTSETFLAG(HWPoison, hwpoison)
281#define __PG_HWPOISON (1UL << PG_hwpoison)
282#else
283PAGEFLAG_FALSE(HWPoison)
284#define __PG_HWPOISON 0
285#endif
286
272static inline int PageUptodate(struct page *page) 287static inline int PageUptodate(struct page *page)
273{ 288{
274 int ret = test_bit(PG_uptodate, &(page)->flags); 289 int ret = test_bit(PG_uptodate, &(page)->flags);
@@ -393,7 +408,7 @@ static inline void __ClearPageTail(struct page *page)
393 1 << PG_private | 1 << PG_private_2 | \ 408 1 << PG_private | 1 << PG_private_2 | \
394 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ 409 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \
395 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ 410 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
396 1 << PG_unevictable | __PG_MLOCKED) 411 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON)
397 412
398/* 413/*
399 * Flags checked when a page is prepped for return by the page allocator. 414 * Flags checked when a page is prepped for return by the page allocator.
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index ada779f24178..4b938d4f3ac2 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -38,6 +38,7 @@ enum {
38 PCG_LOCK, /* page cgroup is locked */ 38 PCG_LOCK, /* page cgroup is locked */
39 PCG_CACHE, /* charged as cache */ 39 PCG_CACHE, /* charged as cache */
40 PCG_USED, /* this object is in use. */ 40 PCG_USED, /* this object is in use. */
41 PCG_ACCT_LRU, /* page has been accounted for */
41}; 42};
42 43
43#define TESTPCGFLAG(uname, lname) \ 44#define TESTPCGFLAG(uname, lname) \
@@ -52,11 +53,23 @@ static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
52static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \ 53static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
53 { clear_bit(PCG_##lname, &pc->flags); } 54 { clear_bit(PCG_##lname, &pc->flags); }
54 55
56#define TESTCLEARPCGFLAG(uname, lname) \
57static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \
58 { return test_and_clear_bit(PCG_##lname, &pc->flags); }
59
55/* Cache flag is set only once (at allocation) */ 60/* Cache flag is set only once (at allocation) */
56TESTPCGFLAG(Cache, CACHE) 61TESTPCGFLAG(Cache, CACHE)
62CLEARPCGFLAG(Cache, CACHE)
63SETPCGFLAG(Cache, CACHE)
57 64
58TESTPCGFLAG(Used, USED) 65TESTPCGFLAG(Used, USED)
59CLEARPCGFLAG(Used, USED) 66CLEARPCGFLAG(Used, USED)
67SETPCGFLAG(Used, USED)
68
69SETPCGFLAG(AcctLRU, ACCT_LRU)
70CLEARPCGFLAG(AcctLRU, ACCT_LRU)
71TESTPCGFLAG(AcctLRU, ACCT_LRU)
72TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU)
60 73
61static inline int page_cgroup_nid(struct page_cgroup *pc) 74static inline int page_cgroup_nid(struct page_cgroup *pc)
62{ 75{
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 7803565aa877..da1fda8623e0 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2527,6 +2527,16 @@
2527#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e 2527#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
2528#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b 2528#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
2529#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c 2529#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c
2530#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710
2531#define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711
2532#define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712
2533#define PCI_DEVICE_ID_INTEL_IOAT_JSF3 0x3713
2534#define PCI_DEVICE_ID_INTEL_IOAT_JSF4 0x3714
2535#define PCI_DEVICE_ID_INTEL_IOAT_JSF5 0x3715
2536#define PCI_DEVICE_ID_INTEL_IOAT_JSF6 0x3716
2537#define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717
2538#define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718
2539#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719
2530#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 2540#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14
2531#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 2541#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
2532#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 2542#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 07bff666e65b..931150566ade 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -88,4 +88,6 @@
88#define PR_TASK_PERF_EVENTS_DISABLE 31 88#define PR_TASK_PERF_EVENTS_DISABLE 31
89#define PR_TASK_PERF_EVENTS_ENABLE 32 89#define PR_TASK_PERF_EVENTS_ENABLE 32
90 90
91#define PR_MCE_KILL 33
92
91#endif /* _LINUX_PRCTL_H */ 93#endif /* _LINUX_PRCTL_H */
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 953fc055e875..14a86bc7102b 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -140,7 +140,7 @@ struct rchan_callbacks
140 * cause relay_open() to create a single global buffer rather 140 * cause relay_open() to create a single global buffer rather
141 * than the default set of per-cpu buffers. 141 * than the default set of per-cpu buffers.
142 * 142 *
143 * See Documentation/filesystems/relayfs.txt for more info. 143 * See Documentation/filesystems/relay.txt for more info.
144 */ 144 */
145 struct dentry *(*create_buf_file)(const char *filename, 145 struct dentry *(*create_buf_file)(const char *filename,
146 struct dentry *parent, 146 struct dentry *parent,
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 511f42fc6816..731af71cddc9 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -35,6 +35,10 @@ struct res_counter {
35 */ 35 */
36 unsigned long long limit; 36 unsigned long long limit;
37 /* 37 /*
38 * the limit that usage can exceed
39 */
40 unsigned long long soft_limit;
41 /*
38 * the number of unsuccessful attempts to consume the resource 42 * the number of unsuccessful attempts to consume the resource
39 */ 43 */
40 unsigned long long failcnt; 44 unsigned long long failcnt;
@@ -87,6 +91,7 @@ enum {
87 RES_MAX_USAGE, 91 RES_MAX_USAGE,
88 RES_LIMIT, 92 RES_LIMIT,
89 RES_FAILCNT, 93 RES_FAILCNT,
94 RES_SOFT_LIMIT,
90}; 95};
91 96
92/* 97/*
@@ -109,7 +114,8 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
109int __must_check res_counter_charge_locked(struct res_counter *counter, 114int __must_check res_counter_charge_locked(struct res_counter *counter,
110 unsigned long val); 115 unsigned long val);
111int __must_check res_counter_charge(struct res_counter *counter, 116int __must_check res_counter_charge(struct res_counter *counter,
112 unsigned long val, struct res_counter **limit_fail_at); 117 unsigned long val, struct res_counter **limit_fail_at,
118 struct res_counter **soft_limit_at);
113 119
114/* 120/*
115 * uncharge - tell that some portion of the resource is released 121 * uncharge - tell that some portion of the resource is released
@@ -122,7 +128,8 @@ int __must_check res_counter_charge(struct res_counter *counter,
122 */ 128 */
123 129
124void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val); 130void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
125void res_counter_uncharge(struct res_counter *counter, unsigned long val); 131void res_counter_uncharge(struct res_counter *counter, unsigned long val,
132 bool *was_soft_limit_excess);
126 133
127static inline bool res_counter_limit_check_locked(struct res_counter *cnt) 134static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
128{ 135{
@@ -132,6 +139,36 @@ static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
132 return false; 139 return false;
133} 140}
134 141
142static inline bool res_counter_soft_limit_check_locked(struct res_counter *cnt)
143{
144 if (cnt->usage < cnt->soft_limit)
145 return true;
146
147 return false;
148}
149
150/**
151 * res_counter_soft_limit_excess - get the difference between the usage and the soft limit
152 * @cnt: The counter
153 *
154 * Returns 0 if usage is less than or equal to soft limit
155 * The difference between usage and soft limit, otherwise.
156 */
157static inline unsigned long long
158res_counter_soft_limit_excess(struct res_counter *cnt)
159{
160 unsigned long long excess;
161 unsigned long flags;
162
163 spin_lock_irqsave(&cnt->lock, flags);
164 if (cnt->usage <= cnt->soft_limit)
165 excess = 0;
166 else
167 excess = cnt->usage - cnt->soft_limit;
168 spin_unlock_irqrestore(&cnt->lock, flags);
169 return excess;
170}
171
135/* 172/*
136 * Helper function to detect if the cgroup is within it's limit or 173 * Helper function to detect if the cgroup is within it's limit or
137 * not. It's currently called from cgroup_rss_prepare() 174 * not. It's currently called from cgroup_rss_prepare()
@@ -147,6 +184,17 @@ static inline bool res_counter_check_under_limit(struct res_counter *cnt)
147 return ret; 184 return ret;
148} 185}
149 186
187static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt)
188{
189 bool ret;
190 unsigned long flags;
191
192 spin_lock_irqsave(&cnt->lock, flags);
193 ret = res_counter_soft_limit_check_locked(cnt);
194 spin_unlock_irqrestore(&cnt->lock, flags);
195 return ret;
196}
197
150static inline void res_counter_reset_max(struct res_counter *cnt) 198static inline void res_counter_reset_max(struct res_counter *cnt)
151{ 199{
152 unsigned long flags; 200 unsigned long flags;
@@ -180,4 +228,16 @@ static inline int res_counter_set_limit(struct res_counter *cnt,
180 return ret; 228 return ret;
181} 229}
182 230
231static inline int
232res_counter_set_soft_limit(struct res_counter *cnt,
233 unsigned long long soft_limit)
234{
235 unsigned long flags;
236
237 spin_lock_irqsave(&cnt->lock, flags);
238 cnt->soft_limit = soft_limit;
239 spin_unlock_irqrestore(&cnt->lock, flags);
240 return 0;
241}
242
183#endif 243#endif
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 477841d29fce..cb0ba7032609 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -81,7 +81,19 @@ static inline void page_dup_rmap(struct page *page)
81 */ 81 */
82int page_referenced(struct page *, int is_locked, 82int page_referenced(struct page *, int is_locked,
83 struct mem_cgroup *cnt, unsigned long *vm_flags); 83 struct mem_cgroup *cnt, unsigned long *vm_flags);
84int try_to_unmap(struct page *, int ignore_refs); 84enum ttu_flags {
85 TTU_UNMAP = 0, /* unmap mode */
86 TTU_MIGRATION = 1, /* migration mode */
87 TTU_MUNLOCK = 2, /* munlock mode */
88 TTU_ACTION_MASK = 0xff,
89
90 TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
91 TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
92 TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
93};
94#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
95
96int try_to_unmap(struct page *, enum ttu_flags flags);
85 97
86/* 98/*
87 * Called from mm/filemap_xip.c to unmap empty zero page 99 * Called from mm/filemap_xip.c to unmap empty zero page
@@ -108,6 +120,13 @@ int page_mkclean(struct page *);
108 */ 120 */
109int try_to_munlock(struct page *); 121int try_to_munlock(struct page *);
110 122
123/*
124 * Called by memory-failure.c to kill processes.
125 */
126struct anon_vma *page_lock_anon_vma(struct page *page);
127void page_unlock_anon_vma(struct anon_vma *anon_vma);
128int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
129
111#else /* !CONFIG_MMU */ 130#else /* !CONFIG_MMU */
112 131
113#define anon_vma_init() do {} while (0) 132#define anon_vma_init() do {} while (0)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 848d1f20086e..75e6e60bf583 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -309,7 +309,7 @@ extern void softlockup_tick(void);
309extern void touch_softlockup_watchdog(void); 309extern void touch_softlockup_watchdog(void);
310extern void touch_all_softlockup_watchdogs(void); 310extern void touch_all_softlockup_watchdogs(void);
311extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write, 311extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
312 struct file *filp, void __user *buffer, 312 void __user *buffer,
313 size_t *lenp, loff_t *ppos); 313 size_t *lenp, loff_t *ppos);
314extern unsigned int softlockup_panic; 314extern unsigned int softlockup_panic;
315extern int softlockup_thresh; 315extern int softlockup_thresh;
@@ -331,7 +331,7 @@ extern unsigned long sysctl_hung_task_check_count;
331extern unsigned long sysctl_hung_task_timeout_secs; 331extern unsigned long sysctl_hung_task_timeout_secs;
332extern unsigned long sysctl_hung_task_warnings; 332extern unsigned long sysctl_hung_task_warnings;
333extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, 333extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
334 struct file *filp, void __user *buffer, 334 void __user *buffer,
335 size_t *lenp, loff_t *ppos); 335 size_t *lenp, loff_t *ppos);
336#endif 336#endif
337 337
@@ -1271,7 +1271,6 @@ struct task_struct {
1271 struct mm_struct *mm, *active_mm; 1271 struct mm_struct *mm, *active_mm;
1272 1272
1273/* task state */ 1273/* task state */
1274 struct linux_binfmt *binfmt;
1275 int exit_state; 1274 int exit_state;
1276 int exit_code, exit_signal; 1275 int exit_code, exit_signal;
1277 int pdeath_signal; /* The signal sent when the parent dies */ 1276 int pdeath_signal; /* The signal sent when the parent dies */
@@ -1735,6 +1734,7 @@ extern cputime_t task_gtime(struct task_struct *p);
1735#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ 1734#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
1736#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ 1735#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
1737#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ 1736#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
1737#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
1738#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ 1738#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
1739#define PF_DUMPCORE 0x00000200 /* dumped core */ 1739#define PF_DUMPCORE 0x00000200 /* dumped core */
1740#define PF_SIGNALED 0x00000400 /* killed by a signal */ 1740#define PF_SIGNALED 0x00000400 /* killed by a signal */
@@ -1754,6 +1754,7 @@ extern cputime_t task_gtime(struct task_struct *p);
1754#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ 1754#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
1755#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ 1755#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
1756#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ 1756#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */
1757#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1757#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ 1758#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
1758#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ 1759#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
1759#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ 1760#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */
@@ -1906,7 +1907,7 @@ extern unsigned int sysctl_sched_time_avg;
1906extern unsigned int sysctl_timer_migration; 1907extern unsigned int sysctl_timer_migration;
1907 1908
1908int sched_nr_latency_handler(struct ctl_table *table, int write, 1909int sched_nr_latency_handler(struct ctl_table *table, int write,
1909 struct file *file, void __user *buffer, size_t *length, 1910 void __user *buffer, size_t *length,
1910 loff_t *ppos); 1911 loff_t *ppos);
1911#endif 1912#endif
1912#ifdef CONFIG_SCHED_DEBUG 1913#ifdef CONFIG_SCHED_DEBUG
@@ -1924,7 +1925,7 @@ extern unsigned int sysctl_sched_rt_period;
1924extern int sysctl_sched_rt_runtime; 1925extern int sysctl_sched_rt_runtime;
1925 1926
1926int sched_rt_handler(struct ctl_table *table, int write, 1927int sched_rt_handler(struct ctl_table *table, int write,
1927 struct file *filp, void __user *buffer, size_t *lenp, 1928 void __user *buffer, size_t *lenp,
1928 loff_t *ppos); 1929 loff_t *ppos);
1929 1930
1930extern unsigned int sysctl_sched_compat_yield; 1931extern unsigned int sysctl_sched_compat_yield;
@@ -2059,6 +2060,7 @@ extern int kill_pgrp(struct pid *pid, int sig, int priv);
2059extern int kill_pid(struct pid *pid, int sig, int priv); 2060extern int kill_pid(struct pid *pid, int sig, int priv);
2060extern int kill_proc_info(int, struct siginfo *, pid_t); 2061extern int kill_proc_info(int, struct siginfo *, pid_t);
2061extern int do_notify_parent(struct task_struct *, int); 2062extern int do_notify_parent(struct task_struct *, int);
2063extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2062extern void force_sig(int, struct task_struct *); 2064extern void force_sig(int, struct task_struct *);
2063extern void force_sig_specific(int, struct task_struct *); 2065extern void force_sig_specific(int, struct task_struct *);
2064extern int send_sig(int, struct task_struct *, int); 2066extern int send_sig(int, struct task_struct *, int);
@@ -2336,7 +2338,10 @@ static inline int signal_pending(struct task_struct *p)
2336 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); 2338 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2337} 2339}
2338 2340
2339extern int __fatal_signal_pending(struct task_struct *p); 2341static inline int __fatal_signal_pending(struct task_struct *p)
2342{
2343 return unlikely(sigismember(&p->pending.signal, SIGKILL));
2344}
2340 2345
2341static inline int fatal_signal_pending(struct task_struct *p) 2346static inline int fatal_signal_pending(struct task_struct *p)
2342{ 2347{
diff --git a/include/linux/security.h b/include/linux/security.h
index d050b66ab9ef..239e40d0450b 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -133,7 +133,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
133 return PAGE_ALIGN(mmap_min_addr); 133 return PAGE_ALIGN(mmap_min_addr);
134 return hint; 134 return hint;
135} 135}
136extern int mmap_min_addr_handler(struct ctl_table *table, int write, struct file *filp, 136extern int mmap_min_addr_handler(struct ctl_table *table, int write,
137 void __user *buffer, size_t *lenp, loff_t *ppos); 137 void __user *buffer, size_t *lenp, loff_t *ppos);
138 138
139#ifdef CONFIG_SECURITY 139#ifdef CONFIG_SECURITY
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 0c6a86b79596..8366d8f12e53 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -35,6 +35,44 @@ struct seq_operations {
35 35
36#define SEQ_SKIP 1 36#define SEQ_SKIP 1
37 37
38/**
39 * seq_get_buf - get buffer to write arbitrary data to
40 * @m: the seq_file handle
41 * @bufp: the beginning of the buffer is stored here
42 *
43 * Return the number of bytes available in the buffer, or zero if
44 * there's no space.
45 */
46static inline size_t seq_get_buf(struct seq_file *m, char **bufp)
47{
48 BUG_ON(m->count > m->size);
49 if (m->count < m->size)
50 *bufp = m->buf + m->count;
51 else
52 *bufp = NULL;
53
54 return m->size - m->count;
55}
56
57/**
58 * seq_commit - commit data to the buffer
59 * @m: the seq_file handle
60 * @num: the number of bytes to commit
61 *
62 * Commit @num bytes of data written to a buffer previously acquired
63 * by seq_buf_get. To signal an error condition, or that the data
64 * didn't fit in the available space, pass a negative @num value.
65 */
66static inline void seq_commit(struct seq_file *m, int num)
67{
68 if (num < 0) {
69 m->count = m->size;
70 } else {
71 BUG_ON(m->count + num > m->size);
72 m->count += num;
73 }
74}
75
38char *mangle_path(char *s, char *p, char *esc); 76char *mangle_path(char *s, char *p, char *esc);
39int seq_open(struct file *, const struct seq_operations *); 77int seq_open(struct file *, const struct seq_operations *);
40ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); 78ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
diff --git a/include/linux/signal.h b/include/linux/signal.h
index c7552836bd95..ab9272cc270c 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -233,6 +233,8 @@ static inline int valid_signal(unsigned long sig)
233} 233}
234 234
235extern int next_signal(struct sigpending *pending, sigset_t *mask); 235extern int next_signal(struct sigpending *pending, sigset_t *mask);
236extern int do_send_sig_info(int sig, struct siginfo *info,
237 struct task_struct *p, bool group);
236extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); 238extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
237extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); 239extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
238extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, 240extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 6c990e658f4e..4ec90019c1a4 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -34,16 +34,38 @@ static inline int current_is_kswapd(void)
34 * the type/offset into the pte as 5/27 as well. 34 * the type/offset into the pte as 5/27 as well.
35 */ 35 */
36#define MAX_SWAPFILES_SHIFT 5 36#define MAX_SWAPFILES_SHIFT 5
37#ifndef CONFIG_MIGRATION 37
38#define MAX_SWAPFILES (1 << MAX_SWAPFILES_SHIFT) 38/*
39 * Use some of the swap files numbers for other purposes. This
40 * is a convenient way to hook into the VM to trigger special
41 * actions on faults.
42 */
43
44/*
45 * NUMA node memory migration support
46 */
47#ifdef CONFIG_MIGRATION
48#define SWP_MIGRATION_NUM 2
49#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
50#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
39#else 51#else
40/* Use last two entries for page migration swap entries */ 52#define SWP_MIGRATION_NUM 0
41#define MAX_SWAPFILES ((1 << MAX_SWAPFILES_SHIFT)-2)
42#define SWP_MIGRATION_READ MAX_SWAPFILES
43#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + 1)
44#endif 53#endif
45 54
46/* 55/*
56 * Handling of hardware poisoned pages with memory corruption.
57 */
58#ifdef CONFIG_MEMORY_FAILURE
59#define SWP_HWPOISON_NUM 1
60#define SWP_HWPOISON MAX_SWAPFILES
61#else
62#define SWP_HWPOISON_NUM 0
63#endif
64
65#define MAX_SWAPFILES \
66 ((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
67
68/*
47 * Magic header for a swap area. The first part of the union is 69 * Magic header for a swap area. The first part of the union is
48 * what the swap magic looks like for the old (limited to 128MB) 70 * what the swap magic looks like for the old (limited to 128MB)
49 * swap area format, the second part of the union adds - in the 71 * swap area format, the second part of the union adds - in the
@@ -217,6 +239,11 @@ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
217extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, 239extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
218 gfp_t gfp_mask, bool noswap, 240 gfp_t gfp_mask, bool noswap,
219 unsigned int swappiness); 241 unsigned int swappiness);
242extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
243 gfp_t gfp_mask, bool noswap,
244 unsigned int swappiness,
245 struct zone *zone,
246 int nid);
220extern int __isolate_lru_page(struct page *page, int mode, int file); 247extern int __isolate_lru_page(struct page *page, int mode, int file);
221extern unsigned long shrink_all_memory(unsigned long nr_pages); 248extern unsigned long shrink_all_memory(unsigned long nr_pages);
222extern int vm_swappiness; 249extern int vm_swappiness;
@@ -240,7 +267,7 @@ extern int page_evictable(struct page *page, struct vm_area_struct *vma);
240extern void scan_mapping_unevictable_pages(struct address_space *); 267extern void scan_mapping_unevictable_pages(struct address_space *);
241 268
242extern unsigned long scan_unevictable_pages; 269extern unsigned long scan_unevictable_pages;
243extern int scan_unevictable_handler(struct ctl_table *, int, struct file *, 270extern int scan_unevictable_handler(struct ctl_table *, int,
244 void __user *, size_t *, loff_t *); 271 void __user *, size_t *, loff_t *);
245extern int scan_unevictable_register_node(struct node *node); 272extern int scan_unevictable_register_node(struct node *node);
246extern void scan_unevictable_unregister_node(struct node *node); 273extern void scan_unevictable_unregister_node(struct node *node);
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 6ec39ab27b4b..cd42e30b7c6e 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -131,3 +131,41 @@ static inline int is_write_migration_entry(swp_entry_t entry)
131 131
132#endif 132#endif
133 133
134#ifdef CONFIG_MEMORY_FAILURE
135/*
136 * Support for hardware poisoned pages
137 */
138static inline swp_entry_t make_hwpoison_entry(struct page *page)
139{
140 BUG_ON(!PageLocked(page));
141 return swp_entry(SWP_HWPOISON, page_to_pfn(page));
142}
143
144static inline int is_hwpoison_entry(swp_entry_t entry)
145{
146 return swp_type(entry) == SWP_HWPOISON;
147}
148#else
149
150static inline swp_entry_t make_hwpoison_entry(struct page *page)
151{
152 return swp_entry(0, 0);
153}
154
155static inline int is_hwpoison_entry(swp_entry_t swp)
156{
157 return 0;
158}
159#endif
160
161#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
162static inline int non_swap_entry(swp_entry_t entry)
163{
164 return swp_type(entry) >= MAX_SWAPFILES;
165}
166#else
167static inline int non_swap_entry(swp_entry_t entry)
168{
169 return 0;
170}
171#endif
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index e76d3b22a466..1e4743ee6831 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -29,7 +29,6 @@
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/compiler.h> 30#include <linux/compiler.h>
31 31
32struct file;
33struct completion; 32struct completion;
34 33
35#define CTL_MAXNAME 10 /* how many path components do we allow in a 34#define CTL_MAXNAME 10 /* how many path components do we allow in a
@@ -977,25 +976,25 @@ typedef int ctl_handler (struct ctl_table *table,
977 void __user *oldval, size_t __user *oldlenp, 976 void __user *oldval, size_t __user *oldlenp,
978 void __user *newval, size_t newlen); 977 void __user *newval, size_t newlen);
979 978
980typedef int proc_handler (struct ctl_table *ctl, int write, struct file * filp, 979typedef int proc_handler (struct ctl_table *ctl, int write,
981 void __user *buffer, size_t *lenp, loff_t *ppos); 980 void __user *buffer, size_t *lenp, loff_t *ppos);
982 981
983extern int proc_dostring(struct ctl_table *, int, struct file *, 982extern int proc_dostring(struct ctl_table *, int,
984 void __user *, size_t *, loff_t *); 983 void __user *, size_t *, loff_t *);
985extern int proc_dointvec(struct ctl_table *, int, struct file *, 984extern int proc_dointvec(struct ctl_table *, int,
986 void __user *, size_t *, loff_t *); 985 void __user *, size_t *, loff_t *);
987extern int proc_dointvec_minmax(struct ctl_table *, int, struct file *, 986extern int proc_dointvec_minmax(struct ctl_table *, int,
988 void __user *, size_t *, loff_t *); 987 void __user *, size_t *, loff_t *);
989extern int proc_dointvec_jiffies(struct ctl_table *, int, struct file *, 988extern int proc_dointvec_jiffies(struct ctl_table *, int,
990 void __user *, size_t *, loff_t *); 989 void __user *, size_t *, loff_t *);
991extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, struct file *, 990extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
992 void __user *, size_t *, loff_t *); 991 void __user *, size_t *, loff_t *);
993extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, struct file *, 992extern int proc_dointvec_ms_jiffies(struct ctl_table *, int,
994 void __user *, size_t *, loff_t *); 993 void __user *, size_t *, loff_t *);
995extern int proc_doulongvec_minmax(struct ctl_table *, int, struct file *, 994extern int proc_doulongvec_minmax(struct ctl_table *, int,
996 void __user *, size_t *, loff_t *); 995 void __user *, size_t *, loff_t *);
997extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, 996extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
998 struct file *, void __user *, size_t *, loff_t *); 997 void __user *, size_t *, loff_t *);
999 998
1000extern int do_sysctl (int __user *name, int nlen, 999extern int do_sysctl (int __user *name, int nlen,
1001 void __user *oldval, size_t __user *oldlenp, 1000 void __user *oldval, size_t __user *oldlenp,
diff --git a/include/linux/time.h b/include/linux/time.h
index 56787c093345..fe04e5ef6a59 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -155,6 +155,34 @@ extern void timekeeping_leap_insert(int leapsecond);
155struct tms; 155struct tms;
156extern void do_sys_times(struct tms *); 156extern void do_sys_times(struct tms *);
157 157
158/*
159 * Similar to the struct tm in userspace <time.h>, but it needs to be here so
160 * that the kernel source is self contained.
161 */
162struct tm {
163 /*
164 * the number of seconds after the minute, normally in the range
165 * 0 to 59, but can be up to 60 to allow for leap seconds
166 */
167 int tm_sec;
168 /* the number of minutes after the hour, in the range 0 to 59*/
169 int tm_min;
170 /* the number of hours past midnight, in the range 0 to 23 */
171 int tm_hour;
172 /* the day of the month, in the range 1 to 31 */
173 int tm_mday;
174 /* the number of months since January, in the range 0 to 11 */
175 int tm_mon;
176 /* the number of years since 1900 */
177 long tm_year;
178 /* the number of days since Sunday, in the range 0 to 6 */
179 int tm_wday;
180 /* the number of days since January 1, in the range 0 to 365 */
181 int tm_yday;
182};
183
184void time_to_tm(time_t totalsecs, int offset, struct tm *result);
185
158/** 186/**
159 * timespec_to_ns - Convert timespec to nanoseconds 187 * timespec_to_ns - Convert timespec to nanoseconds
160 * @ts: pointer to the timespec variable to be converted 188 * @ts: pointer to the timespec variable to be converted
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 17ba82efa483..1eb44a924e56 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Tracing hooks 2 * Tracing hooks
3 * 3 *
4 * Copyright (C) 2008 Red Hat, Inc. All rights reserved. 4 * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
5 * 5 *
6 * This copyrighted material is made available to anyone wishing to use, 6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions 7 * modify, copy, or redistribute it subject to the terms and conditions
@@ -463,22 +463,38 @@ static inline int tracehook_get_signal(struct task_struct *task,
463 463
464/** 464/**
465 * tracehook_notify_jctl - report about job control stop/continue 465 * tracehook_notify_jctl - report about job control stop/continue
466 * @notify: nonzero if this is the last thread in the group to stop 466 * @notify: zero, %CLD_STOPPED or %CLD_CONTINUED
467 * @why: %CLD_STOPPED or %CLD_CONTINUED 467 * @why: %CLD_STOPPED or %CLD_CONTINUED
468 * 468 *
469 * This is called when we might call do_notify_parent_cldstop(). 469 * This is called when we might call do_notify_parent_cldstop().
470 * It's called when about to stop for job control; we are already in
471 * %TASK_STOPPED state, about to call schedule(). It's also called when
472 * a delayed %CLD_STOPPED or %CLD_CONTINUED report is ready to be made.
473 * 470 *
474 * Return nonzero to generate a %SIGCHLD with @why, which is 471 * @notify is zero if we would not ordinarily send a %SIGCHLD,
475 * normal if @notify is nonzero. 472 * or is the %CLD_STOPPED or %CLD_CONTINUED .si_code for %SIGCHLD.
476 * 473 *
477 * Called with no locks held. 474 * @why is %CLD_STOPPED when about to stop for job control;
475 * we are already in %TASK_STOPPED state, about to call schedule().
476 * It might also be that we have just exited (check %PF_EXITING),
477 * but need to report that a group-wide stop is complete.
478 *
479 * @why is %CLD_CONTINUED when waking up after job control stop and
480 * ready to make a delayed @notify report.
481 *
482 * Return the %CLD_* value for %SIGCHLD, or zero to generate no signal.
483 *
484 * Called with the siglock held.
478 */ 485 */
479static inline int tracehook_notify_jctl(int notify, int why) 486static inline int tracehook_notify_jctl(int notify, int why)
480{ 487{
481 return notify || (current->ptrace & PT_PTRACED); 488 return notify ?: (current->ptrace & PT_PTRACED) ? why : 0;
489}
490
491/**
492 * tracehook_finish_jctl - report about return from job control stop
493 *
494 * This is called by do_signal_stop() after wakeup.
495 */
496static inline void tracehook_finish_jctl(void)
497{
482} 498}
483 499
484#define DEATH_REAP -1 500#define DEATH_REAP -1
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 63a3f7a80580..660a9de96f81 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -4,7 +4,7 @@
4/* 4/*
5 * Kernel Tracepoint API. 5 * Kernel Tracepoint API.
6 * 6 *
7 * See Documentation/tracepoint.txt. 7 * See Documentation/trace/tracepoints.txt.
8 * 8 *
9 * (C) Copyright 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> 9 * (C) Copyright 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
10 * 10 *
diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h
index 46dd12c5709e..9356b24223ac 100644
--- a/include/linux/unaligned/be_byteshift.h
+++ b/include/linux/unaligned/be_byteshift.h
@@ -1,7 +1,7 @@
1#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H 1#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H
2#define _LINUX_UNALIGNED_BE_BYTESHIFT_H 2#define _LINUX_UNALIGNED_BE_BYTESHIFT_H
3 3
4#include <linux/kernel.h> 4#include <linux/types.h>
5 5
6static inline u16 __get_unaligned_be16(const u8 *p) 6static inline u16 __get_unaligned_be16(const u8 *p)
7{ 7{
diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h
index 59777e951baf..be376fb79b64 100644
--- a/include/linux/unaligned/le_byteshift.h
+++ b/include/linux/unaligned/le_byteshift.h
@@ -1,7 +1,7 @@
1#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H 1#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H
2#define _LINUX_UNALIGNED_LE_BYTESHIFT_H 2#define _LINUX_UNALIGNED_LE_BYTESHIFT_H
3 3
4#include <linux/kernel.h> 4#include <linux/types.h>
5 5
6static inline u16 __get_unaligned_le16(const u8 *p) 6static inline u16 __get_unaligned_le16(const u8 *p)
7{ 7{
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 923f9040ea20..2dfaa293ae8c 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * vgaarb.c 2 * The VGA aribiter manages VGA space routing and VGA resource decode to
3 * allow multiple VGA devices to be used in a system in a safe way.
3 * 4 *
4 * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> 5 * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
5 * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> 6 * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 75cf58666ff9..66ebddcff664 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -110,21 +110,20 @@ extern int laptop_mode;
110extern unsigned long determine_dirtyable_memory(void); 110extern unsigned long determine_dirtyable_memory(void);
111 111
112extern int dirty_background_ratio_handler(struct ctl_table *table, int write, 112extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
113 struct file *filp, void __user *buffer, size_t *lenp, 113 void __user *buffer, size_t *lenp,
114 loff_t *ppos); 114 loff_t *ppos);
115extern int dirty_background_bytes_handler(struct ctl_table *table, int write, 115extern int dirty_background_bytes_handler(struct ctl_table *table, int write,
116 struct file *filp, void __user *buffer, size_t *lenp, 116 void __user *buffer, size_t *lenp,
117 loff_t *ppos); 117 loff_t *ppos);
118extern int dirty_ratio_handler(struct ctl_table *table, int write, 118extern int dirty_ratio_handler(struct ctl_table *table, int write,
119 struct file *filp, void __user *buffer, size_t *lenp, 119 void __user *buffer, size_t *lenp,
120 loff_t *ppos); 120 loff_t *ppos);
121extern int dirty_bytes_handler(struct ctl_table *table, int write, 121extern int dirty_bytes_handler(struct ctl_table *table, int write,
122 struct file *filp, void __user *buffer, size_t *lenp, 122 void __user *buffer, size_t *lenp,
123 loff_t *ppos); 123 loff_t *ppos);
124 124
125struct ctl_table; 125struct ctl_table;
126struct file; 126int dirty_writeback_centisecs_handler(struct ctl_table *, int,
127int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *,
128 void __user *, size_t *, loff_t *); 127 void __user *, size_t *, loff_t *);
129 128
130void get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty, 129void get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,