Diffstat (limited to 'include/linux')
40 files changed, 736 insertions, 153 deletions
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
new file mode 100644
index 000000000000..6b4241748dda
--- /dev/null
+++ b/include/linux/amba/mmci.h
@@ -0,0 +1,18 @@
+/*
+ *  include/linux/amba/mmci.h
+ */
+#ifndef AMBA_MMCI_H
+#define AMBA_MMCI_H
+
+#include <linux/mmc/host.h>
+
+struct mmci_platform_data {
+	unsigned int ocr_mask;			/* available voltages */
+	u32 (*translate_vdd)(struct device *, unsigned int);
+	unsigned int (*status)(struct device *);
+	int gpio_wp;
+	int gpio_cd;
+	unsigned long capabilities;
+};
+
+#endif
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index dcad0ffd1755..e4836c6b3dd7 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -136,12 +136,12 @@ enum ssp_tx_level_trig {
 
 /**
  * enum SPI Clock Phase - clock phase (Motorola SPI interface only)
- * @SSP_CLK_RISING_EDGE: Receive data on rising edge
- * @SSP_CLK_FALLING_EDGE: Receive data on falling edge
+ * @SSP_CLK_FIRST_EDGE: Receive data on first edge transition (actual direction depends on polarity)
+ * @SSP_CLK_SECOND_EDGE: Receive data on second edge transition (actual direction depends on polarity)
  */
 enum ssp_spi_clk_phase {
-	SSP_CLK_RISING_EDGE,
-	SSP_CLK_FALLING_EDGE
+	SSP_CLK_FIRST_EDGE,
+	SSP_CLK_SECOND_EDGE
 };
 
 /**
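A quick reference for the rename above (editorial note, not part of the diff): once phase is expressed as first/second edge, the sampling edge follows from the polarity, which is the usual SPI mode table:

    Mode 0: clock idles low,  sample on first edge  (rising)
    Mode 1: clock idles low,  sample on second edge (falling)
    Mode 2: clock idles high, sample on first edge  (falling)
    Mode 3: clock idles high, sample on second edge (rising)

This is why the old RISING/FALLING names were misleading: they were only accurate for one of the two polarities.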
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 5fc2ef8d97fa..a1c486a88e88 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -58,25 +58,60 @@ struct dma_chan_ref {
  * array.
  * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
  * dependency chain
- * @ASYNC_TX_DEP_ACK: ack the dependency descriptor.  Useful for chaining.
+ * @ASYNC_TX_FENCE: specify that the next operation in the dependency
+ * chain uses this operation's result as an input
  */
 enum async_tx_flags {
 	ASYNC_TX_XOR_ZERO_DST	 = (1 << 0),
 	ASYNC_TX_XOR_DROP_DST	 = (1 << 1),
-	ASYNC_TX_ACK		 = (1 << 3),
-	ASYNC_TX_DEP_ACK	 = (1 << 4),
+	ASYNC_TX_ACK		 = (1 << 2),
+	ASYNC_TX_FENCE		 = (1 << 3),
+};
+
+/**
+ * struct async_submit_ctl - async_tx submission/completion modifiers
+ * @flags: submission modifiers
+ * @depend_tx: parent dependency of the current operation being submitted
+ * @cb_fn: callback routine to run at operation completion
+ * @cb_param: parameter for the callback routine
+ * @scribble: caller provided space for dma/page address conversions
+ */
+struct async_submit_ctl {
+	enum async_tx_flags flags;
+	struct dma_async_tx_descriptor *depend_tx;
+	dma_async_tx_callback cb_fn;
+	void *cb_param;
+	void *scribble;
 };
 
 #ifdef CONFIG_DMA_ENGINE
 #define async_tx_issue_pending_all dma_issue_pending_all
+
+/**
+ * async_tx_issue_pending - send pending descriptor to the hardware channel
+ * @tx: descriptor handle to retrieve hardware context
+ *
+ * Note: any dependent operations will have already been issued by
+ * async_tx_channel_switch, or (in the case of no channel switch) will
+ * be already pending on this channel.
+ */
+static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
+{
+	if (likely(tx)) {
+		struct dma_chan *chan = tx->chan;
+		struct dma_device *dma = chan->device;
+
+		dma->device_issue_pending(chan);
+	}
+}
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
 #define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
	 __async_tx_find_channel(dep, type)
 struct dma_chan *
-__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
+__async_tx_find_channel(struct async_submit_ctl *submit,
	enum dma_transaction_type tx_type);
 #endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
 #else
 static inline void async_tx_issue_pending_all(void)
@@ -84,10 +119,16 @@ static inline void async_tx_issue_pending_all(void)
 	do { } while (0);
 }
 
+static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
+{
+	do { } while (0);
+}
+
 static inline struct dma_chan *
-async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
-	enum dma_transaction_type tx_type, struct page **dst, int dst_count,
-	struct page **src, int src_count, size_t len)
+async_tx_find_channel(struct async_submit_ctl *submit,
+		      enum dma_transaction_type tx_type, struct page **dst,
+		      int dst_count, struct page **src, int src_count,
+		      size_t len)
 {
 	return NULL;
 }
@@ -99,46 +140,70 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
  * @cb_fn_param: parameter to pass to the callback routine
  */
 static inline void
-async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param)
+async_tx_sync_epilog(struct async_submit_ctl *submit)
 {
-	if (cb_fn)
-		cb_fn(cb_fn_param);
+	if (submit->cb_fn)
+		submit->cb_fn(submit->cb_param);
 }
 
-void
-async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
-	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+typedef union {
+	unsigned long addr;
+	struct page *page;
+	dma_addr_t dma;
+} addr_conv_t;
+
+static inline void
+init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
+		  struct dma_async_tx_descriptor *tx,
+		  dma_async_tx_callback cb_fn, void *cb_param,
+		  addr_conv_t *scribble)
+{
+	args->flags = flags;
+	args->depend_tx = tx;
+	args->cb_fn = cb_fn;
+	args->cb_param = cb_param;
+	args->scribble = scribble;
+}
+
+void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
+		     struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_xor(struct page *dest, struct page **src_list, unsigned int offset,
-	int src_cnt, size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+	  int src_cnt, size_t len, struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
-async_xor_zero_sum(struct page *dest, struct page **src_list,
-	unsigned int offset, int src_cnt, size_t len,
-	u32 *result, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
+	      int src_cnt, size_t len, enum sum_check_flags *result,
+	      struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
-	unsigned int src_offset, size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+	     unsigned int src_offset, size_t len,
+	     struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_memset(struct page *dest, int val, unsigned int offset,
-	size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+	     size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
+		   size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
+		   size_t len, enum sum_check_flags *pqres, struct page *spare,
+		   struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
+			struct page **ptrs, struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
-async_trigger_callback(enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+async_raid6_datap_recov(int src_num, size_t bytes, int faila,
+			struct page **ptrs, struct async_submit_ctl *submit);
 
 void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
 #endif /* _ASYNC_TX_H_ */
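For orientation, a minimal caller sketch of the new submission interface (illustrative only: example_xor and the fixed source count are invented for this note, and real users such as the raid5/6 code keep the scribble region in long-lived per-request storage rather than on the stack):

static struct dma_async_tx_descriptor *
example_xor(struct page *dest, struct page **srcs, size_t len,
	    struct dma_async_tx_descriptor *parent)
{
	struct async_submit_ctl submit;
	addr_conv_t addr_conv[8];	/* space for dma/page address conversion */

	/* dest is not an implied source; ask for it to be zeroed first */
	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, parent,
			  NULL /* cb_fn */, NULL /* cb_param */, addr_conv);
	return async_xor(dest, srcs, 0 /* offset */, 8 /* src_cnt */, len,
			 &submit);
}

The point of the refactor is visible here: the flags/depend_tx/callback arguments that every async_* routine used to take individually now travel in one struct async_submit_ctl.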
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 2046b5b8af48..aece486ac734 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -120,7 +120,7 @@ extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
 extern int prepare_bprm_creds(struct linux_binprm *bprm);
 extern void install_exec_creds(struct linux_binprm *bprm);
 extern void do_coredump(long signr, int exit_code, struct pt_regs *regs);
-extern int set_binfmt(struct linux_binfmt *new);
+extern void set_binfmt(struct linux_binfmt *new);
 extern void free_bprm(struct linux_binprm *);
 
 #endif /* __KERNEL__ */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 90bba9e62286..b62bb9294d0c 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -141,6 +141,38 @@ enum {
 	CGRP_WAIT_ON_RMDIR,
 };
 
+/* which pidlist file are we talking about? */
+enum cgroup_filetype {
+	CGROUP_FILE_PROCS,
+	CGROUP_FILE_TASKS,
+};
+
+/*
+ * A pidlist is a list of pids that virtually represents the contents of one
+ * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
+ * a pair (one each for procs, tasks) for each pid namespace that's relevant
+ * to the cgroup.
+ */
+struct cgroup_pidlist {
+	/*
+	 * used to find which pidlist is wanted. doesn't change as long as
+	 * this particular list stays in the list.
+	 */
+	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
+	/* array of xids */
+	pid_t *list;
+	/* how many elements the above list has */
+	int length;
+	/* how many files are using the current array */
+	int use_count;
+	/* each of these stored in a list by its cgroup */
+	struct list_head links;
+	/* pointer to the cgroup we belong to, for list removal purposes */
+	struct cgroup *owner;
+	/* protects the other fields */
+	struct rw_semaphore mutex;
+};
+
 struct cgroup {
 	unsigned long flags;		/* "unsigned long" so bitops work */
 
@@ -179,11 +211,12 @@ struct cgroup {
 	 */
 	struct list_head release_list;
 
-	/* pids_mutex protects pids_list and cached pid arrays. */
-	struct rw_semaphore pids_mutex;
-
-	/* Linked list of struct cgroup_pids */
-	struct list_head pids_list;
+	/*
+	 * list of pidlists, up to two for each namespace (one for procs, one
+	 * for tasks); created on demand.
+	 */
+	struct list_head pidlists;
+	struct mutex pidlist_mutex;
 
 	/* For RCU-protected deletion */
 	struct rcu_head rcu_head;
@@ -227,6 +260,9 @@ struct css_set {
 	 * during subsystem registration (at boot time).
 	 */
 	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+
+	/* For RCU-protected deletion */
+	struct rcu_head rcu_head;
 };
 
 /*
@@ -389,10 +425,11 @@ struct cgroup_subsys {
 			  struct cgroup *cgrp);
 	int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
 	void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
-	int (*can_attach)(struct cgroup_subsys *ss,
-			  struct cgroup *cgrp, struct task_struct *tsk);
+	int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
+			  struct task_struct *tsk, bool threadgroup);
 	void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			struct cgroup *old_cgrp, struct task_struct *tsk);
+		       struct cgroup *old_cgrp, struct task_struct *tsk,
+		       bool threadgroup);
 	void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
 	void (*exit)(struct cgroup_subsys *ss, struct task_struct *task);
 	int (*populate)(struct cgroup_subsys *ss,
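A sketch of what the new hook shape means for a subsystem (hypothetical subsystem code, not from this diff): when an attach covers a whole thread group, threadgroup is true and tsk is the group leader, so a subsystem can veto or admit the migration for all threads in one place:

static int example_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct task_struct *tsk, bool threadgroup)
{
	if (threadgroup) {
		/*
		 * tsk is the group leader and every thread will follow it;
		 * check any per-thread constraints for the whole group here.
		 */
	}
	return 0;	/* allow the attach */
}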
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 7f627775c947..ddb7a97c78c2 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -27,8 +27,8 @@
  *
  * configfs Copyright (C) 2005 Oracle.  All rights reserved.
  *
- * Please read Documentation/filesystems/configfs.txt before using the
- * configfs interface, ESPECIALLY the parts about reference counts and
+ * Please read Documentation/filesystems/configfs/configfs.txt before using
+ * the configfs interface, ESPECIALLY the parts about reference counts and
  * item destructors.
  */
 
diff --git a/include/linux/dca.h b/include/linux/dca.h
index 9c20c7e87d0a..d27a7a05718d 100644
--- a/include/linux/dca.h
+++ b/include/linux/dca.h
@@ -20,6 +20,9 @@
  */
 #ifndef DCA_H
 #define DCA_H
+
+#include <linux/pci.h>
+
 /* DCA Provider API */
 
 /* DCA Notifier Interface */
@@ -36,6 +39,12 @@ struct dca_provider {
 	int			 id;
 };
 
+struct dca_domain {
+	struct list_head	node;
+	struct list_head	dca_providers;
+	struct pci_bus		*pci_rc;
+};
+
 struct dca_ops {
 	int	(*add_requester)    (struct dca_provider *, struct device *);
 	int	(*remove_requester) (struct dca_provider *, struct device *);
@@ -47,7 +56,7 @@ struct dca_ops {
 struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size);
 void free_dca_provider(struct dca_provider *dca);
 int register_dca_provider(struct dca_provider *dca, struct device *dev);
-void unregister_dca_provider(struct dca_provider *dca);
+void unregister_dca_provider(struct dca_provider *dca, struct device *dev);
 
 static inline void *dca_priv(struct dca_provider *dca)
 {
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index eb5c2ba2f81a..fc1b930f246c 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -9,7 +9,7 @@
  *  2 as published by the Free Software Foundation.
  *
  *  debugfs is for people to use instead of /proc or /sys.
- *  See Documentation/DocBook/kernel-api for more details.
+ *  See Documentation/DocBook/filesystems for more details.
  */
 
 #ifndef _DEBUGFS_H_
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index ffefba81c818..2b9f2ac7ed60 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -48,19 +48,20 @@ enum dma_status {
 
 /**
  * enum dma_transaction_type - DMA transaction types/indexes
+ *
+ * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
+ * automatically set as dma devices are registered.
  */
 enum dma_transaction_type {
 	DMA_MEMCPY,
 	DMA_XOR,
-	DMA_PQ_XOR,
-	DMA_DUAL_XOR,
-	DMA_PQ_UPDATE,
-	DMA_ZERO_SUM,
-	DMA_PQ_ZERO_SUM,
+	DMA_PQ,
+	DMA_XOR_VAL,
+	DMA_PQ_VAL,
 	DMA_MEMSET,
-	DMA_MEMCPY_CRC32C,
 	DMA_INTERRUPT,
 	DMA_PRIVATE,
+	DMA_ASYNC_TX,
 	DMA_SLAVE,
 };
 
@@ -70,18 +71,25 @@ enum dma_transaction_type {
 
 /**
  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
  *  control completion, and communicate status.
  * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
  *  this transaction
 * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
- *  acknowledges receipt, i.e. has has a chance to establish any
- *  dependency chains
+ *  acknowledges receipt, i.e. has has a chance to establish any dependency
+ *  chains
 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
 * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
 *  (if not set, do the source dma-unmapping as page)
 * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
 *  (if not set, do the destination dma-unmapping as page)
+ * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
+ * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
+ * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
+ *  sources that were the result of a previous operation, in the case of a PQ
+ *  operation it continues the calculation with new sources
+ * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
+ *  on the result of this operation
 */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
@@ -90,9 +98,32 @@ enum dma_ctrl_flags {
 	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
 	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
 	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
+	DMA_PREP_PQ_DISABLE_P = (1 << 6),
+	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
+	DMA_PREP_CONTINUE = (1 << 8),
+	DMA_PREP_FENCE = (1 << 9),
 };
 
 /**
+ * enum sum_check_bits - bit position of pq_check_flags
+ */
+enum sum_check_bits {
+	SUM_CHECK_P = 0,
+	SUM_CHECK_Q = 1,
+};
+
+/**
+ * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
+ * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
+ * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
+ */
+enum sum_check_flags {
+	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
+	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
+};
+
+
+/**
  * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
  * See linux/cpumask.h
  */
@@ -180,8 +211,6 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
- * @tx_list: driver common field for operations that require multiple
- *	descriptors
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
@@ -195,7 +224,6 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t cookie;
 	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
 	dma_addr_t phys;
-	struct list_head tx_list;
 	struct dma_chan *chan;
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
@@ -213,6 +241,11 @@ struct dma_async_tx_descriptor {
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
+ * @max_pq: maximum number of PQ sources and PQ-continue capability
+ * @copy_align: alignment shift for memcpy operations
+ * @xor_align: alignment shift for xor operations
+ * @pq_align: alignment shift for pq operations
+ * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
@@ -220,7 +253,9 @@ struct dma_async_tx_descriptor {
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
- * @device_prep_dma_zero_sum: prepares a zero_sum operation
+ * @device_prep_dma_xor_val: prepares a xor validation operation
+ * @device_prep_dma_pq: prepares a pq operation
+ * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
@@ -235,7 +270,13 @@ struct dma_device {
 	struct list_head channels;
 	struct list_head global_node;
 	dma_cap_mask_t  cap_mask;
-	int max_xor;
+	unsigned short max_xor;
+	unsigned short max_pq;
+	u8 copy_align;
+	u8 xor_align;
+	u8 pq_align;
+	u8 fill_align;
+	#define DMA_HAS_PQ_CONTINUE (1 << 15)
 
 	int dev_id;
 	struct device *dev;
@@ -249,9 +290,17 @@ struct dma_device {
 	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
 		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		unsigned int src_cnt, size_t len, unsigned long flags);
-	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
+	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
 		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
-		size_t len, u32 *result, unsigned long flags);
+		size_t len, enum sum_check_flags *result, unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
+		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+		unsigned int src_cnt, const unsigned char *scf,
+		size_t len, unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
+		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+		unsigned int src_cnt, const unsigned char *scf, size_t len,
+		enum sum_check_flags *pqres, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
 		unsigned long flags);
@@ -270,6 +319,96 @@ struct dma_device {
 	void (*device_issue_pending)(struct dma_chan *chan);
 };
 
+static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+{
+	size_t mask;
+
+	if (!align)
+		return true;
+	mask = (1 << align) - 1;
+	if (mask & (off1 | off2 | len))
+		return false;
+	return true;
+}
+
+static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
+				       size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->copy_align, off1, off2, len);
+}
+
+static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
+				      size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->xor_align, off1, off2, len);
+}
+
+static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
+				     size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->pq_align, off1, off2, len);
+}
+
+static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
+				       size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->fill_align, off1, off2, len);
+}
+
+static inline void
+dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
+{
+	dma->max_pq = maxpq;
+	if (has_pq_continue)
+		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
+}
+
+static inline bool dmaf_continue(enum dma_ctrl_flags flags)
+{
+	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
+}
+
+static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
+{
+	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
+
+	return (flags & mask) == mask;
+}
+
+static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
+{
+	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
+}
+
+static unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+{
+	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
+}
+
+/* dma_maxpq - reduce maxpq in the face of continued operations
+ * @dma - dma device with PQ capability
+ * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
+ *
+ * When an engine does not support native continuation we need 3 extra
+ * source slots to reuse P and Q with the following coefficients:
+ * 1/ {00} * P : remove P from Q', but use it as a source for P'
+ * 2/ {01} * Q : use Q to continue Q' calculation
+ * 3/ {00} * Q : subtract Q from P' to cancel (2)
+ *
+ * In the case where P is disabled we only need 1 extra source:
+ * 1/ {01} * Q : use Q to continue Q' calculation
+ */
+static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
+{
+	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
+		return dma_dev_to_maxpq(dma);
+	else if (dmaf_p_disabled_continue(flags))
+		return dma_dev_to_maxpq(dma) - 1;
+	else if (dmaf_continue(flags))
+		return dma_dev_to_maxpq(dma) - 3;
+	BUG();
+}
+
 /* --- public DMA engine API --- */
 
 #ifdef CONFIG_DMA_ENGINE
@@ -299,7 +438,11 @@ static inline void net_dmaengine_put(void)
 #ifdef CONFIG_ASYNC_TX_DMA
 #define async_dmaengine_get()	dmaengine_get()
 #define async_dmaengine_put()	dmaengine_put()
+#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
+#else
 #define async_dma_find_channel(type) dma_find_channel(type)
+#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
 #else
 static inline void async_dmaengine_get(void)
 {
@@ -312,7 +455,7 @@ async_dma_find_channel(enum dma_transaction_type type)
 {
 	return NULL;
 }
-#endif
+#endif /* CONFIG_ASYNC_TX_DMA */
 
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len);
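The dma_maxpq() rules above are easier to see with numbers. A self-checking sketch (illustrative; example_maxpq_check is an invented name) for an engine that advertises 8 PQ sources and no native continuation support:

static void example_maxpq_check(void)
{
	struct dma_device dma = { };

	dma_set_maxpq(&dma, 8, 0);	/* 8 sources, no DMA_HAS_PQ_CONTINUE */

	WARN_ON(dma_maxpq(&dma, 0) != 8);		  /* fresh operation */
	WARN_ON(dma_maxpq(&dma, DMA_PREP_CONTINUE) != 5); /* reusing P and Q costs 3 slots */
	WARN_ON(dma_maxpq(&dma, DMA_PREP_CONTINUE |
			  DMA_PREP_PQ_DISABLE_P) != 7);	  /* reusing only Q costs 1 slot */
}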
diff --git a/include/linux/fb.h b/include/linux/fb.h
index f847df9e99b6..a34bdf5a9d23 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -133,6 +133,7 @@ struct dentry;
 #define FB_ACCEL_NEOMAGIC_NM2230 96	/* NeoMagic NM2230          */
 #define FB_ACCEL_NEOMAGIC_NM2360 97	/* NeoMagic NM2360          */
 #define FB_ACCEL_NEOMAGIC_NM2380 98	/* NeoMagic NM2380          */
+#define FB_ACCEL_PXA3XX		 99	/* PXA3xx                   */
 
 #define FB_ACCEL_SAVAGE4        0x80	/* S3 Savage4               */
 #define FB_ACCEL_SAVAGE3D      0x81	/* S3 Savage3D              */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 51803528b095..2adaa2529f18 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -595,6 +595,7 @@ struct address_space_operations {
 	int (*launder_page) (struct page *);
 	int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
 					unsigned long);
+	int (*error_remove_page)(struct address_space *, struct page *);
 };
 
 /*
@@ -640,7 +641,6 @@ struct block_device {
 	struct super_block *	bd_super;
 	int			bd_openers;
 	struct mutex		bd_mutex;	/* open/close mutex */
-	struct semaphore	bd_mount_sem;
 	struct list_head	bd_inodes;
 	void *			bd_holder;
 	int			bd_holders;
@@ -1315,7 +1315,7 @@ struct super_block {
 	unsigned long		s_blocksize;
 	unsigned char		s_blocksize_bits;
 	unsigned char		s_dirt;
-	unsigned long long	s_maxbytes;	/* Max file size */
+	loff_t			s_maxbytes;	/* Max file size */
 	struct file_system_type	*s_type;
 	const struct super_operations	*s_op;
 	const struct dquot_operations	*dq_op;
@@ -2156,6 +2156,7 @@ extern ino_t iunique(struct super_block *, ino_t);
 extern int inode_needs_sync(struct inode *inode);
 extern void generic_delete_inode(struct inode *inode);
 extern void generic_drop_inode(struct inode *inode);
+extern int generic_detach_inode(struct inode *inode);
 
 extern struct inode *ilookup5_nowait(struct super_block *sb,
 		unsigned long hashval, int (*test)(struct inode *, void *),
@@ -2334,6 +2335,7 @@ extern void get_filesystem(struct file_system_type *fs);
 extern void put_filesystem(struct file_system_type *fs);
 extern struct file_system_type *get_fs_type(const char *name);
 extern struct super_block *get_super(struct block_device *);
+extern struct super_block *get_active_super(struct block_device *bdev);
 extern struct super_block *user_get_super(dev_t);
 extern void drop_super(struct super_block *sb);
 
@@ -2381,7 +2383,8 @@ extern int buffer_migrate_page(struct address_space *,
 #define buffer_migrate_page NULL
 #endif
 
-extern int inode_change_ok(struct inode *, struct iattr *);
+extern int inode_change_ok(const struct inode *, struct iattr *);
+extern int inode_newsize_ok(const struct inode *, loff_t offset);
 extern int __must_check inode_setattr(struct inode *, struct iattr *);
 
 extern void file_update_time(struct file *file);
@@ -2467,7 +2470,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
 			  size_t len, loff_t *ppos);
 
 struct ctl_table;
-int proc_nr_files(struct ctl_table *table, int write, struct file *filp,
+int proc_nr_files(struct ctl_table *table, int write,
 		  void __user *buffer, size_t *lenp, loff_t *ppos);
 
 int __init get_filesystem_list(char *buf);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 3c0924a18daf..cd3d2abaf30a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -19,7 +19,7 @@
 extern int ftrace_enabled;
 extern int
 ftrace_enable_sysctl(struct ctl_table *table, int write,
-		     struct file *filp, void __user *buffer, size_t *lenp,
+		     void __user *buffer, size_t *lenp,
 		     loff_t *ppos);
 
 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
@@ -94,7 +94,7 @@ static inline void ftrace_start(void) { }
 extern int stack_tracer_enabled;
 int
 stack_trace_sysctl(struct ctl_table *table, int write,
-		   struct file *file, void __user *buffer, size_t *lenp,
+		   void __user *buffer, size_t *lenp,
 		   loff_t *ppos);
 #endif
 
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 34956c8fdebf..8ec17997d94f 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -4,11 +4,6 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 
-struct inode;
-struct mm_struct;
-struct task_struct;
-union ktime;
-
 /* Second argument to futex syscall */
 
 
@@ -129,6 +124,11 @@ struct robust_list_head {
 #define FUTEX_BITSET_MATCH_ANY	0xffffffff
 
 #ifdef __KERNEL__
+struct inode;
+struct mm_struct;
+struct task_struct;
+union ktime;
+
 long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout,
 	      u32 __user *uaddr2, u32 val2, u32 val3);
 
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 176e7ee73eff..16937995abd4 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -3,15 +3,15 @@
 
 #include <linux/fs.h>
 
+struct ctl_table;
+struct user_struct;
+
 #ifdef CONFIG_HUGETLB_PAGE
 
 #include <linux/mempolicy.h>
 #include <linux/shm.h>
 #include <asm/tlbflush.h>
 
-struct ctl_table;
-struct user_struct;
-
 int PageHuge(struct page *page);
 
 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
@@ -20,9 +20,9 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 }
 
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
-int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
-int hugetlb_overcommit_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
-int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
+int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
 int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
 			struct page **, struct vm_area_struct **,
@@ -187,7 +187,11 @@ static inline void set_file_hugepages(struct file *file)
 
 #define is_file_hugepages(file)		0
 #define set_file_hugepages(file)	BUG()
-#define hugetlb_file_setup(name,size,acct,user,creat) ERR_PTR(-ENOSYS)
+static inline struct file *hugetlb_file_setup(const char *name, size_t size,
+		int acctflag, struct user_struct **user, int creat_flags)
+{
+	return ERR_PTR(-ENOSYS);
+}
 
 #endif /* !CONFIG_HUGETLBFS */
 
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e46a0734ab6e..bf9213b2db8f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -118,6 +118,9 @@ static inline bool mem_cgroup_disabled(void)
 
 extern bool mem_cgroup_oom_called(struct task_struct *task);
 void mem_cgroup_update_mapped_file_stat(struct page *page, int val);
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+						gfp_t gfp_mask, int nid,
+						int zid);
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 struct mem_cgroup;
 
@@ -276,6 +279,13 @@ static inline void mem_cgroup_update_mapped_file_stat(struct page *page,
 {
 }
 
+static inline
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+					    gfp_t gfp_mask, int nid, int zid)
+{
+	return 0;
+}
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b6eae5e3144b..24c395694f4d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -288,7 +288,7 @@ static inline int is_vmalloc_addr(const void *x)
 #ifdef CONFIG_MMU
 extern int is_vmalloc_or_module_addr(const void *x);
 #else
-static int is_vmalloc_or_module_addr(const void *x)
+static inline int is_vmalloc_or_module_addr(const void *x)
 {
 	return 0;
 }
@@ -695,11 +695,12 @@ static inline int page_mapped(struct page *page)
 #define VM_FAULT_SIGBUS	0x0002
 #define VM_FAULT_MAJOR	0x0004
 #define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
+#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned page */
 
 #define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
 #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
 
-#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)
+#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)
 
 /*
  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
@@ -791,8 +792,14 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 	unmap_mapping_range(mapping, holebegin, holelen, 0);
 }
 
-extern int vmtruncate(struct inode * inode, loff_t offset);
-extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
+extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
+extern int vmtruncate(struct inode *inode, loff_t offset);
+extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
+
+int truncate_inode_page(struct address_space *mapping, struct page *page);
+int generic_error_remove_page(struct address_space *mapping, struct page *page);
+
+int invalidate_inode_page(struct page *page);
 
 #ifdef CONFIG_MMU
 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -1279,7 +1286,7 @@ int in_gate_area_no_task(unsigned long addr);
 #define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
 #endif	/* __HAVE_ARCH_GATE_AREA */
 
-int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
+int drop_caches_sysctl_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			unsigned long lru_pages);
@@ -1308,5 +1315,12 @@ void vmemmap_populate_print_last(void);
 extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
 				 size_t size);
 extern void refund_locked_memory(struct mm_struct *mm, size_t size);
+
+extern void memory_failure(unsigned long pfn, int trapno);
+extern int __memory_failure(unsigned long pfn, int trapno, int ref);
+extern int sysctl_memory_failure_early_kill;
+extern int sysctl_memory_failure_recovery;
+extern atomic_long_t mce_bad_pages;
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
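truncate_pagecache() above and inode_newsize_ok() from the fs.h hunk are meant to work together. A sketch of the sequence a filesystem's setattr path might follow (illustrative only: example_setsize is an invented name, and a real caller holds i_mutex and updates its private metadata in between):

static int example_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;
	int error;

	error = inode_newsize_ok(inode, newsize);	/* s_maxbytes/rlimit checks */
	if (error)
		return error;

	i_size_write(inode, newsize);
	truncate_pagecache(inode, oldsize, newsize);	/* drop now-stale pagecache */
	return 0;
}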
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0042090a4d70..21d6aa45206a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -240,6 +240,8 @@ struct mm_struct {
 
 	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
 
+	struct linux_binfmt *binfmt;
+
 	cpumask_t cpu_vm_mask;
 
 	/* Architecture-specific MM context */
@@ -259,11 +261,10 @@ struct mm_struct {
 	unsigned long flags; /* Must use atomic bitops to access the bits */
 
 	struct core_state *core_state; /* coredumping support */
-
-	/* aio bits */
+#ifdef CONFIG_AIO
 	spinlock_t		ioctx_lock;
 	struct hlist_head	ioctx_list;
-
+#endif
 #ifdef CONFIG_MM_OWNER
 	/*
	 * "owner" points to a task that is regarded as the canonical
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 652ef01be582..6f7561730d88 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -755,21 +755,20 @@ static inline int is_dma(struct zone *zone)
 
 /* These two functions are used to setup the per zone pages min values */
 struct ctl_table;
-struct file;
-int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
+int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
 extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
-int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
+int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
-int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
+int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
-			struct file *, void __user *, size_t *, loff_t *);
+			void __user *, size_t *, loff_t *);
 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
-			struct file *, void __user *, size_t *, loff_t *);
+			void __user *, size_t *, loff_t *);
 
 extern int numa_zonelist_order_handler(struct ctl_table *, int,
-			struct file *, void __user *, size_t *, loff_t *);
+			void __user *, size_t *, loff_t *);
 extern char numa_zonelist_order[];
 #define NUMA_ZONELIST_ORDER_LEN	16	/* string buffer size */
 
diff --git a/include/linux/module.h b/include/linux/module.h index 1c755b2f937d..482efc865acf 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -128,7 +128,10 @@ extern struct module __this_module; | |||
128 | */ | 128 | */ |
129 | #define MODULE_LICENSE(_license) MODULE_INFO(license, _license) | 129 | #define MODULE_LICENSE(_license) MODULE_INFO(license, _license) |
130 | 130 | ||
131 | /* Author, ideally of form NAME[, NAME]*[ and NAME] */ | 131 | /* |
132 | * Author(s), use "Name <email>" or just "Name"; for multiple | ||
133 | * authors use multiple MODULE_AUTHOR() statements/lines. | ||
134 | */ | ||
132 | #define MODULE_AUTHOR(_author) MODULE_INFO(author, _author) | 135 | #define MODULE_AUTHOR(_author) MODULE_INFO(author, _author) |
133 | 136 | ||
134 | /* What your module does. */ | 137 | /* What your module does. */ |
@@ -308,10 +311,14 @@ struct module | |||
308 | #endif | 311 | #endif |
309 | 312 | ||
310 | #ifdef CONFIG_KALLSYMS | 313 | #ifdef CONFIG_KALLSYMS |
311 | /* We keep the symbol and string tables for kallsyms. */ | 314 | /* |
312 | Elf_Sym *symtab; | 315 | * We keep the symbol and string tables for kallsyms. |
313 | unsigned int num_symtab; | 316 | * The core_* fields below are temporary, loader-only (they |
314 | char *strtab; | 317 | * could really be discarded after module init). |
318 | */ | ||
319 | Elf_Sym *symtab, *core_symtab; | ||
320 | unsigned int num_symtab, core_num_syms; | ||
321 | char *strtab, *core_strtab; | ||
315 | 322 | ||
316 | /* Section attributes */ | 323 | /* Section attributes */ |
317 | struct module_sect_attrs *sect_attrs; | 324 | struct module_sect_attrs *sect_attrs; |
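Following the reworded MODULE_AUTHOR comment in the first hunk above, a module with several authors now stacks the macro, one author per statement. A minimal sketch (the names and description are placeholders):

MODULE_AUTHOR("Jane Doe <jane.doe@example.org>");
MODULE_AUTHOR("Richard Roe");
MODULE_DESCRIPTION("Example driver illustrating the one-author-per-line convention");
MODULE_LICENSE("GPL");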
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 13de789f0a5c..6b202b173955 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -51,6 +51,9 @@ | |||
51 | * PG_buddy is set to indicate that the page is free and in the buddy system | 51 | * PG_buddy is set to indicate that the page is free and in the buddy system |
52 | * (see mm/page_alloc.c). | 52 | * (see mm/page_alloc.c). |
53 | * | 53 | * |
54 | * PG_hwpoison indicates that a page got corrupted in hardware and contains | ||
55 | * data with incorrect ECC bits that triggered a machine check. Accessing it | ||
56 | * is not safe since it may cause another machine check. Don't touch! | ||
54 | */ | 57 | */ |
55 | 58 | ||
56 | /* | 59 | /* |
@@ -102,6 +105,9 @@ enum pageflags { | |||
102 | #ifdef CONFIG_ARCH_USES_PG_UNCACHED | 105 | #ifdef CONFIG_ARCH_USES_PG_UNCACHED |
103 | PG_uncached, /* Page has been mapped as uncached */ | 106 | PG_uncached, /* Page has been mapped as uncached */ |
104 | #endif | 107 | #endif |
108 | #ifdef CONFIG_MEMORY_FAILURE | ||
109 | PG_hwpoison, /* hardware poisoned page. Don't touch */ | ||
110 | #endif | ||
105 | __NR_PAGEFLAGS, | 111 | __NR_PAGEFLAGS, |
106 | 112 | ||
107 | /* Filesystems */ | 113 | /* Filesystems */ |
@@ -269,6 +275,15 @@ PAGEFLAG(Uncached, uncached) | |||
269 | PAGEFLAG_FALSE(Uncached) | 275 | PAGEFLAG_FALSE(Uncached) |
270 | #endif | 276 | #endif |
271 | 277 | ||
278 | #ifdef CONFIG_MEMORY_FAILURE | ||
279 | PAGEFLAG(HWPoison, hwpoison) | ||
280 | TESTSETFLAG(HWPoison, hwpoison) | ||
281 | #define __PG_HWPOISON (1UL << PG_hwpoison) | ||
282 | #else | ||
283 | PAGEFLAG_FALSE(HWPoison) | ||
284 | #define __PG_HWPOISON 0 | ||
285 | #endif | ||
286 | |||
272 | static inline int PageUptodate(struct page *page) | 287 | static inline int PageUptodate(struct page *page) |
273 | { | 288 | { |
274 | int ret = test_bit(PG_uptodate, &(page)->flags); | 289 | int ret = test_bit(PG_uptodate, &(page)->flags); |
@@ -393,7 +408,7 @@ static inline void __ClearPageTail(struct page *page) | |||
393 | 1 << PG_private | 1 << PG_private_2 | \ | 408 | 1 << PG_private | 1 << PG_private_2 | \ |
394 | 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ | 409 | 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ |
395 | 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ | 410 | 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ |
396 | 1 << PG_unevictable | __PG_MLOCKED) | 411 | 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON) |
397 | 412 | ||
398 | /* | 413 | /* |
399 | * Flags checked when a page is prepped for return by the page allocator. | 414 | * Flags checked when a page is prepped for return by the page allocator. |
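A short sketch of how a caller might consult the new flag before touching page contents; page_is_usable() is a hypothetical helper. When CONFIG_MEMORY_FAILURE is off, PAGEFLAG_FALSE(HWPoison) makes PageHWPoison() a constant 0 and the test folds away:

static int page_is_usable(struct page *page)        /* hypothetical */
{
        /* Never touch a hardware-poisoned page: another access may
         * trigger a further machine check. */
        return !PageHWPoison(page);
}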
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index ada779f24178..4b938d4f3ac2 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h | |||
@@ -38,6 +38,7 @@ enum { | |||
38 | PCG_LOCK, /* page cgroup is locked */ | 38 | PCG_LOCK, /* page cgroup is locked */ |
39 | PCG_CACHE, /* charged as cache */ | 39 | PCG_CACHE, /* charged as cache */ |
40 | PCG_USED, /* this object is in use. */ | 40 | PCG_USED, /* this object is in use. */ |
41 | PCG_ACCT_LRU, /* page has been accounted for */ | ||
41 | }; | 42 | }; |
42 | 43 | ||
43 | #define TESTPCGFLAG(uname, lname) \ | 44 | #define TESTPCGFLAG(uname, lname) \ |
@@ -52,11 +53,23 @@ static inline void SetPageCgroup##uname(struct page_cgroup *pc)\ | |||
52 | static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \ | 53 | static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \ |
53 | { clear_bit(PCG_##lname, &pc->flags); } | 54 | { clear_bit(PCG_##lname, &pc->flags); } |
54 | 55 | ||
56 | #define TESTCLEARPCGFLAG(uname, lname) \ | ||
57 | static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \ | ||
58 | { return test_and_clear_bit(PCG_##lname, &pc->flags); } | ||
59 | |||
55 | /* Cache flag is set only once (at allocation) */ | 60 | /* Cache flag is set only once (at allocation) */ |
56 | TESTPCGFLAG(Cache, CACHE) | 61 | TESTPCGFLAG(Cache, CACHE) |
62 | CLEARPCGFLAG(Cache, CACHE) | ||
63 | SETPCGFLAG(Cache, CACHE) | ||
57 | 64 | ||
58 | TESTPCGFLAG(Used, USED) | 65 | TESTPCGFLAG(Used, USED) |
59 | CLEARPCGFLAG(Used, USED) | 66 | CLEARPCGFLAG(Used, USED) |
67 | SETPCGFLAG(Used, USED) | ||
68 | |||
69 | SETPCGFLAG(AcctLRU, ACCT_LRU) | ||
70 | CLEARPCGFLAG(AcctLRU, ACCT_LRU) | ||
71 | TESTPCGFLAG(AcctLRU, ACCT_LRU) | ||
72 | TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU) | ||
60 | 73 | ||
61 | static inline int page_cgroup_nid(struct page_cgroup *pc) | 74 | static inline int page_cgroup_nid(struct page_cgroup *pc) |
62 | { | 75 | { |
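For reference, the TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU) line above expands to roughly the following accessor (a sketch of the generated code, not a separate definition):

/* Atomically test and clear PCG_ACCT_LRU, returning its previous value. */
static inline int TestClearPageCgroupAcctLRU(struct page_cgroup *pc)
{
        return test_and_clear_bit(PCG_ACCT_LRU, &pc->flags);
}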
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 7803565aa877..da1fda8623e0 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2527,6 +2527,16 @@ | |||
2527 | #define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e | 2527 | #define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e |
2528 | #define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b | 2528 | #define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b |
2529 | #define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c | 2529 | #define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c |
2530 | #define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710 | ||
2531 | #define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711 | ||
2532 | #define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712 | ||
2533 | #define PCI_DEVICE_ID_INTEL_IOAT_JSF3 0x3713 | ||
2534 | #define PCI_DEVICE_ID_INTEL_IOAT_JSF4 0x3714 | ||
2535 | #define PCI_DEVICE_ID_INTEL_IOAT_JSF5 0x3715 | ||
2536 | #define PCI_DEVICE_ID_INTEL_IOAT_JSF6 0x3716 | ||
2537 | #define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717 | ||
2538 | #define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718 | ||
2539 | #define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719 | ||
2530 | #define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 | 2540 | #define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 |
2531 | #define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 | 2541 | #define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 |
2532 | #define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 | 2542 | #define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 |
diff --git a/include/linux/prctl.h b/include/linux/prctl.h index 07bff666e65b..931150566ade 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h | |||
@@ -88,4 +88,6 @@ | |||
88 | #define PR_TASK_PERF_EVENTS_DISABLE 31 | 88 | #define PR_TASK_PERF_EVENTS_DISABLE 31 |
89 | #define PR_TASK_PERF_EVENTS_ENABLE 32 | 89 | #define PR_TASK_PERF_EVENTS_ENABLE 32 |
90 | 90 | ||
91 | #define PR_MCE_KILL 33 | ||
92 | |||
91 | #endif /* _LINUX_PRCTL_H */ | 93 | #endif /* _LINUX_PRCTL_H */ |
diff --git a/include/linux/relay.h b/include/linux/relay.h index 953fc055e875..14a86bc7102b 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h | |||
@@ -140,7 +140,7 @@ struct rchan_callbacks | |||
140 | * cause relay_open() to create a single global buffer rather | 140 | * cause relay_open() to create a single global buffer rather |
141 | * than the default set of per-cpu buffers. | 141 | * than the default set of per-cpu buffers. |
142 | * | 142 | * |
143 | * See Documentation/filesystems/relayfs.txt for more info. | 143 | * See Documentation/filesystems/relay.txt for more info. |
144 | */ | 144 | */ |
145 | struct dentry *(*create_buf_file)(const char *filename, | 145 | struct dentry *(*create_buf_file)(const char *filename, |
146 | struct dentry *parent, | 146 | struct dentry *parent, |
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index 511f42fc6816..731af71cddc9 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h | |||
@@ -35,6 +35,10 @@ struct res_counter { | |||
35 | */ | 35 | */ |
36 | unsigned long long limit; | 36 | unsigned long long limit; |
37 | /* | 37 | /* |
38 | * the limit that usage may exceed | ||
39 | */ | ||
40 | unsigned long long soft_limit; | ||
41 | /* | ||
38 | * the number of unsuccessful attempts to consume the resource | 42 | * the number of unsuccessful attempts to consume the resource |
39 | */ | 43 | */ |
40 | unsigned long long failcnt; | 44 | unsigned long long failcnt; |
@@ -87,6 +91,7 @@ enum { | |||
87 | RES_MAX_USAGE, | 91 | RES_MAX_USAGE, |
88 | RES_LIMIT, | 92 | RES_LIMIT, |
89 | RES_FAILCNT, | 93 | RES_FAILCNT, |
94 | RES_SOFT_LIMIT, | ||
90 | }; | 95 | }; |
91 | 96 | ||
92 | /* | 97 | /* |
@@ -109,7 +114,8 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent); | |||
109 | int __must_check res_counter_charge_locked(struct res_counter *counter, | 114 | int __must_check res_counter_charge_locked(struct res_counter *counter, |
110 | unsigned long val); | 115 | unsigned long val); |
111 | int __must_check res_counter_charge(struct res_counter *counter, | 116 | int __must_check res_counter_charge(struct res_counter *counter, |
112 | unsigned long val, struct res_counter **limit_fail_at); | 117 | unsigned long val, struct res_counter **limit_fail_at, |
118 | struct res_counter **soft_limit_at); | ||
113 | 119 | ||
114 | /* | 120 | /* |
115 | * uncharge - tell that some portion of the resource is released | 121 | * uncharge - tell that some portion of the resource is released |
@@ -122,7 +128,8 @@ int __must_check res_counter_charge(struct res_counter *counter, | |||
122 | */ | 128 | */ |
123 | 129 | ||
124 | void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val); | 130 | void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val); |
125 | void res_counter_uncharge(struct res_counter *counter, unsigned long val); | 131 | void res_counter_uncharge(struct res_counter *counter, unsigned long val, |
132 | bool *was_soft_limit_excess); | ||
126 | 133 | ||
127 | static inline bool res_counter_limit_check_locked(struct res_counter *cnt) | 134 | static inline bool res_counter_limit_check_locked(struct res_counter *cnt) |
128 | { | 135 | { |
@@ -132,6 +139,36 @@ static inline bool res_counter_limit_check_locked(struct res_counter *cnt) | |||
132 | return false; | 139 | return false; |
133 | } | 140 | } |
134 | 141 | ||
142 | static inline bool res_counter_soft_limit_check_locked(struct res_counter *cnt) | ||
143 | { | ||
144 | if (cnt->usage < cnt->soft_limit) | ||
145 | return true; | ||
146 | |||
147 | return false; | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * Get the difference between the usage and the soft limit | ||
152 | * @cnt: The counter | ||
153 | * | ||
154 | * Returns 0 if usage is less than or equal to the soft limit, | ||
155 | * otherwise the difference between usage and the soft limit. | ||
156 | */ | ||
157 | static inline unsigned long long | ||
158 | res_counter_soft_limit_excess(struct res_counter *cnt) | ||
159 | { | ||
160 | unsigned long long excess; | ||
161 | unsigned long flags; | ||
162 | |||
163 | spin_lock_irqsave(&cnt->lock, flags); | ||
164 | if (cnt->usage <= cnt->soft_limit) | ||
165 | excess = 0; | ||
166 | else | ||
167 | excess = cnt->usage - cnt->soft_limit; | ||
168 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
169 | return excess; | ||
170 | } | ||
171 | |||
135 | /* | 172 | /* |
136 | * Helper function to detect if the cgroup is within its limit or | 173 | * Helper function to detect if the cgroup is within its limit or
137 | * not. It's currently called from cgroup_rss_prepare() | 174 | * not. It's currently called from cgroup_rss_prepare() |
@@ -147,6 +184,17 @@ static inline bool res_counter_check_under_limit(struct res_counter *cnt) | |||
147 | return ret; | 184 | return ret; |
148 | } | 185 | } |
149 | 186 | ||
187 | static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt) | ||
188 | { | ||
189 | bool ret; | ||
190 | unsigned long flags; | ||
191 | |||
192 | spin_lock_irqsave(&cnt->lock, flags); | ||
193 | ret = res_counter_soft_limit_check_locked(cnt); | ||
194 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
195 | return ret; | ||
196 | } | ||
197 | |||
150 | static inline void res_counter_reset_max(struct res_counter *cnt) | 198 | static inline void res_counter_reset_max(struct res_counter *cnt) |
151 | { | 199 | { |
152 | unsigned long flags; | 200 | unsigned long flags; |
@@ -180,4 +228,16 @@ static inline int res_counter_set_limit(struct res_counter *cnt, | |||
180 | return ret; | 228 | return ret; |
181 | } | 229 | } |
182 | 230 | ||
231 | static inline int | ||
232 | res_counter_set_soft_limit(struct res_counter *cnt, | ||
233 | unsigned long long soft_limit) | ||
234 | { | ||
235 | unsigned long flags; | ||
236 | |||
237 | spin_lock_irqsave(&cnt->lock, flags); | ||
238 | cnt->soft_limit = soft_limit; | ||
239 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
240 | return 0; | ||
241 | } | ||
242 | |||
183 | #endif | 243 | #endif |
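A brief sketch of how a controller might use the new soft-limit helpers; example_soft_limit() and the reclaim reaction are hypothetical:

static void example_soft_limit(struct res_counter *cnt)        /* hypothetical */
{
        unsigned long long excess;

        /* Usage may exceed this value, but the excess is tracked. */
        res_counter_set_soft_limit(cnt, 64ULL << 20);        /* 64 MiB */

        excess = res_counter_soft_limit_excess(cnt);
        if (excess) {
                /* Over the soft limit by 'excess' bytes: a controller
                 * would queue this group for targeted reclaim here. */
        }
}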
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 477841d29fce..cb0ba7032609 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
@@ -81,7 +81,19 @@ static inline void page_dup_rmap(struct page *page) | |||
81 | */ | 81 | */ |
82 | int page_referenced(struct page *, int is_locked, | 82 | int page_referenced(struct page *, int is_locked, |
83 | struct mem_cgroup *cnt, unsigned long *vm_flags); | 83 | struct mem_cgroup *cnt, unsigned long *vm_flags); |
84 | int try_to_unmap(struct page *, int ignore_refs); | 84 | enum ttu_flags { |
85 | TTU_UNMAP = 0, /* unmap mode */ | ||
86 | TTU_MIGRATION = 1, /* migration mode */ | ||
87 | TTU_MUNLOCK = 2, /* munlock mode */ | ||
88 | TTU_ACTION_MASK = 0xff, | ||
89 | |||
90 | TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ | ||
91 | TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ | ||
92 | TTU_IGNORE_HWPOISON = (1 << 10), /* corrupted page is recoverable */ | ||
93 | }; | ||
94 | #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) | ||
95 | |||
96 | int try_to_unmap(struct page *, enum ttu_flags flags); | ||
85 | 97 | ||
86 | /* | 98 | /* |
87 | * Called from mm/filemap_xip.c to unmap empty zero page | 99 | * Called from mm/filemap_xip.c to unmap empty zero page |
@@ -108,6 +120,13 @@ int page_mkclean(struct page *); | |||
108 | */ | 120 | */ |
109 | int try_to_munlock(struct page *); | 121 | int try_to_munlock(struct page *); |
110 | 122 | ||
123 | /* | ||
124 | * Called by memory-failure.c to kill processes. | ||
125 | */ | ||
126 | struct anon_vma *page_lock_anon_vma(struct page *page); | ||
127 | void page_unlock_anon_vma(struct anon_vma *anon_vma); | ||
128 | int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); | ||
129 | |||
111 | #else /* !CONFIG_MMU */ | 130 | #else /* !CONFIG_MMU */ |
112 | 131 | ||
113 | #define anon_vma_init() do {} while (0) | 132 | #define anon_vma_init() do {} while (0) |
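With the flags argument, callers now combine one action with modifier bits; a sketch of a memory-failure style caller (the function name is hypothetical, and the page is assumed locked as try_to_unmap() requires):

static int unmap_poisoned_page(struct page *p)        /* hypothetical */
{
        enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_HWPOISON;

        /* TTU_ACTION(ttu) recovers the action bits, here TTU_UNMAP. */
        return try_to_unmap(p, ttu);
}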
diff --git a/include/linux/sched.h b/include/linux/sched.h index 848d1f20086e..75e6e60bf583 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -309,7 +309,7 @@ extern void softlockup_tick(void); | |||
309 | extern void touch_softlockup_watchdog(void); | 309 | extern void touch_softlockup_watchdog(void); |
310 | extern void touch_all_softlockup_watchdogs(void); | 310 | extern void touch_all_softlockup_watchdogs(void); |
311 | extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write, | 311 | extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write, |
312 | struct file *filp, void __user *buffer, | 312 | void __user *buffer, |
313 | size_t *lenp, loff_t *ppos); | 313 | size_t *lenp, loff_t *ppos); |
314 | extern unsigned int softlockup_panic; | 314 | extern unsigned int softlockup_panic; |
315 | extern int softlockup_thresh; | 315 | extern int softlockup_thresh; |
@@ -331,7 +331,7 @@ extern unsigned long sysctl_hung_task_check_count; | |||
331 | extern unsigned long sysctl_hung_task_timeout_secs; | 331 | extern unsigned long sysctl_hung_task_timeout_secs; |
332 | extern unsigned long sysctl_hung_task_warnings; | 332 | extern unsigned long sysctl_hung_task_warnings; |
333 | extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, | 333 | extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, |
334 | struct file *filp, void __user *buffer, | 334 | void __user *buffer, |
335 | size_t *lenp, loff_t *ppos); | 335 | size_t *lenp, loff_t *ppos); |
336 | #endif | 336 | #endif |
337 | 337 | ||
@@ -1271,7 +1271,6 @@ struct task_struct { | |||
1271 | struct mm_struct *mm, *active_mm; | 1271 | struct mm_struct *mm, *active_mm; |
1272 | 1272 | ||
1273 | /* task state */ | 1273 | /* task state */ |
1274 | struct linux_binfmt *binfmt; | ||
1275 | int exit_state; | 1274 | int exit_state; |
1276 | int exit_code, exit_signal; | 1275 | int exit_code, exit_signal; |
1277 | int pdeath_signal; /* The signal sent when the parent dies */ | 1276 | int pdeath_signal; /* The signal sent when the parent dies */ |
@@ -1735,6 +1734,7 @@ extern cputime_t task_gtime(struct task_struct *p); | |||
1735 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ | 1734 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ |
1736 | #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ | 1735 | #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ |
1737 | #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ | 1736 | #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ |
1737 | #define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ | ||
1738 | #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ | 1738 | #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ |
1739 | #define PF_DUMPCORE 0x00000200 /* dumped core */ | 1739 | #define PF_DUMPCORE 0x00000200 /* dumped core */ |
1740 | #define PF_SIGNALED 0x00000400 /* killed by a signal */ | 1740 | #define PF_SIGNALED 0x00000400 /* killed by a signal */ |
@@ -1754,6 +1754,7 @@ extern cputime_t task_gtime(struct task_struct *p); | |||
1754 | #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ | 1754 | #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ |
1755 | #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ | 1755 | #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ |
1756 | #define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ | 1756 | #define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ |
1757 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ | ||
1757 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ | 1758 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ |
1758 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | 1759 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
1759 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ | 1760 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ |
@@ -1906,7 +1907,7 @@ extern unsigned int sysctl_sched_time_avg; | |||
1906 | extern unsigned int sysctl_timer_migration; | 1907 | extern unsigned int sysctl_timer_migration; |
1907 | 1908 | ||
1908 | int sched_nr_latency_handler(struct ctl_table *table, int write, | 1909 | int sched_nr_latency_handler(struct ctl_table *table, int write, |
1909 | struct file *file, void __user *buffer, size_t *length, | 1910 | void __user *buffer, size_t *length, |
1910 | loff_t *ppos); | 1911 | loff_t *ppos); |
1911 | #endif | 1912 | #endif |
1912 | #ifdef CONFIG_SCHED_DEBUG | 1913 | #ifdef CONFIG_SCHED_DEBUG |
@@ -1924,7 +1925,7 @@ extern unsigned int sysctl_sched_rt_period; | |||
1924 | extern int sysctl_sched_rt_runtime; | 1925 | extern int sysctl_sched_rt_runtime; |
1925 | 1926 | ||
1926 | int sched_rt_handler(struct ctl_table *table, int write, | 1927 | int sched_rt_handler(struct ctl_table *table, int write, |
1927 | struct file *filp, void __user *buffer, size_t *lenp, | 1928 | void __user *buffer, size_t *lenp, |
1928 | loff_t *ppos); | 1929 | loff_t *ppos); |
1929 | 1930 | ||
1930 | extern unsigned int sysctl_sched_compat_yield; | 1931 | extern unsigned int sysctl_sched_compat_yield; |
@@ -2059,6 +2060,7 @@ extern int kill_pgrp(struct pid *pid, int sig, int priv); | |||
2059 | extern int kill_pid(struct pid *pid, int sig, int priv); | 2060 | extern int kill_pid(struct pid *pid, int sig, int priv); |
2060 | extern int kill_proc_info(int, struct siginfo *, pid_t); | 2061 | extern int kill_proc_info(int, struct siginfo *, pid_t); |
2061 | extern int do_notify_parent(struct task_struct *, int); | 2062 | extern int do_notify_parent(struct task_struct *, int); |
2063 | extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); | ||
2062 | extern void force_sig(int, struct task_struct *); | 2064 | extern void force_sig(int, struct task_struct *); |
2063 | extern void force_sig_specific(int, struct task_struct *); | 2065 | extern void force_sig_specific(int, struct task_struct *); |
2064 | extern int send_sig(int, struct task_struct *, int); | 2066 | extern int send_sig(int, struct task_struct *, int); |
@@ -2336,7 +2338,10 @@ static inline int signal_pending(struct task_struct *p) | |||
2336 | return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); | 2338 | return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); |
2337 | } | 2339 | } |
2338 | 2340 | ||
2339 | extern int __fatal_signal_pending(struct task_struct *p); | 2341 | static inline int __fatal_signal_pending(struct task_struct *p) |
2342 | { | ||
2343 | return unlikely(sigismember(&p->pending.signal, SIGKILL)); | ||
2344 | } | ||
2340 | 2345 | ||
2341 | static inline int fatal_signal_pending(struct task_struct *p) | 2346 | static inline int fatal_signal_pending(struct task_struct *p) |
2342 | { | 2347 | { |
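Making __fatal_signal_pending() a static inline does not change how the wrapper is used; a sketch of the typical pattern in a long-running loop (the work helpers are hypothetical):

static int process_items(void)        /* hypothetical */
{
        while (have_more_items()) {                /* hypothetical */
                if (fatal_signal_pending(current))
                        return -EINTR;        /* SIGKILL: stop promptly */
                process_one_item();                /* hypothetical */
        }
        return 0;
}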
diff --git a/include/linux/security.h b/include/linux/security.h index d050b66ab9ef..239e40d0450b 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -133,7 +133,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint) | |||
133 | return PAGE_ALIGN(mmap_min_addr); | 133 | return PAGE_ALIGN(mmap_min_addr); |
134 | return hint; | 134 | return hint; |
135 | } | 135 | } |
136 | extern int mmap_min_addr_handler(struct ctl_table *table, int write, struct file *filp, | 136 | extern int mmap_min_addr_handler(struct ctl_table *table, int write, |
137 | void __user *buffer, size_t *lenp, loff_t *ppos); | 137 | void __user *buffer, size_t *lenp, loff_t *ppos); |
138 | 138 | ||
139 | #ifdef CONFIG_SECURITY | 139 | #ifdef CONFIG_SECURITY |
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 0c6a86b79596..8366d8f12e53 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h | |||
@@ -35,6 +35,44 @@ struct seq_operations { | |||
35 | 35 | ||
36 | #define SEQ_SKIP 1 | 36 | #define SEQ_SKIP 1 |
37 | 37 | ||
38 | /** | ||
39 | * seq_get_buf - get buffer to write arbitrary data to | ||
40 | * @m: the seq_file handle | ||
41 | * @bufp: the beginning of the buffer is stored here | ||
42 | * | ||
43 | * Return the number of bytes available in the buffer, or zero if | ||
44 | * there's no space. | ||
45 | */ | ||
46 | static inline size_t seq_get_buf(struct seq_file *m, char **bufp) | ||
47 | { | ||
48 | BUG_ON(m->count > m->size); | ||
49 | if (m->count < m->size) | ||
50 | *bufp = m->buf + m->count; | ||
51 | else | ||
52 | *bufp = NULL; | ||
53 | |||
54 | return m->size - m->count; | ||
55 | } | ||
56 | |||
57 | /** | ||
58 | * seq_commit - commit data to the buffer | ||
59 | * @m: the seq_file handle | ||
60 | * @num: the number of bytes to commit | ||
61 | * | ||
62 | * Commit @num bytes of data written to a buffer previously acquired | ||
63 | * by seq_get_buf. To signal an error condition, or that the data | ||
64 | * didn't fit in the available space, pass a negative @num value. | ||
65 | */ | ||
66 | static inline void seq_commit(struct seq_file *m, int num) | ||
67 | { | ||
68 | if (num < 0) { | ||
69 | m->count = m->size; | ||
70 | } else { | ||
71 | BUG_ON(m->count + num > m->size); | ||
72 | m->count += num; | ||
73 | } | ||
74 | } | ||
75 | |||
38 | char *mangle_path(char *s, char *p, char *esc); | 76 | char *mangle_path(char *s, char *p, char *esc); |
39 | int seq_open(struct file *, const struct seq_operations *); | 77 | int seq_open(struct file *, const struct seq_operations *); |
40 | ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); | 78 | ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); |
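A sketch of the intended pairing of the two helpers: grab the free space, format into it, then commit what fit or signal overflow with a negative count. format_status() is a hypothetical snprintf-style routine that returns the length it wanted to write:

static int example_show(struct seq_file *m, void *v)        /* hypothetical */
{
        char *buf;
        size_t size = seq_get_buf(m, &buf);
        int len = -1;

        if (buf)
                len = format_status(buf, size);        /* hypothetical */

        /* A negative num marks the buffer overflown, so seq_file
         * retries with a larger buffer. */
        seq_commit(m, (len >= 0 && len < size) ? len : -1);
        return 0;
}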
diff --git a/include/linux/signal.h b/include/linux/signal.h index c7552836bd95..ab9272cc270c 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
@@ -233,6 +233,8 @@ static inline int valid_signal(unsigned long sig) | |||
233 | } | 233 | } |
234 | 234 | ||
235 | extern int next_signal(struct sigpending *pending, sigset_t *mask); | 235 | extern int next_signal(struct sigpending *pending, sigset_t *mask); |
236 | extern int do_send_sig_info(int sig, struct siginfo *info, | ||
237 | struct task_struct *p, bool group); | ||
236 | extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); | 238 | extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); |
237 | extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); | 239 | extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); |
238 | extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, | 240 | extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 6c990e658f4e..4ec90019c1a4 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -34,16 +34,38 @@ static inline int current_is_kswapd(void) | |||
34 | * the type/offset into the pte as 5/27 as well. | 34 | * the type/offset into the pte as 5/27 as well. |
35 | */ | 35 | */ |
36 | #define MAX_SWAPFILES_SHIFT 5 | 36 | #define MAX_SWAPFILES_SHIFT 5 |
37 | #ifndef CONFIG_MIGRATION | 37 | |
38 | #define MAX_SWAPFILES (1 << MAX_SWAPFILES_SHIFT) | 38 | /* |
39 | * Use some of the swap file numbers for other purposes. This | ||
40 | * is a convenient way to hook into the VM to trigger special | ||
41 | * actions on faults. | ||
42 | */ | ||
43 | |||
44 | /* | ||
45 | * NUMA node memory migration support | ||
46 | */ | ||
47 | #ifdef CONFIG_MIGRATION | ||
48 | #define SWP_MIGRATION_NUM 2 | ||
49 | #define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM) | ||
50 | #define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1) | ||
39 | #else | 51 | #else |
40 | /* Use last two entries for page migration swap entries */ | 52 | #define SWP_MIGRATION_NUM 0 |
41 | #define MAX_SWAPFILES ((1 << MAX_SWAPFILES_SHIFT)-2) | ||
42 | #define SWP_MIGRATION_READ MAX_SWAPFILES | ||
43 | #define SWP_MIGRATION_WRITE (MAX_SWAPFILES + 1) | ||
44 | #endif | 53 | #endif |
45 | 54 | ||
46 | /* | 55 | /* |
56 | * Handling of hardware poisoned pages with memory corruption. | ||
57 | */ | ||
58 | #ifdef CONFIG_MEMORY_FAILURE | ||
59 | #define SWP_HWPOISON_NUM 1 | ||
60 | #define SWP_HWPOISON MAX_SWAPFILES | ||
61 | #else | ||
62 | #define SWP_HWPOISON_NUM 0 | ||
63 | #endif | ||
64 | |||
65 | #define MAX_SWAPFILES \ | ||
66 | ((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM) | ||
67 | |||
68 | /* | ||
47 | * Magic header for a swap area. The first part of the union is | 69 | * Magic header for a swap area. The first part of the union is |
48 | * what the swap magic looks like for the old (limited to 128MB) | 70 | * what the swap magic looks like for the old (limited to 128MB) |
49 | * swap area format, the second part of the union adds - in the | 71 | * swap area format, the second part of the union adds - in the |
@@ -217,6 +239,11 @@ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, | |||
217 | extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, | 239 | extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, |
218 | gfp_t gfp_mask, bool noswap, | 240 | gfp_t gfp_mask, bool noswap, |
219 | unsigned int swappiness); | 241 | unsigned int swappiness); |
242 | extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, | ||
243 | gfp_t gfp_mask, bool noswap, | ||
244 | unsigned int swappiness, | ||
245 | struct zone *zone, | ||
246 | int nid); | ||
220 | extern int __isolate_lru_page(struct page *page, int mode, int file); | 247 | extern int __isolate_lru_page(struct page *page, int mode, int file); |
221 | extern unsigned long shrink_all_memory(unsigned long nr_pages); | 248 | extern unsigned long shrink_all_memory(unsigned long nr_pages); |
222 | extern int vm_swappiness; | 249 | extern int vm_swappiness; |
@@ -240,7 +267,7 @@ extern int page_evictable(struct page *page, struct vm_area_struct *vma); | |||
240 | extern void scan_mapping_unevictable_pages(struct address_space *); | 267 | extern void scan_mapping_unevictable_pages(struct address_space *); |
241 | 268 | ||
242 | extern unsigned long scan_unevictable_pages; | 269 | extern unsigned long scan_unevictable_pages; |
243 | extern int scan_unevictable_handler(struct ctl_table *, int, struct file *, | 270 | extern int scan_unevictable_handler(struct ctl_table *, int, |
244 | void __user *, size_t *, loff_t *); | 271 | void __user *, size_t *, loff_t *); |
245 | extern int scan_unevictable_register_node(struct node *node); | 272 | extern int scan_unevictable_register_node(struct node *node); |
246 | extern void scan_unevictable_unregister_node(struct node *node); | 273 | extern void scan_unevictable_unregister_node(struct node *node); |
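Working through the arithmetic from the first hunk in this file: with MAX_SWAPFILES_SHIFT = 5 and both CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE enabled, MAX_SWAPFILES is 32 - 2 - 1 = 29, SWP_HWPOISON takes slot 29, and the migration entries take slots 30 and 31. A compile-time sketch of that layout (the checking function is hypothetical):

/* Sketch, assuming both CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE. */
static inline void check_special_swap_slots(void)        /* hypothetical */
{
        BUILD_BUG_ON(MAX_SWAPFILES != 29);
        BUILD_BUG_ON(SWP_HWPOISON != 29);
        BUILD_BUG_ON(SWP_MIGRATION_READ != 30);
        BUILD_BUG_ON(SWP_MIGRATION_WRITE != 31);
}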
diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 6ec39ab27b4b..cd42e30b7c6e 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h | |||
@@ -131,3 +131,41 @@ static inline int is_write_migration_entry(swp_entry_t entry) | |||
131 | 131 | ||
132 | #endif | 132 | #endif |
133 | 133 | ||
134 | #ifdef CONFIG_MEMORY_FAILURE | ||
135 | /* | ||
136 | * Support for hardware poisoned pages | ||
137 | */ | ||
138 | static inline swp_entry_t make_hwpoison_entry(struct page *page) | ||
139 | { | ||
140 | BUG_ON(!PageLocked(page)); | ||
141 | return swp_entry(SWP_HWPOISON, page_to_pfn(page)); | ||
142 | } | ||
143 | |||
144 | static inline int is_hwpoison_entry(swp_entry_t entry) | ||
145 | { | ||
146 | return swp_type(entry) == SWP_HWPOISON; | ||
147 | } | ||
148 | #else | ||
149 | |||
150 | static inline swp_entry_t make_hwpoison_entry(struct page *page) | ||
151 | { | ||
152 | return swp_entry(0, 0); | ||
153 | } | ||
154 | |||
155 | static inline int is_hwpoison_entry(swp_entry_t swp) | ||
156 | { | ||
157 | return 0; | ||
158 | } | ||
159 | #endif | ||
160 | |||
161 | #if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) | ||
162 | static inline int non_swap_entry(swp_entry_t entry) | ||
163 | { | ||
164 | return swp_type(entry) >= MAX_SWAPFILES; | ||
165 | } | ||
166 | #else | ||
167 | static inline int non_swap_entry(swp_entry_t entry) | ||
168 | { | ||
169 | return 0; | ||
170 | } | ||
171 | #endif | ||
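A sketch of how a pte-handling path might classify a non-present entry with the new helpers; classify_entry() and the return codes are illustrative only:

static int classify_entry(pte_t pte)        /* hypothetical */
{
        swp_entry_t entry = pte_to_swp_entry(pte);

        if (!non_swap_entry(entry))
                return 0;                /* ordinary entry: swap it in */
        if (is_migration_entry(entry))
                return 1;                /* wait for migration to finish */
        if (is_hwpoison_entry(entry))
                return -EFAULT;          /* poisoned page: SIGBUS territory */
        return -EINVAL;
}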
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index e76d3b22a466..1e4743ee6831 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/types.h> | 29 | #include <linux/types.h> |
30 | #include <linux/compiler.h> | 30 | #include <linux/compiler.h> |
31 | 31 | ||
32 | struct file; | ||
33 | struct completion; | 32 | struct completion; |
34 | 33 | ||
35 | #define CTL_MAXNAME 10 /* how many path components do we allow in a | 34 | #define CTL_MAXNAME 10 /* how many path components do we allow in a |
@@ -977,25 +976,25 @@ typedef int ctl_handler (struct ctl_table *table, | |||
977 | void __user *oldval, size_t __user *oldlenp, | 976 | void __user *oldval, size_t __user *oldlenp, |
978 | void __user *newval, size_t newlen); | 977 | void __user *newval, size_t newlen); |
979 | 978 | ||
980 | typedef int proc_handler (struct ctl_table *ctl, int write, struct file * filp, | 979 | typedef int proc_handler (struct ctl_table *ctl, int write, |
981 | void __user *buffer, size_t *lenp, loff_t *ppos); | 980 | void __user *buffer, size_t *lenp, loff_t *ppos); |
982 | 981 | ||
983 | extern int proc_dostring(struct ctl_table *, int, struct file *, | 982 | extern int proc_dostring(struct ctl_table *, int, |
984 | void __user *, size_t *, loff_t *); | 983 | void __user *, size_t *, loff_t *); |
985 | extern int proc_dointvec(struct ctl_table *, int, struct file *, | 984 | extern int proc_dointvec(struct ctl_table *, int, |
986 | void __user *, size_t *, loff_t *); | 985 | void __user *, size_t *, loff_t *); |
987 | extern int proc_dointvec_minmax(struct ctl_table *, int, struct file *, | 986 | extern int proc_dointvec_minmax(struct ctl_table *, int, |
988 | void __user *, size_t *, loff_t *); | 987 | void __user *, size_t *, loff_t *); |
989 | extern int proc_dointvec_jiffies(struct ctl_table *, int, struct file *, | 988 | extern int proc_dointvec_jiffies(struct ctl_table *, int, |
990 | void __user *, size_t *, loff_t *); | 989 | void __user *, size_t *, loff_t *); |
991 | extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, struct file *, | 990 | extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, |
992 | void __user *, size_t *, loff_t *); | 991 | void __user *, size_t *, loff_t *); |
993 | extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, struct file *, | 992 | extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, |
994 | void __user *, size_t *, loff_t *); | 993 | void __user *, size_t *, loff_t *); |
995 | extern int proc_doulongvec_minmax(struct ctl_table *, int, struct file *, | 994 | extern int proc_doulongvec_minmax(struct ctl_table *, int, |
996 | void __user *, size_t *, loff_t *); | 995 | void __user *, size_t *, loff_t *); |
997 | extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, | 996 | extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, |
998 | struct file *, void __user *, size_t *, loff_t *); | 997 | void __user *, size_t *, loff_t *); |
999 | 998 | ||
1000 | extern int do_sysctl (int __user *name, int nlen, | 999 | extern int do_sysctl (int __user *name, int nlen, |
1001 | void __user *oldval, size_t __user *oldlenp, | 1000 | void __user *oldval, size_t __user *oldlenp, |
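Under the new prototype a proc handler simply drops the struct file * parameter; a minimal sketch of a custom handler wrapping proc_dointvec() (my_handler() and the reaction are hypothetical):

static int my_handler(struct ctl_table *table, int write,
                      void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write)
                return ret;
        /* React to the updated value here. */
        return 0;
}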
diff --git a/include/linux/time.h b/include/linux/time.h index 56787c093345..fe04e5ef6a59 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
@@ -155,6 +155,34 @@ extern void timekeeping_leap_insert(int leapsecond); | |||
155 | struct tms; | 155 | struct tms; |
156 | extern void do_sys_times(struct tms *); | 156 | extern void do_sys_times(struct tms *); |
157 | 157 | ||
158 | /* | ||
159 | * Similar to the struct tm in userspace <time.h>, but it needs to be here so | ||
160 | * that the kernel source is self contained. | ||
161 | */ | ||
162 | struct tm { | ||
163 | /* | ||
164 | * the number of seconds after the minute, normally in the range | ||
165 | * 0 to 59, but can be up to 60 to allow for leap seconds | ||
166 | */ | ||
167 | int tm_sec; | ||
169 | /* the number of minutes after the hour, in the range 0 to 59 */ | ||
169 | int tm_min; | ||
170 | /* the number of hours past midnight, in the range 0 to 23 */ | ||
171 | int tm_hour; | ||
172 | /* the day of the month, in the range 1 to 31 */ | ||
173 | int tm_mday; | ||
174 | /* the number of months since January, in the range 0 to 11 */ | ||
175 | int tm_mon; | ||
176 | /* the number of years since 1900 */ | ||
177 | long tm_year; | ||
178 | /* the number of days since Sunday, in the range 0 to 6 */ | ||
179 | int tm_wday; | ||
180 | /* the number of days since January 1, in the range 0 to 365 */ | ||
181 | int tm_yday; | ||
182 | }; | ||
183 | |||
184 | void time_to_tm(time_t totalsecs, int offset, struct tm *result); | ||
185 | |||
158 | /** | 186 | /** |
159 | * timespec_to_ns - Convert timespec to nanoseconds | 187 | * timespec_to_ns - Convert timespec to nanoseconds |
160 | * @ts: pointer to the timespec variable to be converted | 188 | * @ts: pointer to the timespec variable to be converted |
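A sketch of the new struct tm and time_to_tm() in use, converting a seconds count to broken-down UTC; log_current_time() and the printk format are illustrative:

static void log_current_time(void)        /* hypothetical */
{
        struct tm tm;

        /* The second argument is an offset in seconds applied before the
         * conversion (0 keeps UTC); tm_year counts from 1900, tm_mon from 0. */
        time_to_tm(get_seconds(), 0, &tm);
        printk(KERN_INFO "%04ld-%02d-%02d %02d:%02d:%02d UTC\n",
               tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
               tm.tm_hour, tm.tm_min, tm.tm_sec);
}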
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 17ba82efa483..1eb44a924e56 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Tracing hooks | 2 | * Tracing hooks |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Red Hat, Inc. All rights reserved. | 4 | * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This copyrighted material is made available to anyone wishing to use, | 6 | * This copyrighted material is made available to anyone wishing to use, |
7 | * modify, copy, or redistribute it subject to the terms and conditions | 7 | * modify, copy, or redistribute it subject to the terms and conditions |
@@ -463,22 +463,38 @@ static inline int tracehook_get_signal(struct task_struct *task, | |||
463 | 463 | ||
464 | /** | 464 | /** |
465 | * tracehook_notify_jctl - report about job control stop/continue | 465 | * tracehook_notify_jctl - report about job control stop/continue |
466 | * @notify: nonzero if this is the last thread in the group to stop | 466 | * @notify: zero, %CLD_STOPPED or %CLD_CONTINUED |
467 | * @why: %CLD_STOPPED or %CLD_CONTINUED | 467 | * @why: %CLD_STOPPED or %CLD_CONTINUED |
468 | * | 468 | * |
469 | * This is called when we might call do_notify_parent_cldstop(). | 469 | * This is called when we might call do_notify_parent_cldstop(). |
470 | * It's called when about to stop for job control; we are already in | ||
471 | * %TASK_STOPPED state, about to call schedule(). It's also called when | ||
472 | * a delayed %CLD_STOPPED or %CLD_CONTINUED report is ready to be made. | ||
473 | * | 470 | * |
474 | * Return nonzero to generate a %SIGCHLD with @why, which is | 471 | * @notify is zero if we would not ordinarily send a %SIGCHLD, |
475 | * normal if @notify is nonzero. | 472 | * or is the %CLD_STOPPED or %CLD_CONTINUED .si_code for %SIGCHLD. |
476 | * | 473 | * |
477 | * Called with no locks held. | 474 | * @why is %CLD_STOPPED when about to stop for job control; |
475 | * we are already in %TASK_STOPPED state, about to call schedule(). | ||
476 | * It might also be that we have just exited (check %PF_EXITING), | ||
477 | * but need to report that a group-wide stop is complete. | ||
478 | * | ||
479 | * @why is %CLD_CONTINUED when waking up after job control stop and | ||
480 | * ready to make a delayed @notify report. | ||
481 | * | ||
482 | * Return the %CLD_* value for %SIGCHLD, or zero to generate no signal. | ||
483 | * | ||
484 | * Called with the siglock held. | ||
478 | */ | 485 | */ |
479 | static inline int tracehook_notify_jctl(int notify, int why) | 486 | static inline int tracehook_notify_jctl(int notify, int why) |
480 | { | 487 | { |
481 | return notify || (current->ptrace & PT_PTRACED); | 488 | return notify ?: (current->ptrace & PT_PTRACED) ? why : 0; |
489 | } | ||
490 | |||
491 | /** | ||
492 | * tracehook_finish_jctl - report about return from job control stop | ||
493 | * | ||
494 | * This is called by do_signal_stop() after wakeup. | ||
495 | */ | ||
496 | static inline void tracehook_finish_jctl(void) | ||
497 | { | ||
482 | } | 498 | } |
483 | 499 | ||
484 | #define DEATH_REAP -1 | 500 | #define DEATH_REAP -1 |
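A sketch of the new calling convention, mirroring what a do_signal_stop()-style caller would do (was_last_to_stop is a hypothetical flag; locking is elided):

int notify = tracehook_notify_jctl(was_last_to_stop ? CLD_STOPPED : 0,
                                   CLD_STOPPED);
if (notify)
        do_notify_parent_cldstop(current, notify);        /* sends SIGCHLD */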
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 63a3f7a80580..660a9de96f81 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -4,7 +4,7 @@ | |||
4 | /* | 4 | /* |
5 | * Kernel Tracepoint API. | 5 | * Kernel Tracepoint API. |
6 | * | 6 | * |
7 | * See Documentation/tracepoint.txt. | 7 | * See Documentation/trace/tracepoints.txt. |
8 | * | 8 | * |
9 | * (C) Copyright 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> | 9 | * (C) Copyright 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> |
10 | * | 10 | * |
diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h index 46dd12c5709e..9356b24223ac 100644 --- a/include/linux/unaligned/be_byteshift.h +++ b/include/linux/unaligned/be_byteshift.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H | 1 | #ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H |
2 | #define _LINUX_UNALIGNED_BE_BYTESHIFT_H | 2 | #define _LINUX_UNALIGNED_BE_BYTESHIFT_H |
3 | 3 | ||
4 | #include <linux/kernel.h> | 4 | #include <linux/types.h> |
5 | 5 | ||
6 | static inline u16 __get_unaligned_be16(const u8 *p) | 6 | static inline u16 __get_unaligned_be16(const u8 *p) |
7 | { | 7 | { |
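A tiny sketch of the helper shown above, pulling a big-endian 16-bit field out of a byte stream:

u8 hdr[2] = { 0x01, 0x02 };
u16 val = __get_unaligned_be16(hdr);        /* yields 0x0102 (258) */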
diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h index 59777e951baf..be376fb79b64 100644 --- a/include/linux/unaligned/le_byteshift.h +++ b/include/linux/unaligned/le_byteshift.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H | 1 | #ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H |
2 | #define _LINUX_UNALIGNED_LE_BYTESHIFT_H | 2 | #define _LINUX_UNALIGNED_LE_BYTESHIFT_H |
3 | 3 | ||
4 | #include <linux/kernel.h> | 4 | #include <linux/types.h> |
5 | 5 | ||
6 | static inline u16 __get_unaligned_le16(const u8 *p) | 6 | static inline u16 __get_unaligned_le16(const u8 *p) |
7 | { | 7 | { |
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index 923f9040ea20..2dfaa293ae8c 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * vgaarb.c | 2 | * The VGA arbiter manages VGA space routing and VGA resource decode to
3 | * allow multiple VGA devices to be used in a system in a safe way. | ||
3 | * | 4 | * |
4 | * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> | 5 | * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> |
5 | * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> | 6 | * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 75cf58666ff9..66ebddcff664 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
@@ -110,21 +110,20 @@ extern int laptop_mode; | |||
110 | extern unsigned long determine_dirtyable_memory(void); | 110 | extern unsigned long determine_dirtyable_memory(void); |
111 | 111 | ||
112 | extern int dirty_background_ratio_handler(struct ctl_table *table, int write, | 112 | extern int dirty_background_ratio_handler(struct ctl_table *table, int write, |
113 | struct file *filp, void __user *buffer, size_t *lenp, | 113 | void __user *buffer, size_t *lenp, |
114 | loff_t *ppos); | 114 | loff_t *ppos); |
115 | extern int dirty_background_bytes_handler(struct ctl_table *table, int write, | 115 | extern int dirty_background_bytes_handler(struct ctl_table *table, int write, |
116 | struct file *filp, void __user *buffer, size_t *lenp, | 116 | void __user *buffer, size_t *lenp, |
117 | loff_t *ppos); | 117 | loff_t *ppos); |
118 | extern int dirty_ratio_handler(struct ctl_table *table, int write, | 118 | extern int dirty_ratio_handler(struct ctl_table *table, int write, |
119 | struct file *filp, void __user *buffer, size_t *lenp, | 119 | void __user *buffer, size_t *lenp, |
120 | loff_t *ppos); | 120 | loff_t *ppos); |
121 | extern int dirty_bytes_handler(struct ctl_table *table, int write, | 121 | extern int dirty_bytes_handler(struct ctl_table *table, int write, |
122 | struct file *filp, void __user *buffer, size_t *lenp, | 122 | void __user *buffer, size_t *lenp, |
123 | loff_t *ppos); | 123 | loff_t *ppos); |
124 | 124 | ||
125 | struct ctl_table; | 125 | struct ctl_table; |
126 | struct file; | 126 | int dirty_writeback_centisecs_handler(struct ctl_table *, int, |
127 | int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *, | ||
128 | void __user *, size_t *, loff_t *); | 127 | void __user *, size_t *, loff_t *); |
129 | 128 | ||
130 | void get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty, | 129 | void get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty, |