Diffstat (limited to 'include/linux')
38 files changed, 1142 insertions, 949 deletions
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 1d52425a6118..f169bcb90b58 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,6 +13,8 @@
 #include <linux/proportions.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/writeback.h>
 #include <asm/atomic.h>
 
 struct page;
@@ -23,9 +25,11 @@ struct dentry;
  * Bits in backing_dev_info.state
  */
 enum bdi_state {
-	BDI_pdflush,		/* A pdflush thread is working this device */
+	BDI_pending,		/* On its way to being activated */
+	BDI_wb_alloc,		/* Default embedded wb allocated */
 	BDI_async_congested,	/* The async (write) queue is getting full */
 	BDI_sync_congested,	/* The sync queue is getting full */
+	BDI_registered,		/* bdi_register() was done */
 	BDI_unused,		/* Available bits start here */
 };
 
@@ -39,7 +43,22 @@ enum bdi_stat_item {
 
 #define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
 
+struct bdi_writeback {
+	struct list_head list;		/* hangs off the bdi */
+
+	struct backing_dev_info *bdi;	/* our parent bdi */
+	unsigned int nr;
+
+	unsigned long last_old_flush;	/* last old data flush */
+
+	struct task_struct *task;	/* writeback task */
+	struct list_head b_dirty;	/* dirty inodes */
+	struct list_head b_io;		/* parked for writeback */
+	struct list_head b_more_io;	/* parked for more writeback */
+};
+
 struct backing_dev_info {
+	struct list_head bdi_list;
 	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
 	unsigned long state;	/* Always use atomic bitops on this */
 	unsigned int capabilities; /* Device capabilities */
@@ -48,6 +67,8 @@ struct backing_dev_info {
 	void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
 	void *unplug_io_data;
 
+	char *name;
+
 	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
 
 	struct prop_local_percpu completions;
@@ -56,6 +77,14 @@ struct backing_dev_info {
 	unsigned int min_ratio;
 	unsigned int max_ratio, max_prop_frac;
 
+	struct bdi_writeback wb;	/* default writeback info for this bdi */
+	spinlock_t wb_lock;		/* protects update side of wb_list */
+	struct list_head wb_list;	/* the flusher threads hanging off this bdi */
+	unsigned long wb_mask;		/* bitmask of registered tasks */
+	unsigned int wb_cnt;		/* number of registered tasks */
+
+	struct list_head work_list;
+
 	struct device *dev;
 
 #ifdef CONFIG_DEBUG_FS
@@ -71,6 +100,19 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
 void bdi_unregister(struct backing_dev_info *bdi);
+void bdi_start_writeback(struct writeback_control *wbc);
+int bdi_writeback_task(struct bdi_writeback *wb);
+int bdi_has_dirty_io(struct backing_dev_info *bdi);
+
+extern spinlock_t bdi_lock;
+extern struct list_head bdi_list;
+
+static inline int wb_has_dirty_io(struct bdi_writeback *wb)
+{
+	return !list_empty(&wb->b_dirty) ||
+	       !list_empty(&wb->b_io) ||
+	       !list_empty(&wb->b_more_io);
+}
 
 static inline void __add_bdi_stat(struct backing_dev_info *bdi,
 		enum bdi_stat_item item, s64 amount)
@@ -261,6 +303,11 @@ static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
 	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
 }
 
+static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
+{
+	return bdi == &default_backing_dev_info;
+}
+
 static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
 {
 	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
@@ -276,4 +323,10 @@ static inline bool mapping_cap_swap_backed(struct address_space *mapping)
 	return bdi_cap_swap_backed(mapping->backing_dev_info);
 }
 
+static inline int bdi_sched_wait(void *word)
+{
+	schedule();
+	return 0;
+}
+
 #endif	/* _LINUX_BACKING_DEV_H */
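
For context, bdi_sched_wait() is shaped like a wait_on_bit() action function: it is invoked while the waited-on bit is still set, schedules, and returns 0 to keep waiting. A minimal sketch of how a caller might park on the new BDI_pending bit (the wrapper function below is illustrative, not part of this header):

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/backing-dev.h>

/* Sketch: block until someone clears BDI_pending on this bdi.
 * wait_on_bit() calls the action function (bdi_sched_wait) whenever
 * it must block; returning 0 from the action means "keep waiting".
 */
static void example_wait_for_activation(struct backing_dev_info *bdi)
{
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);
}
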
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 4d668e05d458..47536197ffdd 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -48,6 +48,15 @@ struct notifier_block;
 
 #ifdef CONFIG_SMP
 /* Need to know about CPUs going up/down? */
+#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
+#define cpu_notifier(fn, pri) {					\
+	static struct notifier_block fn##_nb __cpuinitdata =	\
+		{ .notifier_call = fn, .priority = pri };	\
+	register_cpu_notifier(&fn##_nb);			\
+}
+#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
+#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
 #ifdef CONFIG_HOTPLUG_CPU
 extern int register_cpu_notifier(struct notifier_block *nb);
 extern void unregister_cpu_notifier(struct notifier_block *nb);
@@ -74,6 +83,8 @@ extern void cpu_maps_update_done(void);
 
 #else	/* CONFIG_SMP */
 
+#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
+
 static inline int register_cpu_notifier(struct notifier_block *nb)
 {
 	return 0;
@@ -99,11 +110,7 @@ extern struct sysdev_class cpu_sysdev_class;
 
 extern void get_online_cpus(void);
 extern void put_online_cpus(void);
-#define hotcpu_notifier(fn, pri) {				\
-	static struct notifier_block fn##_nb __cpuinitdata =	\
-		{ .notifier_call = fn, .priority = pri };	\
-	register_cpu_notifier(&fn##_nb);			\
-}
+#define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
 #define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
 int cpu_down(unsigned int cpu);
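
For context, cpu_notifier() expands to a block statement that defines a static notifier_block and registers it, so it is meant to be invoked once from init code. A hedged usage sketch (callback and init-function names are illustrative):

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* Illustrative hotplug callback. */
static int __cpuinit example_cpu_callback(struct notifier_block *nb,
					  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		printk(KERN_INFO "cpu %u came online\n", cpu);
		break;
	case CPU_DEAD:
		printk(KERN_INFO "cpu %u went offline\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static int __init example_init(void)
{
	/* Defines and registers a notifier_block behind the scenes.
	 * Compiles away to nothing on !SMP, and in modules built
	 * without CONFIG_HOTPLUG_CPU. */
	cpu_notifier(example_cpu_callback, 0);
	return 0;
}
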
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 4fa999696310..24520a539c6f 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -114,6 +114,13 @@ struct thread_group_cred {
  */
 struct cred {
 	atomic_t	usage;
+#ifdef CONFIG_DEBUG_CREDENTIALS
+	atomic_t	subscribers;	/* number of processes subscribed */
+	void		*put_addr;
+	unsigned	magic;
+#define CRED_MAGIC	0x43736564
+#define CRED_MAGIC_DEAD	0x44656144
+#endif
 	uid_t		uid;		/* real UID of the task */
 	gid_t		gid;		/* real GID of the task */
 	uid_t		suid;		/* saved UID of the task */
@@ -143,7 +150,9 @@ struct cred {
 };
 
 extern void __put_cred(struct cred *);
+extern void exit_creds(struct task_struct *);
 extern int copy_creds(struct task_struct *, unsigned long);
+extern struct cred *cred_alloc_blank(void);
 extern struct cred *prepare_creds(void);
 extern struct cred *prepare_exec_creds(void);
 extern struct cred *prepare_usermodehelper_creds(void);
@@ -158,6 +167,60 @@ extern int set_security_override_from_ctx(struct cred *, const char *);
 extern int set_create_files_as(struct cred *, struct inode *);
 extern void __init cred_init(void);
 
+/*
+ * check for validity of credentials
+ */
+#ifdef CONFIG_DEBUG_CREDENTIALS
+extern void __invalid_creds(const struct cred *, const char *, unsigned);
+extern void __validate_process_creds(struct task_struct *,
+				     const char *, unsigned);
+
+static inline bool creds_are_invalid(const struct cred *cred)
+{
+	if (cred->magic != CRED_MAGIC)
+		return true;
+	if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
+		return true;
+#ifdef CONFIG_SECURITY_SELINUX
+	if ((unsigned long) cred->security < PAGE_SIZE)
+		return true;
+	if ((*(u32*)cred->security & 0xffffff00) ==
+	    (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8))
+		return true;
+#endif
+	return false;
+}
+
+static inline void __validate_creds(const struct cred *cred,
+				    const char *file, unsigned line)
+{
+	if (unlikely(creds_are_invalid(cred)))
+		__invalid_creds(cred, file, line);
+}
+
+#define validate_creds(cred)				\
+do {							\
+	__validate_creds((cred), __FILE__, __LINE__);	\
+} while(0)
+
+#define validate_process_creds()				\
+do {								\
+	__validate_process_creds(current, __FILE__, __LINE__);	\
+} while(0)
+
+extern void validate_creds_for_do_exit(struct task_struct *);
+#else
+static inline void validate_creds(const struct cred *cred)
+{
+}
+static inline void validate_creds_for_do_exit(struct task_struct *tsk)
+{
+}
+static inline void validate_process_creds(void)
+{
+}
+#endif
+
 /**
  * get_new_cred - Get a reference on a new set of credentials
  * @cred: The new credentials to reference
@@ -186,7 +249,9 @@ static inline struct cred *get_new_cred(struct cred *cred)
  */
 static inline const struct cred *get_cred(const struct cred *cred)
 {
-	return get_new_cred((struct cred *) cred);
+	struct cred *nonconst_cred = (struct cred *) cred;
+	validate_creds(cred);
+	return get_new_cred(nonconst_cred);
 }
 
 /**
@@ -204,7 +269,7 @@ static inline void put_cred(const struct cred *_cred)
 {
 	struct cred *cred = (struct cred *) _cred;
 
-	BUG_ON(atomic_read(&(cred)->usage) <= 0);
+	validate_creds(cred);
 	if (atomic_dec_and_test(&(cred)->usage))
 		__put_cred(cred);
 }
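
With CONFIG_DEBUG_CREDENTIALS enabled, both get_cred() and put_cred() now validate the credential record, and callers can assert sanity explicitly at any point. A hedged sketch (the surrounding function is illustrative):

#include <linux/cred.h>

static void example_cred_use(void)
{
	/* get_cred() runs validate_creds() before taking the reference */
	const struct cred *cred = get_cred(current_cred());

	/* Cross-check all of current's credential pointers on demand. */
	validate_process_creds();

	/* ... use cred->uid, cred->gid ... */

	put_cred(cred);	/* also validated before the refcount drop */
}

Incidentally, the magic numbers are ASCII when read as big-endian byte sequences: 0x43736564 spells "Csed" and 0x44656144 spells "DeaD", which makes poisoned credentials easy to spot in a hex dump.
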
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index ec29fa268b94..fd929889e8dc 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -115,7 +115,6 @@ struct crypto_async_request;
 struct crypto_aead;
 struct crypto_blkcipher;
 struct crypto_hash;
-struct crypto_ahash;
 struct crypto_rng;
 struct crypto_tfm;
 struct crypto_type;
@@ -146,16 +145,6 @@ struct ablkcipher_request {
 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
-struct ahash_request {
-	struct crypto_async_request base;
-
-	unsigned int nbytes;
-	struct scatterlist *src;
-	u8 *result;
-
-	void *__ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
 /**
  *	struct aead_request - AEAD request
  *	@base: Common attributes for async crypto requests
@@ -220,18 +209,6 @@ struct ablkcipher_alg {
 	unsigned int ivsize;
 };
 
-struct ahash_alg {
-	int (*init)(struct ahash_request *req);
-	int (*reinit)(struct ahash_request *req);
-	int (*update)(struct ahash_request *req);
-	int (*final)(struct ahash_request *req);
-	int (*digest)(struct ahash_request *req);
-	int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
-			unsigned int keylen);
-
-	unsigned int digestsize;
-};
-
 struct aead_alg {
 	int (*setkey)(struct crypto_aead *tfm, const u8 *key,
 		      unsigned int keylen);
@@ -318,7 +295,6 @@ struct rng_alg {
 #define cra_cipher	cra_u.cipher
 #define cra_digest	cra_u.digest
 #define cra_hash	cra_u.hash
-#define cra_ahash	cra_u.ahash
 #define cra_compress	cra_u.compress
 #define cra_rng		cra_u.rng
 
@@ -346,7 +322,6 @@ struct crypto_alg {
 		struct cipher_alg cipher;
 		struct digest_alg digest;
 		struct hash_alg hash;
-		struct ahash_alg ahash;
 		struct compress_alg compress;
 		struct rng_alg rng;
 	} cra_u;
@@ -433,18 +408,6 @@ struct hash_tfm {
 	unsigned int digestsize;
 };
 
-struct ahash_tfm {
-	int (*init)(struct ahash_request *req);
-	int (*update)(struct ahash_request *req);
-	int (*final)(struct ahash_request *req);
-	int (*digest)(struct ahash_request *req);
-	int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
-			unsigned int keylen);
-
-	unsigned int digestsize;
-	unsigned int reqsize;
-};
-
 struct compress_tfm {
 	int (*cot_compress)(struct crypto_tfm *tfm,
 			    const u8 *src, unsigned int slen,
@@ -465,7 +428,6 @@ struct rng_tfm {
 #define crt_blkcipher	crt_u.blkcipher
 #define crt_cipher	crt_u.cipher
 #define crt_hash	crt_u.hash
-#define crt_ahash	crt_u.ahash
 #define crt_compress	crt_u.compress
 #define crt_rng		crt_u.rng
 
@@ -479,7 +441,6 @@ struct crypto_tfm {
 		struct blkcipher_tfm blkcipher;
 		struct cipher_tfm cipher;
 		struct hash_tfm hash;
-		struct ahash_tfm ahash;
 		struct compress_tfm compress;
 		struct rng_tfm rng;
 	} crt_u;
@@ -770,7 +731,7 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc(
 
 static inline void ablkcipher_request_free(struct ablkcipher_request *req)
 {
-	kfree(req);
+	kzfree(req);
 }
 
 static inline void ablkcipher_request_set_callback(
@@ -901,7 +862,7 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
 
 static inline void aead_request_free(struct aead_request *req)
 {
-	kfree(req);
+	kzfree(req);
 }
 
 static inline void aead_request_set_callback(struct aead_request *req,
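
The kfree() to kzfree() change in the request-free helpers matters because request structures can carry key material and IVs in their trailing context area; kzfree() zeroes the allocation before handing it back to the slab. A hedged sketch of kzfree()'s semantics (the kernel's version lives in mm/util.c; details may differ slightly):

#include <linux/slab.h>
#include <linux/string.h>

/* Sketch: clear the whole usable size of the allocation before
 * freeing it, so sensitive bytes do not linger in freed slab memory.
 * ksize() reports the usable size, which can exceed what was asked for.
 */
static void example_kzfree(const void *p)
{
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	memset(mem, 0, ksize(mem));
	kfree(mem);
}
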
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 07dfd460d286..c0f6c3cd788c 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -98,11 +98,6 @@ static inline int is_device_dma_capable(struct device *dev)
 	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
 }
 
-static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
-{
-	return addr + size <= mask;
-}
-
 #ifdef CONFIG_HAS_DMA
 #include <asm/dma-mapping.h>
 #else
diff --git a/include/linux/fips.h b/include/linux/fips.h
new file mode 100644
index 000000000000..f8fb07b0b6b8
--- /dev/null
+++ b/include/linux/fips.h
@@ -0,0 +1,10 @@
+#ifndef _FIPS_H
+#define _FIPS_H
+
+#ifdef CONFIG_CRYPTO_FIPS
+extern int fips_enabled;
+#else
+#define fips_enabled 0
+#endif
+
+#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 73e9b643e455..a79f48373e7e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -715,7 +715,7 @@ struct posix_acl;
 
 struct inode {
 	struct hlist_node	i_hash;
-	struct list_head	i_list;
+	struct list_head	i_list;		/* backing dev IO list */
 	struct list_head	i_sb_list;
 	struct list_head	i_dentry;
 	unsigned long		i_ino;
@@ -1336,9 +1336,6 @@ struct super_block {
 	struct xattr_handler	**s_xattr;
 
 	struct list_head	s_inodes;	/* all inodes */
-	struct list_head	s_dirty;	/* dirty inodes */
-	struct list_head	s_io;		/* parked for writeback */
-	struct list_head	s_more_io;	/* parked for more writeback */
 	struct hlist_head	s_anon;		/* anonymous dentries for (nfs) exporting */
 	struct list_head	s_files;
 	/* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
@@ -1528,6 +1525,7 @@ struct inode_operations {
 	void (*put_link) (struct dentry *, struct nameidata *, void *);
 	void (*truncate) (struct inode *);
 	int (*permission) (struct inode *, int);
+	int (*check_acl)(struct inode *, int);
 	int (*setattr) (struct dentry *, struct iattr *);
 	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
 	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
@@ -1788,6 +1786,7 @@ extern int get_sb_pseudo(struct file_system_type *, char *,
 	struct vfsmount *mnt);
 extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
 int __put_super_and_need_restart(struct super_block *sb);
+void put_super(struct super_block *sb);
 
 /* Alas, no aliases. Too much hassle with bringing module.h everywhere */
 #define fops_get(fops) \
@@ -1998,12 +1997,25 @@ extern void bd_release_from_disk(struct block_device *, struct gendisk *);
 #define CHRDEV_MAJOR_HASH_SIZE	255
 extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
 extern int register_chrdev_region(dev_t, unsigned, const char *);
-extern int register_chrdev(unsigned int, const char *,
-			   const struct file_operations *);
-extern void unregister_chrdev(unsigned int, const char *);
+extern int __register_chrdev(unsigned int major, unsigned int baseminor,
+			     unsigned int count, const char *name,
+			     const struct file_operations *fops);
+extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
+				unsigned int count, const char *name);
 extern void unregister_chrdev_region(dev_t, unsigned);
 extern void chrdev_show(struct seq_file *,off_t);
 
+static inline int register_chrdev(unsigned int major, const char *name,
+				  const struct file_operations *fops)
+{
+	return __register_chrdev(major, 0, 256, name, fops);
+}
+
+static inline void unregister_chrdev(unsigned int major, const char *name)
+{
+	__unregister_chrdev(major, 0, 256, name);
+}
+
 /* fs/block_dev.c */
 #define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
 #define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
@@ -2070,8 +2082,6 @@ static inline void invalidate_remote_inode(struct inode *inode)
 extern int invalidate_inode_pages2(struct address_space *mapping);
 extern int invalidate_inode_pages2_range(struct address_space *mapping,
 					 pgoff_t start, pgoff_t end);
-extern void generic_sync_sb_inodes(struct super_block *sb,
-				   struct writeback_control *wbc);
 extern int write_inode_now(struct inode *, int);
 extern int filemap_fdatawrite(struct address_space *);
 extern int filemap_flush(struct address_space *);
@@ -2186,7 +2196,6 @@ extern int bdev_read_only(struct block_device *);
 extern int set_blocksize(struct block_device *, int);
 extern int sb_set_blocksize(struct super_block *, int);
 extern int sb_min_blocksize(struct super_block *, int);
-extern int sb_has_dirty_inodes(struct super_block *);
 
 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
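
register_chrdev()/unregister_chrdev() keep their old signatures but are now inline wrappers that claim minors 0..255 of the given major, while new callers can use __register_chrdev() for a narrower minor range. A hedged usage sketch (device and function names are illustrative):

#include <linux/fs.h>
#include <linux/module.h>

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
};

static int example_major;

static int __init example_init(void)
{
	/* major == 0 asks for a dynamic major; minors 0..255 are claimed */
	example_major = register_chrdev(0, "example", &example_fops);
	return example_major < 0 ? example_major : 0;
}

static void __exit example_exit(void)
{
	unregister_chrdev(example_major, "example");
}
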
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8246c697863d..6d527ee82b2b 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -64,6 +64,12 @@
 #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
 #define NMI_OFFSET	(1UL << NMI_SHIFT)
 
+#ifndef PREEMPT_ACTIVE
+#define PREEMPT_ACTIVE_BITS	1
+#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
+#define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
+#endif
+
 #if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
 #error PREEMPT_ACTIVE is too low!
 #endif
@@ -132,7 +138,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
 }
 #endif
 
-#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
+#if defined(CONFIG_NO_HZ)
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
 extern void rcu_nmi_enter(void);
@@ -142,7 +148,7 @@ extern void rcu_nmi_exit(void);
 # define rcu_irq_exit() do { } while (0)
 # define rcu_nmi_enter() do { } while (0)
 # define rcu_nmi_exit() do { } while (0)
-#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
+#endif /* #if defined(CONFIG_NO_HZ) */
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7fc01b13be43..9e7f2e8fc66e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -94,6 +94,16 @@ extern struct group_info init_groups;
 # define CAP_INIT_BSET  CAP_INIT_EFF_SET
 #endif
 
+#ifdef CONFIG_TREE_PREEMPT_RCU
+#define INIT_TASK_RCU_PREEMPT(tsk)					\
+	.rcu_read_lock_nesting = 0,					\
+	.rcu_read_unlock_special = 0,					\
+	.rcu_blocked_node = NULL,					\
+	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
+#else
+#define INIT_TASK_RCU_PREEMPT(tsk)
+#endif
+
 extern struct cred init_cred;
 
 #ifdef CONFIG_PERF_COUNTERS
@@ -173,6 +183,7 @@ extern struct cred init_cred;
 	INIT_LOCKDEP							\
 	INIT_FTRACE_GRAPH						\
 	INIT_TRACE_RECURSION						\
+	INIT_TASK_RCU_PREEMPT(tsk)					\
 }
 
 
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 35e7df1e9f30..1ac57e522a1f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -50,6 +50,9 @@
  * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
  *                registered first in an shared interrupt is considered for
  *                performance reasons)
+ * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
+ *                Used by threaded interrupts which need to keep the
+ *                irq line disabled until the threaded handler has been run.
 */
 #define IRQF_DISABLED		0x00000020
 #define IRQF_SAMPLE_RANDOM	0x00000040
@@ -59,6 +62,7 @@
 #define IRQF_PERCPU		0x00000400
 #define IRQF_NOBALANCING	0x00000800
 #define IRQF_IRQPOLL		0x00001000
+#define IRQF_ONESHOT		0x00002000
 
 /*
  * Bits used by threaded handlers:
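
IRQF_ONESHOT pairs with request_threaded_irq(): with a NULL primary handler the core keeps the line masked until the thread completes, which is what slow bus-attached (e.g. I2C) chips need. A hedged sketch (names are illustrative):

#include <linux/interrupt.h>

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* Process context: may sleep, e.g. for I2C transfers.  The irq
	 * line stays disabled until we return, thanks to IRQF_ONESHOT. */
	return IRQ_HANDLED;
}

static int example_setup(unsigned int irq, void *dev)
{
	/* NULL hardirq handler: the core masks the line and wakes the
	 * thread; IRQF_ONESHOT keeps it masked until the thread is done. */
	return request_threaded_irq(irq, NULL, example_thread_fn,
				    IRQF_ONESHOT, "example", dev);
}
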
diff --git a/include/linux/irq.h b/include/linux/irq.h
index cb2e77a3f7f7..ae9653dbcd78 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -69,6 +69,8 @@ typedef	void (*irq_flow_handler_t)(unsigned int irq,
 #define IRQ_MOVE_PCNTXT		0x01000000	/* IRQ migration from process context */
 #define IRQ_AFFINITY_SET	0x02000000	/* IRQ affinity was set from userspace*/
 #define IRQ_SUSPENDED		0x04000000	/* IRQ has gone through suspend sequence */
+#define IRQ_ONESHOT		0x08000000	/* IRQ is not unmasked after hardirq */
+#define IRQ_NESTED_THREAD	0x10000000	/* IRQ is nested into another, no own handler thread */
 
 #ifdef CONFIG_IRQ_PER_CPU
 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
@@ -100,6 +102,9 @@ struct msi_desc;
 * @set_type:		set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @set_wake:		enable/disable power-management wake-on of an IRQ
 *
+ * @bus_lock:		function to lock access to slow bus (i2c) chips
+ * @bus_sync_unlock:	function to sync and unlock slow bus (i2c) chips
+ *
 * @release:		release function solely used by UML
 * @typename:		obsoleted by name, kept as migration helper
 */
@@ -123,6 +128,9 @@ struct irq_chip {
 	int		(*set_type)(unsigned int irq, unsigned int flow_type);
 	int		(*set_wake)(unsigned int irq, unsigned int on);
 
+	void		(*bus_lock)(unsigned int irq);
+	void		(*bus_sync_unlock)(unsigned int irq);
+
 	/* Currently used only by UML, might disappear one day.*/
 #ifdef CONFIG_IRQ_RELEASE_METHOD
 	void		(*release)(unsigned int irq, void *dev_id);
@@ -220,13 +228,6 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
 extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
 
 /*
- * Migration helpers for obsolete names, they will go away:
- */
-#define hw_interrupt_type	irq_chip
-#define no_irq_type		no_irq_chip
-typedef struct irq_desc		irq_desc_t;
-
-/*
  * Pick up the arch-dependent methods:
 */
 #include <asm/hw_irq.h>
@@ -289,6 +290,7 @@ extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_nested_irq(unsigned int irq);
 
 /*
 * Monolithic do_IRQ implementation.
@@ -379,6 +381,8 @@ set_irq_chained_handler(unsigned int irq,
 	__set_irq_handler(irq, handle, 1, NULL);
 }
 
+extern void set_irq_nested_thread(unsigned int irq, int nest);
+
 extern void set_irq_noprobe(unsigned int irq);
 extern void set_irq_probe(unsigned int irq);
 
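
The nested-thread pieces combine for interrupt controllers that sit behind a slow bus: the parent irq's threaded handler reads the chip's status register and calls handle_nested_irq() for each pending sub-interrupt, and set_irq_nested_thread() marks the sub-irqs as having no handler thread of their own. A hedged sketch; every name below is illustrative:

#include <linux/interrupt.h>
#include <linux/irq.h>

#define EXAMPLE_IRQ_BASE	256	/* illustrative sub-irq range */
#define EXAMPLE_NR_IRQS		8

/* Illustrative stand-in for a status-register read over I2C. */
static unsigned int example_read_status(void *data)
{
	return 0;
}

/* Threaded handler of the parent (bus-attached) irq chip. */
static irqreturn_t example_demux_thread(int irq, void *data)
{
	unsigned int status = example_read_status(data);
	int bit;

	for (bit = 0; bit < EXAMPLE_NR_IRQS; bit++)
		if (status & (1 << bit))
			handle_nested_irq(EXAMPLE_IRQ_BASE + bit);

	return IRQ_HANDLED;
}

static void example_setup_subirqs(void)
{
	int i;

	for (i = 0; i < EXAMPLE_NR_IRQS; i++)
		set_irq_nested_thread(EXAMPLE_IRQ_BASE + i, 1);
}
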
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index ec87b212ff7d..7bf89bc8cbca 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -41,6 +41,12 @@ extern struct irq_desc *irq_to_desc(unsigned int irq);
 		;						\
 	else
 
+#ifdef CONFIG_SMP
+#define irq_node(irq)	(irq_to_desc(irq)->node)
+#else
+#define irq_node(irq)	0
+#endif
+
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
 #define for_each_irq_nr(irq)                   \
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d6320a3e8def..2b5b1e0899a8 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -125,7 +125,7 @@ extern int _cond_resched(void);
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
-  void __might_sleep(char *file, int line);
+  void __might_sleep(char *file, int line, int preempt_offset);
 /**
 * might_sleep - annotation for functions that can sleep
 *
@@ -137,8 +137,9 @@ extern int _cond_resched(void);
 * supposed to.
 */
 # define might_sleep() \
-	do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
+	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
 #else
+  static inline void __might_sleep(char *file, int line, int preempt_offset) { }
 # define might_sleep() do { might_resched(); } while (0)
 #endif
 
diff --git a/include/linux/key.h b/include/linux/key.h
index e544f466d69a..cd50dfa1d4c2 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -129,7 +129,10 @@ struct key {
 	struct rw_semaphore	sem;		/* change vs change sem */
 	struct key_user		*user;		/* owner of this key */
 	void			*security;	/* security data for this key */
-	time_t			expiry;		/* time at which key expires (or 0) */
+	union {
+		time_t		expiry;		/* time at which key expires (or 0) */
+		time_t		revoked_at;	/* time at which key was revoked */
+	};
 	uid_t			uid;
 	gid_t			gid;
 	key_perm_t		perm;		/* access permissions */
@@ -275,6 +278,8 @@ static inline key_serial_t key_serial(struct key *key)
 extern ctl_table key_sysctls[];
 #endif
 
+extern void key_replace_session_keyring(void);
+
 /*
 * the userspace interface
 */
@@ -297,6 +302,7 @@ extern void key_init(void);
 #define key_fsuid_changed(t)		do { } while(0)
 #define key_fsgid_changed(t)		do { } while(0)
 #define key_init()			do { } while(0)
+#define key_replace_session_keyring()	do { } while(0)
 
 #endif /* CONFIG_KEYS */
 #endif /* __KERNEL__ */
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
index c0688eb72093..bd383f1944fb 100644
--- a/include/linux/keyctl.h
+++ b/include/linux/keyctl.h
@@ -52,5 +52,6 @@
 #define KEYCTL_SET_TIMEOUT		15	/* set key timeout */
 #define KEYCTL_ASSUME_AUTHORITY		16	/* assume request_key() authorisation */
 #define KEYCTL_GET_SECURITY		17	/* get key security label */
+#define KEYCTL_SESSION_TO_PARENT	18	/* apply session keyring to parent process */
 
 #endif /*  _LINUX_KEYCTL_H */
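
Userspace reaches the new operation through the keyctl() syscall with no further arguments; the caller's session keyring is installed on its parent when the parent next returns to userspace. A hedged sketch without libkeyutils:

#include <sys/syscall.h>
#include <unistd.h>

#ifndef KEYCTL_SESSION_TO_PARENT
#define KEYCTL_SESSION_TO_PARENT	18
#endif

/* Ask the kernel to hand our session keyring to our parent process. */
static long example_session_to_parent(void)
{
	return syscall(SYS_keyctl, KEYCTL_SESSION_TO_PARENT);
}
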
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 47b39b7c7e84..dc2fd545db00 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -34,6 +34,8 @@ void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
 int kmemcheck_show_addr(unsigned long address);
 int kmemcheck_hide_addr(unsigned long address);
 
+bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
+
 #else
 #define kmemcheck_enabled 0
 
@@ -99,6 +101,11 @@ static inline void kmemcheck_mark_initialized_pages(struct page *p,
 {
 }
 
+static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
+{
+	return true;
+}
+
 #endif /* CONFIG_KMEMCHECK */
 
 /*
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 6a63807f714e..3c7497d46ee9 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -23,18 +23,18 @@
 
 #ifdef CONFIG_DEBUG_KMEMLEAK
 
-extern void kmemleak_init(void);
+extern void kmemleak_init(void) __ref;
 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
-			   gfp_t gfp);
-extern void kmemleak_free(const void *ptr);
-extern void kmemleak_free_part(const void *ptr, size_t size);
+			   gfp_t gfp) __ref;
+extern void kmemleak_free(const void *ptr) __ref;
+extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
 extern void kmemleak_padding(const void *ptr, unsigned long offset,
-			     size_t size);
-extern void kmemleak_not_leak(const void *ptr);
-extern void kmemleak_ignore(const void *ptr);
+			     size_t size) __ref;
+extern void kmemleak_not_leak(const void *ptr) __ref;
+extern void kmemleak_ignore(const void *ptr) __ref;
 extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
-			       size_t length, gfp_t gfp);
-extern void kmemleak_no_scan(const void *ptr);
+			       size_t length, gfp_t gfp) __ref;
+extern void kmemleak_no_scan(const void *ptr) __ref;
 
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
 					    int min_count, unsigned long flags,
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b25d1b53df0d..9ccf0e286b2a 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -149,6 +149,12 @@ struct lock_list {
 	struct lock_class		*class;
 	struct stack_trace		trace;
 	int				distance;
+
+	/*
+	 * The parent field is used to implement breadth-first search, and the
+	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
+	 */
+	struct lock_list		*parent;
 };
 
 /*
@@ -208,10 +214,12 @@ struct held_lock {
 	 * interrupt context:
 	 */
 	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
-	unsigned int trylock:1;
+	unsigned int trylock:1;						/* 16 bits */
+
 	unsigned int read:2;        /* see lock_acquire() comment */
 	unsigned int check:2;       /* see lock_acquire() comment */
 	unsigned int hardirqs_off:1;
+	unsigned int references:11;					/* 32 bits */
 };
 
 /*
@@ -291,6 +299,10 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
 
+#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)
+
+extern int lock_is_held(struct lockdep_map *lock);
+
 extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);
@@ -309,6 +321,8 @@ extern void lockdep_trace_alloc(gfp_t mask);
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
 
+#define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
+
 #else /* !LOCKDEP */
 
 static inline void lockdep_off(void)
@@ -353,6 +367,8 @@ struct lock_class_key { };
 
 #define lockdep_depth(tsk)	(0)
 
+#define lockdep_assert_held(l)			do { } while (0)
+
 #endif /* !LOCKDEP */
 
 #ifdef CONFIG_LOCK_STAT
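
lockdep_assert_held() gives internal helpers a lockdep-backed way to document and enforce their locking contract; it compiles away entirely without CONFIG_LOCKDEP. A hedged sketch (the structure is illustrative):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct example_dev {
	spinlock_t	lock;
	unsigned long	count;
};

/* Caller must hold dev->lock; lockdep WARNs if it does not. */
static void example_update_locked(struct example_dev *dev)
{
	lockdep_assert_held(&dev->lock);
	dev->count++;
}
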
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index e461b2c3d711..190c37854870 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -33,6 +33,7 @@ struct common_audit_data {
 #define LSM_AUDIT_DATA_IPC	4
 #define LSM_AUDIT_DATA_TASK	5
 #define LSM_AUDIT_DATA_KEY	6
+#define LSM_AUDIT_NO_AUDIT	7
 	struct task_struct *tsk;
 	union {
 		struct {
@@ -66,16 +67,19 @@ struct common_audit_data {
 		} key_struct;
 #endif
 	} u;
-	const char *function;
 	/* this union contains LSM specific data */
 	union {
+#ifdef CONFIG_SECURITY_SMACK
 		/* SMACK data */
 		struct smack_audit_data {
+			const char *function;
 			char *subject;
 			char *object;
 			char *request;
 			int result;
 		} smack_audit_data;
+#endif
+#ifdef CONFIG_SECURITY_SELINUX
 		/* SELinux data */
 		struct {
 			u32 ssid;
@@ -83,10 +87,12 @@ struct common_audit_data {
 			u16 tclass;
 			u32 requested;
 			u32 audited;
+			u32 denied;
 			struct av_decision *avd;
 			int result;
 		} selinux_audit_data;
-	} lsm_priv;
+#endif
+	};
 /* these callback will be implemented by a specific LSM */
 	void (*lsm_pre_audit)(struct audit_buffer *, void *);
 	void (*lsm_post_audit)(struct audit_buffer *, void *);
@@ -104,7 +110,7 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
 /* Initialize an LSM audit data structure. */
 #define COMMON_AUDIT_DATA_INIT(_d, _t) \
 	{ memset((_d), 0, sizeof(struct common_audit_data)); \
-	  (_d)->type = LSM_AUDIT_DATA_##_t; (_d)->function = __func__; }
+	  (_d)->type = LSM_AUDIT_DATA_##_t; }
 
 void common_lsm_audit(struct common_audit_data *a);
 
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 29af2d5df097..b752e807adde 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -28,8 +28,23 @@ static inline void acpi_nmi_disable(void) { }
 static inline void acpi_nmi_enable(void) { }
 #endif
 
-#ifndef trigger_all_cpu_backtrace
-#define trigger_all_cpu_backtrace() do { } while (0)
+/*
+ * Create trigger_all_cpu_backtrace() out of the arch-provided
+ * base function. Return whether such support was available,
+ * to allow calling code to fall back to some other mechanism:
+ */
+#ifdef arch_trigger_all_cpu_backtrace
+static inline bool trigger_all_cpu_backtrace(void)
+{
+	arch_trigger_all_cpu_backtrace();
+
+	return true;
+}
+#else
+static inline bool trigger_all_cpu_backtrace(void)
+{
+	return false;
+}
 #endif
 
 #endif
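
The boolean return lets callers degrade gracefully on architectures without NMI backtrace support. A hedged sketch of the intended calling pattern:

#include <linux/kernel.h>
#include <linux/nmi.h>

/* Prefer an all-CPU NMI backtrace; if the architecture cannot
 * provide one, fall back to dumping the current stack only.
 */
static void example_dump_all_cpus(void)
{
	if (!trigger_all_cpu_backtrace())
		dump_stack();
}
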
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 1d9518bc4c58..5171639ecf0f 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -67,6 +67,9 @@ struct oprofile_operations {
 
 	/* Initiate a stack backtrace. Optional. */
 	void (*backtrace)(struct pt_regs * const regs, unsigned int depth);
+
+	/* Multiplex between different events. Optional. */
+	int (*switch_events)(void);
 	/* CPU identification string. */
 	char * cpu_type;
 };
@@ -171,7 +174,6 @@ struct op_sample;
 struct op_entry {
 	struct ring_buffer_event *event;
 	struct op_sample *sample;
-	unsigned long irq_flags;
 	unsigned long size;
 	unsigned long *data;
 };
@@ -180,6 +182,7 @@ void oprofile_write_reserve(struct op_entry *entry,
			    struct pt_regs * const regs,
			    unsigned long pc, int code, int size);
 int oprofile_add_data(struct op_entry *entry, unsigned long val);
+int oprofile_add_data64(struct op_entry *entry, u64 val);
 int oprofile_write_commit(struct op_entry *entry);
 
 #endif /* OPROFILE_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index aec3252afcf5..ed5d7501e181 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -132,7 +132,7 @@ static inline int page_cache_get_speculative(struct page *page)
 {
 	VM_BUG_ON(in_interrupt());
 
-#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
 # ifdef CONFIG_PREEMPT
 	VM_BUG_ON(!in_atomic());
 # endif
@@ -170,7 +170,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
 {
 	VM_BUG_ON(in_interrupt());
 
-#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
 # ifdef CONFIG_PREEMPT
 	VM_BUG_ON(!in_atomic());
 # endif
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 9ba1822272c7..972f90d7a32f 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -216,6 +216,7 @@ struct perf_counter_attr {
 #define PERF_COUNTER_IOC_REFRESH	_IO ('$', 2)
 #define PERF_COUNTER_IOC_RESET		_IO ('$', 3)
 #define PERF_COUNTER_IOC_PERIOD	_IOW('$', 4, u64)
+#define PERF_COUNTER_IOC_SET_OUTPUT	_IO ('$', 5)
 
 enum perf_counter_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
@@ -415,6 +416,9 @@ enum perf_callchain_context {
	PERF_CONTEXT_MAX		= (__u64)-4095,
 };
 
+#define PERF_FLAG_FD_NO_GROUP	(1U << 0)
+#define PERF_FLAG_FD_OUTPUT	(1U << 1)
+
 #ifdef __KERNEL__
 /*
 * Kernel-internal data types and definitions:
@@ -536,6 +540,7 @@ struct perf_counter {
	struct list_head		sibling_list;
	int				nr_siblings;
	struct perf_counter		*group_leader;
+	struct perf_counter		*output;
	const struct pmu		*pmu;
 
	enum perf_counter_active_state	state;
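
A hedged userspace sketch of output redirection: after opening two counters, the new ioctl points the second counter's event stream at the first counter's ring buffer, so a single mmap carries both streams (the fd variables are illustrative; error handling omitted):

#include <linux/perf_counter.h>
#include <sys/ioctl.h>

/* Route fd_b's samples into fd_a's mmap'ed ring buffer. */
static int example_redirect_output(int fd_a, int fd_b)
{
	return ioctl(fd_b, PERF_COUNTER_IOC_SET_OUTPUT, fd_a);
}
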
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h deleted file mode 100644 index bfd92e1e5d2c..000000000000 --- a/include/linux/rcuclassic.h +++ /dev/null | |||
@@ -1,178 +0,0 @@ | |||
1 | /* | ||
2 | * Read-Copy Update mechanism for mutual exclusion (classic version) | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright IBM Corporation, 2001 | ||
19 | * | ||
20 | * Author: Dipankar Sarma <dipankar@in.ibm.com> | ||
21 | * | ||
22 | * Based on the original work by Paul McKenney <paulmck@us.ibm.com> | ||
23 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | ||
24 | * Papers: | ||
25 | * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf | ||
26 | * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) | ||
27 | * | ||
28 | * For detailed explanation of Read-Copy Update mechanism see - | ||
29 | * Documentation/RCU | ||
30 | * | ||
31 | */ | ||
32 | |||
33 | #ifndef __LINUX_RCUCLASSIC_H | ||
34 | #define __LINUX_RCUCLASSIC_H | ||
35 | |||
36 | #include <linux/cache.h> | ||
37 | #include <linux/spinlock.h> | ||
38 | #include <linux/threads.h> | ||
39 | #include <linux/cpumask.h> | ||
40 | #include <linux/seqlock.h> | ||
41 | |||
42 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
43 | #define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rcp->jiffies_stall */ | ||
44 | #define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */ | ||
45 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
46 | |||
47 | /* Global control variables for rcupdate callback mechanism. */ | ||
48 | struct rcu_ctrlblk { | ||
49 | long cur; /* Current batch number. */ | ||
50 | long completed; /* Number of the last completed batch */ | ||
51 | long pending; /* Number of the last pending batch */ | ||
52 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
53 | unsigned long gp_start; /* Time at which GP started in jiffies. */ | ||
54 | unsigned long jiffies_stall; | ||
55 | /* Time at which to check for CPU stalls. */ | ||
56 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
57 | |||
58 | int signaled; | ||
59 | |||
60 | spinlock_t lock ____cacheline_internodealigned_in_smp; | ||
61 | DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */ | ||
62 | /* current batch to proceed. */ | ||
63 | } ____cacheline_internodealigned_in_smp; | ||
64 | |||
65 | /* Is batch a before batch b ? */ | ||
66 | static inline int rcu_batch_before(long a, long b) | ||
67 | { | ||
68 | return (a - b) < 0; | ||
69 | } | ||
70 | |||
71 | /* Is batch a after batch b ? */ | ||
72 | static inline int rcu_batch_after(long a, long b) | ||
73 | { | ||
74 | return (a - b) > 0; | ||
75 | } | ||
76 | |||
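The subtraction in rcu_batch_before()/rcu_batch_after() is the usual wraparound-safe ordering idiom (the same trick as the jiffies time_before() macros): as long as two batch numbers are within LONG_MAX of each other, the sign of their difference orders them correctly even after the counter overflows. A standalone illustration, done on unsigned values where wraparound is well defined (the kernel applies it to signed longs directly, relying on its wrapping-arithmetic build flags):

    #include <assert.h>
    #include <limits.h>

    int main(void)
    {
            unsigned long a = ULONG_MAX;    /* batch counter about to wrap */
            unsigned long b = a + 2;        /* wraps around to 1 */

            assert(b < a);                  /* naive comparison is backwards */
            assert((long)(a - b) < 0);      /* sign test: a is "before" b */
            return 0;
    }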
77 | /* Per-CPU data for Read-Copy Update. */ | ||
78 | struct rcu_data { | ||
79 | /* 1) quiescent state handling : */ | ||
80 | long quiescbatch; /* Batch # for grace period */ | ||
81 | int passed_quiesc; /* User-mode/idle loop etc. */ | ||
82 | int qs_pending; /* core waits for quiesc state */ | ||
83 | |||
84 | /* 2) batch handling */ | ||
85 | /* | ||
86 | * if nxtlist is not NULL, then: | ||
87 | * batch: | ||
88 | * The batch # for the last entry of nxtlist | ||
89 | * [*nxttail[1], NULL = *nxttail[2]): | ||
90 | * Entries that batch # <= batch | ||
91 | * [*nxttail[0], *nxttail[1]): | ||
92 | * Entries that batch # <= batch - 1 | ||
93 | * [nxtlist, *nxttail[0]): | ||
94 | * Entries that batch # <= batch - 2 | ||
95 | * The grace period for these entries has completed, and | ||
96 | * the other grace-period-completed entries may be moved | ||
97 | * here temporarily in rcu_process_callbacks(). | ||
98 | */ | ||
99 | long batch; | ||
100 | struct rcu_head *nxtlist; | ||
101 | struct rcu_head **nxttail[3]; | ||
102 | long qlen; /* # of queued callbacks */ | ||
103 | struct rcu_head *donelist; | ||
104 | struct rcu_head **donetail; | ||
105 | long blimit; /* Upper limit on a processed batch */ | ||
106 | int cpu; | ||
107 | struct rcu_head barrier; | ||
108 | }; | ||
109 | |||
110 | /* | ||
111 | * Increment the quiescent state counter. | ||
112 | * The counter is a bit degenerate: we do not need to know | ||
113 | * how many quiescent states passed, just if there was at least | ||
114 | * one since the start of the grace period. Thus just a flag. | ||
115 | */ | ||
116 | extern void rcu_qsctr_inc(int cpu); | ||
117 | extern void rcu_bh_qsctr_inc(int cpu); | ||
118 | |||
119 | extern int rcu_pending(int cpu); | ||
120 | extern int rcu_needs_cpu(int cpu); | ||
121 | |||
122 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
123 | extern struct lockdep_map rcu_lock_map; | ||
124 | # define rcu_read_acquire() \ | ||
125 | lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) | ||
126 | # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) | ||
127 | #else | ||
128 | # define rcu_read_acquire() do { } while (0) | ||
129 | # define rcu_read_release() do { } while (0) | ||
130 | #endif | ||
131 | |||
132 | #define __rcu_read_lock() \ | ||
133 | do { \ | ||
134 | preempt_disable(); \ | ||
135 | __acquire(RCU); \ | ||
136 | rcu_read_acquire(); \ | ||
137 | } while (0) | ||
138 | #define __rcu_read_unlock() \ | ||
139 | do { \ | ||
140 | rcu_read_release(); \ | ||
141 | __release(RCU); \ | ||
142 | preempt_enable(); \ | ||
143 | } while (0) | ||
144 | #define __rcu_read_lock_bh() \ | ||
145 | do { \ | ||
146 | local_bh_disable(); \ | ||
147 | __acquire(RCU_BH); \ | ||
148 | rcu_read_acquire(); \ | ||
149 | } while (0) | ||
150 | #define __rcu_read_unlock_bh() \ | ||
151 | do { \ | ||
152 | rcu_read_release(); \ | ||
153 | __release(RCU_BH); \ | ||
154 | local_bh_enable(); \ | ||
155 | } while (0) | ||
156 | |||
157 | #define __synchronize_sched() synchronize_rcu() | ||
158 | |||
159 | #define call_rcu_sched(head, func) call_rcu(head, func) | ||
160 | |||
161 | extern void __rcu_init(void); | ||
162 | #define rcu_init_sched() do { } while (0) | ||
163 | extern void rcu_check_callbacks(int cpu, int user); | ||
164 | extern void rcu_restart_cpu(int cpu); | ||
165 | |||
166 | extern long rcu_batches_completed(void); | ||
167 | extern long rcu_batches_completed_bh(void); | ||
168 | |||
169 | #define rcu_enter_nohz() do { } while (0) | ||
170 | #define rcu_exit_nohz() do { } while (0) | ||
171 | |||
172 | /* A context switch is a grace period for rcuclassic. */ | ||
173 | static inline int rcu_blocking_is_gp(void) | ||
174 | { | ||
175 | return num_online_cpus() == 1; | ||
176 | } | ||
177 | |||
178 | #endif /* __LINUX_RCUCLASSIC_H */ | ||
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 15fbb3ca634d..95e0615f4d75 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -51,18 +51,26 @@ struct rcu_head { | |||
51 | void (*func)(struct rcu_head *head); | 51 | void (*func)(struct rcu_head *head); |
52 | }; | 52 | }; |
53 | 53 | ||
54 | /* Internal to kernel, but needed by rcupreempt.h. */ | 54 | /* Exported common interfaces */ |
55 | extern void synchronize_rcu(void); | ||
56 | extern void synchronize_rcu_bh(void); | ||
57 | extern void rcu_barrier(void); | ||
58 | extern void rcu_barrier_bh(void); | ||
59 | extern void rcu_barrier_sched(void); | ||
60 | extern void synchronize_sched_expedited(void); | ||
61 | extern int sched_expedited_torture_stats(char *page); | ||
62 | |||
63 | /* Internal to kernel */ | ||
64 | extern void rcu_init(void); | ||
65 | extern void rcu_scheduler_starting(void); | ||
66 | extern int rcu_needs_cpu(int cpu); | ||
55 | extern int rcu_scheduler_active; | 67 | extern int rcu_scheduler_active; |
56 | 68 | ||
57 | #if defined(CONFIG_CLASSIC_RCU) | 69 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
58 | #include <linux/rcuclassic.h> | ||
59 | #elif defined(CONFIG_TREE_RCU) | ||
60 | #include <linux/rcutree.h> | 70 | #include <linux/rcutree.h> |
61 | #elif defined(CONFIG_PREEMPT_RCU) | ||
62 | #include <linux/rcupreempt.h> | ||
63 | #else | 71 | #else |
64 | #error "Unknown RCU implementation specified to kernel configuration" | 72 | #error "Unknown RCU implementation specified to kernel configuration" |
65 | #endif /* #else #if defined(CONFIG_CLASSIC_RCU) */ | 73 | #endif |
66 | 74 | ||
67 | #define RCU_HEAD_INIT { .next = NULL, .func = NULL } | 75 | #define RCU_HEAD_INIT { .next = NULL, .func = NULL } |
68 | #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT | 76 | #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT |
@@ -70,6 +78,16 @@ extern int rcu_scheduler_active; | |||
70 | (ptr)->next = NULL; (ptr)->func = NULL; \ | 78 | (ptr)->next = NULL; (ptr)->func = NULL; \ |
71 | } while (0) | 79 | } while (0) |
72 | 80 | ||
81 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
82 | extern struct lockdep_map rcu_lock_map; | ||
83 | # define rcu_read_acquire() \ | ||
84 | lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) | ||
85 | # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) | ||
86 | #else | ||
87 | # define rcu_read_acquire() do { } while (0) | ||
88 | # define rcu_read_release() do { } while (0) | ||
89 | #endif | ||
90 | |||
73 | /** | 91 | /** |
74 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. | 92 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. |
75 | * | 93 | * |
@@ -99,7 +117,12 @@ extern int rcu_scheduler_active; | |||
99 | * | 117 | * |
100 | * It is illegal to block while in an RCU read-side critical section. | 118 | * It is illegal to block while in an RCU read-side critical section. |
101 | */ | 119 | */ |
102 | #define rcu_read_lock() __rcu_read_lock() | 120 | static inline void rcu_read_lock(void) |
121 | { | ||
122 | __rcu_read_lock(); | ||
123 | __acquire(RCU); | ||
124 | rcu_read_acquire(); | ||
125 | } | ||
103 | 126 | ||
104 | /** | 127 | /** |
105 | * rcu_read_unlock - marks the end of an RCU read-side critical section. | 128 | * rcu_read_unlock - marks the end of an RCU read-side critical section. |
@@ -116,7 +139,12 @@ extern int rcu_scheduler_active; | |||
116 | * used as well. RCU does not care how the writers keep out of each | 139 | * used as well. RCU does not care how the writers keep out of each |
117 | * others' way, as long as they do so. | 140 | * others' way, as long as they do so. |
118 | */ | 141 | */ |
119 | #define rcu_read_unlock() __rcu_read_unlock() | 142 | static inline void rcu_read_unlock(void) |
143 | { | ||
144 | rcu_read_release(); | ||
145 | __release(RCU); | ||
146 | __rcu_read_unlock(); | ||
147 | } | ||
120 | 148 | ||
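As a compact reference for the semantics described above, here is the canonical reader/updater pairing; struct foo, gp, and both functions are hypothetical:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
            int a;
            struct rcu_head rcu;
    };
    static struct foo *gp;          /* RCU-protected; updaters also serialized */

    static int reader(void)
    {
            struct foo *p;
            int val = -1;

            rcu_read_lock();                /* cheap; must not block inside */
            p = rcu_dereference(gp);        /* p stays valid until unlock */
            if (p)
                    val = p->a;
            rcu_read_unlock();
            return val;
    }

    static void updater(int new_a)          /* caller holds the update lock */
    {
            struct foo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
            struct foo *oldp = gp;

            if (!newp)
                    return;
            newp->a = new_a;
            rcu_assign_pointer(gp, newp);   /* publish the new version */
            synchronize_rcu();              /* wait out pre-existing readers */
            kfree(oldp);                    /* no reader can still see oldp */
    }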
121 | /** | 149 | /** |
122 | * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section | 150 | * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section |
@@ -129,14 +157,24 @@ extern int rcu_scheduler_active; | |||
129 | * can use just rcu_read_lock(). | 157 | * can use just rcu_read_lock(). |
130 | * | 158 | * |
131 | */ | 159 | */ |
132 | #define rcu_read_lock_bh() __rcu_read_lock_bh() | 160 | static inline void rcu_read_lock_bh(void) |
161 | { | ||
162 | __rcu_read_lock_bh(); | ||
163 | __acquire(RCU_BH); | ||
164 | rcu_read_acquire(); | ||
165 | } | ||
133 | 166 | ||
134 | /* | 167 | /* |
135 | * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section | 168 | * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section |
136 | * | 169 | * |
137 | * See rcu_read_lock_bh() for more information. | 170 | * See rcu_read_lock_bh() for more information. |
138 | */ | 171 | */ |
139 | #define rcu_read_unlock_bh() __rcu_read_unlock_bh() | 172 | static inline void rcu_read_unlock_bh(void) |
173 | { | ||
174 | rcu_read_release(); | ||
175 | __release(RCU_BH); | ||
176 | __rcu_read_unlock_bh(); | ||
177 | } | ||
140 | 178 | ||
141 | /** | 179 | /** |
142 | * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section | 180 | * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section |
@@ -147,17 +185,34 @@ extern int rcu_scheduler_active; | |||
147 | * - call_rcu_sched() and rcu_barrier_sched() | 185 | * - call_rcu_sched() and rcu_barrier_sched() |
148 | * on the write-side to ensure proper synchronization. | 186 | * on the write-side to ensure proper synchronization. |
149 | */ | 187 | */ |
150 | #define rcu_read_lock_sched() preempt_disable() | 188 | static inline void rcu_read_lock_sched(void) |
151 | #define rcu_read_lock_sched_notrace() preempt_disable_notrace() | 189 | { |
190 | preempt_disable(); | ||
191 | __acquire(RCU_SCHED); | ||
192 | rcu_read_acquire(); | ||
193 | } | ||
194 | static inline notrace void rcu_read_lock_sched_notrace(void) | ||
195 | { | ||
196 | preempt_disable_notrace(); | ||
197 | __acquire(RCU_SCHED); | ||
198 | } | ||
152 | 199 | ||
153 | /* | 200 | /* |
154 | * rcu_read_unlock_sched - marks the end of a RCU-classic critical section | 201 | * rcu_read_unlock_sched - marks the end of a RCU-classic critical section |
155 | * | 202 | * |
156 | * See rcu_read_lock_sched for more information. | 203 | * See rcu_read_lock_sched for more information. |
157 | */ | 204 | */ |
158 | #define rcu_read_unlock_sched() preempt_enable() | 205 | static inline void rcu_read_unlock_sched(void) |
159 | #define rcu_read_unlock_sched_notrace() preempt_enable_notrace() | 206 | { |
160 | 207 | rcu_read_release(); | |
208 | __release(RCU_SCHED); | ||
209 | preempt_enable(); | ||
210 | } | ||
211 | static inline notrace void rcu_read_unlock_sched_notrace(void) | ||
212 | { | ||
213 | __release(RCU_SCHED); | ||
214 | preempt_enable_notrace(); | ||
215 | } | ||
161 | 216 | ||
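The _sched variants turn any preemption-disabled region into a read-side critical section, and with CONFIG_TREE_PREEMPT_RCU they must be paired with the sched flavor of grace period on the write side, exactly as the comment above demands. A sketch reusing the hypothetical struct foo/gp from the earlier example:

    /* Reader: equivalent protection is implied by hardirq context or any
     * other preempt_disable()ed region. */
    rcu_read_lock_sched();
    p = rcu_dereference(gp);
    if (p)
            val = p->a;
    rcu_read_unlock_sched();

    /* Updater: plain synchronize_rcu() is not sufficient for these readers
     * under preemptible RCU; match them with the sched flavor. */
    rcu_assign_pointer(gp, newp);
    synchronize_sched();
    kfree(oldp);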
162 | 217 | ||
163 | /** | 218 | /** |
@@ -259,15 +314,4 @@ extern void call_rcu(struct rcu_head *head, | |||
259 | extern void call_rcu_bh(struct rcu_head *head, | 314 | extern void call_rcu_bh(struct rcu_head *head, |
260 | void (*func)(struct rcu_head *head)); | 315 | void (*func)(struct rcu_head *head)); |
261 | 316 | ||
262 | /* Exported common interfaces */ | ||
263 | extern void synchronize_rcu(void); | ||
264 | extern void rcu_barrier(void); | ||
265 | extern void rcu_barrier_bh(void); | ||
266 | extern void rcu_barrier_sched(void); | ||
267 | |||
268 | /* Internal to kernel */ | ||
269 | extern void rcu_init(void); | ||
270 | extern void rcu_scheduler_starting(void); | ||
271 | extern int rcu_needs_cpu(int cpu); | ||
272 | |||
273 | #endif /* __LINUX_RCUPDATE_H */ | 317 | #endif /* __LINUX_RCUPDATE_H */ |
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h deleted file mode 100644 index fce522782ffa..000000000000 --- a/include/linux/rcupreempt.h +++ /dev/null | |||
@@ -1,127 +0,0 @@ | |||
1 | /* | ||
2 | * Read-Copy Update mechanism for mutual exclusion (RT implementation) | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2006 | ||
19 | * | ||
20 | * Author: Paul McKenney <paulmck@us.ibm.com> | ||
21 | * | ||
22 | * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com> | ||
23 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | ||
24 | * Papers: | ||
25 | * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf | ||
26 | * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) | ||
27 | * | ||
28 | * For detailed explanation of Read-Copy Update mechanism see - | ||
29 | * Documentation/RCU | ||
30 | * | ||
31 | */ | ||
32 | |||
33 | #ifndef __LINUX_RCUPREEMPT_H | ||
34 | #define __LINUX_RCUPREEMPT_H | ||
35 | |||
36 | #include <linux/cache.h> | ||
37 | #include <linux/spinlock.h> | ||
38 | #include <linux/threads.h> | ||
39 | #include <linux/smp.h> | ||
40 | #include <linux/cpumask.h> | ||
41 | #include <linux/seqlock.h> | ||
42 | |||
43 | extern void rcu_qsctr_inc(int cpu); | ||
44 | static inline void rcu_bh_qsctr_inc(int cpu) { } | ||
45 | |||
46 | /* | ||
47 | * Someone might want to pass call_rcu_bh as a function pointer. | ||
48 | * So this needs to just be a rename and not a macro function. | ||
49 | * (no parentheses) | ||
50 | */ | ||
51 | #define call_rcu_bh call_rcu | ||
52 | |||
53 | /** | ||
54 | * call_rcu_sched - Queue RCU callback for invocation after sched grace period. | ||
55 | * @head: structure to be used for queueing the RCU updates. | ||
56 | * @func: actual update function to be invoked after the grace period | ||
57 | * | ||
58 | * The update function will be invoked some time after a full | ||
59 | * synchronize_sched()-style grace period elapses, in other words after | ||
60 | * all currently executing preempt-disabled sections of code (including | ||
61 | * hardirq handlers, NMI handlers, and local_irq_save() blocks) have | ||
62 | * completed. | ||
63 | */ | ||
64 | extern void call_rcu_sched(struct rcu_head *head, | ||
65 | void (*func)(struct rcu_head *head)); | ||
66 | |||
67 | extern void __rcu_read_lock(void) __acquires(RCU); | ||
68 | extern void __rcu_read_unlock(void) __releases(RCU); | ||
69 | extern int rcu_pending(int cpu); | ||
70 | extern int rcu_needs_cpu(int cpu); | ||
71 | |||
72 | #define __rcu_read_lock_bh() { rcu_read_lock(); local_bh_disable(); } | ||
73 | #define __rcu_read_unlock_bh() { local_bh_enable(); rcu_read_unlock(); } | ||
74 | |||
75 | extern void __synchronize_sched(void); | ||
76 | |||
77 | extern void __rcu_init(void); | ||
78 | extern void rcu_init_sched(void); | ||
79 | extern void rcu_check_callbacks(int cpu, int user); | ||
80 | extern void rcu_restart_cpu(int cpu); | ||
81 | extern long rcu_batches_completed(void); | ||
82 | |||
83 | /* | ||
84 | * Return the number of RCU batches processed thus far. Useful for debug | ||
85 | * and statistics. The _bh variant is identical to straight RCU. | ||
86 | */ | ||
87 | static inline long rcu_batches_completed_bh(void) | ||
88 | { | ||
89 | return rcu_batches_completed(); | ||
90 | } | ||
91 | |||
92 | #ifdef CONFIG_RCU_TRACE | ||
93 | struct rcupreempt_trace; | ||
94 | extern long *rcupreempt_flipctr(int cpu); | ||
95 | extern long rcupreempt_data_completed(void); | ||
96 | extern int rcupreempt_flip_flag(int cpu); | ||
97 | extern int rcupreempt_mb_flag(int cpu); | ||
98 | extern char *rcupreempt_try_flip_state_name(void); | ||
99 | extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); | ||
100 | #endif | ||
101 | |||
102 | struct softirq_action; | ||
103 | |||
104 | #ifdef CONFIG_NO_HZ | ||
105 | extern void rcu_enter_nohz(void); | ||
106 | extern void rcu_exit_nohz(void); | ||
107 | #else | ||
108 | # define rcu_enter_nohz() do { } while (0) | ||
109 | # define rcu_exit_nohz() do { } while (0) | ||
110 | #endif | ||
111 | |||
112 | /* | ||
113 | * A context switch is a grace period for rcupreempt synchronize_rcu() | ||
114 | * only during early boot, before the scheduler has been initialized. | ||
115 | * So, how the heck do we get a context switch? Well, if the caller | ||
116 | * invokes synchronize_rcu(), they are willing to accept a context | ||
117 | * switch, so we simply pretend that one happened. | ||
118 | * | ||
119 | * After boot, there might be a blocked or preempted task in an RCU | ||
120 | * read-side critical section, so we cannot then take the fastpath. | ||
121 | */ | ||
122 | static inline int rcu_blocking_is_gp(void) | ||
123 | { | ||
124 | return num_online_cpus() == 1 && !rcu_scheduler_active; | ||
125 | } | ||
126 | |||
127 | #endif /* __LINUX_RCUPREEMPT_H */ | ||
diff --git a/include/linux/rcupreempt_trace.h b/include/linux/rcupreempt_trace.h deleted file mode 100644 index b99ae073192a..000000000000 --- a/include/linux/rcupreempt_trace.h +++ /dev/null | |||
@@ -1,97 +0,0 @@ | |||
1 | /* | ||
2 | * Read-Copy Update mechanism for mutual exclusion (RT implementation) | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2006 | ||
19 | * | ||
20 | * Author: Paul McKenney <paulmck@us.ibm.com> | ||
21 | * | ||
22 | * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com> | ||
23 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | ||
24 | * Papers: | ||
25 | * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf | ||
26 | * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) | ||
27 | * | ||
28 | * For detailed explanation of the Preemptible Read-Copy Update mechanism see - | ||
29 | * http://lwn.net/Articles/253651/ | ||
30 | */ | ||
31 | |||
32 | #ifndef __LINUX_RCUPREEMPT_TRACE_H | ||
33 | #define __LINUX_RCUPREEMPT_TRACE_H | ||
34 | |||
35 | #include <linux/types.h> | ||
36 | #include <linux/kernel.h> | ||
37 | |||
38 | #include <asm/atomic.h> | ||
39 | |||
40 | /* | ||
41 | * PREEMPT_RCU data structures. | ||
42 | */ | ||
43 | |||
44 | struct rcupreempt_trace { | ||
45 | long next_length; | ||
46 | long next_add; | ||
47 | long wait_length; | ||
48 | long wait_add; | ||
49 | long done_length; | ||
50 | long done_add; | ||
51 | long done_remove; | ||
52 | atomic_t done_invoked; | ||
53 | long rcu_check_callbacks; | ||
54 | atomic_t rcu_try_flip_1; | ||
55 | atomic_t rcu_try_flip_e1; | ||
56 | long rcu_try_flip_i1; | ||
57 | long rcu_try_flip_ie1; | ||
58 | long rcu_try_flip_g1; | ||
59 | long rcu_try_flip_a1; | ||
60 | long rcu_try_flip_ae1; | ||
61 | long rcu_try_flip_a2; | ||
62 | long rcu_try_flip_z1; | ||
63 | long rcu_try_flip_ze1; | ||
64 | long rcu_try_flip_z2; | ||
65 | long rcu_try_flip_m1; | ||
66 | long rcu_try_flip_me1; | ||
67 | long rcu_try_flip_m2; | ||
68 | }; | ||
69 | |||
70 | #ifdef CONFIG_RCU_TRACE | ||
71 | #define RCU_TRACE(fn, arg) fn(arg); | ||
72 | #else | ||
73 | #define RCU_TRACE(fn, arg) | ||
74 | #endif | ||
75 | |||
76 | extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace); | ||
77 | extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace); | ||
78 | extern void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace); | ||
79 | extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace); | ||
80 | extern void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace); | ||
81 | extern void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace); | ||
82 | extern void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace); | ||
83 | extern void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace); | ||
84 | extern void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace); | ||
85 | extern void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace); | ||
86 | extern void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace); | ||
87 | extern void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace); | ||
88 | extern void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace); | ||
89 | extern void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace); | ||
90 | extern void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace); | ||
91 | extern void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace); | ||
92 | extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace); | ||
93 | extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace); | ||
94 | extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace); | ||
95 | extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace); | ||
96 | |||
97 | #endif /* __LINUX_RCUPREEMPT_TRACE_H */ | ||
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 5a5153806c42..a89307717825 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -30,264 +30,57 @@ | |||
30 | #ifndef __LINUX_RCUTREE_H | 30 | #ifndef __LINUX_RCUTREE_H |
31 | #define __LINUX_RCUTREE_H | 31 | #define __LINUX_RCUTREE_H |
32 | 32 | ||
33 | #include <linux/cache.h> | 33 | extern void rcu_sched_qs(int cpu); |
34 | #include <linux/spinlock.h> | 34 | extern void rcu_bh_qs(int cpu); |
35 | #include <linux/threads.h> | ||
36 | #include <linux/cpumask.h> | ||
37 | #include <linux/seqlock.h> | ||
38 | 35 | ||
39 | /* | 36 | extern int rcu_needs_cpu(int cpu); |
40 | * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT. | ||
41 | * In theory, it should be possible to add more levels straightforwardly. | ||
42 | * In practice, this has not been tested, so there is probably some | ||
43 | * bug somewhere. | ||
44 | */ | ||
45 | #define MAX_RCU_LVLS 3 | ||
46 | #define RCU_FANOUT (CONFIG_RCU_FANOUT) | ||
47 | #define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT) | ||
48 | #define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT) | ||
49 | |||
50 | #if NR_CPUS <= RCU_FANOUT | ||
51 | # define NUM_RCU_LVLS 1 | ||
52 | # define NUM_RCU_LVL_0 1 | ||
53 | # define NUM_RCU_LVL_1 (NR_CPUS) | ||
54 | # define NUM_RCU_LVL_2 0 | ||
55 | # define NUM_RCU_LVL_3 0 | ||
56 | #elif NR_CPUS <= RCU_FANOUT_SQ | ||
57 | # define NUM_RCU_LVLS 2 | ||
58 | # define NUM_RCU_LVL_0 1 | ||
59 | # define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT) | ||
60 | # define NUM_RCU_LVL_2 (NR_CPUS) | ||
61 | # define NUM_RCU_LVL_3 0 | ||
62 | #elif NR_CPUS <= RCU_FANOUT_CUBE | ||
63 | # define NUM_RCU_LVLS 3 | ||
64 | # define NUM_RCU_LVL_0 1 | ||
65 | # define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ) | ||
66 | # define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT)) | ||
67 | # define NUM_RCU_LVL_3 NR_CPUS | ||
68 | #else | ||
69 | # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" | ||
70 | #endif /* #if (NR_CPUS) <= RCU_FANOUT */ | ||
71 | |||
72 | #define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3) | ||
73 | #define NUM_RCU_NODES (RCU_SUM - NR_CPUS) | ||
74 | |||
75 | /* | ||
76 | * Dynticks per-CPU state. | ||
77 | */ | ||
78 | struct rcu_dynticks { | ||
79 | int dynticks_nesting; /* Track nesting level, sort of. */ | ||
80 | int dynticks; /* Even value for dynticks-idle, else odd. */ | ||
81 | int dynticks_nmi; /* Even value for either dynticks-idle or */ | ||
82 | /* not in nmi handler, else odd. So this */ | ||
83 | /* remains even for nmi from irq handler. */ | ||
84 | }; | ||
85 | |||
86 | /* | ||
87 | * Definition for node within the RCU grace-period-detection hierarchy. | ||
88 | */ | ||
89 | struct rcu_node { | ||
90 | spinlock_t lock; | ||
91 | unsigned long qsmask; /* CPUs or groups that need to switch in */ | ||
92 | /* order for current grace period to proceed.*/ | ||
93 | unsigned long qsmaskinit; | ||
94 | /* Per-GP initialization for qsmask. */ | ||
95 | unsigned long grpmask; /* Mask to apply to parent qsmask. */ | ||
96 | int grplo; /* lowest-numbered CPU or group here. */ | ||
97 | int grphi; /* highest-numbered CPU or group here. */ | ||
98 | u8 grpnum; /* CPU/group number for next level up. */ | ||
99 | u8 level; /* root is at level 0. */ | ||
100 | struct rcu_node *parent; | ||
101 | } ____cacheline_internodealigned_in_smp; | ||
102 | |||
103 | /* Index values for nxttail array in struct rcu_data. */ | ||
104 | #define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ | ||
105 | #define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */ | ||
106 | #define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */ | ||
107 | #define RCU_NEXT_TAIL 3 | ||
108 | #define RCU_NEXT_SIZE 4 | ||
109 | |||
110 | /* Per-CPU data for read-copy update. */ | ||
111 | struct rcu_data { | ||
112 | /* 1) quiescent-state and grace-period handling : */ | ||
113 | long completed; /* Track rsp->completed gp number */ | ||
114 | /* in order to detect GP end. */ | ||
115 | long gpnum; /* Highest gp number that this CPU */ | ||
116 | /* is aware of having started. */ | ||
117 | long passed_quiesc_completed; | ||
118 | /* Value of completed at time of qs. */ | ||
119 | bool passed_quiesc; /* User-mode/idle loop etc. */ | ||
120 | bool qs_pending; /* Core waits for quiesc state. */ | ||
121 | bool beenonline; /* CPU online at least once. */ | ||
122 | struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ | ||
123 | unsigned long grpmask; /* Mask to apply to leaf qsmask. */ | ||
124 | |||
125 | /* 2) batch handling */ | ||
126 | /* | ||
127 | * If nxtlist is not NULL, it is partitioned as follows. | ||
128 | * Any of the partitions might be empty, in which case the | ||
129 | * pointer to that partition will be equal to the pointer for | ||
130 | * the following partition. When the list is empty, all of | ||
131 | * the nxttail elements point to nxtlist, which is NULL. | ||
132 | * | ||
133 | * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]): | ||
134 | * Entries that might have arrived after current GP ended | ||
135 | * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]): | ||
136 | * Entries known to have arrived before current GP ended | ||
137 | * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]): | ||
138 | * Entries that batch # <= ->completed - 1: waiting for current GP | ||
139 | * [nxtlist, *nxttail[RCU_DONE_TAIL]): | ||
140 | * Entries that batch # <= ->completed | ||
141 | * The grace period for these entries has completed, and | ||
142 | * the other grace-period-completed entries may be moved | ||
143 | * here temporarily in rcu_process_callbacks(). | ||
144 | */ | ||
145 | struct rcu_head *nxtlist; | ||
146 | struct rcu_head **nxttail[RCU_NEXT_SIZE]; | ||
147 | long qlen; /* # of queued callbacks */ | ||
148 | long blimit; /* Upper limit on a processed batch */ | ||
149 | |||
150 | #ifdef CONFIG_NO_HZ | ||
151 | /* 3) dynticks interface. */ | ||
152 | struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */ | ||
153 | int dynticks_snap; /* Per-GP tracking for dynticks. */ | ||
154 | int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */ | ||
155 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
156 | |||
157 | /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ | ||
158 | #ifdef CONFIG_NO_HZ | ||
159 | unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ | ||
160 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
161 | unsigned long offline_fqs; /* Kicked due to being offline. */ | ||
162 | unsigned long resched_ipi; /* Sent a resched IPI. */ | ||
163 | |||
164 | /* 5) __rcu_pending() statistics. */ | ||
165 | long n_rcu_pending; /* rcu_pending() calls since boot. */ | ||
166 | long n_rp_qs_pending; | ||
167 | long n_rp_cb_ready; | ||
168 | long n_rp_cpu_needs_gp; | ||
169 | long n_rp_gp_completed; | ||
170 | long n_rp_gp_started; | ||
171 | long n_rp_need_fqs; | ||
172 | long n_rp_need_nothing; | ||
173 | |||
174 | int cpu; | ||
175 | }; | ||
176 | |||
177 | /* Values for signaled field in struct rcu_state. */ | ||
178 | #define RCU_GP_INIT 0 /* Grace period being initialized. */ | ||
179 | #define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */ | ||
180 | #define RCU_FORCE_QS 2 /* Need to force quiescent state. */ | ||
181 | #ifdef CONFIG_NO_HZ | ||
182 | #define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK | ||
183 | #else /* #ifdef CONFIG_NO_HZ */ | ||
184 | #define RCU_SIGNAL_INIT RCU_FORCE_QS | ||
185 | #endif /* #else #ifdef CONFIG_NO_HZ */ | ||
186 | |||
187 | #define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ | ||
188 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
189 | #define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rsp->jiffies_stall */ | ||
190 | #define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rsp->jiffies_stall */ | ||
191 | #define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */ | ||
192 | /* to take at least one */ | ||
193 | /* scheduling clock irq */ | ||
194 | /* before ratting on them. */ | ||
195 | |||
196 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
197 | |||
198 | /* | ||
199 | * RCU global state, including node hierarchy. This hierarchy is | ||
200 | * represented in "heap" form in a dense array. The root (first level) | ||
201 | * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second | ||
202 | * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]), | ||
203 | * and the third level in ->node[m+1] and following (->node[m+1] referenced | ||
204 | * by ->level[2]). The number of levels is determined by the number of | ||
205 | * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy" | ||
206 | * consisting of a single rcu_node. | ||
207 | */ | ||
208 | struct rcu_state { | ||
209 | struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */ | ||
210 | struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */ | ||
211 | u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ | ||
212 | u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */ | ||
213 | struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */ | ||
214 | |||
215 | /* The following fields are guarded by the root rcu_node's lock. */ | ||
216 | |||
217 | u8 signaled ____cacheline_internodealigned_in_smp; | ||
218 | /* Force QS state. */ | ||
219 | long gpnum; /* Current gp number. */ | ||
220 | long completed; /* # of last completed gp. */ | ||
221 | spinlock_t onofflock; /* exclude on/offline and */ | ||
222 | /* starting new GP. */ | ||
223 | spinlock_t fqslock; /* Only one task forcing */ | ||
224 | /* quiescent states. */ | ||
225 | unsigned long jiffies_force_qs; /* Time at which to invoke */ | ||
226 | /* force_quiescent_state(). */ | ||
227 | unsigned long n_force_qs; /* Number of calls to */ | ||
228 | /* force_quiescent_state(). */ | ||
229 | unsigned long n_force_qs_lh; /* ~Number of calls leaving */ | ||
230 | /* due to lock unavailable. */ | ||
231 | unsigned long n_force_qs_ngp; /* Number of calls leaving */ | ||
232 | /* due to no GP active. */ | ||
233 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
234 | unsigned long gp_start; /* Time at which GP started, */ | ||
235 | /* but in jiffies. */ | ||
236 | unsigned long jiffies_stall; /* Time at which to check */ | ||
237 | /* for CPU stalls. */ | ||
238 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
239 | #ifdef CONFIG_NO_HZ | ||
240 | long dynticks_completed; /* Value of completed @ snap. */ | ||
241 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
242 | }; | ||
243 | 37 | ||
244 | extern void rcu_qsctr_inc(int cpu); | 38 | #ifdef CONFIG_TREE_PREEMPT_RCU |
245 | extern void rcu_bh_qsctr_inc(int cpu); | ||
246 | 39 | ||
247 | extern int rcu_pending(int cpu); | 40 | extern void __rcu_read_lock(void); |
248 | extern int rcu_needs_cpu(int cpu); | 41 | extern void __rcu_read_unlock(void); |
42 | extern void exit_rcu(void); | ||
249 | 43 | ||
250 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 44 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
251 | extern struct lockdep_map rcu_lock_map; | ||
252 | # define rcu_read_acquire() \ | ||
253 | lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) | ||
254 | # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) | ||
255 | #else | ||
256 | # define rcu_read_acquire() do { } while (0) | ||
257 | # define rcu_read_release() do { } while (0) | ||
258 | #endif | ||
259 | 45 | ||
260 | static inline void __rcu_read_lock(void) | 46 | static inline void __rcu_read_lock(void) |
261 | { | 47 | { |
262 | preempt_disable(); | 48 | preempt_disable(); |
263 | __acquire(RCU); | ||
264 | rcu_read_acquire(); | ||
265 | } | 49 | } |
50 | |||
266 | static inline void __rcu_read_unlock(void) | 51 | static inline void __rcu_read_unlock(void) |
267 | { | 52 | { |
268 | rcu_read_release(); | ||
269 | __release(RCU); | ||
270 | preempt_enable(); | 53 | preempt_enable(); |
271 | } | 54 | } |
55 | |||
56 | static inline void exit_rcu(void) | ||
57 | { | ||
58 | } | ||
59 | |||
60 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
61 | |||
272 | static inline void __rcu_read_lock_bh(void) | 62 | static inline void __rcu_read_lock_bh(void) |
273 | { | 63 | { |
274 | local_bh_disable(); | 64 | local_bh_disable(); |
275 | __acquire(RCU_BH); | ||
276 | rcu_read_acquire(); | ||
277 | } | 65 | } |
278 | static inline void __rcu_read_unlock_bh(void) | 66 | static inline void __rcu_read_unlock_bh(void) |
279 | { | 67 | { |
280 | rcu_read_release(); | ||
281 | __release(RCU_BH); | ||
282 | local_bh_enable(); | 68 | local_bh_enable(); |
283 | } | 69 | } |
284 | 70 | ||
285 | #define __synchronize_sched() synchronize_rcu() | 71 | #define __synchronize_sched() synchronize_rcu() |
286 | 72 | ||
287 | #define call_rcu_sched(head, func) call_rcu(head, func) | 73 | extern void call_rcu_sched(struct rcu_head *head, |
74 | void (*func)(struct rcu_head *rcu)); | ||
288 | 75 | ||
289 | static inline void rcu_init_sched(void) | 76 | static inline void synchronize_rcu_expedited(void) |
290 | { | 77 | { |
78 | synchronize_sched_expedited(); | ||
79 | } | ||
80 | |||
81 | static inline void synchronize_rcu_bh_expedited(void) | ||
82 | { | ||
83 | synchronize_sched_expedited(); | ||
291 | } | 84 | } |
292 | 85 | ||
293 | extern void __rcu_init(void); | 86 | extern void __rcu_init(void); |
@@ -296,6 +89,11 @@ extern void rcu_restart_cpu(int cpu); | |||
296 | 89 | ||
297 | extern long rcu_batches_completed(void); | 90 | extern long rcu_batches_completed(void); |
298 | extern long rcu_batches_completed_bh(void); | 91 | extern long rcu_batches_completed_bh(void); |
92 | extern long rcu_batches_completed_sched(void); | ||
93 | |||
94 | static inline void rcu_init_sched(void) | ||
95 | { | ||
96 | } | ||
299 | 97 | ||
300 | #ifdef CONFIG_NO_HZ | 98 | #ifdef CONFIG_NO_HZ |
301 | void rcu_enter_nohz(void); | 99 | void rcu_enter_nohz(void); |
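Both new expedited wrappers funnel into synchronize_sched_expedited(), trading extra work (actively prodding every CPU for a quiescent state) for a much shorter grace period. A hedged sketch of the kind of update path that wants this; mylist_lock and p are hypothetical:

    /* Rare but latency-sensitive removal, e.g. a configuration change: */
    spin_lock(&mylist_lock);
    list_del_rcu(&p->list);
    spin_unlock(&mylist_lock);

    synchronize_rcu_expedited();    /* costs more CPU, returns much sooner */
    kfree(p);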
diff --git a/include/linux/sched.h b/include/linux/sched.h index 0f1ea4a66957..f3d74bd04d18 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -38,6 +38,8 @@ | |||
38 | #define SCHED_BATCH 3 | 38 | #define SCHED_BATCH 3 |
39 | /* SCHED_ISO: reserved but not implemented yet */ | 39 | /* SCHED_ISO: reserved but not implemented yet */ |
40 | #define SCHED_IDLE 5 | 40 | #define SCHED_IDLE 5 |
41 | /* Can be ORed in to make sure the process is reverted to SCHED_NORMAL on fork */ | ||
42 | #define SCHED_RESET_ON_FORK 0x40000000 | ||
41 | 43 | ||
42 | #ifdef __KERNEL__ | 44 | #ifdef __KERNEL__ |
43 | 45 | ||
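SCHED_RESET_ON_FORK closes a classic realtime pitfall: a SCHED_FIFO service that forks helpers would otherwise pass its realtime policy and priority on to every child. A hedged userspace sketch; the constant is defined locally because libc headers may not carry it yet:

    #include <sched.h>
    #include <stdio.h>

    #ifndef SCHED_RESET_ON_FORK
    # define SCHED_RESET_ON_FORK 0x40000000
    #endif

    int main(void)
    {
            struct sched_param sp = { .sched_priority = 50 };

            /* This task runs SCHED_FIFO; children revert to SCHED_NORMAL. */
            if (sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp))
                    perror("sched_setscheduler");
            return 0;
    }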
@@ -796,18 +798,19 @@ enum cpu_idle_type { | |||
796 | #define SCHED_LOAD_SCALE_FUZZ SCHED_LOAD_SCALE | 798 | #define SCHED_LOAD_SCALE_FUZZ SCHED_LOAD_SCALE |
797 | 799 | ||
798 | #ifdef CONFIG_SMP | 800 | #ifdef CONFIG_SMP |
799 | #define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */ | 801 | #define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */ |
800 | #define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */ | 802 | #define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ |
801 | #define SD_BALANCE_EXEC 4 /* Balance on exec */ | 803 | #define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ |
802 | #define SD_BALANCE_FORK 8 /* Balance on fork, clone */ | 804 | #define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ |
803 | #define SD_WAKE_IDLE 16 /* Wake to idle CPU on task wakeup */ | 805 | #define SD_WAKE_IDLE 0x0010 /* Wake to idle CPU on task wakeup */ |
804 | #define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */ | 806 | #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ |
805 | #define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */ | 807 | #define SD_WAKE_BALANCE 0x0040 /* Perform balancing at task wakeup */ |
806 | #define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */ | 808 | #define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ |
807 | #define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */ | 809 | #define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */ |
808 | #define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */ | 810 | #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ |
809 | #define SD_SERIALIZE 1024 /* Only a single load balancing instance */ | 811 | #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ |
810 | #define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */ | 812 | #define SD_WAKE_IDLE_FAR 0x0800 /* Gain latency sacrificing cache hit */ |
813 | #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ | ||
811 | 814 | ||
812 | enum powersavings_balance_level { | 815 | enum powersavings_balance_level { |
813 | POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ | 816 | POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ |
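Renumbering the SD_* flags in hex makes their bitmask nature explicit and leaves visible gaps for new bits such as SD_PREFER_SIBLING; they are combined and tested in sched_domain->flags in the ordinary way:

    /* Building and testing a domain flags word as a plain bitmask: */
    int flags = SD_LOAD_BALANCE | SD_BALANCE_NEWIDLE
              | SD_BALANCE_EXEC | SD_WAKE_AFFINE | SD_PREFER_SIBLING;
    int prefer_sibling = !!(flags & SD_PREFER_SIBLING);    /* 1 here */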
@@ -827,7 +830,7 @@ static inline int sd_balance_for_mc_power(void) | |||
827 | if (sched_smt_power_savings) | 830 | if (sched_smt_power_savings) |
828 | return SD_POWERSAVINGS_BALANCE; | 831 | return SD_POWERSAVINGS_BALANCE; |
829 | 832 | ||
830 | return 0; | 833 | return SD_PREFER_SIBLING; |
831 | } | 834 | } |
832 | 835 | ||
833 | static inline int sd_balance_for_package_power(void) | 836 | static inline int sd_balance_for_package_power(void) |
@@ -835,7 +838,7 @@ static inline int sd_balance_for_package_power(void) | |||
835 | if (sched_mc_power_savings | sched_smt_power_savings) | 838 | if (sched_mc_power_savings | sched_smt_power_savings) |
836 | return SD_POWERSAVINGS_BALANCE; | 839 | return SD_POWERSAVINGS_BALANCE; |
837 | 840 | ||
838 | return 0; | 841 | return SD_PREFER_SIBLING; |
839 | } | 842 | } |
840 | 843 | ||
841 | /* | 844 | /* |
@@ -857,15 +860,9 @@ struct sched_group { | |||
857 | 860 | ||
858 | /* | 861 | /* |
859 | * CPU power of this group, SCHED_LOAD_SCALE being max power for a | 862 | * CPU power of this group, SCHED_LOAD_SCALE being max power for a |
860 | * single CPU. This is read only (except for setup, hotplug CPU). | 863 | * single CPU. |
861 | * Note : Never change cpu_power without recompute its reciprocal | ||
862 | */ | 864 | */ |
863 | unsigned int __cpu_power; | 865 | unsigned int cpu_power; |
864 | /* | ||
865 | * reciprocal value of cpu_power to avoid expensive divides | ||
866 | * (see include/linux/reciprocal_div.h) | ||
867 | */ | ||
868 | u32 reciprocal_cpu_power; | ||
869 | 866 | ||
870 | /* | 867 | /* |
871 | * The CPUs this group covers. | 868 | * The CPUs this group covers. |
@@ -918,6 +915,7 @@ struct sched_domain { | |||
918 | unsigned int newidle_idx; | 915 | unsigned int newidle_idx; |
919 | unsigned int wake_idx; | 916 | unsigned int wake_idx; |
920 | unsigned int forkexec_idx; | 917 | unsigned int forkexec_idx; |
918 | unsigned int smt_gain; | ||
921 | int flags; /* See SD_* */ | 919 | int flags; /* See SD_* */ |
922 | enum sched_domain_level level; | 920 | enum sched_domain_level level; |
923 | 921 | ||
@@ -1045,7 +1043,6 @@ struct sched_class { | |||
1045 | struct rq *busiest, struct sched_domain *sd, | 1043 | struct rq *busiest, struct sched_domain *sd, |
1046 | enum cpu_idle_type idle); | 1044 | enum cpu_idle_type idle); |
1047 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); | 1045 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); |
1048 | int (*needs_post_schedule) (struct rq *this_rq); | ||
1049 | void (*post_schedule) (struct rq *this_rq); | 1046 | void (*post_schedule) (struct rq *this_rq); |
1050 | void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); | 1047 | void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); |
1051 | 1048 | ||
@@ -1110,6 +1107,8 @@ struct sched_entity { | |||
1110 | u64 wait_max; | 1107 | u64 wait_max; |
1111 | u64 wait_count; | 1108 | u64 wait_count; |
1112 | u64 wait_sum; | 1109 | u64 wait_sum; |
1110 | u64 iowait_count; | ||
1111 | u64 iowait_sum; | ||
1113 | 1112 | ||
1114 | u64 sleep_start; | 1113 | u64 sleep_start; |
1115 | u64 sleep_max; | 1114 | u64 sleep_max; |
@@ -1163,6 +1162,8 @@ struct sched_rt_entity { | |||
1163 | #endif | 1162 | #endif |
1164 | }; | 1163 | }; |
1165 | 1164 | ||
1165 | struct rcu_node; | ||
1166 | |||
1166 | struct task_struct { | 1167 | struct task_struct { |
1167 | volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ | 1168 | volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ |
1168 | void *stack; | 1169 | void *stack; |
@@ -1206,10 +1207,12 @@ struct task_struct { | |||
1206 | unsigned int policy; | 1207 | unsigned int policy; |
1207 | cpumask_t cpus_allowed; | 1208 | cpumask_t cpus_allowed; |
1208 | 1209 | ||
1209 | #ifdef CONFIG_PREEMPT_RCU | 1210 | #ifdef CONFIG_TREE_PREEMPT_RCU |
1210 | int rcu_read_lock_nesting; | 1211 | int rcu_read_lock_nesting; |
1211 | int rcu_flipctr_idx; | 1212 | char rcu_read_unlock_special; |
1212 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ | 1213 | struct rcu_node *rcu_blocked_node; |
1214 | struct list_head rcu_node_entry; | ||
1215 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
1213 | 1216 | ||
1214 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 1217 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
1215 | struct sched_info sched_info; | 1218 | struct sched_info sched_info; |
@@ -1230,11 +1233,19 @@ struct task_struct { | |||
1230 | unsigned did_exec:1; | 1233 | unsigned did_exec:1; |
1231 | unsigned in_execve:1; /* Tell the LSMs that the process is doing an | 1234 | unsigned in_execve:1; /* Tell the LSMs that the process is doing an |
1232 | * execve */ | 1235 | * execve */ |
1236 | unsigned in_iowait:1; | ||
1237 | |||
1238 | |||
1239 | /* Revert to default priority/policy when forking */ | ||
1240 | unsigned sched_reset_on_fork:1; | ||
1241 | |||
1233 | pid_t pid; | 1242 | pid_t pid; |
1234 | pid_t tgid; | 1243 | pid_t tgid; |
1235 | 1244 | ||
1245 | #ifdef CONFIG_CC_STACKPROTECTOR | ||
1236 | /* Canary value for the -fstack-protector gcc feature */ | 1246 | /* Canary value for the -fstack-protector gcc feature */ |
1237 | unsigned long stack_canary; | 1247 | unsigned long stack_canary; |
1248 | #endif | ||
1238 | 1249 | ||
1239 | /* | 1250 | /* |
1240 | * pointers to (original) parent process, youngest child, younger sibling, | 1251 | * pointers to (original) parent process, youngest child, younger sibling, |
@@ -1292,6 +1303,7 @@ struct task_struct { | |||
1292 | struct mutex cred_guard_mutex; /* guard against foreign influences on | 1303 | struct mutex cred_guard_mutex; /* guard against foreign influences on |
1293 | * credential calculations | 1304 | * credential calculations |
1294 | * (notably. ptrace) */ | 1305 | * (notably. ptrace) */ |
1306 | struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ | ||
1295 | 1307 | ||
1296 | char comm[TASK_COMM_LEN]; /* executable name excluding path | 1308 | char comm[TASK_COMM_LEN]; /* executable name excluding path |
1297 | - access with [gs]et_task_comm (which lock | 1309 | - access with [gs]et_task_comm (which lock |
@@ -1724,6 +1736,28 @@ extern cputime_t task_gtime(struct task_struct *p); | |||
1724 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) | 1736 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) |
1725 | #define used_math() tsk_used_math(current) | 1737 | #define used_math() tsk_used_math(current) |
1726 | 1738 | ||
1739 | #ifdef CONFIG_TREE_PREEMPT_RCU | ||
1740 | |||
1741 | #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ | ||
1742 | #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ | ||
1743 | #define RCU_READ_UNLOCK_GOT_QS (1 << 2) /* CPU has responded to RCU core. */ | ||
1744 | |||
1745 | static inline void rcu_copy_process(struct task_struct *p) | ||
1746 | { | ||
1747 | p->rcu_read_lock_nesting = 0; | ||
1748 | p->rcu_read_unlock_special = 0; | ||
1749 | p->rcu_blocked_node = NULL; | ||
1750 | INIT_LIST_HEAD(&p->rcu_node_entry); | ||
1751 | } | ||
1752 | |||
1753 | #else | ||
1754 | |||
1755 | static inline void rcu_copy_process(struct task_struct *p) | ||
1756 | { | ||
1757 | } | ||
1758 | |||
1759 | #endif | ||
1760 | |||
1727 | #ifdef CONFIG_SMP | 1761 | #ifdef CONFIG_SMP |
1728 | extern int set_cpus_allowed_ptr(struct task_struct *p, | 1762 | extern int set_cpus_allowed_ptr(struct task_struct *p, |
1729 | const struct cpumask *new_mask); | 1763 | const struct cpumask *new_mask); |
@@ -1813,11 +1847,12 @@ extern unsigned int sysctl_sched_min_granularity; | |||
1813 | extern unsigned int sysctl_sched_wakeup_granularity; | 1847 | extern unsigned int sysctl_sched_wakeup_granularity; |
1814 | extern unsigned int sysctl_sched_shares_ratelimit; | 1848 | extern unsigned int sysctl_sched_shares_ratelimit; |
1815 | extern unsigned int sysctl_sched_shares_thresh; | 1849 | extern unsigned int sysctl_sched_shares_thresh; |
1816 | #ifdef CONFIG_SCHED_DEBUG | ||
1817 | extern unsigned int sysctl_sched_child_runs_first; | 1850 | extern unsigned int sysctl_sched_child_runs_first; |
1851 | #ifdef CONFIG_SCHED_DEBUG | ||
1818 | extern unsigned int sysctl_sched_features; | 1852 | extern unsigned int sysctl_sched_features; |
1819 | extern unsigned int sysctl_sched_migration_cost; | 1853 | extern unsigned int sysctl_sched_migration_cost; |
1820 | extern unsigned int sysctl_sched_nr_migrate; | 1854 | extern unsigned int sysctl_sched_nr_migrate; |
1855 | extern unsigned int sysctl_sched_time_avg; | ||
1821 | extern unsigned int sysctl_timer_migration; | 1856 | extern unsigned int sysctl_timer_migration; |
1822 | 1857 | ||
1823 | int sched_nr_latency_handler(struct ctl_table *table, int write, | 1858 | int sched_nr_latency_handler(struct ctl_table *table, int write, |
@@ -2077,7 +2112,7 @@ static inline unsigned long wait_task_inactive(struct task_struct *p, | |||
2077 | #define for_each_process(p) \ | 2112 | #define for_each_process(p) \ |
2078 | for (p = &init_task ; (p = next_task(p)) != &init_task ; ) | 2113 | for (p = &init_task ; (p = next_task(p)) != &init_task ; ) |
2079 | 2114 | ||
2080 | extern bool is_single_threaded(struct task_struct *); | 2115 | extern bool current_is_single_threaded(void); |
2081 | 2116 | ||
2082 | /* | 2117 | /* |
2083 | * Careful: do_each_thread/while_each_thread is a double loop so | 2118 | * Careful: do_each_thread/while_each_thread is a double loop so |
@@ -2281,23 +2316,31 @@ static inline int need_resched(void) | |||
2281 | * cond_resched_softirq() will enable bhs before scheduling. | 2316 | * cond_resched_softirq() will enable bhs before scheduling. |
2282 | */ | 2317 | */ |
2283 | extern int _cond_resched(void); | 2318 | extern int _cond_resched(void); |
2284 | #ifdef CONFIG_PREEMPT_BKL | 2319 | |
2285 | static inline int cond_resched(void) | 2320 | #define cond_resched() ({ \ |
2286 | { | 2321 | __might_sleep(__FILE__, __LINE__, 0); \ |
2287 | return 0; | 2322 | _cond_resched(); \ |
2288 | } | 2323 | }) |
2324 | |||
2325 | extern int __cond_resched_lock(spinlock_t *lock); | ||
2326 | |||
2327 | #ifdef CONFIG_PREEMPT | ||
2328 | #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET | ||
2289 | #else | 2329 | #else |
2290 | static inline int cond_resched(void) | 2330 | #define PREEMPT_LOCK_OFFSET 0 |
2291 | { | ||
2292 | return _cond_resched(); | ||
2293 | } | ||
2294 | #endif | 2331 | #endif |
2295 | extern int cond_resched_lock(spinlock_t * lock); | 2332 | |
2296 | extern int cond_resched_softirq(void); | 2333 | #define cond_resched_lock(lock) ({ \ |
2297 | static inline int cond_resched_bkl(void) | 2334 | __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ |
2298 | { | 2335 | __cond_resched_lock(lock); \ |
2299 | return _cond_resched(); | 2336 | }) |
2300 | } | 2337 | |
2338 | extern int __cond_resched_softirq(void); | ||
2339 | |||
2340 | #define cond_resched_softirq() ({ \ | ||
2341 | __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \ | ||
2342 | __cond_resched_softirq(); \ | ||
2343 | }) | ||
2301 | 2344 | ||
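With cond_resched() and friends now macros, every call site gets a __might_sleep() check tagged with its own file and line (and, for the lock and softirq variants, a preempt offset accounting for what the caller legitimately holds), while the real reschedule logic stays out of line. Usage is unchanged; a sketch with hypothetical work functions:

    /* A long loop that must not hog the CPU on non-preemptible kernels: */
    for (i = 0; i < nr_items; i++) {
            process_item(&items[i]);    /* hypothetical work */
            cond_resched();             /* __might_sleep() fires if misused */
    }

    /* While holding a spinlock, the lock-aware form drops and retakes it: */
    spin_lock(&q_lock);
    while (!list_empty(&q)) {
            handle_one(&q);             /* hypothetical */
            if (cond_resched_lock(&q_lock))
                    continue;           /* we slept; recheck the list state */
    }
    spin_unlock(&q_lock);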
2302 | /* | 2345 | /* |
2303 | * Does a critical section need to be broken due to another | 2346 | * Does a critical section need to be broken due to another |
diff --git a/include/linux/security.h b/include/linux/security.h index 1f16eea2017b..d050b66ab9ef 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -53,7 +53,7 @@ struct audit_krule; | |||
53 | extern int cap_capable(struct task_struct *tsk, const struct cred *cred, | 53 | extern int cap_capable(struct task_struct *tsk, const struct cred *cred, |
54 | int cap, int audit); | 54 | int cap, int audit); |
55 | extern int cap_settime(struct timespec *ts, struct timezone *tz); | 55 | extern int cap_settime(struct timespec *ts, struct timezone *tz); |
56 | extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode); | 56 | extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode); |
57 | extern int cap_ptrace_traceme(struct task_struct *parent); | 57 | extern int cap_ptrace_traceme(struct task_struct *parent); |
58 | extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); | 58 | extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); |
59 | extern int cap_capset(struct cred *new, const struct cred *old, | 59 | extern int cap_capset(struct cred *new, const struct cred *old, |
@@ -653,6 +653,11 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
653 | * manual page for definitions of the @clone_flags. | 653 | * manual page for definitions of the @clone_flags. |
654 | * @clone_flags contains the flags indicating what should be shared. | 654 | * @clone_flags contains the flags indicating what should be shared. |
655 | * Return 0 if permission is granted. | 655 | * Return 0 if permission is granted. |
656 | * @cred_alloc_blank: | ||
657 | * @cred points to the credentials. | ||
658 | * @gfp indicates the atomicity of any memory allocations. | ||
659 | * Only allocate sufficient memory and attach to @cred such that | ||
660 | * cred_transfer() will not get ENOMEM. | ||
656 | * @cred_free: | 661 | * @cred_free: |
657 | * @cred points to the credentials. | 662 | * @cred points to the credentials. |
658 | * Deallocate and clear the cred->security field in a set of credentials. | 663 | * Deallocate and clear the cred->security field in a set of credentials. |
@@ -665,6 +670,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
665 | * @new points to the new credentials. | 670 | * @new points to the new credentials. |
666 | * @old points to the original credentials. | 671 | * @old points to the original credentials. |
667 | * Install a new set of credentials. | 672 | * Install a new set of credentials. |
673 | * @cred_transfer: | ||
674 | * @new points to the new credentials. | ||
675 | * @old points to the original credentials. | ||
676 | * Transfer data from original creds to new creds. | ||
668 | * @kernel_act_as: | 677 | * @kernel_act_as: |
669 | * Set the credentials for a kernel service to act as (subjective context). | 678 | * Set the credentials for a kernel service to act as (subjective context). |
670 | * @new points to the credentials to be modified. | 679 | * @new points to the credentials to be modified. |
@@ -678,6 +687,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
678 | * @inode points to the inode to use as a reference. | 687 | * @inode points to the inode to use as a reference. |
679 | * The current task must be the one that nominated @inode. | 688 | * The current task must be the one that nominated @inode. |
680 | * Return 0 if successful. | 689 | * Return 0 if successful. |
690 | * @kernel_module_request: | ||
691 | * Ability to trigger the kernel to automatically upcall to userspace so | ||
692 | * that userspace can load a kernel module with the given name. | ||
693 | * Return 0 if successful. | ||
681 | * @task_setuid: | 694 | * @task_setuid: |
682 | * Check permission before setting one or more of the user identity | 695 | * Check permission before setting one or more of the user identity |
683 | * attributes of the current process. The @flags parameter indicates | 696 | * attributes of the current process. The @flags parameter indicates |
@@ -994,6 +1007,17 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
994 | * Sets the connection's peersid to the secmark on skb. | 1007 | * Sets the connection's peersid to the secmark on skb. |
995 | * @req_classify_flow: | 1008 | * @req_classify_flow: |
996 | * Sets the flow's sid to the openreq sid. | 1009 | * Sets the flow's sid to the openreq sid. |
1010 | * @tun_dev_create: | ||
1011 | * Check permissions prior to creating a new TUN device. | ||
1012 | * @tun_dev_post_create: | ||
1013 | * This hook allows a module to update or allocate a per-socket security | ||
1014 | * structure. | ||
1015 | * @sk contains the newly created sock structure. | ||
1016 | * @tun_dev_attach: | ||
1017 | * Check permissions prior to attaching to a persistent TUN device. This | ||
1018 | * hook can also be used by the module to update any security state | ||
1019 | * associated with the TUN device's sock structure. | ||
1020 | * @sk contains the existing sock structure. | ||
997 | * | 1021 | * |
998 | * Security hooks for XFRM operations. | 1022 | * Security hooks for XFRM operations. |
999 | * | 1023 | * |
@@ -1088,6 +1112,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
1088 | * Return the length of the string (including terminating NUL) or -ve if | 1112 | * Return the length of the string (including terminating NUL) or -ve if |
1089 | * an error. | 1113 | * an error. |
1090 | * May also return 0 (and a NULL buffer pointer) if there is no label. | 1114 | * May also return 0 (and a NULL buffer pointer) if there is no label. |
1115 | * @key_session_to_parent: | ||
1116 | * Forcibly assign the session keyring from a process to its parent | ||
1117 | * process. | ||
1118 | * @cred: Pointer to process's credentials | ||
1119 | * @parent_cred: Pointer to parent process's credentials | ||
1120 | * @keyring: Proposed new session keyring | ||
1121 | * Return 0 if permission is granted, -ve error otherwise. | ||
1091 | * | 1122 | * |
1092 | * Security hooks affecting all System V IPC operations. | 1123 | * Security hooks affecting all System V IPC operations. |
1093 | * | 1124 | * |
@@ -1229,7 +1260,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
1229 | * @alter contains the flag indicating whether changes are to be made. | 1260 | * @alter contains the flag indicating whether changes are to be made. |
1230 | * Return 0 if permission is granted. | 1261 | * Return 0 if permission is granted. |
1231 | * | 1262 | * |
1232 | * @ptrace_may_access: | 1263 | * @ptrace_access_check: |
1233 | * Check permission before allowing the current process to trace the | 1264 | * Check permission before allowing the current process to trace the |
1234 | * @child process. | 1265 | * @child process. |
1235 | * Security modules may also want to perform a process tracing check | 1266 | * Security modules may also want to perform a process tracing check |
@@ -1244,7 +1275,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
1244 | * Check that the @parent process has sufficient permission to trace the | 1275 | * Check that the @parent process has sufficient permission to trace the |
1245 | * current process before allowing the current process to present itself | 1276 | * current process before allowing the current process to present itself |
1246 | * to the @parent process for tracing. | 1277 | * to the @parent process for tracing. |
1247 | * The parent process will still have to undergo the ptrace_may_access | 1278 | * The parent process will still have to undergo the ptrace_access_check |
1248 | * checks before it is allowed to trace this one. | 1279 | * checks before it is allowed to trace this one. |
1249 | * @parent contains the task_struct structure for debugger process. | 1280 | * @parent contains the task_struct structure for debugger process. |
1250 | * Return 0 if permission is granted. | 1281 | * Return 0 if permission is granted. |
@@ -1351,12 +1382,47 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
1351 | * audit_rule_init. | 1382 | * audit_rule_init. |
1352 | * @rule contains the allocated rule | 1383 | * @rule contains the allocated rule |
1353 | * | 1384 | * |
1385 | * @inode_notifysecctx: | ||
1386 | * Notify the security module of what the security context of an inode | ||
1387 | * should be. Initializes the incore security context managed by the | ||
1388 | * security module for this inode. Example usage: NFS client invokes | ||
1389 | * this hook to initialize the security context in its incore inode to the | ||
1390 | * value provided by the server for the file when the server returned the | ||
1391 | * file's attributes to the client. | ||
1392 | * | ||
1393 | * Must be called with inode->i_mutex locked. | ||
1394 | * | ||
1395 | * @inode we wish to set the security context of. | ||
1396 | * @ctx contains the string which we wish to set in the inode. | ||
1397 | * @ctxlen contains the length of @ctx. | ||
1398 | * | ||
1399 | * @inode_setsecctx: | ||
1400 | * Change the security context of an inode. Updates the | ||
1401 | * incore security context managed by the security module and invokes the | ||
1402 | * fs code as needed (via __vfs_setxattr_noperm) to update any backing | ||
1403 | * xattrs that represent the context. Example usage: NFS server invokes | ||
1404 | * this hook to change the security context in its incore inode and on the | ||
1405 | * backing filesystem to a value provided by the client on a SETATTR | ||
1406 | * operation. | ||
1407 | * | ||
1408 | * Must be called with inode->i_mutex locked. | ||
1409 | * | ||
1410 | * @dentry contains the inode we wish to set the security context of. | ||
1411 | * @ctx contains the string which we wish to set in the inode. | ||
1412 | * @ctxlen contains the length of @ctx. | ||
1413 | * | ||
1414 | * @inode_getsecctx: | ||
1415 | * Returns a string containing all relevant security context information. | ||
1416 | * | ||
1417 | * @inode we wish to get the security context of. | ||
1418 | * @ctx is a pointer in which to place the allocated security context. | ||
1419 | * @ctxlen points to the place to put the length of @ctx. | ||
1354 | * This is the main security structure. | 1420 | * This is the main security structure. |
1355 | */ | 1421 | */ |
1356 | struct security_operations { | 1422 | struct security_operations { |
1357 | char name[SECURITY_NAME_MAX + 1]; | 1423 | char name[SECURITY_NAME_MAX + 1]; |
1358 | 1424 | ||
1359 | int (*ptrace_may_access) (struct task_struct *child, unsigned int mode); | 1425 | int (*ptrace_access_check) (struct task_struct *child, unsigned int mode); |
1360 | int (*ptrace_traceme) (struct task_struct *parent); | 1426 | int (*ptrace_traceme) (struct task_struct *parent); |
1361 | int (*capget) (struct task_struct *target, | 1427 | int (*capget) (struct task_struct *target, |
1362 | kernel_cap_t *effective, | 1428 | kernel_cap_t *effective, |
@@ -1483,12 +1549,15 @@ struct security_operations { | |||
1483 | int (*dentry_open) (struct file *file, const struct cred *cred); | 1549 | int (*dentry_open) (struct file *file, const struct cred *cred); |
1484 | 1550 | ||
1485 | int (*task_create) (unsigned long clone_flags); | 1551 | int (*task_create) (unsigned long clone_flags); |
1552 | int (*cred_alloc_blank) (struct cred *cred, gfp_t gfp); | ||
1486 | void (*cred_free) (struct cred *cred); | 1553 | void (*cred_free) (struct cred *cred); |
1487 | int (*cred_prepare)(struct cred *new, const struct cred *old, | 1554 | int (*cred_prepare)(struct cred *new, const struct cred *old, |
1488 | gfp_t gfp); | 1555 | gfp_t gfp); |
1489 | void (*cred_commit)(struct cred *new, const struct cred *old); | 1556 | void (*cred_commit)(struct cred *new, const struct cred *old); |
1557 | void (*cred_transfer)(struct cred *new, const struct cred *old); | ||
1490 | int (*kernel_act_as)(struct cred *new, u32 secid); | 1558 | int (*kernel_act_as)(struct cred *new, u32 secid); |
1491 | int (*kernel_create_files_as)(struct cred *new, struct inode *inode); | 1559 | int (*kernel_create_files_as)(struct cred *new, struct inode *inode); |
1560 | int (*kernel_module_request)(void); | ||
1492 | int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); | 1561 | int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); |
1493 | int (*task_fix_setuid) (struct cred *new, const struct cred *old, | 1562 | int (*task_fix_setuid) (struct cred *new, const struct cred *old, |
1494 | int flags); | 1563 | int flags); |
@@ -1556,6 +1625,10 @@ struct security_operations { | |||
1556 | int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid); | 1625 | int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid); |
1557 | void (*release_secctx) (char *secdata, u32 seclen); | 1626 | void (*release_secctx) (char *secdata, u32 seclen); |
1558 | 1627 | ||
1628 | int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen); | ||
1629 | int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen); | ||
1630 | int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen); | ||
1631 | |||
1559 | #ifdef CONFIG_SECURITY_NETWORK | 1632 | #ifdef CONFIG_SECURITY_NETWORK |
1560 | int (*unix_stream_connect) (struct socket *sock, | 1633 | int (*unix_stream_connect) (struct socket *sock, |
1561 | struct socket *other, struct sock *newsk); | 1634 | struct socket *other, struct sock *newsk); |
@@ -1592,6 +1665,9 @@ struct security_operations { | |||
1592 | void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req); | 1665 | void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req); |
1593 | void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb); | 1666 | void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb); |
1594 | void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); | 1667 | void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); |
1668 | int (*tun_dev_create)(void); | ||
1669 | void (*tun_dev_post_create)(struct sock *sk); | ||
1670 | int (*tun_dev_attach)(struct sock *sk); | ||
1595 | #endif /* CONFIG_SECURITY_NETWORK */ | 1671 | #endif /* CONFIG_SECURITY_NETWORK */ |
1596 | 1672 | ||
1597 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 1673 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
@@ -1620,6 +1696,9 @@ struct security_operations { | |||
1620 | const struct cred *cred, | 1696 | const struct cred *cred, |
1621 | key_perm_t perm); | 1697 | key_perm_t perm); |
1622 | int (*key_getsecurity)(struct key *key, char **_buffer); | 1698 | int (*key_getsecurity)(struct key *key, char **_buffer); |
1699 | int (*key_session_to_parent)(const struct cred *cred, | ||
1700 | const struct cred *parent_cred, | ||
1701 | struct key *key); | ||
1623 | #endif /* CONFIG_KEYS */ | 1702 | #endif /* CONFIG_KEYS */ |
1624 | 1703 | ||
1625 | #ifdef CONFIG_AUDIT | 1704 | #ifdef CONFIG_AUDIT |
@@ -1637,7 +1716,7 @@ extern int security_module_enable(struct security_operations *ops); | |||
1637 | extern int register_security(struct security_operations *ops); | 1716 | extern int register_security(struct security_operations *ops); |
1638 | 1717 | ||
1639 | /* Security operations */ | 1718 | /* Security operations */ |
1640 | int security_ptrace_may_access(struct task_struct *child, unsigned int mode); | 1719 | int security_ptrace_access_check(struct task_struct *child, unsigned int mode); |
1641 | int security_ptrace_traceme(struct task_struct *parent); | 1720 | int security_ptrace_traceme(struct task_struct *parent); |
1642 | int security_capget(struct task_struct *target, | 1721 | int security_capget(struct task_struct *target, |
1643 | kernel_cap_t *effective, | 1722 | kernel_cap_t *effective, |
@@ -1736,11 +1815,14 @@ int security_file_send_sigiotask(struct task_struct *tsk, | |||
1736 | int security_file_receive(struct file *file); | 1815 | int security_file_receive(struct file *file); |
1737 | int security_dentry_open(struct file *file, const struct cred *cred); | 1816 | int security_dentry_open(struct file *file, const struct cred *cred); |
1738 | int security_task_create(unsigned long clone_flags); | 1817 | int security_task_create(unsigned long clone_flags); |
1818 | int security_cred_alloc_blank(struct cred *cred, gfp_t gfp); | ||
1739 | void security_cred_free(struct cred *cred); | 1819 | void security_cred_free(struct cred *cred); |
1740 | int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); | 1820 | int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); |
1741 | void security_commit_creds(struct cred *new, const struct cred *old); | 1821 | void security_commit_creds(struct cred *new, const struct cred *old); |
1822 | void security_transfer_creds(struct cred *new, const struct cred *old); | ||
1742 | int security_kernel_act_as(struct cred *new, u32 secid); | 1823 | int security_kernel_act_as(struct cred *new, u32 secid); |
1743 | int security_kernel_create_files_as(struct cred *new, struct inode *inode); | 1824 | int security_kernel_create_files_as(struct cred *new, struct inode *inode); |
1825 | int security_kernel_module_request(void); | ||
1744 | int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags); | 1826 | int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags); |
1745 | int security_task_fix_setuid(struct cred *new, const struct cred *old, | 1827 | int security_task_fix_setuid(struct cred *new, const struct cred *old, |
1746 | int flags); | 1828 | int flags); |
@@ -1796,6 +1878,9 @@ int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); | |||
1796 | int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); | 1878 | int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); |
1797 | void security_release_secctx(char *secdata, u32 seclen); | 1879 | void security_release_secctx(char *secdata, u32 seclen); |
1798 | 1880 | ||
1881 | int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); | ||
1882 | int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); | ||
1883 | int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); | ||
1799 | #else /* CONFIG_SECURITY */ | 1884 | #else /* CONFIG_SECURITY */ |
1800 | struct security_mnt_opts { | 1885 | struct security_mnt_opts { |
1801 | }; | 1886 | }; |
@@ -1818,10 +1903,10 @@ static inline int security_init(void) | |||
1818 | return 0; | 1903 | return 0; |
1819 | } | 1904 | } |
1820 | 1905 | ||
1821 | static inline int security_ptrace_may_access(struct task_struct *child, | 1906 | static inline int security_ptrace_access_check(struct task_struct *child, |
1822 | unsigned int mode) | 1907 | unsigned int mode) |
1823 | { | 1908 | { |
1824 | return cap_ptrace_may_access(child, mode); | 1909 | return cap_ptrace_access_check(child, mode); |
1825 | } | 1910 | } |
1826 | 1911 | ||
1827 | static inline int security_ptrace_traceme(struct task_struct *parent) | 1912 | static inline int security_ptrace_traceme(struct task_struct *parent) |
@@ -2266,6 +2351,11 @@ static inline int security_task_create(unsigned long clone_flags) | |||
2266 | return 0; | 2351 | return 0; |
2267 | } | 2352 | } |
2268 | 2353 | ||
2354 | static inline int security_cred_alloc_blank(struct cred *cred, gfp_t gfp) | ||
2355 | { | ||
2356 | return 0; | ||
2357 | } | ||
2358 | |||
2269 | static inline void security_cred_free(struct cred *cred) | 2359 | static inline void security_cred_free(struct cred *cred) |
2270 | { } | 2360 | { } |
2271 | 2361 | ||
@@ -2281,6 +2371,11 @@ static inline void security_commit_creds(struct cred *new, | |||
2281 | { | 2371 | { |
2282 | } | 2372 | } |
2283 | 2373 | ||
2374 | static inline void security_transfer_creds(struct cred *new, | ||
2375 | const struct cred *old) | ||
2376 | { | ||
2377 | } | ||
2378 | |||
2284 | static inline int security_kernel_act_as(struct cred *cred, u32 secid) | 2379 | static inline int security_kernel_act_as(struct cred *cred, u32 secid) |
2285 | { | 2380 | { |
2286 | return 0; | 2381 | return 0; |
@@ -2292,6 +2387,11 @@ static inline int security_kernel_create_files_as(struct cred *cred, | |||
2292 | return 0; | 2387 | return 0; |
2293 | } | 2388 | } |
2294 | 2389 | ||
2390 | static inline int security_kernel_module_request(void) | ||
2391 | { | ||
2392 | return 0; | ||
2393 | } | ||
2394 | |||
2295 | static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, | 2395 | static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, |
2296 | int flags) | 2396 | int flags) |
2297 | { | 2397 | { |
@@ -2537,6 +2637,19 @@ static inline int security_secctx_to_secid(const char *secdata, | |||
2537 | static inline void security_release_secctx(char *secdata, u32 seclen) | 2637 | static inline void security_release_secctx(char *secdata, u32 seclen) |
2538 | { | 2638 | { |
2539 | } | 2639 | } |
2640 | |||
2641 | static inline int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen) | ||
2642 | { | ||
2643 | return -EOPNOTSUPP; | ||
2644 | } | ||
2645 | static inline int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen) | ||
2646 | { | ||
2647 | return -EOPNOTSUPP; | ||
2648 | } | ||
2649 | static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) | ||
2650 | { | ||
2651 | return -EOPNOTSUPP; | ||
2652 | } | ||
2540 | #endif /* CONFIG_SECURITY */ | 2653 | #endif /* CONFIG_SECURITY */ |
2541 | 2654 | ||
2542 | #ifdef CONFIG_SECURITY_NETWORK | 2655 | #ifdef CONFIG_SECURITY_NETWORK |
@@ -2575,6 +2688,9 @@ void security_inet_csk_clone(struct sock *newsk, | |||
2575 | const struct request_sock *req); | 2688 | const struct request_sock *req); |
2576 | void security_inet_conn_established(struct sock *sk, | 2689 | void security_inet_conn_established(struct sock *sk, |
2577 | struct sk_buff *skb); | 2690 | struct sk_buff *skb); |
2691 | int security_tun_dev_create(void); | ||
2692 | void security_tun_dev_post_create(struct sock *sk); | ||
2693 | int security_tun_dev_attach(struct sock *sk); | ||
2578 | 2694 | ||
2579 | #else /* CONFIG_SECURITY_NETWORK */ | 2695 | #else /* CONFIG_SECURITY_NETWORK */ |
2580 | static inline int security_unix_stream_connect(struct socket *sock, | 2696 | static inline int security_unix_stream_connect(struct socket *sock, |
@@ -2725,6 +2841,20 @@ static inline void security_inet_conn_established(struct sock *sk, | |||
2725 | struct sk_buff *skb) | 2841 | struct sk_buff *skb) |
2726 | { | 2842 | { |
2727 | } | 2843 | } |
2844 | |||
2845 | static inline int security_tun_dev_create(void) | ||
2846 | { | ||
2847 | return 0; | ||
2848 | } | ||
2849 | |||
2850 | static inline void security_tun_dev_post_create(struct sock *sk) | ||
2851 | { | ||
2852 | } | ||
2853 | |||
2854 | static inline int security_tun_dev_attach(struct sock *sk) | ||
2855 | { | ||
2856 | return 0; | ||
2857 | } | ||
2728 | #endif /* CONFIG_SECURITY_NETWORK */ | 2858 | #endif /* CONFIG_SECURITY_NETWORK */ |
2729 | 2859 | ||
2730 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 2860 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
@@ -2881,6 +3011,9 @@ void security_key_free(struct key *key); | |||
2881 | int security_key_permission(key_ref_t key_ref, | 3011 | int security_key_permission(key_ref_t key_ref, |
2882 | const struct cred *cred, key_perm_t perm); | 3012 | const struct cred *cred, key_perm_t perm); |
2883 | int security_key_getsecurity(struct key *key, char **_buffer); | 3013 | int security_key_getsecurity(struct key *key, char **_buffer); |
3014 | int security_key_session_to_parent(const struct cred *cred, | ||
3015 | const struct cred *parent_cred, | ||
3016 | struct key *key); | ||
2884 | 3017 | ||
2885 | #else | 3018 | #else |
2886 | 3019 | ||
@@ -2908,6 +3041,13 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer) | |||
2908 | return 0; | 3041 | return 0; |
2909 | } | 3042 | } |
2910 | 3043 | ||
3044 | static inline int security_key_session_to_parent(const struct cred *cred, | ||
3045 | const struct cred *parent_cred, | ||
3046 | struct key *key) | ||
3047 | { | ||
3048 | return 0; | ||
3049 | } | ||
3050 | |||
2911 | #endif | 3051 | #endif |
2912 | #endif /* CONFIG_KEYS */ | 3052 | #endif /* CONFIG_KEYS */ |
2913 | 3053 | ||
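As a hedged illustration of how the hooks added above slot into a module, the sketch below wires the renamed ptrace_access_check hook together with the new cred and TUN hooks into a security_operations table. The hook names and signatures come from the header; every function body and the "example" name are placeholders invented for this sketch.

	/* Illustrative only: a minimal module covering the hooks added above.
	 * Assumes CONFIG_SECURITY_NETWORK for the TUN hook. */
	static int example_ptrace_access_check(struct task_struct *child,
					       unsigned int mode)
	{
		return cap_ptrace_access_check(child, mode);
	}

	static int example_cred_alloc_blank(struct cred *cred, gfp_t gfp)
	{
		cred->security = NULL;	/* nothing to preallocate in this sketch */
		return 0;
	}

	static void example_cred_transfer(struct cred *new, const struct cred *old)
	{
		new->security = old->security;	/* must not fail, per the hook contract */
	}

	static int example_tun_dev_create(void)
	{
		return 0;		/* permit TUN device creation */
	}

	static struct security_operations example_ops = {
		.name			= "example",
		.ptrace_access_check	= example_ptrace_access_check,
		.cred_alloc_blank	= example_cred_alloc_blank,
		.cred_transfer		= example_cred_transfer,
		.tun_dev_create		= example_tun_dev_create,
	};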
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index abff6c9b413c..6d3f2f449ead 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h | |||
@@ -39,7 +39,7 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) | |||
39 | } | 39 | } |
40 | 40 | ||
41 | #ifdef CONFIG_TMPFS_POSIX_ACL | 41 | #ifdef CONFIG_TMPFS_POSIX_ACL |
42 | int shmem_permission(struct inode *, int); | 42 | int shmem_check_acl(struct inode *, int); |
43 | int shmem_acl_init(struct inode *, struct inode *); | 43 | int shmem_acl_init(struct inode *, struct inode *); |
44 | 44 | ||
45 | extern struct xattr_handler shmem_xattr_acl_access_handler; | 45 | extern struct xattr_handler shmem_xattr_acl_access_handler; |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 4be57ab03478..f0ca7a7a1757 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -143,15 +143,6 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } | |||
143 | */ | 143 | */ |
144 | #define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) | 144 | #define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) |
145 | 145 | ||
146 | /* | ||
147 | * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: | ||
148 | */ | ||
149 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
150 | # include <linux/spinlock_api_smp.h> | ||
151 | #else | ||
152 | # include <linux/spinlock_api_up.h> | ||
153 | #endif | ||
154 | |||
155 | #ifdef CONFIG_DEBUG_SPINLOCK | 146 | #ifdef CONFIG_DEBUG_SPINLOCK |
156 | extern void _raw_spin_lock(spinlock_t *lock); | 147 | extern void _raw_spin_lock(spinlock_t *lock); |
157 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 148 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) |
@@ -268,50 +259,16 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } | |||
268 | 259 | ||
269 | #define spin_lock_irq(lock) _spin_lock_irq(lock) | 260 | #define spin_lock_irq(lock) _spin_lock_irq(lock) |
270 | #define spin_lock_bh(lock) _spin_lock_bh(lock) | 261 | #define spin_lock_bh(lock) _spin_lock_bh(lock) |
271 | |||
272 | #define read_lock_irq(lock) _read_lock_irq(lock) | 262 | #define read_lock_irq(lock) _read_lock_irq(lock) |
273 | #define read_lock_bh(lock) _read_lock_bh(lock) | 263 | #define read_lock_bh(lock) _read_lock_bh(lock) |
274 | |||
275 | #define write_lock_irq(lock) _write_lock_irq(lock) | 264 | #define write_lock_irq(lock) _write_lock_irq(lock) |
276 | #define write_lock_bh(lock) _write_lock_bh(lock) | 265 | #define write_lock_bh(lock) _write_lock_bh(lock) |
277 | 266 | #define spin_unlock(lock) _spin_unlock(lock) | |
278 | /* | 267 | #define read_unlock(lock) _read_unlock(lock) |
279 | * We inline the unlock functions in the nondebug case: | 268 | #define write_unlock(lock) _write_unlock(lock) |
280 | */ | 269 | #define spin_unlock_irq(lock) _spin_unlock_irq(lock) |
281 | #if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \ | 270 | #define read_unlock_irq(lock) _read_unlock_irq(lock) |
282 | !defined(CONFIG_SMP) | 271 | #define write_unlock_irq(lock) _write_unlock_irq(lock) |
283 | # define spin_unlock(lock) _spin_unlock(lock) | ||
284 | # define read_unlock(lock) _read_unlock(lock) | ||
285 | # define write_unlock(lock) _write_unlock(lock) | ||
286 | # define spin_unlock_irq(lock) _spin_unlock_irq(lock) | ||
287 | # define read_unlock_irq(lock) _read_unlock_irq(lock) | ||
288 | # define write_unlock_irq(lock) _write_unlock_irq(lock) | ||
289 | #else | ||
290 | # define spin_unlock(lock) \ | ||
291 | do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0) | ||
292 | # define read_unlock(lock) \ | ||
293 | do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0) | ||
294 | # define write_unlock(lock) \ | ||
295 | do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0) | ||
296 | # define spin_unlock_irq(lock) \ | ||
297 | do { \ | ||
298 | __raw_spin_unlock(&(lock)->raw_lock); \ | ||
299 | __release(lock); \ | ||
300 | local_irq_enable(); \ | ||
301 | } while (0) | ||
302 | # define read_unlock_irq(lock) \ | ||
303 | do { \ | ||
304 | __raw_read_unlock(&(lock)->raw_lock); \ | ||
305 | __release(lock); \ | ||
306 | local_irq_enable(); \ | ||
307 | } while (0) | ||
308 | # define write_unlock_irq(lock) \ | ||
309 | do { \ | ||
310 | __raw_write_unlock(&(lock)->raw_lock); \ | ||
311 | __release(lock); \ | ||
312 | local_irq_enable(); \ | ||
313 | } while (0) | ||
314 | #endif | ||
315 | 272 | ||
316 | #define spin_unlock_irqrestore(lock, flags) \ | 273 | #define spin_unlock_irqrestore(lock, flags) \ |
317 | do { \ | 274 | do { \ |
@@ -380,4 +337,13 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); | |||
380 | */ | 337 | */ |
381 | #define spin_can_lock(lock) (!spin_is_locked(lock)) | 338 | #define spin_can_lock(lock) (!spin_is_locked(lock)) |
382 | 339 | ||
340 | /* | ||
341 | * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: | ||
342 | */ | ||
343 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
344 | # include <linux/spinlock_api_smp.h> | ||
345 | #else | ||
346 | # include <linux/spinlock_api_up.h> | ||
347 | #endif | ||
348 | |||
383 | #endif /* __LINUX_SPINLOCK_H */ | 349 | #endif /* __LINUX_SPINLOCK_H */ |
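Moving the spinlock_api include below the definitions means the unlock wrappers above resolve through spinlock_api_smp.h rather than through the old in-header #ifdef block. A rough sketch of the expansion on a non-debug, non-preempt SMP build, using only names from the hunks above (control flow simplified):

	spin_unlock(lock)
	  -> _spin_unlock(lock)		/* unconditional #define, see above */
	  -> __spin_unlock(lock)	/* remapped: __always_inline__spin_unlock is defined */
	  -> spin_release(&lock->dep_map, 1, _RET_IP_);
	     _raw_spin_unlock(lock);
	     preempt_enable();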
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index d79845d034b5..7a7e18fc2415 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h | |||
@@ -60,4 +60,398 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |||
60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
61 | __releases(lock); | 61 | __releases(lock); |
62 | 62 | ||
63 | /* | ||
64 | * We inline the unlock functions in the nondebug case: | ||
65 | */ | ||
66 | #if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT) | ||
67 | #define __always_inline__spin_unlock | ||
68 | #define __always_inline__read_unlock | ||
69 | #define __always_inline__write_unlock | ||
70 | #define __always_inline__spin_unlock_irq | ||
71 | #define __always_inline__read_unlock_irq | ||
72 | #define __always_inline__write_unlock_irq | ||
73 | #endif | ||
74 | |||
75 | #ifndef CONFIG_DEBUG_SPINLOCK | ||
76 | #ifndef CONFIG_GENERIC_LOCKBREAK | ||
77 | |||
78 | #ifdef __always_inline__spin_lock | ||
79 | #define _spin_lock(lock) __spin_lock(lock) | ||
80 | #endif | ||
81 | |||
82 | #ifdef __always_inline__read_lock | ||
83 | #define _read_lock(lock) __read_lock(lock) | ||
84 | #endif | ||
85 | |||
86 | #ifdef __always_inline__write_lock | ||
87 | #define _write_lock(lock) __write_lock(lock) | ||
88 | #endif | ||
89 | |||
90 | #ifdef __always_inline__spin_lock_bh | ||
91 | #define _spin_lock_bh(lock) __spin_lock_bh(lock) | ||
92 | #endif | ||
93 | |||
94 | #ifdef __always_inline__read_lock_bh | ||
95 | #define _read_lock_bh(lock) __read_lock_bh(lock) | ||
96 | #endif | ||
97 | |||
98 | #ifdef __always_inline__write_lock_bh | ||
99 | #define _write_lock_bh(lock) __write_lock_bh(lock) | ||
100 | #endif | ||
101 | |||
102 | #ifdef __always_inline__spin_lock_irq | ||
103 | #define _spin_lock_irq(lock) __spin_lock_irq(lock) | ||
104 | #endif | ||
105 | |||
106 | #ifdef __always_inline__read_lock_irq | ||
107 | #define _read_lock_irq(lock) __read_lock_irq(lock) | ||
108 | #endif | ||
109 | |||
110 | #ifdef __always_inline__write_lock_irq | ||
111 | #define _write_lock_irq(lock) __write_lock_irq(lock) | ||
112 | #endif | ||
113 | |||
114 | #ifdef __always_inline__spin_lock_irqsave | ||
115 | #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) | ||
116 | #endif | ||
117 | |||
118 | #ifdef __always_inline__read_lock_irqsave | ||
119 | #define _read_lock_irqsave(lock) __read_lock_irqsave(lock) | ||
120 | #endif | ||
121 | |||
122 | #ifdef __always_inline__write_lock_irqsave | ||
123 | #define _write_lock_irqsave(lock) __write_lock_irqsave(lock) | ||
124 | #endif | ||
125 | |||
126 | #endif /* !CONFIG_GENERIC_LOCKBREAK */ | ||
127 | |||
128 | #ifdef __always_inline__spin_trylock | ||
129 | #define _spin_trylock(lock) __spin_trylock(lock) | ||
130 | #endif | ||
131 | |||
132 | #ifdef __always_inline__read_trylock | ||
133 | #define _read_trylock(lock) __read_trylock(lock) | ||
134 | #endif | ||
135 | |||
136 | #ifdef __always_inline__write_trylock | ||
137 | #define _write_trylock(lock) __write_trylock(lock) | ||
138 | #endif | ||
139 | |||
140 | #ifdef __always_inline__spin_trylock_bh | ||
141 | #define _spin_trylock_bh(lock) __spin_trylock_bh(lock) | ||
142 | #endif | ||
143 | |||
144 | #ifdef __always_inline__spin_unlock | ||
145 | #define _spin_unlock(lock) __spin_unlock(lock) | ||
146 | #endif | ||
147 | |||
148 | #ifdef __always_inline__read_unlock | ||
149 | #define _read_unlock(lock) __read_unlock(lock) | ||
150 | #endif | ||
151 | |||
152 | #ifdef __always_inline__write_unlock | ||
153 | #define _write_unlock(lock) __write_unlock(lock) | ||
154 | #endif | ||
155 | |||
156 | #ifdef __always_inline__spin_unlock_bh | ||
157 | #define _spin_unlock_bh(lock) __spin_unlock_bh(lock) | ||
158 | #endif | ||
159 | |||
160 | #ifdef __always_inline__read_unlock_bh | ||
161 | #define _read_unlock_bh(lock) __read_unlock_bh(lock) | ||
162 | #endif | ||
163 | |||
164 | #ifdef __always_inline__write_unlock_bh | ||
165 | #define _write_unlock_bh(lock) __write_unlock_bh(lock) | ||
166 | #endif | ||
167 | |||
168 | #ifdef __always_inline__spin_unlock_irq | ||
169 | #define _spin_unlock_irq(lock) __spin_unlock_irq(lock) | ||
170 | #endif | ||
171 | |||
172 | #ifdef __always_inline__read_unlock_irq | ||
173 | #define _read_unlock_irq(lock) __read_unlock_irq(lock) | ||
174 | #endif | ||
175 | |||
176 | #ifdef __always_inline__write_unlock_irq | ||
177 | #define _write_unlock_irq(lock) __write_unlock_irq(lock) | ||
178 | #endif | ||
179 | |||
180 | #ifdef __always_inline__spin_unlock_irqrestore | ||
181 | #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) | ||
182 | #endif | ||
183 | |||
184 | #ifdef __always_inline__read_unlock_irqrestore | ||
185 | #define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) | ||
186 | #endif | ||
187 | |||
188 | #ifdef __always_inline__write_unlock_irqrestore | ||
189 | #define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) | ||
190 | #endif | ||
191 | |||
192 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
193 | |||
194 | static inline int __spin_trylock(spinlock_t *lock) | ||
195 | { | ||
196 | preempt_disable(); | ||
197 | if (_raw_spin_trylock(lock)) { | ||
198 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
199 | return 1; | ||
200 | } | ||
201 | preempt_enable(); | ||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | static inline int __read_trylock(rwlock_t *lock) | ||
206 | { | ||
207 | preempt_disable(); | ||
208 | if (_raw_read_trylock(lock)) { | ||
209 | rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); | ||
210 | return 1; | ||
211 | } | ||
212 | preempt_enable(); | ||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | static inline int __write_trylock(rwlock_t *lock) | ||
217 | { | ||
218 | preempt_disable(); | ||
219 | if (_raw_write_trylock(lock)) { | ||
220 | rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
221 | return 1; | ||
222 | } | ||
223 | preempt_enable(); | ||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | /* | ||
228 | * If lockdep is enabled then we use the non-preemption spin-ops | ||
229 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are | ||
230 | * not re-enabled during lock-acquire (which the preempt-spin-ops do): | ||
231 | */ | ||
232 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | ||
233 | |||
234 | static inline void __read_lock(rwlock_t *lock) | ||
235 | { | ||
236 | preempt_disable(); | ||
237 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
238 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
239 | } | ||
240 | |||
241 | static inline unsigned long __spin_lock_irqsave(spinlock_t *lock) | ||
242 | { | ||
243 | unsigned long flags; | ||
244 | |||
245 | local_irq_save(flags); | ||
246 | preempt_disable(); | ||
247 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
248 | /* | ||
249 | * On lockdep we dont want the hand-coded irq-enable of | ||
250 | * _raw_spin_lock_flags() code, because lockdep assumes | ||
251 | * that interrupts are not re-enabled during lock-acquire: | ||
252 | */ | ||
253 | #ifdef CONFIG_LOCKDEP | ||
254 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
255 | #else | ||
256 | _raw_spin_lock_flags(lock, &flags); | ||
257 | #endif | ||
258 | return flags; | ||
259 | } | ||
260 | |||
261 | static inline void __spin_lock_irq(spinlock_t *lock) | ||
262 | { | ||
263 | local_irq_disable(); | ||
264 | preempt_disable(); | ||
265 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
266 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
267 | } | ||
268 | |||
269 | static inline void __spin_lock_bh(spinlock_t *lock) | ||
270 | { | ||
271 | local_bh_disable(); | ||
272 | preempt_disable(); | ||
273 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
274 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
275 | } | ||
276 | |||
277 | static inline unsigned long __read_lock_irqsave(rwlock_t *lock) | ||
278 | { | ||
279 | unsigned long flags; | ||
280 | |||
281 | local_irq_save(flags); | ||
282 | preempt_disable(); | ||
283 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
284 | LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock, | ||
285 | _raw_read_lock_flags, &flags); | ||
286 | return flags; | ||
287 | } | ||
288 | |||
289 | static inline void __read_lock_irq(rwlock_t *lock) | ||
290 | { | ||
291 | local_irq_disable(); | ||
292 | preempt_disable(); | ||
293 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
294 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
295 | } | ||
296 | |||
297 | static inline void __read_lock_bh(rwlock_t *lock) | ||
298 | { | ||
299 | local_bh_disable(); | ||
300 | preempt_disable(); | ||
301 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
302 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
303 | } | ||
304 | |||
305 | static inline unsigned long __write_lock_irqsave(rwlock_t *lock) | ||
306 | { | ||
307 | unsigned long flags; | ||
308 | |||
309 | local_irq_save(flags); | ||
310 | preempt_disable(); | ||
311 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
312 | LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock, | ||
313 | _raw_write_lock_flags, &flags); | ||
314 | return flags; | ||
315 | } | ||
316 | |||
317 | static inline void __write_lock_irq(rwlock_t *lock) | ||
318 | { | ||
319 | local_irq_disable(); | ||
320 | preempt_disable(); | ||
321 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
322 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
323 | } | ||
324 | |||
325 | static inline void __write_lock_bh(rwlock_t *lock) | ||
326 | { | ||
327 | local_bh_disable(); | ||
328 | preempt_disable(); | ||
329 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
330 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
331 | } | ||
332 | |||
333 | static inline void __spin_lock(spinlock_t *lock) | ||
334 | { | ||
335 | preempt_disable(); | ||
336 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
337 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
338 | } | ||
339 | |||
340 | static inline void __write_lock(rwlock_t *lock) | ||
341 | { | ||
342 | preempt_disable(); | ||
343 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
344 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
345 | } | ||
346 | |||
347 | #endif /* CONFIG_PREEMPT */ | ||
348 | |||
349 | static inline void __spin_unlock(spinlock_t *lock) | ||
350 | { | ||
351 | spin_release(&lock->dep_map, 1, _RET_IP_); | ||
352 | _raw_spin_unlock(lock); | ||
353 | preempt_enable(); | ||
354 | } | ||
355 | |||
356 | static inline void __write_unlock(rwlock_t *lock) | ||
357 | { | ||
358 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
359 | _raw_write_unlock(lock); | ||
360 | preempt_enable(); | ||
361 | } | ||
362 | |||
363 | static inline void __read_unlock(rwlock_t *lock) | ||
364 | { | ||
365 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
366 | _raw_read_unlock(lock); | ||
367 | preempt_enable(); | ||
368 | } | ||
369 | |||
370 | static inline void __spin_unlock_irqrestore(spinlock_t *lock, | ||
371 | unsigned long flags) | ||
372 | { | ||
373 | spin_release(&lock->dep_map, 1, _RET_IP_); | ||
374 | _raw_spin_unlock(lock); | ||
375 | local_irq_restore(flags); | ||
376 | preempt_enable(); | ||
377 | } | ||
378 | |||
379 | static inline void __spin_unlock_irq(spinlock_t *lock) | ||
380 | { | ||
381 | spin_release(&lock->dep_map, 1, _RET_IP_); | ||
382 | _raw_spin_unlock(lock); | ||
383 | local_irq_enable(); | ||
384 | preempt_enable(); | ||
385 | } | ||
386 | |||
387 | static inline void __spin_unlock_bh(spinlock_t *lock) | ||
388 | { | ||
389 | spin_release(&lock->dep_map, 1, _RET_IP_); | ||
390 | _raw_spin_unlock(lock); | ||
391 | preempt_enable_no_resched(); | ||
392 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
393 | } | ||
394 | |||
395 | static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
396 | { | ||
397 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
398 | _raw_read_unlock(lock); | ||
399 | local_irq_restore(flags); | ||
400 | preempt_enable(); | ||
401 | } | ||
402 | |||
403 | static inline void __read_unlock_irq(rwlock_t *lock) | ||
404 | { | ||
405 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
406 | _raw_read_unlock(lock); | ||
407 | local_irq_enable(); | ||
408 | preempt_enable(); | ||
409 | } | ||
410 | |||
411 | static inline void __read_unlock_bh(rwlock_t *lock) | ||
412 | { | ||
413 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
414 | _raw_read_unlock(lock); | ||
415 | preempt_enable_no_resched(); | ||
416 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
417 | } | ||
418 | |||
419 | static inline void __write_unlock_irqrestore(rwlock_t *lock, | ||
420 | unsigned long flags) | ||
421 | { | ||
422 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
423 | _raw_write_unlock(lock); | ||
424 | local_irq_restore(flags); | ||
425 | preempt_enable(); | ||
426 | } | ||
427 | |||
428 | static inline void __write_unlock_irq(rwlock_t *lock) | ||
429 | { | ||
430 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
431 | _raw_write_unlock(lock); | ||
432 | local_irq_enable(); | ||
433 | preempt_enable(); | ||
434 | } | ||
435 | |||
436 | static inline void __write_unlock_bh(rwlock_t *lock) | ||
437 | { | ||
438 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
439 | _raw_write_unlock(lock); | ||
440 | preempt_enable_no_resched(); | ||
441 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
442 | } | ||
443 | |||
444 | static inline int __spin_trylock_bh(spinlock_t *lock) | ||
445 | { | ||
446 | local_bh_disable(); | ||
447 | preempt_disable(); | ||
448 | if (_raw_spin_trylock(lock)) { | ||
449 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
450 | return 1; | ||
451 | } | ||
452 | preempt_enable_no_resched(); | ||
453 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
454 | return 0; | ||
455 | } | ||
456 | |||
63 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ | 457 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ |
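The __always_inline__* symbols act as opt-in switches: when one is defined before this header is processed, the corresponding _spin_*()/_read_*()/_write_*() wrapper is remapped to the inline __*() body instead of the out-of-line function. A hedged sketch of the opt-in from an architecture header (file name and comments invented for illustration):

	/* arch/example/include/asm/spinlock.h -- hypothetical */
	#define __always_inline__spin_lock	/* inline _spin_lock() on this arch */
	#define __always_inline__spin_unlock	/* and _spin_unlock() as well */
	/* When linux/spinlock.h later pulls in spinlock_api_smp.h, the #ifdef
	 * blocks above then map _spin_lock() -> __spin_lock() and
	 * _spin_unlock() -> __spin_unlock(). */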
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index cb1a6631b8f4..73b1f1cec423 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h | |||
@@ -14,7 +14,6 @@ struct scatterlist; | |||
14 | */ | 14 | */ |
15 | #define IO_TLB_SEGSIZE 128 | 15 | #define IO_TLB_SEGSIZE 128 |
16 | 16 | ||
17 | |||
18 | /* | 17 | /* |
19 | * log of the size of each IO TLB slab. The number of slabs is command line | 18 | * log of the size of each IO TLB slab. The number of slabs is command line |
20 | * controllable. | 19 | * controllable. |
@@ -24,16 +23,6 @@ struct scatterlist; | |||
24 | extern void | 23 | extern void |
25 | swiotlb_init(void); | 24 | swiotlb_init(void); |
26 | 25 | ||
27 | extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs); | ||
28 | extern void *swiotlb_alloc(unsigned order, unsigned long nslabs); | ||
29 | |||
30 | extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, | ||
31 | phys_addr_t address); | ||
32 | extern phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, | ||
33 | dma_addr_t address); | ||
34 | |||
35 | extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size); | ||
36 | |||
37 | extern void | 26 | extern void |
38 | *swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 27 | *swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
39 | dma_addr_t *dma_handle, gfp_t flags); | 28 | dma_addr_t *dma_handle, gfp_t flags); |
diff --git a/include/linux/topology.h b/include/linux/topology.h index 7402c1a27c4f..85e8cf7d393c 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
@@ -85,20 +85,29 @@ int arch_update_cpu_topology(void); | |||
85 | #define ARCH_HAS_SCHED_WAKE_IDLE | 85 | #define ARCH_HAS_SCHED_WAKE_IDLE |
86 | /* Common values for SMT siblings */ | 86 | /* Common values for SMT siblings */ |
87 | #ifndef SD_SIBLING_INIT | 87 | #ifndef SD_SIBLING_INIT |
88 | #define SD_SIBLING_INIT (struct sched_domain) { \ | 88 | #define SD_SIBLING_INIT (struct sched_domain) { \ |
89 | .min_interval = 1, \ | 89 | .min_interval = 1, \ |
90 | .max_interval = 2, \ | 90 | .max_interval = 2, \ |
91 | .busy_factor = 64, \ | 91 | .busy_factor = 64, \ |
92 | .imbalance_pct = 110, \ | 92 | .imbalance_pct = 110, \ |
93 | .flags = SD_LOAD_BALANCE \ | 93 | \ |
94 | | SD_BALANCE_NEWIDLE \ | 94 | .flags = 1*SD_LOAD_BALANCE \ |
95 | | SD_BALANCE_FORK \ | 95 | | 1*SD_BALANCE_NEWIDLE \ |
96 | | SD_BALANCE_EXEC \ | 96 | | 1*SD_BALANCE_EXEC \ |
97 | | SD_WAKE_AFFINE \ | 97 | | 1*SD_BALANCE_FORK \ |
98 | | SD_WAKE_BALANCE \ | 98 | | 0*SD_WAKE_IDLE \ |
99 | | SD_SHARE_CPUPOWER, \ | 99 | | 1*SD_WAKE_AFFINE \ |
100 | .last_balance = jiffies, \ | 100 | | 1*SD_WAKE_BALANCE \ |
101 | .balance_interval = 1, \ | 101 | | 1*SD_SHARE_CPUPOWER \ |
102 | | 0*SD_POWERSAVINGS_BALANCE \ | ||
103 | | 0*SD_SHARE_PKG_RESOURCES \ | ||
104 | | 0*SD_SERIALIZE \ | ||
105 | | 0*SD_WAKE_IDLE_FAR \ | ||
106 | | 0*SD_PREFER_SIBLING \ | ||
107 | , \ | ||
108 | .last_balance = jiffies, \ | ||
109 | .balance_interval = 1, \ | ||
110 | .smt_gain = 1178, /* 15% */ \ | ||
102 | } | 111 | } |
103 | #endif | 112 | #endif |
104 | #endif /* CONFIG_SCHED_SMT */ | 113 | #endif /* CONFIG_SCHED_SMT */ |
@@ -106,69 +115,94 @@ int arch_update_cpu_topology(void); | |||
106 | #ifdef CONFIG_SCHED_MC | 115 | #ifdef CONFIG_SCHED_MC |
107 | /* Common values for MC siblings. for now mostly derived from SD_CPU_INIT */ | 116 | /* Common values for MC siblings. for now mostly derived from SD_CPU_INIT */ |
108 | #ifndef SD_MC_INIT | 117 | #ifndef SD_MC_INIT |
109 | #define SD_MC_INIT (struct sched_domain) { \ | 118 | #define SD_MC_INIT (struct sched_domain) { \ |
110 | .min_interval = 1, \ | 119 | .min_interval = 1, \ |
111 | .max_interval = 4, \ | 120 | .max_interval = 4, \ |
112 | .busy_factor = 64, \ | 121 | .busy_factor = 64, \ |
113 | .imbalance_pct = 125, \ | 122 | .imbalance_pct = 125, \ |
114 | .cache_nice_tries = 1, \ | 123 | .cache_nice_tries = 1, \ |
115 | .busy_idx = 2, \ | 124 | .busy_idx = 2, \ |
116 | .wake_idx = 1, \ | 125 | .wake_idx = 1, \ |
117 | .forkexec_idx = 1, \ | 126 | .forkexec_idx = 1, \ |
118 | .flags = SD_LOAD_BALANCE \ | 127 | \ |
119 | | SD_BALANCE_FORK \ | 128 | .flags = 1*SD_LOAD_BALANCE \ |
120 | | SD_BALANCE_EXEC \ | 129 | | 1*SD_BALANCE_NEWIDLE \ |
121 | | SD_WAKE_AFFINE \ | 130 | | 1*SD_BALANCE_EXEC \ |
122 | | SD_WAKE_BALANCE \ | 131 | | 1*SD_BALANCE_FORK \ |
123 | | SD_SHARE_PKG_RESOURCES\ | 132 | | 1*SD_WAKE_IDLE \ |
124 | | sd_balance_for_mc_power()\ | 133 | | 1*SD_WAKE_AFFINE \ |
125 | | sd_power_saving_flags(),\ | 134 | | 1*SD_WAKE_BALANCE \ |
126 | .last_balance = jiffies, \ | 135 | | 0*SD_SHARE_CPUPOWER \ |
127 | .balance_interval = 1, \ | 136 | | 1*SD_SHARE_PKG_RESOURCES \ |
137 | | 0*SD_SERIALIZE \ | ||
138 | | 0*SD_WAKE_IDLE_FAR \ | ||
139 | | sd_balance_for_mc_power() \ | ||
140 | | sd_power_saving_flags() \ | ||
141 | , \ | ||
142 | .last_balance = jiffies, \ | ||
143 | .balance_interval = 1, \ | ||
128 | } | 144 | } |
129 | #endif | 145 | #endif |
130 | #endif /* CONFIG_SCHED_MC */ | 146 | #endif /* CONFIG_SCHED_MC */ |
131 | 147 | ||
132 | /* Common values for CPUs */ | 148 | /* Common values for CPUs */ |
133 | #ifndef SD_CPU_INIT | 149 | #ifndef SD_CPU_INIT |
134 | #define SD_CPU_INIT (struct sched_domain) { \ | 150 | #define SD_CPU_INIT (struct sched_domain) { \ |
135 | .min_interval = 1, \ | 151 | .min_interval = 1, \ |
136 | .max_interval = 4, \ | 152 | .max_interval = 4, \ |
137 | .busy_factor = 64, \ | 153 | .busy_factor = 64, \ |
138 | .imbalance_pct = 125, \ | 154 | .imbalance_pct = 125, \ |
139 | .cache_nice_tries = 1, \ | 155 | .cache_nice_tries = 1, \ |
140 | .busy_idx = 2, \ | 156 | .busy_idx = 2, \ |
141 | .idle_idx = 1, \ | 157 | .idle_idx = 1, \ |
142 | .newidle_idx = 2, \ | 158 | .newidle_idx = 2, \ |
143 | .wake_idx = 1, \ | 159 | .wake_idx = 1, \ |
144 | .forkexec_idx = 1, \ | 160 | .forkexec_idx = 1, \ |
145 | .flags = SD_LOAD_BALANCE \ | 161 | \ |
146 | | SD_BALANCE_EXEC \ | 162 | .flags = 1*SD_LOAD_BALANCE \ |
147 | | SD_BALANCE_FORK \ | 163 | | 1*SD_BALANCE_NEWIDLE \ |
148 | | SD_WAKE_AFFINE \ | 164 | | 1*SD_BALANCE_EXEC \ |
149 | | SD_WAKE_BALANCE \ | 165 | | 1*SD_BALANCE_FORK \ |
150 | | sd_balance_for_package_power()\ | 166 | | 1*SD_WAKE_IDLE \ |
151 | | sd_power_saving_flags(),\ | 167 | | 0*SD_WAKE_AFFINE \ |
152 | .last_balance = jiffies, \ | 168 | | 1*SD_WAKE_BALANCE \ |
153 | .balance_interval = 1, \ | 169 | | 0*SD_SHARE_CPUPOWER \ |
170 | | 0*SD_SHARE_PKG_RESOURCES \ | ||
171 | | 0*SD_SERIALIZE \ | ||
172 | | 0*SD_WAKE_IDLE_FAR \ | ||
173 | | sd_balance_for_package_power() \ | ||
174 | | sd_power_saving_flags() \ | ||
175 | , \ | ||
176 | .last_balance = jiffies, \ | ||
177 | .balance_interval = 1, \ | ||
154 | } | 178 | } |
155 | #endif | 179 | #endif |
156 | 180 | ||
157 | /* sched_domains SD_ALLNODES_INIT for NUMA machines */ | 181 | /* sched_domains SD_ALLNODES_INIT for NUMA machines */ |
158 | #define SD_ALLNODES_INIT (struct sched_domain) { \ | 182 | #define SD_ALLNODES_INIT (struct sched_domain) { \ |
159 | .min_interval = 64, \ | 183 | .min_interval = 64, \ |
160 | .max_interval = 64*num_online_cpus(), \ | 184 | .max_interval = 64*num_online_cpus(), \ |
161 | .busy_factor = 128, \ | 185 | .busy_factor = 128, \ |
162 | .imbalance_pct = 133, \ | 186 | .imbalance_pct = 133, \ |
163 | .cache_nice_tries = 1, \ | 187 | .cache_nice_tries = 1, \ |
164 | .busy_idx = 3, \ | 188 | .busy_idx = 3, \ |
165 | .idle_idx = 3, \ | 189 | .idle_idx = 3, \ |
166 | .flags = SD_LOAD_BALANCE \ | 190 | .flags = 1*SD_LOAD_BALANCE \ |
167 | | SD_BALANCE_NEWIDLE \ | 191 | | 1*SD_BALANCE_NEWIDLE \ |
168 | | SD_WAKE_AFFINE \ | 192 | | 0*SD_BALANCE_EXEC \ |
169 | | SD_SERIALIZE, \ | 193 | | 0*SD_BALANCE_FORK \ |
170 | .last_balance = jiffies, \ | 194 | | 0*SD_WAKE_IDLE \ |
171 | .balance_interval = 64, \ | 195 | | 1*SD_WAKE_AFFINE \ |
196 | | 0*SD_WAKE_BALANCE \ | ||
197 | | 0*SD_SHARE_CPUPOWER \ | ||
198 | | 0*SD_POWERSAVINGS_BALANCE \ | ||
199 | | 0*SD_SHARE_PKG_RESOURCES \ | ||
200 | | 1*SD_SERIALIZE \ | ||
201 | | 1*SD_WAKE_IDLE_FAR \ | ||
202 | | 0*SD_PREFER_SIBLING \ | ||
203 | , \ | ||
204 | .last_balance = jiffies, \ | ||
205 | .balance_interval = 64, \ | ||
172 | } | 206 | } |
173 | 207 | ||
174 | #ifdef CONFIG_NUMA | 208 | #ifdef CONFIG_NUMA |
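The rewritten SD_*_INIT flag blocks spell out every flag as either 1*FLAG or 0*FLAG, so the complete flag set stays visible in each initializer and toggling a flag means flipping one digit rather than adding or deleting a line. The same idiom in miniature, with invented flag values:

	#define F_LOAD_BALANCE	0x01
	#define F_WAKE_IDLE	0x02
	#define F_SERIALIZE	0x04

	/* every flag listed; F_WAKE_IDLE is visibly off, not silently absent */
	unsigned int flags = 1*F_LOAD_BALANCE
			   | 0*F_WAKE_IDLE
			   | 1*F_SERIALIZE;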
diff --git a/include/linux/tty.h b/include/linux/tty.h index e8c6c9136c97..0d3974f59c53 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -23,7 +23,7 @@ | |||
23 | */ | 23 | */ |
24 | #define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */ | 24 | #define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */ |
25 | #define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */ | 25 | #define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */ |
26 | #define NR_LDISCS 19 | 26 | #define NR_LDISCS 20 |
27 | 27 | ||
28 | /* line disciplines */ | 28 | /* line disciplines */ |
29 | #define N_TTY 0 | 29 | #define N_TTY 0 |
@@ -47,6 +47,8 @@ | |||
47 | #define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */ | 47 | #define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */ |
48 | #define N_PPS 18 /* Pulse per Second */ | 48 | #define N_PPS 18 /* Pulse per Second */ |
49 | 49 | ||
50 | #define N_V253 19 /* Codec control over voice modem */ | ||
51 | |||
50 | /* | 52 | /* |
51 | * This character is the same as _POSIX_VDISABLE: it cannot be used as | 53 | * This character is the same as _POSIX_VDISABLE: it cannot be used as |
52 | * a c_cc[] character, but indicates that a particular special character | 54 | * a c_cc[] character, but indicates that a particular special character |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 3224820c8514..78b1e4684cc9 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
@@ -14,17 +14,6 @@ extern struct list_head inode_in_use; | |||
14 | extern struct list_head inode_unused; | 14 | extern struct list_head inode_unused; |
15 | 15 | ||
16 | /* | 16 | /* |
17 | * Yes, writeback.h requires sched.h | ||
18 | * No, sched.h is not included from here. | ||
19 | */ | ||
20 | static inline int task_is_pdflush(struct task_struct *task) | ||
21 | { | ||
22 | return task->flags & PF_FLUSHER; | ||
23 | } | ||
24 | |||
25 | #define current_is_pdflush() task_is_pdflush(current) | ||
26 | |||
27 | /* | ||
28 | * fs/fs-writeback.c | 17 | * fs/fs-writeback.c |
29 | */ | 18 | */ |
30 | enum writeback_sync_modes { | 19 | enum writeback_sync_modes { |
@@ -40,6 +29,8 @@ enum writeback_sync_modes { | |||
40 | struct writeback_control { | 29 | struct writeback_control { |
41 | struct backing_dev_info *bdi; /* If !NULL, only write back this | 30 | struct backing_dev_info *bdi; /* If !NULL, only write back this |
42 | queue */ | 31 | queue */ |
32 | struct super_block *sb; /* if !NULL, only write inodes from | ||
33 | this super_block */ | ||
43 | enum writeback_sync_modes sync_mode; | 34 | enum writeback_sync_modes sync_mode; |
44 | unsigned long *older_than_this; /* If !NULL, only write back inodes | 35 | unsigned long *older_than_this; /* If !NULL, only write back inodes |
45 | older than this */ | 36 | older than this */ |
@@ -76,9 +67,13 @@ struct writeback_control { | |||
76 | /* | 67 | /* |
77 | * fs/fs-writeback.c | 68 | * fs/fs-writeback.c |
78 | */ | 69 | */ |
79 | void writeback_inodes(struct writeback_control *wbc); | 70 | struct bdi_writeback; |
80 | int inode_wait(void *); | 71 | int inode_wait(void *); |
81 | void sync_inodes_sb(struct super_block *, int wait); | 72 | long writeback_inodes_sb(struct super_block *); |
73 | long sync_inodes_sb(struct super_block *); | ||
74 | void writeback_inodes_wbc(struct writeback_control *wbc); | ||
75 | long wb_do_writeback(struct bdi_writeback *wb, int force_wait); | ||
76 | void wakeup_flusher_threads(long nr_pages); | ||
82 | 77 | ||
83 | /* writeback.h requires fs.h; it, too, is not included from here. */ | 78 | /* writeback.h requires fs.h; it, too, is not included from here. */ |
84 | static inline void wait_on_inode(struct inode *inode) | 79 | static inline void wait_on_inode(struct inode *inode) |
@@ -98,7 +93,6 @@ static inline void inode_sync_wait(struct inode *inode) | |||
98 | /* | 93 | /* |
99 | * mm/page-writeback.c | 94 | * mm/page-writeback.c |
100 | */ | 95 | */ |
101 | int wakeup_pdflush(long nr_pages); | ||
102 | void laptop_io_completion(void); | 96 | void laptop_io_completion(void); |
103 | void laptop_sync_completion(void); | 97 | void laptop_sync_completion(void); |
104 | void throttle_vm_writeout(gfp_t gfp_mask); | 98 | void throttle_vm_writeout(gfp_t gfp_mask); |
@@ -150,7 +144,6 @@ balance_dirty_pages_ratelimited(struct address_space *mapping) | |||
150 | typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, | 144 | typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, |
151 | void *data); | 145 | void *data); |
152 | 146 | ||
153 | int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0); | ||
154 | int generic_writepages(struct address_space *mapping, | 147 | int generic_writepages(struct address_space *mapping, |
155 | struct writeback_control *wbc); | 148 | struct writeback_control *wbc); |
156 | int write_cache_pages(struct address_space *mapping, | 149 | int write_cache_pages(struct address_space *mapping, |
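With pdflush gone from this header, callers migrate from wakeup_pdflush()/pdflush_operation() to the per-bdi flusher API declared above. A hedged sketch of the before/after at a call site (surrounding context invented for illustration):

	/* old: kick pdflush to write out some dirty pages */
	/*	wakeup_pdflush(nr_pages);	*/

	/* new: wake the per-bdi flusher threads instead */
	wakeup_flusher_threads(nr_pages);

	/* per-superblock writeback, formerly sync_inodes_sb(sb, wait): */
	writeback_inodes_sb(sb);	/* start writeback, do not wait */
	sync_inodes_sb(sb);		/* write back and wait for completion */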
diff --git a/include/linux/xattr.h b/include/linux/xattr.h index d131e352cfe1..5c84af8c5f6f 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h | |||
@@ -49,6 +49,7 @@ struct xattr_handler { | |||
49 | ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t); | 49 | ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t); |
50 | ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); | 50 | ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); |
51 | ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); | 51 | ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); |
52 | int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int); | ||
52 | int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); | 53 | int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); |
53 | int vfs_removexattr(struct dentry *, const char *); | 54 | int vfs_removexattr(struct dentry *, const char *); |
54 | 55 | ||
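__vfs_setxattr_noperm() is exported so that an inode_setsecctx implementation can mirror the new incore context into the backing xattr without repeating the permission checks its caller has already performed, matching the hook documentation in security.h above. A hedged sketch of such a hook (the xattr name is invented and the incore update is elided; a real module's version will differ):

	static int example_inode_setsecctx(struct dentry *dentry,
					   void *ctx, u32 ctxlen)
	{
		/* update the module's incore label first (module-specific, elided) */
		/* then push the context to the backing filesystem */
		return __vfs_setxattr_noperm(dentry, "security.example",
					     ctx, ctxlen, 0);
	}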