Diffstat (limited to 'include/linux')
85 files changed, 2140 insertions, 1848 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index a67b6227d272..ca9b9b9bd331 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -67,6 +67,7 @@ header-y += falloc.h
 header-y += fd.h
 header-y += fdreg.h
 header-y += fib_rules.h
+header-y += fiemap.h
 header-y += firewire-cdev.h
 header-y += firewire-constants.h
 header-y += fuse.h
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 45f6297821bd..5fc2ef8d97fa 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -21,6 +21,15 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 
+/* on architectures without dma-mapping capabilities we need to ensure
+ * that the asynchronous path compiles away
+ */
+#ifdef CONFIG_HAS_DMA
+#define __async_inline
+#else
+#define __async_inline __always_inline
+#endif
+
 /**
  * dma_chan_ref - object used to manage dma channels received from the
  * dmaengine core.
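The point of the new macro: without CONFIG_HAS_DMA every async_* entry point becomes __always_inline, so once the channel lookup is known to return a constant NULL the offload branch is dead code and only the synchronous path survives. A minimal userspace analogue of the pattern (all demo_* names are hypothetical, not part of this patch; compile with optimisation for the branch to fold):

	#include <stddef.h>
	#include <string.h>

	#ifdef HAS_DMA				/* stand-in for CONFIG_HAS_DMA */
	#define __async_inline
	#else
	#define __async_inline inline __attribute__((always_inline))
	#endif

	struct demo_chan;

	static inline struct demo_chan *demo_find_channel(void)
	{
		/* with DMA support this would be a runtime lookup;
		 * without it the result is a compile-time constant NULL */
		return NULL;
	}

	static void *demo_submit_copy(struct demo_chan *chan, void *dst,
				      const void *src, size_t len)
	{
		(void)chan;			/* stub so the sketch links */
		return memcpy(dst, src, len);
	}

	static __async_inline void *demo_async_memcpy(void *dst, const void *src,
						      size_t len)
	{
		struct demo_chan *chan = demo_find_channel();

		if (chan)			/* folds away when chan is constant NULL */
			return demo_submit_copy(chan, dst, src, len);
		return memcpy(dst, src, len);	/* synchronous fallback */
	}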
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 77b4a9e46004..6638b8148de7 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -35,8 +35,7 @@ struct linux_binprm{
 #endif
 	struct mm_struct *mm;
 	unsigned long p; /* current top of mem */
-	unsigned int sh_bang:1,
-		misc_bang:1,
+	unsigned int
 		cred_prepared:1,/* true if creds already prepared (multiple
 				 * preps happen for interpreters) */
 		cap_effective:1;/* true if has elevated effective capabilities,
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 3d7bcde2e332..7b73bb8f1970 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -332,22 +332,10 @@ extern int __set_page_dirty_buffers(struct page *page);
 
 static inline void buffer_init(void) {}
 static inline int try_to_free_buffers(struct page *page) { return 1; }
-static inline int sync_blockdev(struct block_device *bdev) { return 0; }
 static inline int inode_has_buffers(struct inode *inode) { return 0; }
 static inline void invalidate_inode_buffers(struct inode *inode) {}
 static inline int remove_inode_buffers(struct inode *inode) { return 1; }
 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
-static inline void invalidate_bdev(struct block_device *bdev) {}
-
-static inline struct super_block *freeze_bdev(struct block_device *sb)
-{
-	return NULL;
-}
-
-static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
-	return 0;
-}
 
 #endif /* CONFIG_BLOCK */
 #endif /* _LINUX_BUFFER_HEAD_H */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 499900d0cee7..665fa70e4094 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -15,6 +15,7 @@
 #include <linux/cgroupstats.h>
 #include <linux/prio_heap.h>
 #include <linux/rwsem.h>
+#include <linux/idr.h>
 
 #ifdef CONFIG_CGROUPS
 
@@ -22,6 +23,7 @@ struct cgroupfs_root;
 struct cgroup_subsys;
 struct inode;
 struct cgroup;
+struct css_id;
 
 extern int cgroup_init_early(void);
 extern int cgroup_init(void);
@@ -47,18 +49,24 @@ enum cgroup_subsys_id {
 
 /* Per-subsystem/per-cgroup state maintained by the system. */
 struct cgroup_subsys_state {
-	/* The cgroup that this subsystem is attached to. Useful
+	/*
+	 * The cgroup that this subsystem is attached to. Useful
 	 * for subsystems that want to know about the cgroup
-	 * hierarchy structure */
+	 * hierarchy structure
+	 */
 	struct cgroup *cgroup;
 
-	/* State maintained by the cgroup system to allow subsystems
+	/*
+	 * State maintained by the cgroup system to allow subsystems
 	 * to be "busy". Should be accessed via css_get(),
-	 * css_tryget() and and css_put(). */
+	 * css_tryget() and and css_put().
+	 */
 
 	atomic_t refcnt;
 
 	unsigned long flags;
+	/* ID for this css, if possible */
+	struct css_id *id;
 };
 
 /* bits in struct cgroup_subsys_state flags field */
@@ -120,19 +128,26 @@ static inline void css_put(struct cgroup_subsys_state *css)
 enum {
 	/* Control Group is dead */
 	CGRP_REMOVED,
-	/* Control Group has previously had a child cgroup or a task,
-	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */
+	/*
+	 * Control Group has previously had a child cgroup or a task,
+	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
+	 */
 	CGRP_RELEASABLE,
 	/* Control Group requires release notifications to userspace */
 	CGRP_NOTIFY_ON_RELEASE,
+	/*
+	 * A thread in rmdir() is wating for this cgroup.
+	 */
+	CGRP_WAIT_ON_RMDIR,
 };
 
 struct cgroup {
 	unsigned long flags;		/* "unsigned long" so bitops work */
 
-	/* count users of this cgroup. >0 means busy, but doesn't
-	 * necessarily indicate the number of tasks in the
-	 * cgroup */
+	/*
+	 * count users of this cgroup. >0 means busy, but doesn't
+	 * necessarily indicate the number of tasks in the cgroup
+	 */
 	atomic_t count;
 
 	/*
@@ -142,7 +157,7 @@ struct cgroup {
 	struct list_head sibling;	/* my parent's children */
 	struct list_head children;	/* my children */
 
-	struct cgroup *parent;	/* my parent */
+	struct cgroup *parent;		/* my parent */
 	struct dentry *dentry;	  	/* cgroup fs entry, RCU protected */
 
 	/* Private pointers for each registered subsystem */
@@ -177,11 +192,12 @@ struct cgroup {
 	struct rcu_head rcu_head;
 };
 
-/* A css_set is a structure holding pointers to a set of
+/*
+ * A css_set is a structure holding pointers to a set of
  * cgroup_subsys_state objects. This saves space in the task struct
  * object and speeds up fork()/exit(), since a single inc/dec and a
- * list_add()/del() can bump the reference count on the entire
- * cgroup set for a task.
+ * list_add()/del() can bump the reference count on the entire cgroup
+ * set for a task.
  */
 
 struct css_set {
@@ -226,13 +242,8 @@ struct cgroup_map_cb {
 	void *state;
 };
 
-/* struct cftype:
- *
- * The files in the cgroup filesystem mostly have a very simple read/write
- * handling, some common function will take care of it. Nevertheless some cases
- * (read tasks) are special and therefore I define this structure for every
- * kind of file.
- *
+/*
+ * struct cftype: handler definitions for cgroup control files
  *
  * When reading/writing to a file:
  *	- the cgroup to use is file->f_dentry->d_parent->d_fsdata
@@ -241,10 +252,17 @@ struct cgroup_map_cb {
 
 #define MAX_CFTYPE_NAME 64
 struct cftype {
-	/* By convention, the name should begin with the name of the
-	 * subsystem, followed by a period */
+	/*
+	 * By convention, the name should begin with the name of the
+	 * subsystem, followed by a period
+	 */
 	char name[MAX_CFTYPE_NAME];
 	int private;
+	/*
+	 * If not 0, file mode is set to this value, otherwise it will
+	 * be figured out automatically
+	 */
+	mode_t mode;
 
 	/*
 	 * If non-zero, defines the maximum length of string that can
@@ -319,15 +337,20 @@ struct cgroup_scanner {
 	void (*process_task)(struct task_struct *p,
 			struct cgroup_scanner *scan);
 	struct ptr_heap *heap;
+	void *data;
 };
 
-/* Add a new file to the given cgroup directory. Should only be
- * called by subsystems from within a populate() method */
+/*
+ * Add a new file to the given cgroup directory. Should only be
+ * called by subsystems from within a populate() method
+ */
 int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
 		       const struct cftype *cft);
 
-/* Add a set of new files to the given cgroup directory. Should
- * only be called by subsystems from within a populate() method */
+/*
+ * Add a set of new files to the given cgroup directory. Should
+ * only be called by subsystems from within a populate() method
+ */
 int cgroup_add_files(struct cgroup *cgrp,
 			struct cgroup_subsys *subsys,
 			const struct cftype cft[],
@@ -339,15 +362,18 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
 
 int cgroup_task_count(const struct cgroup *cgrp);
 
-/* Return true if the cgroup is a descendant of the current cgroup */
-int cgroup_is_descendant(const struct cgroup *cgrp);
+/* Return true if cgrp is a descendant of the task's cgroup */
+int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);
 
-/* Control Group subsystem type. See Documentation/cgroups.txt for details */
+/*
+ * Control Group subsystem type.
+ * See Documentation/cgroups/cgroups.txt for details
+ */
 
 struct cgroup_subsys {
 	struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
 						  struct cgroup *cgrp);
-	void (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
+	int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
 	void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
 	int (*can_attach)(struct cgroup_subsys *ss,
 			  struct cgroup *cgrp, struct task_struct *tsk);
@@ -364,6 +390,11 @@ struct cgroup_subsys {
 	int active;
 	int disabled;
 	int early_init;
+	/*
+	 * True if this subsys uses ID. ID is not available before cgroup_init()
+	 * (not available in early_init time.)
+	 */
+	bool use_id;
 #define MAX_CGROUP_TYPE_NAMELEN 32
 	const char *name;
 
@@ -386,6 +417,9 @@ struct cgroup_subsys {
 	 */
 	struct cgroupfs_root *root;
 	struct list_head sibling;
+	/* used when use_id == true */
+	struct idr idr;
+	spinlock_t id_lock;
 };
 
 #define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
@@ -419,7 +453,8 @@ struct cgroup_iter {
 	struct list_head *task;
 };
 
-/* To iterate across the tasks in a cgroup:
+/*
+ * To iterate across the tasks in a cgroup:
  *
  * 1) call cgroup_iter_start to intialize an iterator
  *
@@ -428,9 +463,10 @@ struct cgroup_iter {
  *
  * 3) call cgroup_iter_end() to destroy the iterator.
  *
- * Or, call cgroup_scan_tasks() to iterate through every task in a cpuset.
- * - cgroup_scan_tasks() holds the css_set_lock when calling the test_task()
- *   callback, but not while calling the process_task() callback.
+ * Or, call cgroup_scan_tasks() to iterate through every task in a
+ * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
+ * the test_task() callback, but not while calling the process_task()
+ * callback.
  */
 void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
 struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
@@ -439,6 +475,44 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
 
+/*
+ * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
+ * if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
+ * CSS ID is assigned at cgroup allocation (create) automatically
+ * and removed when subsys calls free_css_id() function. This is because
+ * the lifetime of cgroup_subsys_state is subsys's matter.
+ *
+ * Looking up and scanning function should be called under rcu_read_lock().
+ * Taking cgroup_mutex()/hierarchy_mutex() is not necessary for following calls.
+ * But the css returned by this routine can be "not populated yet" or "being
+ * destroyed". The caller should check css and cgroup's status.
+ */
+
+/*
+ * Typically Called at ->destroy(), or somewhere the subsys frees
+ * cgroup_subsys_state.
+ */
+void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);
+
+/* Find a cgroup_subsys_state which has given ID */
+
+struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);
+
+/*
+ * Get a cgroup whose id is greater than or equal to id under tree of root.
+ * Returning a cgroup_subsys_state or NULL.
+ */
+struct cgroup_subsys_state *css_get_next(struct cgroup_subsys *ss, int id,
+		struct cgroup_subsys_state *root, int *foundid);
+
+/* Returns true if root is ancestor of cg */
+bool css_is_ancestor(struct cgroup_subsys_state *cg,
+		     const struct cgroup_subsys_state *root);
+
+/* Get id and depth of css */
+unsigned short css_id(struct cgroup_subsys_state *css);
+unsigned short css_depth(struct cgroup_subsys_state *css);
+
 #else /* !CONFIG_CGROUPS */
 
 static inline int cgroup_init_early(void) { return 0; }
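As a usage illustration of the new CSS ID calls, here is a hypothetical subsystem routine (not part of the patch) that walks every css below root in ascending ID order, holding only rcu_read_lock() and revalidating each css with css_tryget() as the comment block above demands:

	static void demo_scan_subtree(struct cgroup_subsys *ss,
				      struct cgroup_subsys_state *root)
	{
		struct cgroup_subsys_state *css;
		int id = 1, found;

		rcu_read_lock();
		while ((css = css_get_next(ss, id, root, &found)) != NULL) {
			if (css_tryget(css)) {	/* may be mid-create/destroy */
				/* ... inspect css or its cgroup here ... */
				css_put(css);
			}
			id = found + 1;		/* resume after the ID we got */
		}
		rcu_read_unlock();
	}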
diff --git a/include/linux/compat.h b/include/linux/compat.h
index b880864672de..f2ded21f9a3c 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -191,6 +191,12 @@ asmlinkage ssize_t compat_sys_readv(unsigned long fd,
 		const struct compat_iovec __user *vec, unsigned long vlen);
 asmlinkage ssize_t compat_sys_writev(unsigned long fd,
 		const struct compat_iovec __user *vec, unsigned long vlen);
+asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
+		const struct compat_iovec __user *vec,
+		unsigned long vlen, u32 pos_low, u32 pos_high);
+asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
+		const struct compat_iovec __user *vec,
+		unsigned long vlen, u32 pos_low, u32 pos_high);
 
 int compat_do_execve(char * filename, compat_uptr_t __user *argv,
 	compat_uptr_t __user *envp, struct pt_regs * regs);
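The two u32 position arguments exist because a 64-bit file offset cannot be passed in a single register from 32-bit userspace, so it is split and stitched back together in the kernel. Schematically (the real body lives in fs/compat.c, and the helper name here is assumed for illustration):

	asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
			const struct compat_iovec __user *vec,
			unsigned long vlen, u32 pos_low, u32 pos_high)
	{
		loff_t pos = ((loff_t)pos_high << 32) | pos_low;

		return demo_compat_readv_at(fd, vec, vlen, &pos);
	}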
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index c2747ac2ae43..2643d848df90 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -23,7 +23,6 @@
 #include <linux/node.h>
 #include <linux/compiler.h>
 #include <linux/cpumask.h>
-#include <linux/mutex.h>
 
 struct cpu {
 	int node_id;		/* The node which contains the CPU */
@@ -103,16 +102,6 @@ extern struct sysdev_class cpu_sysdev_class;
 #ifdef CONFIG_HOTPLUG_CPU
 /* Stop CPUs going up and down. */
 
-static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
-{
-	mutex_lock(cpu_hp_mutex);
-}
-
-static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
-{
-	mutex_unlock(cpu_hp_mutex);
-}
-
 extern void get_online_cpus(void);
 extern void put_online_cpus(void);
 #define hotcpu_notifier(fn, pri)	{			\
@@ -126,11 +115,6 @@ int cpu_down(unsigned int cpu);
 
 #else		/* CONFIG_HOTPLUG_CPU */
 
-static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
-{ }
-static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
-{ }
-
 #define get_online_cpus()	do { } while (0)
 #define put_online_cpus()	do { } while (0)
 #define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
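Code that needs to keep CPUs from coming or going is expected to use the refcounted interface that remains. Typical pattern (illustrative; demo_* names are not from the patch):

	static void demo_run_on_each_online_cpu(void (*fn)(int cpu))
	{
		int cpu;

		get_online_cpus();	/* hold off hotplug; may sleep */
		for_each_online_cpu(cpu)
			fn(cpu);
		put_online_cpus();
	}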
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 2e0d79678deb..05ea1dd7d681 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -12,6 +12,7 @@
 #include <linux/cpumask.h>
 #include <linux/nodemask.h>
 #include <linux/cgroup.h>
+#include <linux/mm.h>
 
 #ifdef CONFIG_CPUSETS
 
@@ -29,19 +30,29 @@ void cpuset_init_current_mems_allowed(void);
 void cpuset_update_task_memory_state(void);
 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
 
-extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
-extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
+extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
+extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
 
-static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 {
 	return number_of_cpusets <= 1 ||
-		__cpuset_zone_allowed_softwall(z, gfp_mask);
+		__cpuset_node_allowed_softwall(node, gfp_mask);
 }
 
-static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
 {
 	return number_of_cpusets <= 1 ||
-		__cpuset_zone_allowed_hardwall(z, gfp_mask);
+		__cpuset_node_allowed_hardwall(node, gfp_mask);
+}
+
+static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+{
+	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
+}
+
+static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+{
+	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
 }
 
 extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -112,6 +123,16 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
 	return 1;
 }
 
+static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+{
+	return 1;
+}
+
+static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
+{
+	return 1;
+}
+
 static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
 {
 	return 1;
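The refactor lets node-based callers ask the question directly instead of manufacturing a zone first, while the zone variants survive as one-line wrappers over zone_to_nid(). A hypothetical node-based caller (illustrative only):

	static int demo_pick_allowed_node(gfp_t gfp_mask)
	{
		int nid;

		for_each_online_node(nid)
			if (cpuset_node_allowed_softwall(nid, gfp_mask))
				return nid;
		return -1;	/* no node permitted by the cpuset */
	}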
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 8209e08969f9..66ec05a57955 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -139,6 +139,9 @@ struct target_type {
 	dm_ioctl_fn ioctl;
 	dm_merge_fn merge;
 	dm_busy_fn busy;
+
+	/* For internal device-mapper use. */
+	struct list_head list;
 };
 
 struct io_restrictions {
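Embedding the list_head in target_type lets the device-mapper core keep a registry of loaded targets without a parallel allocation; as the comment says, the field belongs to the core, so targets must not touch it. Schematic core-side sketch (names and locking illustrative):

	static LIST_HEAD(demo_target_list);
	static DEFINE_SPINLOCK(demo_target_lock);

	static void demo_register_target(struct target_type *tt)
	{
		spin_lock(&demo_target_lock);
		list_add_tail(&tt->list, &demo_target_list);
		spin_unlock(&demo_target_lock);
	}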
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 600c5fb2daad..5e8b11d88f6f 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -28,6 +28,9 @@ struct dm_dirty_log_type {
 	const char *name;
 	struct module *module;
 
+	/* For internal device-mapper use */
+	struct list_head list;
+
 	int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
 		   unsigned argc, char **argv);
 	void (*dtr)(struct dm_dirty_log *log);
@@ -113,6 +116,16 @@ struct dm_dirty_log_type {
 	 */
 	int (*status)(struct dm_dirty_log *log, status_type_t status_type,
 		      char *result, unsigned maxlen);
+
+	/*
+	 * is_remote_recovering is necessary for cluster mirroring. It provides
+	 * a way to detect recovery on another node, so we aren't writing
+	 * concurrently. This function is likely to block (when a cluster log
+	 * is used).
+	 *
+	 * Returns: 0, 1
+	 */
+	int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region);
 };
 
 int dm_dirty_log_type_register(struct dm_dirty_log_type *type);
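A hypothetical mirror-core use of the new hook, following the contract in the comment above (a return of 1 means another node is recovering the region, so the local write must be held back rather than racing with the remote recovery):

	static int demo_region_writable(struct dm_dirty_log *log, region_t region)
	{
		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region))
			return 0;	/* caller should queue the write; may have blocked */
		return 1;
	}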
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index af1dab41674b..1a455f1f86d7 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -11,6 +11,7 @@
 
 #define DMA_PTE_READ (1)
 #define DMA_PTE_WRITE (2)
+#define DMA_PTE_SNP (1 << 11)
 
 struct intel_iommu;
 struct dmar_domain;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 1956c8d46d32..2e2aa3df170c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -23,9 +23,6 @@
 
 #include <linux/device.h>
 #include <linux/uio.h>
-#include <linux/kref.h>
-#include <linux/completion.h>
-#include <linux/rcupdate.h>
 #include <linux/dma-mapping.h>
 
 /**
@@ -205,6 +202,7 @@ struct dma_async_tx_descriptor {
 /**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
+ * @privatecnt: how many DMA channels are requested by dma_request_channel
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
  * @cap_mask: one or more dma_capability flags
@@ -227,6 +225,7 @@ struct dma_async_tx_descriptor {
 struct dma_device {
 
 	unsigned int chancnt;
+	unsigned int privatecnt;
 	struct list_head channels;
 	struct list_head global_node;
 	dma_cap_mask_t cap_mask;
@@ -291,6 +290,24 @@ static inline void net_dmaengine_put(void)
 }
 #endif
 
+#ifdef CONFIG_ASYNC_TX_DMA
+#define async_dmaengine_get()	dmaengine_get()
+#define async_dmaengine_put()	dmaengine_put()
+#define async_dma_find_channel(type) dma_find_channel(type)
+#else
+static inline void async_dmaengine_get(void)
+{
+}
+static inline void async_dmaengine_put(void)
+{
+}
+static inline struct dma_chan *
+async_dma_find_channel(enum dma_transaction_type type)
+{
+	return NULL;
+}
+#endif
+
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len);
 dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -337,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
 	set_bit(tx_type, dstp->bits);
 }
 
+#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
+static inline void
+__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
+{
+	clear_bit(tx_type, dstp->bits);
+}
+
 #define dma_cap_zero(mask) __dma_cap_zero(&(mask))
 static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
 {
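Illustrative use of the mask helpers, including the new dma_cap_clear(), when requesting a private memcpy-capable channel (the path that also bumps the new privatecnt field; demo_* name is hypothetical):

	static struct dma_chan *demo_get_memcpy_channel(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		dma_cap_clear(DMA_INTERRUPT, mask);	/* redundant after zero; shown for the API */
		return dma_request_channel(mask, NULL, NULL);
	}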
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index d797dde247f7..c8aad713a046 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -74,4 +74,23 @@ struct dw_dma_slave {
 #define DWC_CFGL_HS_DST_POL	(1 << 18)	/* dst handshake active low */
 #define DWC_CFGL_HS_SRC_POL	(1 << 19)	/* src handshake active low */
 
+/* DMA API extensions */
+struct dw_cyclic_desc {
+	struct dw_desc	**desc;
+	unsigned long	periods;
+	void		(*period_callback)(void *param);
+	void		*period_callback_param;
+};
+
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+		enum dma_data_direction direction);
+void dw_dma_cyclic_free(struct dma_chan *chan);
+int dw_dma_cyclic_start(struct dma_chan *chan);
+void dw_dma_cyclic_stop(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+
 #endif /* DW_DMAC_H */
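A hypothetical driver flow for the new cyclic API (error handling trimmed, and assuming the usual ERR_PTR convention on failure): prepare a ring of period_len chunks over the buffer, start it, and let period_callback fire once per elapsed period until the stop/free pair tears it down.

	static int demo_start_audio_dma(struct dma_chan *chan, dma_addr_t buf,
					size_t buf_len, size_t period_len)
	{
		struct dw_cyclic_desc *cdesc;

		cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
					   DMA_TO_DEVICE);
		if (IS_ERR(cdesc))
			return PTR_ERR(cdesc);

		cdesc->period_callback = demo_period_done;	/* assumed helper */
		cdesc->period_callback_param = NULL;

		return dw_dma_cyclic_start(chan);
		/* later: dw_dma_cyclic_stop(chan); dw_dma_cyclic_free(chan); */
	}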
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index dd495b8c3091..634a5e5aba3e 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -208,6 +208,7 @@ static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
 #define EXT3_STATE_JDATA		0x00000001 /* journaled data exists */
 #define EXT3_STATE_NEW			0x00000002 /* inode is newly created */
 #define EXT3_STATE_XATTR		0x00000004 /* has in-inode xattrs */
+#define EXT3_STATE_FLUSH_ON_CLOSE	0x00000008
 
 /* Used to pass group descriptor data when online resize is done */
 struct ext3_new_group_input {
@@ -893,9 +894,8 @@ extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		       u64 start, u64 len);
 
 /* ioctl.c */
-extern int ext3_ioctl (struct inode *, struct file *, unsigned int,
-		       unsigned long);
-extern long ext3_compat_ioctl (struct file *, unsigned int, unsigned long);
+extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
+extern long ext3_compat_ioctl(struct file *, unsigned int, unsigned long);
 
 /* namei.c */
 extern int ext3_orphan_add(handle_t *, struct inode *);
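The prototype change is the usual ioctl to unlocked_ioctl conversion: the inode parameter and the implicit BKL go away, and the handler gets wired up through the other file_operations slot. Schematically (the real hookup is in fs/ext3/, not this header):

	const struct file_operations demo_ext3_file_ops = {
		/* ... read/write/mmap entries elided ... */
		.unlocked_ioctl	= ext3_ioctl,
	#ifdef CONFIG_COMPAT
		.compat_ioctl	= ext3_compat_ioctl,
	#endif
	};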
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 61211ad823fe..a09e17c8f5fd 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1741,6 +1741,8 @@ extern void drop_collected_mounts(struct vfsmount *);
 
 extern int vfs_statfs(struct dentry *, struct kstatfs *);
 
+extern int current_umask(void);
+
 /* /sys/fs */
 extern struct kobject *fs_kobj;
 
@@ -1885,6 +1887,18 @@ extern int fsync_super(struct super_block *);
 extern int fsync_no_super(struct block_device *);
 #else
 static inline void bd_forget(struct inode *inode) {}
+static inline int sync_blockdev(struct block_device *bdev) { return 0; }
+static inline void invalidate_bdev(struct block_device *bdev) {}
+
+static inline struct super_block *freeze_bdev(struct block_device *sb)
+{
+	return NULL;
+}
+
+static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
+{
+	return 0;
+}
 #endif
 extern const struct file_operations def_blk_fops;
 extern const struct file_operations def_chr_fops;
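Moving the no-op stubs here (from buffer_head.h, above) pairs them with the CONFIG_BLOCK=y declarations in the same header, so a caller can be written once with no #ifdef of its own; with CONFIG_BLOCK=n the calls compile to constants. Illustrative caller:

	static int demo_quiesce_bdev(struct block_device *bdev)
	{
		struct super_block *sb = freeze_bdev(bdev);

		if (!sb)
			return 0;	/* nothing mounted, or !CONFIG_BLOCK */
		/* ... take a consistent snapshot here ... */
		return thaw_bdev(bdev, sb);
	}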
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 18b467dbe278..78a05bfcd8eb 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -4,12 +4,10 @@
 #include <linux/path.h>
 
 struct fs_struct {
-	atomic_t count;	/* This usage count is used by check_unsafe_exec() for
-			 * security checking purposes - therefore it may not be
-			 * incremented, except by clone(CLONE_FS).
-			 */
+	int users;
 	rwlock_t lock;
 	int umask;
+	int in_exec;
 	struct path root, pwd;
 };
 
@@ -19,6 +17,8 @@ extern void exit_fs(struct task_struct *);
 extern void set_fs_root(struct fs_struct *, struct path *);
 extern void set_fs_pwd(struct fs_struct *, struct path *);
 extern struct fs_struct *copy_fs_struct(struct fs_struct *);
-extern void put_fs_struct(struct fs_struct *);
+extern void free_fs_struct(struct fs_struct *);
+extern void daemonize_fs_struct(void);
+extern int unshare_fs_struct(void);
 
 #endif /* _LINUX_FS_STRUCT_H */
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
new file mode 100644
index 000000000000..84d3532dd3ea
--- /dev/null
+++ b/include/linux/fscache-cache.h
@@ -0,0 +1,505 @@
+/* General filesystem caching backing cache interface
+ *
+ * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * NOTE!!! See:
+ *
+ *	Documentation/filesystems/caching/backend-api.txt
+ *
+ * for a description of the cache backend interface declared here.
+ */
+
+#ifndef _LINUX_FSCACHE_CACHE_H
+#define _LINUX_FSCACHE_CACHE_H
+
+#include <linux/fscache.h>
+#include <linux/sched.h>
+#include <linux/slow-work.h>
+
+#define NR_MAXCACHES BITS_PER_LONG
+
+struct fscache_cache;
+struct fscache_cache_ops;
+struct fscache_object;
+struct fscache_operation;
+
+/*
+ * cache tag definition
+ */
+struct fscache_cache_tag {
+	struct list_head	link;
+	struct fscache_cache	*cache;		/* cache referred to by this tag */
+	unsigned long		flags;
+#define FSCACHE_TAG_RESERVED	0		/* T if tag is reserved for a cache */
+	atomic_t		usage;
+	char			name[0];	/* tag name */
+};
+
+/*
+ * cache definition
+ */
+struct fscache_cache {
+	const struct fscache_cache_ops *ops;
+	struct fscache_cache_tag *tag;		/* tag representing this cache */
+	struct kobject		*kobj;		/* system representation of this cache */
+	struct list_head	link;		/* link in list of caches */
+	size_t			max_index_size;	/* maximum size of index data */
+	char			identifier[36];	/* cache label */
+
+	/* node management */
+	struct work_struct	op_gc;		/* operation garbage collector */
+	struct list_head	object_list;	/* list of data/index objects */
+	struct list_head	op_gc_list;	/* list of ops to be deleted */
+	spinlock_t		object_list_lock;
+	spinlock_t		op_gc_list_lock;
+	atomic_t		object_count;	/* no. of live objects in this cache */
+	struct fscache_object	*fsdef;		/* object for the fsdef index */
+	unsigned long		flags;
+#define FSCACHE_IOERROR		0	/* cache stopped on I/O error */
+#define FSCACHE_CACHE_WITHDRAWN	1	/* cache has been withdrawn */
+};
+
+extern wait_queue_head_t fscache_cache_cleared_wq;
+
+/*
+ * operation to be applied to a cache object
+ * - retrieval initiation operations are done in the context of the process
+ *   that issued them, and not in an async thread pool
+ */
+typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
+typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
+
+struct fscache_operation {
+	union {
+		struct work_struct fast_work;	/* record for fast ops */
+		struct slow_work slow_work;	/* record for (very) slow ops */
+	};
+	struct list_head	pend_link;	/* link in object->pending_ops */
+	struct fscache_object	*object;	/* object to be operated upon */
+
+	unsigned long		flags;
+#define FSCACHE_OP_TYPE		0x000f	/* operation type */
+#define FSCACHE_OP_FAST		0x0001	/* - fast op, processor may not sleep for disk */
+#define FSCACHE_OP_SLOW		0x0002	/* - (very) slow op, processor may sleep for disk */
+#define FSCACHE_OP_MYTHREAD	0x0003	/* - processing is done be issuing thread, not pool */
+#define FSCACHE_OP_WAITING	4	/* cleared when op is woken */
+#define FSCACHE_OP_EXCLUSIVE	5	/* exclusive op, other ops must wait */
+#define FSCACHE_OP_DEAD		6	/* op is now dead */
+
+	atomic_t		usage;
+	unsigned		debug_id;	/* debugging ID */
+
+	/* operation processor callback
+	 * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform
+	 *   the op in a non-pool thread */
+	fscache_operation_processor_t processor;
+
+	/* operation releaser */
+	fscache_operation_release_t release;
+};
+
+extern atomic_t fscache_op_debug_id;
+extern const struct slow_work_ops fscache_op_slow_work_ops;
+
+extern void fscache_enqueue_operation(struct fscache_operation *);
+extern void fscache_put_operation(struct fscache_operation *);
+
+/**
+ * fscache_operation_init - Do basic initialisation of an operation
+ * @op: The operation to initialise
+ * @release: The release function to assign
+ *
+ * Do basic initialisation of an operation. The caller must still set flags,
+ * object, either fast_work or slow_work if necessary, and processor if needed.
+ */
+static inline void fscache_operation_init(struct fscache_operation *op,
+					  fscache_operation_release_t release)
+{
+	atomic_set(&op->usage, 1);
+	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
+	op->release = release;
+	INIT_LIST_HEAD(&op->pend_link);
+}
+
+/**
+ * fscache_operation_init_slow - Do additional initialisation of a slow op
+ * @op: The operation to initialise
+ * @processor: The processor function to assign
+ *
+ * Do additional initialisation of an operation as required for slow work.
+ */
+static inline
+void fscache_operation_init_slow(struct fscache_operation *op,
+				 fscache_operation_processor_t processor)
+{
+	op->processor = processor;
+	slow_work_init(&op->slow_work, &fscache_op_slow_work_ops);
+}
+
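To make the split between the two init helpers concrete, a hypothetical backend snippet (demo_* name is illustrative, not part of the patch): per the kernel-doc above, fscache_operation_init() covers only the refcount, debug ID, release hook and list linkage; the caller still owns the flags, the slow-work record and the processor.

	static void demo_init_slow_op(struct fscache_operation *op,
				      fscache_operation_release_t release,
				      fscache_operation_processor_t processor)
	{
		fscache_operation_init(op, release);		/* usage, debug_id, pend_link */
		fscache_operation_init_slow(op, processor);	/* processor + slow_work */
		op->flags = FSCACHE_OP_SLOW;			/* flags stay the caller's job */
	}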
145 | /* | ||
146 | * data read operation | ||
147 | */ | ||
148 | struct fscache_retrieval { | ||
149 | struct fscache_operation op; | ||
150 | struct address_space *mapping; /* netfs pages */ | ||
151 | fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ | ||
152 | void *context; /* netfs read context (pinned) */ | ||
153 | struct list_head to_do; /* list of things to be done by the backend */ | ||
154 | unsigned long start_time; /* time at which retrieval started */ | ||
155 | }; | ||
156 | |||
157 | typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op, | ||
158 | struct page *page, | ||
159 | gfp_t gfp); | ||
160 | |||
161 | typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op, | ||
162 | struct list_head *pages, | ||
163 | unsigned *nr_pages, | ||
164 | gfp_t gfp); | ||
165 | |||
166 | /** | ||
167 | * fscache_get_retrieval - Get an extra reference on a retrieval operation | ||
168 | * @op: The retrieval operation to get a reference on | ||
169 | * | ||
170 | * Get an extra reference on a retrieval operation. | ||
171 | */ | ||
172 | static inline | ||
173 | struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op) | ||
174 | { | ||
175 | atomic_inc(&op->op.usage); | ||
176 | return op; | ||
177 | } | ||
178 | |||
179 | /** | ||
180 | * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing | ||
181 | * @op: The retrieval operation affected | ||
182 | * | ||
183 | * Enqueue a retrieval operation for processing by the FS-Cache thread pool. | ||
184 | */ | ||
185 | static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op) | ||
186 | { | ||
187 | fscache_enqueue_operation(&op->op); | ||
188 | } | ||
189 | |||
190 | /** | ||
191 | * fscache_put_retrieval - Drop a reference to a retrieval operation | ||
192 | * @op: The retrieval operation affected | ||
193 | * | ||
194 | * Drop a reference to a retrieval operation. | ||
195 | */ | ||
196 | static inline void fscache_put_retrieval(struct fscache_retrieval *op) | ||
197 | { | ||
198 | fscache_put_operation(&op->op); | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * cached page storage work item | ||
203 | * - used to do three things: | ||
204 | * - batch writes to the cache | ||
205 | * - do cache writes asynchronously | ||
206 | * - defer writes until cache object lookup completion | ||
207 | */ | ||
208 | struct fscache_storage { | ||
209 | struct fscache_operation op; | ||
210 | pgoff_t store_limit; /* don't write more than this */ | ||
211 | }; | ||
212 | |||
213 | /* | ||
214 | * cache operations | ||
215 | */ | ||
216 | struct fscache_cache_ops { | ||
217 | /* name of cache provider */ | ||
218 | const char *name; | ||
219 | |||
220 | /* allocate an object record for a cookie */ | ||
221 | struct fscache_object *(*alloc_object)(struct fscache_cache *cache, | ||
222 | struct fscache_cookie *cookie); | ||
223 | |||
224 | /* look up the object for a cookie */ | ||
225 | void (*lookup_object)(struct fscache_object *object); | ||
226 | |||
227 | /* finished looking up */ | ||
228 | void (*lookup_complete)(struct fscache_object *object); | ||
229 | |||
230 | /* increment the usage count on this object (may fail if unmounting) */ | ||
231 | struct fscache_object *(*grab_object)(struct fscache_object *object); | ||
232 | |||
233 | /* pin an object in the cache */ | ||
234 | int (*pin_object)(struct fscache_object *object); | ||
235 | |||
236 | /* unpin an object in the cache */ | ||
237 | void (*unpin_object)(struct fscache_object *object); | ||
238 | |||
239 | /* store the updated auxilliary data on an object */ | ||
240 | void (*update_object)(struct fscache_object *object); | ||
241 | |||
242 | /* discard the resources pinned by an object and effect retirement if | ||
243 | * necessary */ | ||
244 | void (*drop_object)(struct fscache_object *object); | ||
245 | |||
246 | /* dispose of a reference to an object */ | ||
247 | void (*put_object)(struct fscache_object *object); | ||
248 | |||
249 | /* sync a cache */ | ||
250 | void (*sync_cache)(struct fscache_cache *cache); | ||
251 | |||
252 | /* notification that the attributes of a non-index object (such as | ||
253 | * i_size) have changed */ | ||
254 | int (*attr_changed)(struct fscache_object *object); | ||
255 | |||
256 | /* reserve space for an object's data and associated metadata */ | ||
257 | int (*reserve_space)(struct fscache_object *object, loff_t i_size); | ||
258 | |||
259 | /* request a backing block for a page be read or allocated in the | ||
260 | * cache */ | ||
261 | fscache_page_retrieval_func_t read_or_alloc_page; | ||
262 | |||
263 | /* request backing blocks for a list of pages be read or allocated in | ||
264 | * the cache */ | ||
265 | fscache_pages_retrieval_func_t read_or_alloc_pages; | ||
266 | |||
267 | /* request a backing block for a page be allocated in the cache so that | ||
268 | * it can be written directly */ | ||
269 | fscache_page_retrieval_func_t allocate_page; | ||
270 | |||
271 | /* request backing blocks for pages be allocated in the cache so that | ||
272 | * they can be written directly */ | ||
273 | fscache_pages_retrieval_func_t allocate_pages; | ||
274 | |||
275 | /* write a page to its backing block in the cache */ | ||
276 | int (*write_page)(struct fscache_storage *op, struct page *page); | ||
277 | |||
278 | /* detach backing block from a page (optional) | ||
279 | * - must release the cookie lock before returning | ||
280 | * - may sleep | ||
281 | */ | ||
282 | void (*uncache_page)(struct fscache_object *object, | ||
283 | struct page *page); | ||
284 | |||
285 | /* dissociate a cache from all the pages it was backing */ | ||
286 | void (*dissociate_pages)(struct fscache_cache *cache); | ||
287 | }; | ||
288 | |||
289 | /* | ||
290 | * data file or index object cookie | ||
291 | * - a file will only appear in one cache | ||
292 | * - a request to cache a file may or may not be honoured, subject to | ||
293 | * constraints such as disk space | ||
294 | * - indices are created on disk just-in-time | ||
295 | */ | ||
296 | struct fscache_cookie { | ||
297 | atomic_t usage; /* number of users of this cookie */ | ||
298 | atomic_t n_children; /* number of children of this cookie */ | ||
299 | spinlock_t lock; | ||
300 | struct hlist_head backing_objects; /* object(s) backing this file/index */ | ||
301 | const struct fscache_cookie_def *def; /* definition */ | ||
302 | struct fscache_cookie *parent; /* parent of this entry */ | ||
303 | void *netfs_data; /* back pointer to netfs */ | ||
304 | struct radix_tree_root stores; /* pages to be stored on this cookie */ | ||
305 | #define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ | ||
306 | |||
307 | unsigned long flags; | ||
308 | #define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */ | ||
309 | #define FSCACHE_COOKIE_CREATING 1 /* T if non-index object being created still */ | ||
310 | #define FSCACHE_COOKIE_NO_DATA_YET 2 /* T if new object with no cached data yet */ | ||
311 | #define FSCACHE_COOKIE_PENDING_FILL 3 /* T if pending initial fill on object */ | ||
312 | #define FSCACHE_COOKIE_FILLING 4 /* T if filling object incrementally */ | ||
313 | #define FSCACHE_COOKIE_UNAVAILABLE 5 /* T if cookie is unavailable (error, etc) */ | ||
314 | }; | ||
315 | |||
316 | extern struct fscache_cookie fscache_fsdef_index; | ||
317 | |||
318 | /* | ||
319 | * on-disk cache file or index handle | ||
320 | */ | ||
321 | struct fscache_object { | ||
322 | enum fscache_object_state { | ||
323 | FSCACHE_OBJECT_INIT, /* object in initial unbound state */ | ||
324 | FSCACHE_OBJECT_LOOKING_UP, /* looking up object */ | ||
325 | FSCACHE_OBJECT_CREATING, /* creating object */ | ||
326 | |||
327 | /* active states */ | ||
328 | FSCACHE_OBJECT_AVAILABLE, /* cleaning up object after creation */ | ||
329 | FSCACHE_OBJECT_ACTIVE, /* object is usable */ | ||
330 | FSCACHE_OBJECT_UPDATING, /* object is updating */ | ||
331 | |||
332 | /* terminal states */ | ||
333 | FSCACHE_OBJECT_DYING, /* object waiting for accessors to finish */ | ||
334 | FSCACHE_OBJECT_LC_DYING, /* object cleaning up after lookup/create */ | ||
335 | FSCACHE_OBJECT_ABORT_INIT, /* abort the init state */ | ||
336 | FSCACHE_OBJECT_RELEASING, /* releasing object */ | ||
337 | FSCACHE_OBJECT_RECYCLING, /* retiring object */ | ||
338 | FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */ | ||
339 | FSCACHE_OBJECT_DEAD, /* object is now dead */ | ||
340 | } state; | ||
341 | |||
342 | int debug_id; /* debugging ID */ | ||
343 | int n_children; /* number of child objects */ | ||
344 | int n_ops; /* number of ops outstanding on object */ | ||
345 | int n_obj_ops; /* number of object ops outstanding on object */ | ||
346 | int n_in_progress; /* number of ops in progress */ | ||
347 | int n_exclusive; /* number of exclusive ops queued */ | ||
348 | spinlock_t lock; /* state and operations lock */ | ||
349 | |||
350 | unsigned long lookup_jif; /* time at which lookup started */ | ||
351 | unsigned long event_mask; /* events this object is interested in */ | ||
352 | unsigned long events; /* events to be processed by this object | ||
353 | * (order is important - using fls) */ | ||
354 | #define FSCACHE_OBJECT_EV_REQUEUE 0 /* T if object should be requeued */ | ||
355 | #define FSCACHE_OBJECT_EV_UPDATE 1 /* T if object should be updated */ | ||
356 | #define FSCACHE_OBJECT_EV_CLEARED 2 /* T if accessors all gone */ | ||
357 | #define FSCACHE_OBJECT_EV_ERROR 3 /* T if fatal error occurred during processing */ | ||
358 | #define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */ | ||
359 | #define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */ | ||
360 | #define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */ | ||
361 | |||
362 | unsigned long flags; | ||
363 | #define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ | ||
364 | #define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */ | ||
365 | #define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */ | ||
366 | |||
367 | struct list_head cache_link; /* link in cache->object_list */ | ||
368 | struct hlist_node cookie_link; /* link in cookie->backing_objects */ | ||
369 | struct fscache_cache *cache; /* cache that supplied this object */ | ||
370 | struct fscache_cookie *cookie; /* netfs's file/index object */ | ||
371 | struct fscache_object *parent; /* parent object */ | ||
372 | struct slow_work work; /* attention scheduling record */ | ||
373 | struct list_head dependents; /* FIFO of dependent objects */ | ||
374 | struct list_head dep_link; /* link in parent's dependents list */ | ||
375 | struct list_head pending_ops; /* unstarted operations on this object */ | ||
376 | pgoff_t store_limit; /* current storage limit */ | ||
377 | }; | ||
378 | |||
379 | extern const char *fscache_object_states[]; | ||
380 | |||
381 | #define fscache_object_is_active(obj) \ | ||
382 | (!test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \ | ||
383 | (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \ | ||
384 | (obj)->state < FSCACHE_OBJECT_DYING) | ||
385 | |||
386 | extern const struct slow_work_ops fscache_object_slow_work_ops; | ||
387 | |||
388 | /** | ||
389 | * fscache_object_init - Initialise a cache object description | ||
390 | * @object: Object description | ||
391 | * | ||
392 | * Initialise a cache object description to its basic values. | ||
393 | * | ||
394 | * See Documentation/filesystems/caching/backend-api.txt for a complete | ||
395 | * description. | ||
396 | */ | ||
397 | static inline | ||
398 | void fscache_object_init(struct fscache_object *object, | ||
399 | struct fscache_cookie *cookie, | ||
400 | struct fscache_cache *cache) | ||
401 | { | ||
402 | atomic_inc(&cache->object_count); | ||
403 | |||
404 | object->state = FSCACHE_OBJECT_INIT; | ||
405 | spin_lock_init(&object->lock); | ||
406 | INIT_LIST_HEAD(&object->cache_link); | ||
407 | INIT_HLIST_NODE(&object->cookie_link); | ||
408 | vslow_work_init(&object->work, &fscache_object_slow_work_ops); | ||
409 | INIT_LIST_HEAD(&object->dependents); | ||
410 | INIT_LIST_HEAD(&object->dep_link); | ||
411 | INIT_LIST_HEAD(&object->pending_ops); | ||
412 | object->n_children = 0; | ||
413 | object->n_ops = object->n_in_progress = object->n_exclusive = 0; | ||
414 | object->events = object->event_mask = 0; | ||
415 | object->flags = 0; | ||
416 | object->store_limit = 0; | ||
417 | object->cache = cache; | ||
418 | object->cookie = cookie; | ||
419 | object->parent = NULL; | ||
420 | } | ||
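A backend typically embeds struct fscache_object at the start of its own object record and calls this from its allocation path. A minimal sketch, assuming a hypothetical myfs_cache_object type and myfs_alloc_object() helper (illustrative, not part of this API):

	struct myfs_cache_object {
		struct fscache_object	fscache;	/* generic object record */
		/* ... backend-private state ... */
	};

	static struct fscache_object *myfs_alloc_object(struct fscache_cache *cache,
							struct fscache_cookie *cookie)
	{
		struct myfs_cache_object *obj;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj)
			return NULL;
		/* binds the object to its cookie and cache; also bumps the
		 * cache's object_count, balanced by fscache_object_destroyed() */
		fscache_object_init(&obj->fscache, cookie, cache);
		return &obj->fscache;
	}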
421 | |||
422 | extern void fscache_object_lookup_negative(struct fscache_object *object); | ||
423 | extern void fscache_obtained_object(struct fscache_object *object); | ||
424 | |||
425 | /** | ||
426 | * fscache_object_destroyed - Note destruction of an object in a cache | ||
427 | * @cache: The cache from which the object came | ||
428 | * | ||
429 | * Note the destruction and deallocation of an object record in a cache. | ||
430 | */ | ||
431 | static inline void fscache_object_destroyed(struct fscache_cache *cache) | ||
432 | { | ||
433 | if (atomic_dec_and_test(&cache->object_count)) | ||
434 | wake_up_all(&fscache_cache_cleared_wq); | ||
435 | } | ||
436 | |||
437 | /** | ||
438 | * fscache_object_lookup_error - Note an object encountered an error | ||
439 | * @object: The object on which the error was encountered | ||
440 | * | ||
441 | * Note that an object encountered a fatal error (usually an I/O error) and | ||
442 | * that it should be withdrawn as soon as possible. | ||
443 | */ | ||
444 | static inline void fscache_object_lookup_error(struct fscache_object *object) | ||
445 | { | ||
446 | set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events); | ||
447 | } | ||
448 | |||
449 | /** | ||
450 | * fscache_set_store_limit - Set the maximum size to be stored in an object | ||
451 | * @object: The object to set the maximum on | ||
452 | * @i_size: The limit to set in bytes | ||
453 | * | ||
454 | * Set the maximum size an object is permitted to reach, implying the highest | ||
455 | * byte that may be written. Intended to be called by the attr_changed() op. | ||
456 | * | ||
457 | * See Documentation/filesystems/caching/backend-api.txt for a complete | ||
458 | * description. | ||
459 | */ | ||
460 | static inline | ||
461 | void fscache_set_store_limit(struct fscache_object *object, loff_t i_size) | ||
462 | { | ||
463 | object->store_limit = i_size >> PAGE_SHIFT; | ||
464 | if (i_size & ~PAGE_MASK) | ||
465 | object->store_limit++; | ||
466 | } | ||
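Note the round-up of any partial tail page: with 4096-byte pages, an i_size of 10000 bytes gives 10000 >> 12 = 2 plus 1 for the 1808-byte remainder, so store_limit becomes 3 pages. A backend's attr_changed() op might apply it as in this sketch (myfs_attr_changed() is illustrative; the def and netfs_data fields belong to struct fscache_cookie declared earlier in this header):

	static int myfs_attr_changed(struct fscache_object *object)
	{
		uint64_t i_size;

		/* ask the netfs for the new file size via its get_attr() op */
		object->cookie->def->get_attr(object->cookie->netfs_data, &i_size);

		/* permit writes up to the page containing the last byte */
		fscache_set_store_limit(object, i_size);
		return 0;
	}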
467 | |||
468 | /** | ||
469 | * fscache_end_io - End a retrieval operation on a page | ||
470 | * @op: The FS-Cache operation covering the retrieval | ||
471 | * @page: The page that was to be fetched | ||
472 | * @error: The error code (0 if successful) | ||
473 | * | ||
474 | * Note the end of an operation to retrieve a page, as covered by a particular | ||
475 | * operation record. | ||
476 | */ | ||
477 | static inline void fscache_end_io(struct fscache_retrieval *op, | ||
478 | struct page *page, int error) | ||
479 | { | ||
480 | op->end_io_func(page, op->context, error); | ||
481 | } | ||
482 | |||
483 | /* | ||
484 | * out-of-line cache backend functions | ||
485 | */ | ||
486 | extern void fscache_init_cache(struct fscache_cache *cache, | ||
487 | const struct fscache_cache_ops *ops, | ||
488 | const char *idfmt, | ||
489 | ...) __attribute__ ((format (printf, 3, 4))); | ||
490 | |||
491 | extern int fscache_add_cache(struct fscache_cache *cache, | ||
492 | struct fscache_object *fsdef, | ||
493 | const char *tagname); | ||
494 | extern void fscache_withdraw_cache(struct fscache_cache *cache); | ||
495 | |||
496 | extern void fscache_io_error(struct fscache_cache *cache); | ||
497 | |||
498 | extern void fscache_mark_pages_cached(struct fscache_retrieval *op, | ||
499 | struct pagevec *pagevec); | ||
500 | |||
501 | extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, | ||
502 | const void *data, | ||
503 | uint16_t datalen); | ||
504 | |||
505 | #endif /* _LINUX_FSCACHE_CACHE_H */ | ||
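Tying the backend pieces together, registration of a cache at mount time might look like the following sketch. mycache, myfs_cache_ops and the tag name are illustrative, and error handling is trimmed:

	static struct fscache_cache mycache;
	static struct fscache_object myfs_fsdef;
	extern const struct fscache_cache_ops myfs_cache_ops;	/* backend-defined */

	static int myfs_register_cache(void)
	{
		/* fill in the cache record and its printf-style identifier */
		fscache_init_cache(&mycache, &myfs_cache_ops, "myfs-%s", "cache0");

		/* the fsdef object stands for the root index of this cache */
		fscache_object_init(&myfs_fsdef, NULL, &mycache);

		return fscache_add_cache(&mycache, &myfs_fsdef, "mytag");
	}

	static void myfs_unregister_cache(void)
	{
		/* fscache_io_error(&mycache) would first mark the cache dead
		 * if withdrawal is due to an unrecoverable I/O failure */
		fscache_withdraw_cache(&mycache);
	}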
diff --git a/include/linux/fscache.h b/include/linux/fscache.h new file mode 100644 index 000000000000..6d8ee466e0a0 --- /dev/null +++ b/include/linux/fscache.h | |||
@@ -0,0 +1,618 @@ | |||
1 | /* General filesystem caching interface | ||
2 | * | ||
3 | * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * NOTE!!! See: | ||
12 | * | ||
13 | * Documentation/filesystems/caching/netfs-api.txt | ||
14 | * | ||
15 | * for a description of the network filesystem interface declared here. | ||
16 | */ | ||
17 | |||
18 | #ifndef _LINUX_FSCACHE_H | ||
19 | #define _LINUX_FSCACHE_H | ||
20 | |||
21 | #include <linux/fs.h> | ||
22 | #include <linux/list.h> | ||
23 | #include <linux/pagemap.h> | ||
24 | #include <linux/pagevec.h> | ||
25 | |||
26 | #if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE) | ||
27 | #define fscache_available() (1) | ||
28 | #define fscache_cookie_valid(cookie) (cookie) | ||
29 | #else | ||
30 | #define fscache_available() (0) | ||
31 | #define fscache_cookie_valid(cookie) (0) | ||
32 | #endif | ||
33 | |||
34 | |||
35 | /* | ||
36 | * overload PG_private_2 to give us PG_fscache - this is used to indicate that | ||
37 | * a page is currently backed by a local disk cache | ||
38 | */ | ||
39 | #define PageFsCache(page) PagePrivate2((page)) | ||
40 | #define SetPageFsCache(page) SetPagePrivate2((page)) | ||
41 | #define ClearPageFsCache(page) ClearPagePrivate2((page)) | ||
42 | #define TestSetPageFsCache(page) TestSetPagePrivate2((page)) | ||
43 | #define TestClearPageFsCache(page) TestClearPagePrivate2((page)) | ||
44 | |||
45 | /* pattern used to fill dead space in an index entry */ | ||
46 | #define FSCACHE_INDEX_DEADFILL_PATTERN 0x79 | ||
47 | |||
48 | struct pagevec; | ||
49 | struct fscache_cache_tag; | ||
50 | struct fscache_cookie; | ||
51 | struct fscache_netfs; | ||
52 | |||
53 | typedef void (*fscache_rw_complete_t)(struct page *page, | ||
54 | void *context, | ||
55 | int error); | ||
56 | |||
57 | /* result of index entry consultation */ | ||
58 | enum fscache_checkaux { | ||
59 | FSCACHE_CHECKAUX_OKAY, /* entry okay as is */ | ||
60 | FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */ | ||
61 | FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */ | ||
62 | }; | ||
63 | |||
64 | /* | ||
65 | * fscache cookie definition | ||
66 | */ | ||
67 | struct fscache_cookie_def { | ||
68 | /* name of cookie type */ | ||
69 | char name[16]; | ||
70 | |||
71 | /* cookie type */ | ||
72 | uint8_t type; | ||
73 | #define FSCACHE_COOKIE_TYPE_INDEX 0 | ||
74 | #define FSCACHE_COOKIE_TYPE_DATAFILE 1 | ||
75 | |||
76 | /* select the cache into which to insert an entry in this index | ||
77 | * - optional | ||
78 | * - should return a cache identifier or NULL to cause the cache to be | ||
79 | * inherited from the parent if possible or the first cache picked | ||
80 | * for a non-index file if not | ||
81 | */ | ||
82 | struct fscache_cache_tag *(*select_cache)( | ||
83 | const void *parent_netfs_data, | ||
84 | const void *cookie_netfs_data); | ||
85 | |||
86 | /* get an index key | ||
87 | * - should store the key data in the buffer | ||
88 | * - should return the amount of data stored | ||
89 | * - not permitted to return an error | ||
90 | * - the netfs data from the cookie being used as the source is | ||
91 | * presented | ||
92 | */ | ||
93 | uint16_t (*get_key)(const void *cookie_netfs_data, | ||
94 | void *buffer, | ||
95 | uint16_t bufmax); | ||
96 | |||
97 | /* get certain file attributes from the netfs data | ||
98 | * - this function can be absent for an index | ||
99 | * - not permitted to return an error | ||
100 | * - the netfs data from the cookie being used as the source is | ||
101 | * presented | ||
102 | */ | ||
103 | void (*get_attr)(const void *cookie_netfs_data, uint64_t *size); | ||
104 | |||
105 | /* get the auxiliary data from netfs data | ||
106 | * - this function can be absent if the index carries no state data | ||
107 | * - should store the auxiliary data in the buffer | ||
108 | * - should return the amount of data stored | ||
109 | * - not permitted to return an error | ||
110 | * - the netfs data from the cookie being used as the source is | ||
111 | * presented | ||
112 | */ | ||
113 | uint16_t (*get_aux)(const void *cookie_netfs_data, | ||
114 | void *buffer, | ||
115 | uint16_t bufmax); | ||
116 | |||
117 | /* consult the netfs about the state of an object | ||
118 | * - this function can be absent if the index carries no state data | ||
119 | * - the netfs data from the cookie being used as the target is | ||
120 | * presented, as is the auxiliary data | ||
121 | */ | ||
122 | enum fscache_checkaux (*check_aux)(void *cookie_netfs_data, | ||
123 | const void *data, | ||
124 | uint16_t datalen); | ||
125 | |||
126 | /* get an extra reference on a read context | ||
127 | * - this function can be absent if the completion function doesn't | ||
128 | * require a context | ||
129 | */ | ||
130 | void (*get_context)(void *cookie_netfs_data, void *context); | ||
131 | |||
132 | /* release an extra reference on a read context | ||
133 | * - this function can be absent if the completion function doesn't | ||
134 | * require a context | ||
135 | */ | ||
136 | void (*put_context)(void *cookie_netfs_data, void *context); | ||
137 | |||
138 | /* indicate pages that now have cache metadata retained | ||
139 | * - this function should mark the specified pages as now being cached | ||
140 | * - the pages will have been marked with PG_fscache before this is | ||
141 | * called, so this is optional | ||
142 | */ | ||
143 | void (*mark_pages_cached)(void *cookie_netfs_data, | ||
144 | struct address_space *mapping, | ||
145 | struct pagevec *cached_pvec); | ||
146 | |||
147 | /* indicate the cookie is no longer cached | ||
148 | * - this function is called when the backing store currently caching | ||
149 | * a cookie is removed | ||
150 | * - the netfs should use this to clean up any markers indicating | ||
151 | * cached pages | ||
152 | * - this is mandatory for any object that may have data | ||
153 | */ | ||
154 | void (*now_uncached)(void *cookie_netfs_data); | ||
155 | }; | ||
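As a sketch of filling this in, a netfs might define an index cookie type keyed on a server address. struct myfs_server and the helper below are illustrative assumptions, not part of this API:

	struct myfs_server {
		unsigned char	addr[16];	/* server address bytes */
	};

	static uint16_t myfs_server_get_key(const void *cookie_netfs_data,
					    void *buffer, uint16_t bufmax)
	{
		const struct myfs_server *server = cookie_netfs_data;
		uint16_t len = min_t(uint16_t, sizeof(server->addr), bufmax);

		/* the index key is just the raw server address */
		memcpy(buffer, server->addr, len);
		return len;
	}

	static const struct fscache_cookie_def myfs_server_index_def = {
		.name		= "MYFS.server",
		.type		= FSCACHE_COOKIE_TYPE_INDEX,
		.get_key	= myfs_server_get_key,
	};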
156 | |||
157 | /* | ||
158 | * fscache cached network filesystem type | ||
159 | * - name, version and ops must be filled in before registration | ||
160 | * - all other fields will be set during registration | ||
161 | */ | ||
162 | struct fscache_netfs { | ||
163 | uint32_t version; /* indexing version */ | ||
164 | const char *name; /* filesystem name */ | ||
165 | struct fscache_cookie *primary_index; | ||
166 | struct list_head link; /* internal link */ | ||
167 | }; | ||
168 | |||
169 | /* | ||
170 | * slow-path functions for when there is actually caching available, and the | ||
171 | * netfs does actually have a valid token | ||
172 | * - these are not to be called directly | ||
173 | * - these are undefined symbols when FS-Cache is not configured and the | ||
174 | * optimiser takes care of not using them | ||
175 | */ | ||
176 | extern int __fscache_register_netfs(struct fscache_netfs *); | ||
177 | extern void __fscache_unregister_netfs(struct fscache_netfs *); | ||
178 | extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *); | ||
179 | extern void __fscache_release_cache_tag(struct fscache_cache_tag *); | ||
180 | |||
181 | extern struct fscache_cookie *__fscache_acquire_cookie( | ||
182 | struct fscache_cookie *, | ||
183 | const struct fscache_cookie_def *, | ||
184 | void *); | ||
185 | extern void __fscache_relinquish_cookie(struct fscache_cookie *, int); | ||
186 | extern void __fscache_update_cookie(struct fscache_cookie *); | ||
187 | extern int __fscache_attr_changed(struct fscache_cookie *); | ||
188 | extern int __fscache_read_or_alloc_page(struct fscache_cookie *, | ||
189 | struct page *, | ||
190 | fscache_rw_complete_t, | ||
191 | void *, | ||
192 | gfp_t); | ||
193 | extern int __fscache_read_or_alloc_pages(struct fscache_cookie *, | ||
194 | struct address_space *, | ||
195 | struct list_head *, | ||
196 | unsigned *, | ||
197 | fscache_rw_complete_t, | ||
198 | void *, | ||
199 | gfp_t); | ||
200 | extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t); | ||
201 | extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t); | ||
202 | extern void __fscache_uncache_page(struct fscache_cookie *, struct page *); | ||
203 | extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *); | ||
204 | extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *); | ||
205 | |||
206 | /** | ||
207 | * fscache_register_netfs - Register a filesystem as desiring caching services | ||
208 | * @netfs: The description of the filesystem | ||
209 | * | ||
210 | * Register a filesystem as desiring caching services if they're available. | ||
211 | * | ||
212 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
213 | * description. | ||
214 | */ | ||
215 | static inline | ||
216 | int fscache_register_netfs(struct fscache_netfs *netfs) | ||
217 | { | ||
218 | if (fscache_available()) | ||
219 | return __fscache_register_netfs(netfs); | ||
220 | else | ||
221 | return 0; | ||
222 | } | ||
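For example, a netfs module's init path might register itself as below; the structure and version number are illustrative:

	static struct fscache_netfs myfs_cache_netfs = {
		.name		= "myfs",
		.version	= 0,		/* bump to invalidate old indices */
	};

	static int __init myfs_init(void)
	{
		/* harmlessly returns 0 when FS-Cache isn't configured */
		return fscache_register_netfs(&myfs_cache_netfs);
	}

	static void __exit myfs_exit(void)
	{
		fscache_unregister_netfs(&myfs_cache_netfs);
	}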
223 | |||
224 | /** | ||
225 | * fscache_unregister_netfs - Indicate that a filesystem no longer desires | ||
226 | * caching services | ||
227 | * @netfs: The description of the filesystem | ||
228 | * | ||
229 | * Indicate that a filesystem no longer desires caching services for the | ||
230 | * moment. | ||
231 | * | ||
232 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
233 | * description. | ||
234 | */ | ||
235 | static inline | ||
236 | void fscache_unregister_netfs(struct fscache_netfs *netfs) | ||
237 | { | ||
238 | if (fscache_available()) | ||
239 | __fscache_unregister_netfs(netfs); | ||
240 | } | ||
241 | |||
242 | /** | ||
243 | * fscache_lookup_cache_tag - Look up a cache tag | ||
244 | * @name: The name of the tag to search for | ||
245 | * | ||
246 | * Acquire a specific cache referral tag that can be used to select a specific | ||
247 | * cache in which to cache an index. | ||
248 | * | ||
249 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
250 | * description. | ||
251 | */ | ||
252 | static inline | ||
253 | struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name) | ||
254 | { | ||
255 | if (fscache_available()) | ||
256 | return __fscache_lookup_cache_tag(name); | ||
257 | else | ||
258 | return NULL; | ||
259 | } | ||
260 | |||
261 | /** | ||
262 | * fscache_release_cache_tag - Release a cache tag | ||
263 | * @tag: The tag to release | ||
264 | * | ||
265 | * Release a reference to a cache referral tag previously looked up. | ||
266 | * | ||
267 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
268 | * description. | ||
269 | */ | ||
270 | static inline | ||
271 | void fscache_release_cache_tag(struct fscache_cache_tag *tag) | ||
272 | { | ||
273 | if (fscache_available()) | ||
274 | __fscache_release_cache_tag(tag); | ||
275 | } | ||
276 | |||
277 | /** | ||
278 | * fscache_acquire_cookie - Acquire a cookie to represent a cache object | ||
279 | * @parent: The cookie that's to be the parent of this one | ||
280 | * @def: A description of the cache object, including callback operations | ||
281 | * @netfs_data: An arbitrary piece of data to be kept in the cookie to | ||
282 | * represent the cache object to the netfs | ||
283 | * | ||
284 | * This function is used to inform FS-Cache about part of an index hierarchy | ||
285 | * that can be used to locate files. This is done by requesting a cookie for | ||
286 | * each index in the path to the file. | ||
287 | * | ||
288 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
289 | * description. | ||
290 | */ | ||
291 | static inline | ||
292 | struct fscache_cookie *fscache_acquire_cookie( | ||
293 | struct fscache_cookie *parent, | ||
294 | const struct fscache_cookie_def *def, | ||
295 | void *netfs_data) | ||
296 | { | ||
297 | if (fscache_cookie_valid(parent)) | ||
298 | return __fscache_acquire_cookie(parent, def, netfs_data); | ||
299 | else | ||
300 | return NULL; | ||
301 | } | ||
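Continuing the earlier sketch, cookies chain down from the netfs's primary index. myfs_server_index_def is as defined above; myfs_file_def, the inode type and the cookie fields are further illustrative assumptions:

	static void myfs_cache_setup(struct myfs_server *server,
				     struct myfs_inode *inode)
	{
		/* index cookie: a child of the netfs's primary index */
		server->cookie = fscache_acquire_cookie(myfs_cache_netfs.primary_index,
							&myfs_server_index_def,
							server);

		/* data cookie: a child of the server index; a NULL parent
		 * simply yields a NULL cookie, so failures cascade harmlessly */
		inode->cookie = fscache_acquire_cookie(server->cookie,
						       &myfs_file_def, inode);
	}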
302 | |||
303 | /** | ||
304 | * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding | ||
305 | * it | ||
306 | * @cookie: The cookie being returned | ||
307 | * @retire: True if the cache object the cookie represents is to be discarded | ||
308 | * | ||
309 | * This function returns a cookie to the cache, forcibly discarding the | ||
310 | * associated cache object if retire is set to true. | ||
311 | * | ||
312 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
313 | * description. | ||
314 | */ | ||
315 | static inline | ||
316 | void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) | ||
317 | { | ||
318 | if (fscache_cookie_valid(cookie)) | ||
319 | __fscache_relinquish_cookie(cookie, retire); | ||
320 | } | ||
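The matching teardown, as a sketch (retire is non-zero when the file has been deleted and its cached data should be discarded too):

	static void myfs_cache_teardown(struct myfs_inode *inode, int retire)
	{
		fscache_relinquish_cookie(inode->cookie, retire);
		inode->cookie = NULL;
	}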
321 | |||
322 | /** | ||
323 | * fscache_update_cookie - Request that a cache object be updated | ||
324 | * @cookie: The cookie representing the cache object | ||
325 | * | ||
326 | * Request an update of the index data for the cache object associated with the | ||
327 | * cookie. | ||
328 | * | ||
329 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
330 | * description. | ||
331 | */ | ||
332 | static inline | ||
333 | void fscache_update_cookie(struct fscache_cookie *cookie) | ||
334 | { | ||
335 | if (fscache_cookie_valid(cookie)) | ||
336 | __fscache_update_cookie(cookie); | ||
337 | } | ||
338 | |||
339 | /** | ||
340 | * fscache_pin_cookie - Pin a data-storage cache object in its cache | ||
341 | * @cookie: The cookie representing the cache object | ||
342 | * | ||
343 | * Permit data-storage cache objects to be pinned in the cache. | ||
344 | * | ||
345 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
346 | * description. | ||
347 | */ | ||
348 | static inline | ||
349 | int fscache_pin_cookie(struct fscache_cookie *cookie) | ||
350 | { | ||
351 | return -ENOBUFS; | ||
352 | } | ||
353 | |||
354 | /** | ||
355 | * fscache_unpin_cookie - Unpin a data-storage cache object in its cache | ||
356 | * @cookie: The cookie representing the cache object | ||
357 | * | ||
358 | * Permit data-storage cache objects to be unpinned from the cache. | ||
359 | * | ||
360 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
361 | * description. | ||
362 | */ | ||
363 | static inline | ||
364 | void fscache_unpin_cookie(struct fscache_cookie *cookie) | ||
365 | { | ||
366 | } | ||
367 | |||
368 | /** | ||
369 | * fscache_attr_changed - Notify cache that an object's attributes changed | ||
370 | * @cookie: The cookie representing the cache object | ||
371 | * | ||
372 | * Send a notification to the cache indicating that an object's attributes have | ||
373 | * changed. This includes the data size. These attributes will be obtained | ||
374 | * through the get_attr() cookie definition op. | ||
375 | * | ||
376 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
377 | * description. | ||
378 | */ | ||
379 | static inline | ||
380 | int fscache_attr_changed(struct fscache_cookie *cookie) | ||
381 | { | ||
382 | if (fscache_cookie_valid(cookie)) | ||
383 | return __fscache_attr_changed(cookie); | ||
384 | else | ||
385 | return -ENOBUFS; | ||
386 | } | ||
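A netfs would typically issue this from its setattr or write-extend path once the server has confirmed the new size; a sketch:

	static void myfs_size_changed(struct myfs_inode *inode)
	{
		/* the cache pulls the new size through the get_attr() cookie
		 * op; -ENOBUFS merely means nothing is cached for this inode */
		fscache_attr_changed(inode->cookie);
	}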
387 | |||
388 | /** | ||
389 | * fscache_reserve_space - Reserve data space for a cached object | ||
390 | * @cookie: The cookie representing the cache object | ||
391 | * @size: The amount of space to be reserved | ||
392 | * | ||
393 | * Reserve an amount of space in the cache for the cache object attached to a | ||
394 | * cookie so that a write to that object within the space can always be | ||
395 | * honoured. | ||
396 | * | ||
397 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
398 | * description. | ||
399 | */ | ||
400 | static inline | ||
401 | int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size) | ||
402 | { | ||
403 | return -ENOBUFS; | ||
404 | } | ||
405 | |||
406 | /** | ||
407 | * fscache_read_or_alloc_page - Read a page from the cache or allocate a block | ||
408 | * in which to store it | ||
409 | * @cookie: The cookie representing the cache object | ||
410 | * @page: The netfs page to fill if possible | ||
411 | * @end_io_func: The callback to invoke when and if the page is filled | ||
412 | * @context: An arbitrary piece of data to pass on to end_io_func() | ||
413 | * @gfp: The conditions under which memory allocation should be made | ||
414 | * | ||
415 | * Read a page from the cache, or if that's not possible make a potential | ||
416 | * one-block reservation in the cache into which the page may be stored once | ||
417 | * fetched from the server. | ||
418 | * | ||
419 | * If the page is not backed by the cache object, or if there's some reason | ||
420 | * it can't be, -ENOBUFS will be returned and nothing more will be done for | ||
421 | * that page. | ||
422 | * | ||
423 | * Else, if that page is backed by the cache, a read will be initiated directly | ||
424 | * to the netfs's page and 0 will be returned by this function. The | ||
425 | * end_io_func() callback will be invoked when the operation completes or | ||
426 | * fails. Note that the callback may be invoked before the | ||
427 | * return. | ||
428 | * | ||
429 | * Else, if the page is unbacked, -ENODATA is returned and a block may have | ||
430 | * been allocated in the cache. | ||
431 | * | ||
432 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
433 | * description. | ||
434 | */ | ||
435 | static inline | ||
436 | int fscache_read_or_alloc_page(struct fscache_cookie *cookie, | ||
437 | struct page *page, | ||
438 | fscache_rw_complete_t end_io_func, | ||
439 | void *context, | ||
440 | gfp_t gfp) | ||
441 | { | ||
442 | if (fscache_cookie_valid(cookie)) | ||
443 | return __fscache_read_or_alloc_page(cookie, page, end_io_func, | ||
444 | context, gfp); | ||
445 | else | ||
446 | return -ENOBUFS; | ||
447 | } | ||
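A netfs ->readpage() would dispatch on the three outcomes described above; a sketch, where MYFS_I(), myfs_readpage_complete() and myfs_fetch_from_server() are assumed netfs helpers:

	static int myfs_readpage(struct file *file, struct page *page)
	{
		struct myfs_inode *inode = MYFS_I(page->mapping->host);
		int ret;

		ret = fscache_read_or_alloc_page(inode->cookie, page,
						 myfs_readpage_complete,
						 NULL, GFP_KERNEL);
		switch (ret) {
		case 0:		/* read dispatched; completion handles the page */
			return 0;
		case -ENODATA:	/* block allocated, but nothing cached yet */
		case -ENOBUFS:	/* page can't be backed by the cache */
		default:
			return myfs_fetch_from_server(inode, page);
		}
	}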
448 | |||
449 | /** | ||
450 | * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate | ||
451 | * blocks in which to store them | ||
452 | * @cookie: The cookie representing the cache object | ||
453 | * @mapping: The netfs inode mapping to which the pages will be attached | ||
454 | * @pages: A list of potential netfs pages to be filled | ||
455 | * @end_io_func: The callback to invoke when and if each page is filled | ||
456 | * @context: An arbitrary piece of data to pass on to end_io_func() | ||
457 | * @gfp: The conditions under which memory allocation should be made | ||
458 | * | ||
459 | * Read a set of pages from the cache, or if that's not possible, attempt to | ||
460 | * make a potential one-block reservation for each page in the cache into which | ||
461 | * that page may be stored once fetched from the server. | ||
462 | * | ||
463 | * If some pages are not backed by the cache object, or if there's some | ||
464 | * reason they can't be, -ENOBUFS will be returned and nothing more will be | ||
465 | * done for those pages. | ||
466 | * | ||
467 | * Else, if some of the pages are backed by the cache, a read will be initiated | ||
468 | * directly to the netfs's pages and 0 will be returned by this function. The | ||
469 | * end_io_func() callback will be invoked as each such read completes or | ||
470 | * fails. Note that the callback may be invoked before the | ||
471 | * return. | ||
472 | * | ||
473 | * Else, if a page is unbacked, -ENODATA is returned and a block may have | ||
474 | * been allocated in the cache. | ||
475 | * | ||
476 | * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in | ||
477 | * regard to different pages, the return values are prioritised in that order. | ||
478 | * Any pages submitted for reading are removed from the pages list. | ||
479 | * | ||
480 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
481 | * description. | ||
482 | */ | ||
483 | static inline | ||
484 | int fscache_read_or_alloc_pages(struct fscache_cookie *cookie, | ||
485 | struct address_space *mapping, | ||
486 | struct list_head *pages, | ||
487 | unsigned *nr_pages, | ||
488 | fscache_rw_complete_t end_io_func, | ||
489 | void *context, | ||
490 | gfp_t gfp) | ||
491 | { | ||
492 | if (fscache_cookie_valid(cookie)) | ||
493 | return __fscache_read_or_alloc_pages(cookie, mapping, pages, | ||
494 | nr_pages, end_io_func, | ||
495 | context, gfp); | ||
496 | else | ||
497 | return -ENOBUFS; | ||
498 | } | ||
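The prioritised return value matters in ->readpages(): whatever the code, any pages still on the list after the call missed the cache and must be fetched from the server. A sketch using the same assumed helpers, plus an illustrative myfs_fetch_pages_from_server():

	static int myfs_readpages(struct file *file, struct address_space *mapping,
				  struct list_head *pages, unsigned nr_pages)
	{
		struct myfs_inode *inode = MYFS_I(mapping->host);
		int ret;

		ret = fscache_read_or_alloc_pages(inode->cookie, mapping, pages,
						  &nr_pages, myfs_readpage_complete,
						  NULL, GFP_KERNEL);
		if (ret == 0 && nr_pages == 0)
			return 0;	/* every page is being read from the cache */

		/* nr_pages entries remain on the list and need a server fetch */
		return myfs_fetch_pages_from_server(inode, mapping, pages, nr_pages);
	}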
499 | |||
500 | /** | ||
501 | * fscache_alloc_page - Allocate a block in which to store a page | ||
502 | * @cookie: The cookie representing the cache object | ||
503 | * @page: The netfs page to allocate a page for | ||
504 | * @gfp: The conditions under which memory allocation should be made | ||
505 | * | ||
506 | * Request allocation of a block in the cache in which to store a netfs page | ||
507 | * without retrieving any contents from the cache. | ||
508 | * | ||
509 | * If the page is not backed by a file then -ENOBUFS will be returned and | ||
510 | * nothing more will be done, and no reservation will be made. | ||
511 | * | ||
512 | * Else, a block will be allocated if one wasn't already, and 0 will be | ||
513 | * returned. | ||
514 | * | ||
515 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
516 | * description. | ||
517 | */ | ||
518 | static inline | ||
519 | int fscache_alloc_page(struct fscache_cookie *cookie, | ||
520 | struct page *page, | ||
521 | gfp_t gfp) | ||
522 | { | ||
523 | if (fscache_cookie_valid(cookie)) | ||
524 | return __fscache_alloc_page(cookie, page, gfp); | ||
525 | else | ||
526 | return -ENOBUFS; | ||
527 | } | ||
528 | |||
529 | /** | ||
530 | * fscache_write_page - Request storage of a page in the cache | ||
531 | * @cookie: The cookie representing the cache object | ||
532 | * @page: The netfs page to store | ||
533 | * @gfp: The conditions under which memory allocation should be made | ||
534 | * | ||
535 | * Request the contents of the netfs page be written into the cache. This | ||
536 | * request may be ignored if no cache block is currently allocated, in which | ||
537 | * case it will return -ENOBUFS. | ||
538 | * | ||
539 | * If a cache block was already allocated, a write will be initiated and 0 will | ||
540 | * be returned. The PG_fscache_write page bit is set immediately and will then | ||
541 | * be cleared at the completion of the write to indicate the success or failure | ||
542 | * of the operation. Note that the completion may happen before the return. | ||
543 | * | ||
544 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
545 | * description. | ||
546 | */ | ||
547 | static inline | ||
548 | int fscache_write_page(struct fscache_cookie *cookie, | ||
549 | struct page *page, | ||
550 | gfp_t gfp) | ||
551 | { | ||
552 | if (fscache_cookie_valid(cookie)) | ||
553 | return __fscache_write_page(cookie, page, gfp); | ||
554 | else | ||
555 | return -ENOBUFS; | ||
556 | } | ||
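The natural caller is the server-read completion path, once a fetched page is up to date; a sketch:

	static void myfs_readpage_from_server_done(struct myfs_inode *inode,
						   struct page *page)
	{
		/* store the fresh page; on -ENOBUFS no block is allocated
		 * for it, so drop the cache's interest in the page instead */
		if (fscache_write_page(inode->cookie, page, GFP_KERNEL) != 0)
			fscache_uncache_page(inode->cookie, page);
	}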
557 | |||
558 | /** | ||
559 | * fscache_uncache_page - Indicate that caching is no longer required on a page | ||
560 | * @cookie: The cookie representing the cache object | ||
561 | * @page: The netfs page that was being cached. | ||
562 | * | ||
563 | * Tell the cache that we no longer want a page to be cached and that it should | ||
564 | * remove any knowledge of the netfs page it may have. | ||
565 | * | ||
566 | * Note that this cannot cancel any outstanding I/O operations between this | ||
567 | * page and the cache. | ||
568 | * | ||
569 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
570 | * description. | ||
571 | */ | ||
572 | static inline | ||
573 | void fscache_uncache_page(struct fscache_cookie *cookie, | ||
574 | struct page *page) | ||
575 | { | ||
576 | if (fscache_cookie_valid(cookie)) | ||
577 | __fscache_uncache_page(cookie, page); | ||
578 | } | ||
579 | |||
580 | /** | ||
581 | * fscache_check_page_write - Ask if a page is being written to the cache | ||
582 | * @cookie: The cookie representing the cache object | ||
583 | * @page: The netfs page that is being cached. | ||
584 | * | ||
585 | * Ask the cache if a page is being written to the cache. | ||
586 | * | ||
587 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
588 | * description. | ||
589 | */ | ||
590 | static inline | ||
591 | bool fscache_check_page_write(struct fscache_cookie *cookie, | ||
592 | struct page *page) | ||
593 | { | ||
594 | if (fscache_cookie_valid(cookie)) | ||
595 | return __fscache_check_page_write(cookie, page); | ||
596 | return false; | ||
597 | } | ||
598 | |||
599 | /** | ||
600 | * fscache_wait_on_page_write - Wait for a page to complete writing to the cache | ||
601 | * @cookie: The cookie representing the cache object | ||
602 | * @page: The netfs page that is being cached. | ||
603 | * | ||
604 | * Ask the cache to wake us up when a page is no longer being written to the | ||
605 | * cache. | ||
606 | * | ||
607 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
608 | * description. | ||
609 | */ | ||
610 | static inline | ||
611 | void fscache_wait_on_page_write(struct fscache_cookie *cookie, | ||
612 | struct page *page) | ||
613 | { | ||
614 | if (fscache_cookie_valid(cookie)) | ||
615 | __fscache_wait_on_page_write(cookie, page); | ||
616 | } | ||
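Taken together, the last three calls let a netfs release PG_fscache pages safely, e.g. from ->releasepage(); a sketch (__GFP_WAIT gates whether we may sleep):

	static int myfs_release_page(struct page *page, gfp_t gfp)
	{
		struct myfs_inode *inode = MYFS_I(page->mapping->host);

		if (PageFsCache(page)) {
			/* the page can't go while the cache is writing it out */
			if (fscache_check_page_write(inode->cookie, page)) {
				if (!(gfp & __GFP_WAIT))
					return 0;
				fscache_wait_on_page_write(inode->cookie, page);
			}
			fscache_uncache_page(inode->cookie, page);
			ClearPageFsCache(page);
		}
		return 1;	/* the page may be released */
	}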
617 | |||
618 | #endif /* _LINUX_FSCACHE_H */ | ||
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index dd20cd78faa8..0bbc15f54536 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/mmzone.h> | 4 | #include <linux/mmzone.h> |
5 | #include <linux/stddef.h> | 5 | #include <linux/stddef.h> |
6 | #include <linux/linkage.h> | 6 | #include <linux/linkage.h> |
7 | #include <linux/topology.h> | ||
7 | 8 | ||
8 | struct vm_area_struct; | 9 | struct vm_area_struct; |
9 | 10 | ||
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h index ed21bd3dbd25..29ee2873f4a8 100644 --- a/include/linux/hdreg.h +++ b/include/linux/hdreg.h | |||
@@ -1,68 +1,6 @@ | |||
1 | #ifndef _LINUX_HDREG_H | 1 | #ifndef _LINUX_HDREG_H |
2 | #define _LINUX_HDREG_H | 2 | #define _LINUX_HDREG_H |
3 | 3 | ||
4 | #ifdef __KERNEL__ | ||
5 | #include <linux/ata.h> | ||
6 | |||
7 | /* | ||
8 | * This file contains some defines for the AT-hd-controller. | ||
9 | * Various sources. | ||
10 | */ | ||
11 | |||
12 | /* ide.c has its own port definitions in "ide.h" */ | ||
13 | |||
14 | #define HD_IRQ 14 | ||
15 | |||
16 | /* Hd controller regs. Ref: IBM AT Bios-listing */ | ||
17 | #define HD_DATA 0x1f0 /* _CTL when writing */ | ||
18 | #define HD_ERROR 0x1f1 /* see err-bits */ | ||
19 | #define HD_NSECTOR 0x1f2 /* nr of sectors to read/write */ | ||
20 | #define HD_SECTOR 0x1f3 /* starting sector */ | ||
21 | #define HD_LCYL 0x1f4 /* starting cylinder */ | ||
22 | #define HD_HCYL 0x1f5 /* high byte of starting cyl */ | ||
23 | #define HD_CURRENT 0x1f6 /* 101dhhhh , d=drive, hhhh=head */ | ||
24 | #define HD_STATUS 0x1f7 /* see status-bits */ | ||
25 | #define HD_FEATURE HD_ERROR /* same io address, read=error, write=feature */ | ||
26 | #define HD_PRECOMP HD_FEATURE /* obsolete use of this port - predates IDE */ | ||
27 | #define HD_COMMAND HD_STATUS /* same io address, read=status, write=cmd */ | ||
28 | |||
29 | #define HD_CMD 0x3f6 /* used for resets */ | ||
30 | #define HD_ALTSTATUS 0x3f6 /* same as HD_STATUS but doesn't clear irq */ | ||
31 | |||
32 | /* remainder is shared between hd.c, ide.c, ide-cd.c, and the hdparm utility */ | ||
33 | |||
34 | /* Bits of HD_STATUS */ | ||
35 | #define ERR_STAT 0x01 | ||
36 | #define INDEX_STAT 0x02 | ||
37 | #define ECC_STAT 0x04 /* Corrected error */ | ||
38 | #define DRQ_STAT 0x08 | ||
39 | #define SEEK_STAT 0x10 | ||
40 | #define SRV_STAT 0x10 | ||
41 | #define WRERR_STAT 0x20 | ||
42 | #define READY_STAT 0x40 | ||
43 | #define BUSY_STAT 0x80 | ||
44 | |||
45 | /* Bits for HD_ERROR */ | ||
46 | #define MARK_ERR 0x01 /* Bad address mark */ | ||
47 | #define ILI_ERR 0x01 /* Illegal Length Indication (ATAPI) */ | ||
48 | #define TRK0_ERR 0x02 /* couldn't find track 0 */ | ||
49 | #define EOM_ERR 0x02 /* End Of Media (ATAPI) */ | ||
50 | #define ABRT_ERR 0x04 /* Command aborted */ | ||
51 | #define MCR_ERR 0x08 /* media change request */ | ||
52 | #define ID_ERR 0x10 /* ID field not found */ | ||
53 | #define MC_ERR 0x20 /* media changed */ | ||
54 | #define ECC_ERR 0x40 /* Uncorrectable ECC error */ | ||
55 | #define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */ | ||
56 | #define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */ | ||
57 | #define LFS_ERR 0xf0 /* Last Failed Sense (ATAPI) */ | ||
58 | |||
59 | /* Bits of HD_NSECTOR */ | ||
60 | #define CD 0x01 | ||
61 | #define IO 0x02 | ||
62 | #define REL 0x04 | ||
63 | #define TAG_MASK 0xf8 | ||
64 | #endif /* __KERNEL__ */ | ||
65 | |||
66 | #include <linux/types.h> | 4 | #include <linux/types.h> |
67 | 5 | ||
68 | /* | 6 | /* |
@@ -191,6 +129,7 @@ typedef struct hd_drive_hob_hdr { | |||
191 | #define TASKFILE_INVALID 0x7fff | 129 | #define TASKFILE_INVALID 0x7fff |
192 | #endif | 130 | #endif |
193 | 131 | ||
132 | #ifndef __KERNEL__ | ||
194 | /* ATA/ATAPI Commands pre T13 Spec */ | 133 | /* ATA/ATAPI Commands pre T13 Spec */ |
195 | #define WIN_NOP 0x00 | 134 | #define WIN_NOP 0x00 |
196 | /* | 135 | /* |
@@ -379,6 +318,7 @@ typedef struct hd_drive_hob_hdr { | |||
379 | #define SECURITY_ERASE_UNIT 0xBD | 318 | #define SECURITY_ERASE_UNIT 0xBD |
380 | #define SECURITY_FREEZE_LOCK 0xBE | 319 | #define SECURITY_FREEZE_LOCK 0xBE |
381 | #define SECURITY_DISABLE_PASSWORD 0xBF | 320 | #define SECURITY_DISABLE_PASSWORD 0xBF |
321 | #endif /* __KERNEL__ */ | ||
382 | 322 | ||
383 | struct hd_geometry { | 323 | struct hd_geometry { |
384 | unsigned char heads; | 324 | unsigned char heads; |
@@ -448,6 +388,7 @@ enum { | |||
448 | 388 | ||
449 | #define __NEW_HD_DRIVE_ID | 389 | #define __NEW_HD_DRIVE_ID |
450 | 390 | ||
391 | #ifndef __KERNEL__ | ||
451 | /* | 392 | /* |
452 | * Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec. | 393 | * Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec. |
453 | * | 394 | * |
@@ -699,6 +640,7 @@ struct hd_driveid { | |||
699 | * 7:0 Signature | 640 | * 7:0 Signature |
700 | */ | 641 | */ |
701 | }; | 642 | }; |
643 | #endif /* __KERNEL__ */ | ||
702 | 644 | ||
703 | /* | 645 | /* |
704 | * IDE "nice" flags. These are used on a per drive basis to determine | 646 | * IDE "nice" flags. These are used on a per drive basis to determine |
diff --git a/include/linux/hid.h b/include/linux/hid.h index fa8ee9cef7be..a72876e43589 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
@@ -270,6 +270,7 @@ struct hid_item { | |||
270 | 270 | ||
271 | #define HID_QUIRK_INVERT 0x00000001 | 271 | #define HID_QUIRK_INVERT 0x00000001 |
272 | #define HID_QUIRK_NOTOUCH 0x00000002 | 272 | #define HID_QUIRK_NOTOUCH 0x00000002 |
273 | #define HID_QUIRK_IGNORE 0x00000004 | ||
273 | #define HID_QUIRK_NOGET 0x00000008 | 274 | #define HID_QUIRK_NOGET 0x00000008 |
274 | #define HID_QUIRK_BADPAD 0x00000020 | 275 | #define HID_QUIRK_BADPAD 0x00000020 |
275 | #define HID_QUIRK_MULTI_INPUT 0x00000040 | 276 | #define HID_QUIRK_MULTI_INPUT 0x00000040 |
@@ -603,12 +604,17 @@ struct hid_ll_driver { | |||
603 | int (*open)(struct hid_device *hdev); | 604 | int (*open)(struct hid_device *hdev); |
604 | void (*close)(struct hid_device *hdev); | 605 | void (*close)(struct hid_device *hdev); |
605 | 606 | ||
607 | int (*power)(struct hid_device *hdev, int level); | ||
608 | |||
606 | int (*hidinput_input_event) (struct input_dev *idev, unsigned int type, | 609 | int (*hidinput_input_event) (struct input_dev *idev, unsigned int type, |
607 | unsigned int code, int value); | 610 | unsigned int code, int value); |
608 | 611 | ||
609 | int (*parse)(struct hid_device *hdev); | 612 | int (*parse)(struct hid_device *hdev); |
610 | }; | 613 | }; |
611 | 614 | ||
615 | #define PM_HINT_FULLON (1<<5) | ||
616 | #define PM_HINT_NORMAL (1<<1) | ||
617 | |||
612 | /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ | 618 | /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ |
613 | /* We ignore a few input applications that are not widely used */ | 619 | /* We ignore a few input applications that are not widely used */ |
614 | #define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002)) | 620 | #define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002)) |
@@ -641,6 +647,7 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int | |||
641 | void hid_output_report(struct hid_report *report, __u8 *data); | 647 | void hid_output_report(struct hid_report *report, __u8 *data); |
642 | struct hid_device *hid_allocate_device(void); | 648 | struct hid_device *hid_allocate_device(void); |
643 | int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); | 649 | int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); |
650 | int hid_check_keys_pressed(struct hid_device *hid); | ||
644 | int hid_connect(struct hid_device *hid, unsigned int connect_mask); | 651 | int hid_connect(struct hid_device *hid, unsigned int connect_mask); |
645 | 652 | ||
646 | /** | 653 | /** |
@@ -791,21 +798,5 @@ dbg_hid(const char *fmt, ...) | |||
791 | __FILE__ , ## arg) | 798 | __FILE__ , ## arg) |
792 | #endif /* HID_FF */ | 799 | #endif /* HID_FF */ |
793 | 800 | ||
794 | #ifdef __KERNEL__ | ||
795 | #ifdef CONFIG_HID_COMPAT | ||
796 | #define HID_COMPAT_LOAD_DRIVER(name) \ | ||
797 | /* prototype to avoid sparse warning */ \ | ||
798 | extern void hid_compat_##name(void); \ | ||
799 | void hid_compat_##name(void) { } \ | ||
800 | EXPORT_SYMBOL(hid_compat_##name) | ||
801 | #else | ||
802 | #define HID_COMPAT_LOAD_DRIVER(name) | ||
803 | #endif /* HID_COMPAT */ | ||
804 | #define HID_COMPAT_CALL_DRIVER(name) do { \ | ||
805 | extern void hid_compat_##name(void); \ | ||
806 | hid_compat_##name(); \ | ||
807 | } while (0) | ||
808 | #endif /* __KERNEL__ */ | ||
809 | |||
810 | #endif | 801 | #endif |
811 | 802 | ||
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 7ff5c55f9b55..1fcb7126a01f 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h | |||
@@ -19,8 +19,21 @@ static inline void flush_kernel_dcache_page(struct page *page) | |||
19 | } | 19 | } |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #ifdef CONFIG_HIGHMEM | 22 | #include <asm/kmap_types.h> |
23 | |||
24 | #if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT) | ||
25 | |||
26 | void debug_kmap_atomic(enum km_type type); | ||
27 | |||
28 | #else | ||
23 | 29 | ||
30 | static inline void debug_kmap_atomic(enum km_type type) | ||
31 | { | ||
32 | } | ||
33 | |||
34 | #endif | ||
35 | |||
36 | #ifdef CONFIG_HIGHMEM | ||
24 | #include <asm/highmem.h> | 37 | #include <asm/highmem.h> |
25 | 38 | ||
26 | /* declarations for linux/mm/highmem.c */ | 39 | /* declarations for linux/mm/highmem.c */ |
@@ -44,8 +57,6 @@ static inline void *kmap(struct page *page) | |||
44 | 57 | ||
45 | #define kunmap(page) do { (void) (page); } while (0) | 58 | #define kunmap(page) do { (void) (page); } while (0) |
46 | 59 | ||
47 | #include <asm/kmap_types.h> | ||
48 | |||
49 | static inline void *kmap_atomic(struct page *page, enum km_type idx) | 60 | static inline void *kmap_atomic(struct page *page, enum km_type idx) |
50 | { | 61 | { |
51 | pagefault_disable(); | 62 | pagefault_disable(); |
@@ -187,16 +198,4 @@ static inline void copy_highpage(struct page *to, struct page *from) | |||
187 | kunmap_atomic(vto, KM_USER1); | 198 | kunmap_atomic(vto, KM_USER1); |
188 | } | 199 | } |
189 | 200 | ||
190 | #if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT) | ||
191 | |||
192 | void debug_kmap_atomic(enum km_type type); | ||
193 | |||
194 | #else | ||
195 | |||
196 | static inline void debug_kmap_atomic(enum km_type type) | ||
197 | { | ||
198 | } | ||
199 | |||
200 | #endif | ||
201 | |||
202 | #endif /* _LINUX_HIGHMEM_H */ | 201 | #endif /* _LINUX_HIGHMEM_H */ |
diff --git a/include/linux/i2c/at24.h b/include/linux/i2c/at24.h index f6edd522a929..8ace93024d60 100644 --- a/include/linux/i2c/at24.h +++ b/include/linux/i2c/at24.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _LINUX_AT24_H | 2 | #define _LINUX_AT24_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/memory.h> | ||
5 | 6 | ||
6 | /* | 7 | /* |
7 | * As seen through Linux I2C, differences between the most common types of I2C | 8 | * As seen through Linux I2C, differences between the most common types of I2C |
@@ -23,6 +24,9 @@ struct at24_platform_data { | |||
23 | #define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */ | 24 | #define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */ |
24 | #define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ | 25 | #define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ |
25 | #define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ | 26 | #define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ |
27 | |||
28 | void (*setup)(struct memory_accessor *, void *context); | ||
29 | void *context; | ||
26 | }; | 30 | }; |
27 | 31 | ||
28 | #endif /* _LINUX_AT24_H */ | 32 | #endif /* _LINUX_AT24_H */ |
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h index 8137f660a5cc..0dc80ef24975 100644 --- a/include/linux/i2c/twl4030.h +++ b/include/linux/i2c/twl4030.h | |||
@@ -218,6 +218,53 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); | |||
218 | 218 | ||
219 | /*----------------------------------------------------------------------*/ | 219 | /*----------------------------------------------------------------------*/ |
220 | 220 | ||
221 | /* Power bus message definitions */ | ||
222 | |||
223 | #define DEV_GRP_NULL 0x0 | ||
224 | #define DEV_GRP_P1 0x1 | ||
225 | #define DEV_GRP_P2 0x2 | ||
226 | #define DEV_GRP_P3 0x4 | ||
227 | |||
228 | #define RES_GRP_RES 0x0 | ||
229 | #define RES_GRP_PP 0x1 | ||
230 | #define RES_GRP_RC 0x2 | ||
231 | #define RES_GRP_PP_RC 0x3 | ||
232 | #define RES_GRP_PR 0x4 | ||
233 | #define RES_GRP_PP_PR 0x5 | ||
234 | #define RES_GRP_RC_PR 0x6 | ||
235 | #define RES_GRP_ALL 0x7 | ||
236 | |||
237 | #define RES_TYPE2_R0 0x0 | ||
238 | |||
239 | #define RES_TYPE_ALL 0x7 | ||
240 | |||
241 | #define RES_STATE_WRST 0xF | ||
242 | #define RES_STATE_ACTIVE 0xE | ||
243 | #define RES_STATE_SLEEP 0x8 | ||
244 | #define RES_STATE_OFF 0x0 | ||
245 | |||
246 | /* | ||
247 | * Power Bus Message Format ... these can be sent individually by Linux, | ||
248 | * but are usually part of downloaded scripts that are run when various | ||
249 | * power events are triggered. | ||
250 | * | ||
251 | * Broadcast Message (16 Bits): | ||
252 | * DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4] | ||
253 | * RES_STATE[3:0] | ||
254 | * | ||
255 | * Singular Message (16 Bits): | ||
256 | * DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0] | ||
257 | */ | ||
258 | |||
259 | #define MSG_BROADCAST(devgrp, grp, type, type2, state) \ | ||
260 | ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \ | ||
261 | | (type) << 4 | (state)) | ||
262 | |||
263 | #define MSG_SINGULAR(devgrp, id, state) \ | ||
264 | ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state)) | ||
265 | |||
266 | /*----------------------------------------------------------------------*/ | ||
267 | |||
221 | struct twl4030_bci_platform_data { | 268 | struct twl4030_bci_platform_data { |
222 | int *battery_tmp_tbl; | 269 | int *battery_tmp_tbl; |
223 | unsigned int tblsize; | 270 | unsigned int tblsize; |
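As a worked example of the message layout above (resource ID 5 is purely illustrative):

	/* singular: put resource ID 5 of processor group P1 to sleep
	 *   (0x1 << 13) | (0 << 12) | (5 << 4) | 0x8 = 0x2058 */
	u16 sleep_msg = MSG_SINGULAR(DEV_GRP_P1, 5, RES_STATE_SLEEP);

	/* broadcast: wake every resource type in group P1 back to active
	 *   (0x1 << 13) | (1 << 12) | (0x7 << 9) | (0x7 << 4) | 0xE = 0x3e7e */
	u16 wake_msg = MSG_BROADCAST(DEV_GRP_P1, RES_GRP_ALL, RES_TYPE_ALL,
				     RES_TYPE2_R0, RES_STATE_ACTIVE);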
diff --git a/include/linux/idr.h b/include/linux/idr.h index dd846df8cd32..e968db71e33a 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h | |||
@@ -106,6 +106,7 @@ int idr_get_new(struct idr *idp, void *ptr, int *id); | |||
106 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); | 106 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); |
107 | int idr_for_each(struct idr *idp, | 107 | int idr_for_each(struct idr *idp, |
108 | int (*fn)(int id, void *p, void *data), void *data); | 108 | int (*fn)(int id, void *p, void *data), void *data); |
109 | void *idr_get_next(struct idr *idp, int *nextid); | ||
109 | void *idr_replace(struct idr *idp, void *ptr, int id); | 110 | void *idr_replace(struct idr *idp, void *ptr, int id); |
110 | void idr_remove(struct idr *idp, int id); | 111 | void idr_remove(struct idr *idp, int id); |
111 | void idr_remove_all(struct idr *idp); | 112 | void idr_remove_all(struct idr *idp); |
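The new idr_get_next() supports open-coded iteration where the idr_for_each() callback style is inconvenient; a sketch (my_idr and process() are illustrative):

	void *entry;
	int id = 0;

	/* returns the entry at *nextid or above and writes back the ID
	 * actually found, or NULL once the idr is exhausted */
	while ((entry = idr_get_next(&my_idr, &id)) != NULL) {
		process(id, entry);
		id++;		/* step past the entry just visited */
	}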
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 1d6c71d96ede..77214ead1a36 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
@@ -123,7 +123,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
123 | #define ecap_eim_support(e) ((e >> 4) & 0x1) | 123 | #define ecap_eim_support(e) ((e >> 4) & 0x1) |
124 | #define ecap_ir_support(e) ((e >> 3) & 0x1) | 124 | #define ecap_ir_support(e) ((e >> 3) & 0x1) |
125 | #define ecap_max_handle_mask(e) ((e >> 20) & 0xf) | 125 | #define ecap_max_handle_mask(e) ((e >> 20) & 0xf) |
126 | 126 | #define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */ | |
127 | 127 | ||
128 | /* IOTLB_REG */ | 128 | /* IOTLB_REG */ |
129 | #define DMA_TLB_FLUSH_GRANU_OFFSET 60 | 129 | #define DMA_TLB_FLUSH_GRANU_OFFSET 60 |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 8a7bfb1b6ca0..3af4ffd591b9 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #define IOMMU_READ (1) | 22 | #define IOMMU_READ (1) |
23 | #define IOMMU_WRITE (2) | 23 | #define IOMMU_WRITE (2) |
24 | #define IOMMU_CACHE (4) /* DMA cache coherency */ | ||
24 | 25 | ||
25 | struct device; | 26 | struct device; |
26 | 27 | ||
@@ -28,6 +29,8 @@ struct iommu_domain { | |||
28 | void *priv; | 29 | void *priv; |
29 | }; | 30 | }; |
30 | 31 | ||
32 | #define IOMMU_CAP_CACHE_COHERENCY 0x1 | ||
33 | |||
31 | struct iommu_ops { | 34 | struct iommu_ops { |
32 | int (*domain_init)(struct iommu_domain *domain); | 35 | int (*domain_init)(struct iommu_domain *domain); |
33 | void (*domain_destroy)(struct iommu_domain *domain); | 36 | void (*domain_destroy)(struct iommu_domain *domain); |
@@ -39,6 +42,8 @@ struct iommu_ops { | |||
39 | size_t size); | 42 | size_t size); |
40 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, | 43 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, |
41 | unsigned long iova); | 44 | unsigned long iova); |
45 | int (*domain_has_cap)(struct iommu_domain *domain, | ||
46 | unsigned long cap); | ||
42 | }; | 47 | }; |
43 | 48 | ||
44 | #ifdef CONFIG_IOMMU_API | 49 | #ifdef CONFIG_IOMMU_API |
@@ -57,6 +62,8 @@ extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, | |||
57 | size_t size); | 62 | size_t size); |
58 | extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, | 63 | extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, |
59 | unsigned long iova); | 64 | unsigned long iova); |
65 | extern int iommu_domain_has_cap(struct iommu_domain *domain, | ||
66 | unsigned long cap); | ||
60 | 67 | ||
61 | #else /* CONFIG_IOMMU_API */ | 68 | #else /* CONFIG_IOMMU_API */ |
62 | 69 | ||
@@ -107,6 +114,12 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, | |||
107 | return 0; | 114 | return 0; |
108 | } | 115 | } |
109 | 116 | ||
117 | static inline int iommu_domain_has_cap(struct iommu_domain *domain, | ||
118 | unsigned long cap) | ||
119 | { | ||
120 | return 0; | ||
121 | } | ||
122 | |||
110 | #endif /* CONFIG_IOMMU_API */ | 123 | #endif /* CONFIG_IOMMU_API */ |
111 | 124 | ||
112 | #endif /* __LINUX_IOMMU_H */ | 125 | #endif /* __LINUX_IOMMU_H */ |
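A consumer such as KVM device assignment would use the new capability to decide whether mappings may be made DMA-coherent; a sketch, assuming the pre-existing iommu_map_range() from this header (my_map_guest_page() is illustrative):

	static int my_map_guest_page(struct iommu_domain *domain,
				     unsigned long iova, phys_addr_t paddr)
	{
		int prot = IOMMU_READ | IOMMU_WRITE;

		/* only ask for coherent mappings when the hardware supports
		 * snoop control (cf. ecap_sc_support() on VT-d above) */
		if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
			prot |= IOMMU_CACHE;

		return iommu_map_range(domain, iova, paddr, PAGE_SIZE, prot);
	}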
diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 64246dce5663..53ae4399da2d 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h | |||
@@ -35,7 +35,7 @@ | |||
35 | #define journal_oom_retry 1 | 35 | #define journal_oom_retry 1 |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * Define JBD_PARANIOD_IOFAIL to cause a kernel BUG() if ext3 finds | 38 | * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds |
39 | * certain classes of error which can occur due to failed IOs. Under | 39 | * certain classes of error which can occur due to failed IOs. Under |
40 | * normal use we want ext3 to continue after such errors, because | 40 | * normal use we want ext3 to continue after such errors, because |
41 | * hardware _can_ fail, but for debugging purposes when running tests on | 41 | * hardware _can_ fail, but for debugging purposes when running tests on |
@@ -552,6 +552,11 @@ struct transaction_s | |||
552 | */ | 552 | */ |
553 | int t_handle_count; | 553 | int t_handle_count; |
554 | 554 | ||
555 | /* | ||
556 | * This transaction is being forced and some process is | ||
557 | * waiting for it to finish. | ||
558 | */ | ||
559 | int t_synchronous_commit:1; | ||
555 | }; | 560 | }; |
556 | 561 | ||
557 | /** | 562 | /** |
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index f3fe34391d8e..792274269f2b 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h | |||
@@ -13,10 +13,17 @@ | |||
13 | #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ | 13 | #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ |
14 | 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) | 14 | 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) |
15 | 15 | ||
16 | struct module; | ||
17 | |||
16 | #ifdef CONFIG_KALLSYMS | 18 | #ifdef CONFIG_KALLSYMS |
17 | /* Lookup the address for a symbol. Returns 0 if not found. */ | 19 | /* Lookup the address for a symbol. Returns 0 if not found. */ |
18 | unsigned long kallsyms_lookup_name(const char *name); | 20 | unsigned long kallsyms_lookup_name(const char *name); |
19 | 21 | ||
22 | /* Call a function on each kallsyms symbol in the core kernel */ | ||
23 | int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, | ||
24 | unsigned long), | ||
25 | void *data); | ||
26 | |||
20 | extern int kallsyms_lookup_size_offset(unsigned long addr, | 27 | extern int kallsyms_lookup_size_offset(unsigned long addr, |
21 | unsigned long *symbolsize, | 28 | unsigned long *symbolsize, |
22 | unsigned long *offset); | 29 | unsigned long *offset); |
@@ -43,6 +50,14 @@ static inline unsigned long kallsyms_lookup_name(const char *name) | |||
43 | return 0; | 50 | return 0; |
44 | } | 51 | } |
45 | 52 | ||
53 | static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, | ||
54 | struct module *, | ||
55 | unsigned long), | ||
56 | void *data) | ||
57 | { | ||
58 | return 0; | ||
59 | } | ||
60 | |||
46 | static inline int kallsyms_lookup_size_offset(unsigned long addr, | 61 | static inline int kallsyms_lookup_size_offset(unsigned long addr, |
47 | unsigned long *symbolsize, | 62 | unsigned long *symbolsize, |
48 | unsigned long *offset) | 63 | unsigned long *offset) |
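The walk visits every core-kernel symbol and stops as soon as the callback returns non-zero; a sketch that resolves a symbol by name (struct sym_match is illustrative):

	struct sym_match {
		const char *name;
		unsigned long addr;
	};

	static int match_symbol(void *data, const char *name,
				struct module *mod, unsigned long addr)
	{
		struct sym_match *m = data;

		if (strcmp(name, m->name))
			return 0;	/* keep walking */
		m->addr = addr;
		return 1;		/* non-zero terminates the walk */
	}

	/* usage:
	 *	struct sym_match m = { .name = "jiffies" };
	 *	kallsyms_on_each_symbol(match_symbol, &m);
	 */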
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index e81f2637fdef..d9e75ec7def5 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -255,6 +255,7 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, | |||
255 | } \ | 255 | } \ |
256 | }) | 256 | }) |
257 | 257 | ||
258 | void log_buf_kexec_setup(void); | ||
258 | #else | 259 | #else |
259 | static inline int vprintk(const char *s, va_list args) | 260 | static inline int vprintk(const char *s, va_list args) |
260 | __attribute__ ((format (printf, 1, 0))); | 261 | __attribute__ ((format (printf, 1, 0))); |
@@ -270,6 +271,9 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ | |||
270 | /* No effect, but we still get type checking even in the !PRINTK case: */ | 271 | /* No effect, but we still get type checking even in the !PRINTK case: */ |
271 | #define printk_once(x...) printk(x) | 272 | #define printk_once(x...) printk(x) |
272 | 273 | ||
274 | static inline void log_buf_kexec_setup(void) | ||
275 | { | ||
276 | } | ||
273 | #endif | 277 | #endif |
274 | 278 | ||
275 | extern int printk_needs_cpu(int cpu); | 279 | extern int printk_needs_cpu(int cpu); |
diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 92213a9194e1..d5fa565086d1 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h | |||
@@ -29,10 +29,15 @@ | |||
29 | #ifdef CONFIG_MODULES | 29 | #ifdef CONFIG_MODULES |
30 | /* modprobe exit status on success, -ve on error. Return value | 30 | /* modprobe exit status on success, -ve on error. Return value |
31 | * usually useless though. */ | 31 | * usually useless though. */ |
32 | extern int request_module(const char * name, ...) __attribute__ ((format (printf, 1, 2))); | 32 | extern int __request_module(bool wait, const char *name, ...) \ |
33 | #define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x))) | 33 | __attribute__((format(printf, 2, 3))); |
34 | #define request_module(mod...) __request_module(true, mod) | ||
35 | #define request_module_nowait(mod...) __request_module(false, mod) | ||
36 | #define try_then_request_module(x, mod...) \ | ||
37 | ((x) ?: (__request_module(false, mod), (x))) | ||
34 | #else | 38 | #else |
35 | static inline int request_module(const char * name, ...) { return -ENOSYS; } | 39 | static inline int request_module(const char *name, ...) { return -ENOSYS; } |
40 | static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; } | ||
36 | #define try_then_request_module(x, mod...) (x) | 41 | #define try_then_request_module(x, mod...) (x) |
37 | #endif | 42 | #endif |
38 | 43 | ||
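The split lets callers that must not block on modprobe fire off a load asynchronously; a sketch (the alias format and chip_id are illustrative):

	/* blocking: waits for modprobe and returns its exit status */
	int err = request_module("mydrv-chip-%04x", chip_id);

	/* fire-and-forget: queues the usermode helper and returns at once;
	 * the caller must tolerate the driver not yet being bound */
	request_module_nowait("mydrv-chip-%04x", chip_id);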
diff --git a/include/linux/libata.h b/include/linux/libata.h index 76262d83656b..b450a2628855 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -379,7 +379,7 @@ enum { | |||
379 | ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ | 379 | ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ |
380 | ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands | 380 | ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands |
381 | not multiple of 16 bytes */ | 381 | not multiple of 16 bytes */ |
382 | ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firwmare update warning */ | 382 | ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */ |
383 | ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ | 383 | ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ |
384 | 384 | ||
385 | /* DMA mask for user DMA control: User visible values; DO NOT | 385 | /* DMA mask for user DMA control: User visible values; DO NOT |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 5a58ea3e91e9..da5a5a1f4cd2 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
@@ -364,6 +364,23 @@ do { \ | |||
364 | 364 | ||
365 | #endif /* CONFIG_LOCK_STAT */ | 365 | #endif /* CONFIG_LOCK_STAT */ |
366 | 366 | ||
367 | #ifdef CONFIG_LOCKDEP | ||
368 | |||
369 | /* | ||
370 | * On lockdep we dont want the hand-coded irq-enable of | ||
371 | * _raw_*_lock_flags() code, because lockdep assumes | ||
372 | * that interrupts are not re-enabled during lock-acquire: | ||
373 | */ | ||
374 | #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \ | ||
375 | LOCK_CONTENDED((_lock), (try), (lock)) | ||
376 | |||
377 | #else /* CONFIG_LOCKDEP */ | ||
378 | |||
379 | #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \ | ||
380 | lockfl((_lock), (flags)) | ||
381 | |||
382 | #endif /* CONFIG_LOCKDEP */ | ||
383 | |||
367 | #ifdef CONFIG_GENERIC_HARDIRQS | 384 | #ifdef CONFIG_GENERIC_HARDIRQS |
368 | extern void early_init_irq_lock_class(void); | 385 | extern void early_init_irq_lock_class(void); |
369 | #else | 386 | #else |
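LOCK_CONTENDED_FLAGS() exists for the *_lock_irqsave() slow paths, which may briefly re-enable interrupts while spinning via _raw_*_lock_flags(); under lockdep that behaviour is compiled out, as the comment says. A sketch of the expected call site, modeled on kernel/spinlock.c of this era (illustrative, not verbatim):

    unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
    {
            unsigned long flags;

            local_irq_save(flags);
            preempt_disable();
            spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
            /* Under CONFIG_LOCKDEP this is plain LOCK_CONTENDED(), i.e.
             * _raw_spin_lock(); otherwise the contended case calls
             * _raw_spin_lock_flags(lock, &flags), which may re-enable
             * IRQs while it waits. */
            LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
                                 _raw_spin_lock_flags, &flags);
            return flags;
    }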
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 326f45c86530..18146c980b68 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -88,9 +88,6 @@ extern void mem_cgroup_end_migration(struct mem_cgroup *mem, | |||
88 | /* | 88 | /* |
89 | * For memory reclaim. | 89 | * For memory reclaim. |
90 | */ | 90 | */ |
91 | extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem); | ||
92 | extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem); | ||
93 | |||
94 | extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem); | 91 | extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem); |
95 | extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, | 92 | extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, |
96 | int priority); | 93 | int priority); |
@@ -104,6 +101,8 @@ struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, | |||
104 | struct zone *zone); | 101 | struct zone *zone); |
105 | struct zone_reclaim_stat* | 102 | struct zone_reclaim_stat* |
106 | mem_cgroup_get_reclaim_stat_from_page(struct page *page); | 103 | mem_cgroup_get_reclaim_stat_from_page(struct page *page); |
104 | extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, | ||
105 | struct task_struct *p); | ||
107 | 106 | ||
108 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 107 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
109 | extern int do_swap_account; | 108 | extern int do_swap_account; |
@@ -209,16 +208,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *mem, | |||
209 | { | 208 | { |
210 | } | 209 | } |
211 | 210 | ||
212 | static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem) | ||
213 | { | ||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | static inline int mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem) | ||
218 | { | ||
219 | return 0; | ||
220 | } | ||
221 | |||
222 | static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem) | 211 | static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem) |
223 | { | 212 | { |
224 | return 0; | 213 | return 0; |
@@ -270,6 +259,11 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page) | |||
270 | return NULL; | 259 | return NULL; |
271 | } | 260 | } |
272 | 261 | ||
262 | static inline void | ||
263 | mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) | ||
264 | { | ||
265 | } | ||
266 | |||
273 | #endif /* CONFIG_CGROUP_MEM_CONT */ | 267 | #endif /* CONFIG_CGROUP_MEM_CONT */ |
274 | 268 | ||
275 | #endif /* _LINUX_MEMCONTROL_H */ | 269 | #endif /* _LINUX_MEMCONTROL_H */ |
diff --git a/include/linux/memory.h b/include/linux/memory.h index 86a6c0f0518d..37fa19b34ef5 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h | |||
@@ -100,6 +100,17 @@ enum mem_add_context { BOOT, HOTPLUG }; | |||
100 | #endif | 100 | #endif |
101 | 101 | ||
102 | /* | 102 | /* |
103 | * 'struct memory_accessor' is a generic interface to provide | ||
104 | * in-kernel access to persistent memory such as i2c or SPI EEPROMs | ||
105 | */ | ||
106 | struct memory_accessor { | ||
107 | ssize_t (*read)(struct memory_accessor *, char *buf, off_t offset, | ||
108 | size_t count); | ||
109 | ssize_t (*write)(struct memory_accessor *, const char *buf, | ||
110 | off_t offset, size_t count); | ||
111 | }; | ||
112 | |||
113 | /* | ||
103 | * Kernel text modification mutex, used for code patching. Users of this lock | 114 | * Kernel text modification mutex, used for code patching. Users of this lock |
104 | * can sleep. | 115 | * can sleep. |
105 | */ | 116 | */ |
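struct memory_accessor gives board code a bus-agnostic handle onto an EEPROM; the I2C/SPI EEPROM drivers are expected to hand one to a platform setup() callback. A hedged consumer sketch (the hook name and MAC layout are assumptions of this example):

    #include <linux/memory.h>
    #include <linux/kernel.h>

    /* Called by the EEPROM driver once the device is ready; read()
     * returns the number of bytes transferred or a negative errno. */
    static void board_eeprom_setup(struct memory_accessor *macc, void *context)
    {
            char mac[6];

            if (macc->read(macc, mac, 0, sizeof(mac)) == sizeof(mac))
                    pr_info("board: EEPROM MAC %pM\n", mac);
    }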
diff --git a/include/linux/mm.h b/include/linux/mm.h index aeabe953ba4f..bff1f0d475c7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1079,7 +1079,7 @@ static inline void setup_per_cpu_pageset(void) {} | |||
1079 | #endif | 1079 | #endif |
1080 | 1080 | ||
1081 | /* nommu.c */ | 1081 | /* nommu.c */ |
1082 | extern atomic_t mmap_pages_allocated; | 1082 | extern atomic_long_t mmap_pages_allocated; |
1083 | 1083 | ||
1084 | /* prio_tree.c */ | 1084 | /* prio_tree.c */ |
1085 | void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); | 1085 | void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index ddadb4defe00..0e80e26ecf21 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -95,6 +95,9 @@ struct page { | |||
95 | void *virtual; /* Kernel virtual address (NULL if | 95 | void *virtual; /* Kernel virtual address (NULL if |
96 | not kmapped, ie. highmem) */ | 96 | not kmapped, ie. highmem) */ |
97 | #endif /* WANT_PAGE_VIRTUAL */ | 97 | #endif /* WANT_PAGE_VIRTUAL */ |
98 | #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS | ||
99 | unsigned long debug_flags; /* Use atomic bitops on this */ | ||
100 | #endif | ||
98 | }; | 101 | }; |
99 | 102 | ||
100 | /* | 103 | /* |
@@ -175,9 +178,6 @@ struct vm_area_struct { | |||
175 | #ifdef CONFIG_NUMA | 178 | #ifdef CONFIG_NUMA |
176 | struct mempolicy *vm_policy; /* NUMA policy for the VMA */ | 179 | struct mempolicy *vm_policy; /* NUMA policy for the VMA */ |
177 | #endif | 180 | #endif |
178 | #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS | ||
179 | unsigned long debug_flags; /* Use atomic bitops on this */ | ||
180 | #endif | ||
181 | }; | 181 | }; |
182 | 182 | ||
183 | struct core_thread { | 183 | struct core_thread { |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 4e457256bd33..3e7615e9087e 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -192,5 +192,10 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host) | |||
192 | wake_up_process(host->sdio_irq_thread); | 192 | wake_up_process(host->sdio_irq_thread); |
193 | } | 193 | } |
194 | 194 | ||
195 | struct regulator; | ||
196 | |||
197 | int mmc_regulator_get_ocrmask(struct regulator *supply); | ||
198 | int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit); | ||
199 | |||
195 | #endif | 200 | #endif |
196 | 201 | ||
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 26ef24076b76..186ec6ab334d 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -764,12 +764,6 @@ extern int numa_zonelist_order_handler(struct ctl_table *, int, | |||
764 | extern char numa_zonelist_order[]; | 764 | extern char numa_zonelist_order[]; |
765 | #define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ | 765 | #define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ |
766 | 766 | ||
767 | #include <linux/topology.h> | ||
768 | /* Returns the number of the current Node. */ | ||
769 | #ifndef numa_node_id | ||
770 | #define numa_node_id() (cpu_to_node(raw_smp_processor_id())) | ||
771 | #endif | ||
772 | |||
773 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 767 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
774 | 768 | ||
775 | extern struct pglist_data contig_page_data; | 769 | extern struct pglist_data contig_page_data; |
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h index 830bbcd449d6..3a059298cc19 100644 --- a/include/linux/mnt_namespace.h +++ b/include/linux/mnt_namespace.h | |||
@@ -22,6 +22,8 @@ struct proc_mounts { | |||
22 | int event; | 22 | int event; |
23 | }; | 23 | }; |
24 | 24 | ||
25 | struct fs_struct; | ||
26 | |||
25 | extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, | 27 | extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, |
26 | struct fs_struct *); | 28 | struct fs_struct *); |
27 | extern void __put_mnt_ns(struct mnt_namespace *ns); | 29 | extern void __put_mnt_ns(struct mnt_namespace *ns); |
diff --git a/include/linux/module.h b/include/linux/module.h index 22d9878e868c..627ac082e2a6 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -248,6 +248,10 @@ struct module | |||
248 | const unsigned long *crcs; | 248 | const unsigned long *crcs; |
249 | unsigned int num_syms; | 249 | unsigned int num_syms; |
250 | 250 | ||
251 | /* Kernel parameters. */ | ||
252 | struct kernel_param *kp; | ||
253 | unsigned int num_kp; | ||
254 | |||
251 | /* GPL-only exported symbols. */ | 255 | /* GPL-only exported symbols. */ |
252 | unsigned int num_gpl_syms; | 256 | unsigned int num_gpl_syms; |
253 | const struct kernel_symbol *gpl_syms; | 257 | const struct kernel_symbol *gpl_syms; |
@@ -355,6 +359,8 @@ struct module | |||
355 | #define MODULE_ARCH_INIT {} | 359 | #define MODULE_ARCH_INIT {} |
356 | #endif | 360 | #endif |
357 | 361 | ||
362 | extern struct mutex module_mutex; | ||
363 | |||
358 | /* FIXME: It'd be nice to isolate modules during init, too, so they | 364 | /* FIXME: It'd be nice to isolate modules during init, too, so they |
359 | aren't used before they (may) fail. But presently too much code | 365 | aren't used before they (may) fail. But presently too much code |
360 | (IDE & SCSI) require entry into the module during init.*/ | 366 | (IDE & SCSI) require entry into the module during init.*/ |
@@ -363,10 +369,10 @@ static inline int module_is_live(struct module *mod) | |||
363 | return mod->state != MODULE_STATE_GOING; | 369 | return mod->state != MODULE_STATE_GOING; |
364 | } | 370 | } |
365 | 371 | ||
366 | /* Is this address in a module? (second is with no locks, for oops) */ | ||
367 | struct module *module_text_address(unsigned long addr); | ||
368 | struct module *__module_text_address(unsigned long addr); | 372 | struct module *__module_text_address(unsigned long addr); |
369 | int is_module_address(unsigned long addr); | 373 | struct module *__module_address(unsigned long addr); |
374 | bool is_module_address(unsigned long addr); | ||
375 | bool is_module_text_address(unsigned long addr); | ||
370 | 376 | ||
371 | static inline int within_module_core(unsigned long addr, struct module *mod) | 377 | static inline int within_module_core(unsigned long addr, struct module *mod) |
372 | { | 378 | { |
@@ -380,6 +386,31 @@ static inline int within_module_init(unsigned long addr, struct module *mod) | |||
380 | addr < (unsigned long)mod->module_init + mod->init_size; | 386 | addr < (unsigned long)mod->module_init + mod->init_size; |
381 | } | 387 | } |
382 | 388 | ||
389 | /* Search for module by name: must hold module_mutex. */ | ||
390 | struct module *find_module(const char *name); | ||
391 | |||
392 | struct symsearch { | ||
393 | const struct kernel_symbol *start, *stop; | ||
394 | const unsigned long *crcs; | ||
395 | enum { | ||
396 | NOT_GPL_ONLY, | ||
397 | GPL_ONLY, | ||
398 | WILL_BE_GPL_ONLY, | ||
399 | } licence; | ||
400 | bool unused; | ||
401 | }; | ||
402 | |||
403 | /* Search for an exported symbol by name. */ | ||
404 | const struct kernel_symbol *find_symbol(const char *name, | ||
405 | struct module **owner, | ||
406 | const unsigned long **crc, | ||
407 | bool gplok, | ||
408 | bool warn); | ||
409 | |||
410 | /* Walk the exported symbol table */ | ||
411 | bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, | ||
412 | unsigned int symnum, void *data), void *data); | ||
413 | |||
383 | /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if | 414 | /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if |
384 | symnum out of range. */ | 415 | symnum out of range. */ |
385 | int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, | 416 | int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, |
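each_symbol() walks every export table (vmlinux and all modules, across the licence classes above), calling fn until it returns true. A hedged sketch of a walker; as with find_module(), callers are expected to hold module_mutex:

    #include <linux/module.h>
    #include <linux/string.h>

    struct prefix_count {
            const char *prefix;
            unsigned int hits;
    };

    /* Return true to stop the walk, false to keep going. */
    static bool count_prefixed(const struct symsearch *syms,
                               struct module *owner,
                               unsigned int symnum, void *data)
    {
            struct prefix_count *pc = data;

            if (!strncmp(syms->start[symnum].name, pc->prefix,
                         strlen(pc->prefix)))
                    pc->hits++;
            return false;
    }

    /* usage:  struct prefix_count pc = { "snd_", 0 };
     *         each_symbol(count_prefixed, &pc);        */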
@@ -388,6 +419,10 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, | |||
388 | /* Look for this name: can be of form module:name. */ | 419 | /* Look for this name: can be of form module:name. */ |
389 | unsigned long module_kallsyms_lookup_name(const char *name); | 420 | unsigned long module_kallsyms_lookup_name(const char *name); |
390 | 421 | ||
422 | int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, | ||
423 | struct module *, unsigned long), | ||
424 | void *data); | ||
425 | |||
391 | extern void __module_put_and_exit(struct module *mod, long code) | 426 | extern void __module_put_and_exit(struct module *mod, long code) |
392 | __attribute__((noreturn)); | 427 | __attribute__((noreturn)); |
393 | #define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code); | 428 | #define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code); |
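module_kallsyms_on_each_symbol() visits every kallsyms entry of every loaded module; a non-zero return from the callback stops the walk and is propagated to the caller. A sketch (module and symbol names are placeholders):

    struct sym_query {
            const char *modname, *symname;
            unsigned long addr;
    };

    static int match_sym(void *data, const char *name, struct module *mod,
                         unsigned long addr)
    {
            struct sym_query *q = data;

            if (mod && !strcmp(mod->name, q->modname) &&
                !strcmp(name, q->symname)) {
                    q->addr = addr;
                    return 1;       /* stop: found it */
            }
            return 0;               /* keep iterating */
    }

    /* usage:  struct sym_query q = { "mymod", "my_func", 0 };
     *         module_kallsyms_on_each_symbol(match_sym, &q);   */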
@@ -449,6 +484,7 @@ static inline void __module_get(struct module *module) | |||
449 | #define symbol_put_addr(p) do { } while(0) | 484 | #define symbol_put_addr(p) do { } while(0) |
450 | 485 | ||
451 | #endif /* CONFIG_MODULE_UNLOAD */ | 486 | #endif /* CONFIG_MODULE_UNLOAD */ |
487 | int use_module(struct module *a, struct module *b); | ||
452 | 488 | ||
453 | /* This is a #define so the string doesn't get put in every .o file */ | 489 | /* This is a #define so the string doesn't get put in every .o file */ |
454 | #define module_name(mod) \ | 490 | #define module_name(mod) \ |
@@ -495,21 +531,24 @@ search_module_extables(unsigned long addr) | |||
495 | return NULL; | 531 | return NULL; |
496 | } | 532 | } |
497 | 533 | ||
498 | /* Is this address in a module? */ | 534 | static inline struct module *__module_address(unsigned long addr) |
499 | static inline struct module *module_text_address(unsigned long addr) | ||
500 | { | 535 | { |
501 | return NULL; | 536 | return NULL; |
502 | } | 537 | } |
503 | 538 | ||
504 | /* Is this address in a module? (don't take a lock, we're oopsing) */ | ||
505 | static inline struct module *__module_text_address(unsigned long addr) | 539 | static inline struct module *__module_text_address(unsigned long addr) |
506 | { | 540 | { |
507 | return NULL; | 541 | return NULL; |
508 | } | 542 | } |
509 | 543 | ||
510 | static inline int is_module_address(unsigned long addr) | 544 | static inline bool is_module_address(unsigned long addr) |
511 | { | 545 | { |
512 | return 0; | 546 | return false; |
547 | } | ||
548 | |||
549 | static inline bool is_module_text_address(unsigned long addr) | ||
550 | { | ||
551 | return false; | ||
513 | } | 552 | } |
514 | 553 | ||
515 | /* Get/put a kernel symbol (calls should be symmetric) */ | 554 | /* Get/put a kernel symbol (calls should be symmetric) */ |
@@ -564,6 +603,14 @@ static inline unsigned long module_kallsyms_lookup_name(const char *name) | |||
564 | return 0; | 603 | return 0; |
565 | } | 604 | } |
566 | 605 | ||
606 | static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, | ||
607 | struct module *, | ||
608 | unsigned long), | ||
609 | void *data) | ||
610 | { | ||
611 | return 0; | ||
612 | } | ||
613 | |||
567 | static inline int register_module_notifier(struct notifier_block * nb) | 614 | static inline int register_module_notifier(struct notifier_block * nb) |
568 | { | 615 | { |
569 | /* no events will happen anyway, so this can always succeed */ | 616 | /* no events will happen anyway, so this can always succeed */ |
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index e4af3399ef48..a4f0b931846c 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h | |||
@@ -138,6 +138,16 @@ extern int parse_args(const char *name, | |||
138 | unsigned num, | 138 | unsigned num, |
139 | int (*unknown)(char *param, char *val)); | 139 | int (*unknown)(char *param, char *val)); |
140 | 140 | ||
141 | /* Called by module remove. */ | ||
142 | #ifdef CONFIG_SYSFS | ||
143 | extern void destroy_params(const struct kernel_param *params, unsigned num); | ||
144 | #else | ||
145 | static inline void destroy_params(const struct kernel_param *params, | ||
146 | unsigned num) | ||
147 | { | ||
148 | } | ||
149 | #endif /* !CONFIG_SYSFS */ | ||
150 | |||
141 | /* All the helper functions */ | 151 | /* All the helper functions */ |
142 | /* The macros to do compile-time type checking stolen from Jakub | 152 | /* The macros to do compile-time type checking stolen from Jakub |
143 | Jelinek, who IIRC came up with this idea for the 2.4 module init code. */ | 153 | Jelinek, who IIRC came up with this idea for the 2.4 module init code. */ |
diff --git a/include/linux/mpage.h b/include/linux/mpage.h index 5c42821da2d1..068a0c9946af 100644 --- a/include/linux/mpage.h +++ b/include/linux/mpage.h | |||
@@ -11,21 +11,11 @@ | |||
11 | */ | 11 | */ |
12 | #ifdef CONFIG_BLOCK | 12 | #ifdef CONFIG_BLOCK |
13 | 13 | ||
14 | struct mpage_data { | ||
15 | struct bio *bio; | ||
16 | sector_t last_block_in_bio; | ||
17 | get_block_t *get_block; | ||
18 | unsigned use_writepage; | ||
19 | }; | ||
20 | |||
21 | struct writeback_control; | 14 | struct writeback_control; |
22 | 15 | ||
23 | struct bio *mpage_bio_submit(int rw, struct bio *bio); | ||
24 | int mpage_readpages(struct address_space *mapping, struct list_head *pages, | 16 | int mpage_readpages(struct address_space *mapping, struct list_head *pages, |
25 | unsigned nr_pages, get_block_t get_block); | 17 | unsigned nr_pages, get_block_t get_block); |
26 | int mpage_readpage(struct page *page, get_block_t get_block); | 18 | int mpage_readpage(struct page *page, get_block_t get_block); |
27 | int __mpage_writepage(struct page *page, struct writeback_control *wbc, | ||
28 | void *data); | ||
29 | int mpage_writepages(struct address_space *mapping, | 19 | int mpage_writepages(struct address_space *mapping, |
30 | struct writeback_control *wbc, get_block_t get_block); | 20 | struct writeback_control *wbc, get_block_t get_block); |
31 | int mpage_writepage(struct page *page, get_block_t *get_block, | 21 | int mpage_writepage(struct page *page, get_block_t *get_block, |
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index bde2557c2a9c..fdffb413b192 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
@@ -185,6 +185,9 @@ struct nfs_inode { | |||
185 | fmode_t delegation_state; | 185 | fmode_t delegation_state; |
186 | struct rw_semaphore rwsem; | 186 | struct rw_semaphore rwsem; |
187 | #endif /* CONFIG_NFS_V4*/ | 187 | #endif /* CONFIG_NFS_V4*/ |
188 | #ifdef CONFIG_NFS_FSCACHE | ||
189 | struct fscache_cookie *fscache; | ||
190 | #endif | ||
188 | struct inode vfs_inode; | 191 | struct inode vfs_inode; |
189 | }; | 192 | }; |
190 | 193 | ||
@@ -207,6 +210,8 @@ struct nfs_inode { | |||
207 | #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ | 210 | #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ |
208 | #define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */ | 211 | #define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */ |
209 | #define NFS_INO_FLUSHING (4) /* inode is flushing out data */ | 212 | #define NFS_INO_FLUSHING (4) /* inode is flushing out data */ |
213 | #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ | ||
214 | #define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */ | ||
210 | 215 | ||
211 | static inline struct nfs_inode *NFS_I(const struct inode *inode) | 216 | static inline struct nfs_inode *NFS_I(const struct inode *inode) |
212 | { | 217 | { |
@@ -260,6 +265,11 @@ static inline int NFS_STALE(const struct inode *inode) | |||
260 | return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); | 265 | return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); |
261 | } | 266 | } |
262 | 267 | ||
268 | static inline int NFS_FSCACHE(const struct inode *inode) | ||
269 | { | ||
270 | return test_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags); | ||
271 | } | ||
272 | |||
263 | static inline __u64 NFS_FILEID(const struct inode *inode) | 273 | static inline __u64 NFS_FILEID(const struct inode *inode) |
264 | { | 274 | { |
265 | return NFS_I(inode)->fileid; | 275 | return NFS_I(inode)->fileid; |
@@ -506,6 +516,8 @@ extern int nfs_readpages(struct file *, struct address_space *, | |||
506 | struct list_head *, unsigned); | 516 | struct list_head *, unsigned); |
507 | extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); | 517 | extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); |
508 | extern void nfs_readdata_release(void *data); | 518 | extern void nfs_readdata_release(void *data); |
519 | extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, | ||
520 | struct page *); | ||
509 | 521 | ||
510 | /* | 522 | /* |
511 | * Allocate nfs_read_data structures | 523 | * Allocate nfs_read_data structures |
@@ -583,6 +595,7 @@ extern void * nfs_root_data(void); | |||
583 | #define NFSDBG_CALLBACK 0x0100 | 595 | #define NFSDBG_CALLBACK 0x0100 |
584 | #define NFSDBG_CLIENT 0x0200 | 596 | #define NFSDBG_CLIENT 0x0200 |
585 | #define NFSDBG_MOUNT 0x0400 | 597 | #define NFSDBG_MOUNT 0x0400 |
598 | #define NFSDBG_FSCACHE 0x0800 | ||
586 | #define NFSDBG_ALL 0xFFFF | 599 | #define NFSDBG_ALL 0xFFFF |
587 | 600 | ||
588 | #ifdef __KERNEL__ | 601 | #ifdef __KERNEL__ |
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 29b1e40dce99..6ad75948cbf7 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
@@ -64,6 +64,10 @@ struct nfs_client { | |||
64 | char cl_ipaddr[48]; | 64 | char cl_ipaddr[48]; |
65 | unsigned char cl_id_uniquifier; | 65 | unsigned char cl_id_uniquifier; |
66 | #endif | 66 | #endif |
67 | |||
68 | #ifdef CONFIG_NFS_FSCACHE | ||
69 | struct fscache_cookie *fscache; /* client index cache cookie */ | ||
70 | #endif | ||
67 | }; | 71 | }; |
68 | 72 | ||
69 | /* | 73 | /* |
@@ -96,12 +100,19 @@ struct nfs_server { | |||
96 | unsigned int acdirmin; | 100 | unsigned int acdirmin; |
97 | unsigned int acdirmax; | 101 | unsigned int acdirmax; |
98 | unsigned int namelen; | 102 | unsigned int namelen; |
103 | unsigned int options; /* extra options enabled by mount */ | ||
104 | #define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */ | ||
99 | 105 | ||
100 | struct nfs_fsid fsid; | 106 | struct nfs_fsid fsid; |
101 | __u64 maxfilesize; /* maximum file size */ | 107 | __u64 maxfilesize; /* maximum file size */ |
102 | unsigned long mount_time; /* when this fs was mounted */ | 108 | unsigned long mount_time; /* when this fs was mounted */ |
103 | dev_t s_dev; /* superblock dev numbers */ | 109 | dev_t s_dev; /* superblock dev numbers */ |
104 | 110 | ||
111 | #ifdef CONFIG_NFS_FSCACHE | ||
112 | struct nfs_fscache_key *fscache_key; /* unique key for superblock */ | ||
113 | struct fscache_cookie *fscache; /* superblock cookie */ | ||
114 | #endif | ||
115 | |||
105 | #ifdef CONFIG_NFS_V4 | 116 | #ifdef CONFIG_NFS_V4 |
106 | u32 attr_bitmask[2];/* V4 bitmask representing the set | 117 | u32 attr_bitmask[2];/* V4 bitmask representing the set |
107 | of attributes supported on this | 118 | of attributes supported on this |
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h index 1cb9a3fed2b3..68b10f5f8907 100644 --- a/include/linux/nfs_iostat.h +++ b/include/linux/nfs_iostat.h | |||
@@ -116,4 +116,16 @@ enum nfs_stat_eventcounters { | |||
116 | __NFSIOS_COUNTSMAX, | 116 | __NFSIOS_COUNTSMAX, |
117 | }; | 117 | }; |
118 | 118 | ||
119 | /* | ||
120 | * NFS local caching servicing counters | ||
121 | */ | ||
122 | enum nfs_stat_fscachecounters { | ||
123 | NFSIOS_FSCACHE_PAGES_READ_OK, | ||
124 | NFSIOS_FSCACHE_PAGES_READ_FAIL, | ||
125 | NFSIOS_FSCACHE_PAGES_WRITTEN_OK, | ||
126 | NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL, | ||
127 | NFSIOS_FSCACHE_PAGES_UNCACHED, | ||
128 | __NFSIOS_FSCACHEMAX, | ||
129 | }; | ||
130 | |||
119 | #endif /* _LINUX_NFS_IOSTAT */ | 131 | #endif /* _LINUX_NFS_IOSTAT */ |
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index afad7dec1b36..7b370c7cfeff 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h | |||
@@ -8,6 +8,7 @@ struct mnt_namespace; | |||
8 | struct uts_namespace; | 8 | struct uts_namespace; |
9 | struct ipc_namespace; | 9 | struct ipc_namespace; |
10 | struct pid_namespace; | 10 | struct pid_namespace; |
11 | struct fs_struct; | ||
11 | 12 | ||
12 | /* | 13 | /* |
13 | * A structure to contain pointers to all per-process | 14 | * A structure to contain pointers to all per-process |
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 61df1779b2a5..62214c7d2d93 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -82,6 +82,7 @@ enum pageflags { | |||
82 | PG_arch_1, | 82 | PG_arch_1, |
83 | PG_reserved, | 83 | PG_reserved, |
84 | PG_private, /* If pagecache, has fs-private data */ | 84 | PG_private, /* If pagecache, has fs-private data */ |
85 | PG_private_2, /* If pagecache, has fs aux data */ | ||
85 | PG_writeback, /* Page is under writeback */ | 86 | PG_writeback, /* Page is under writeback */ |
86 | #ifdef CONFIG_PAGEFLAGS_EXTENDED | 87 | #ifdef CONFIG_PAGEFLAGS_EXTENDED |
87 | PG_head, /* A head page */ | 88 | PG_head, /* A head page */ |
@@ -108,6 +109,12 @@ enum pageflags { | |||
108 | /* Filesystems */ | 109 | /* Filesystems */ |
109 | PG_checked = PG_owner_priv_1, | 110 | PG_checked = PG_owner_priv_1, |
110 | 111 | ||
112 | /* Two page bits are conscripted by FS-Cache to maintain local caching | ||
113 | * state. These bits are set on pages belonging to the netfs's inodes | ||
114 | * when those inodes are being locally cached. | ||
115 | */ | ||
116 | PG_fscache = PG_private_2, /* page backed by cache */ | ||
117 | |||
111 | /* XEN */ | 118 | /* XEN */ |
112 | PG_pinned = PG_owner_priv_1, | 119 | PG_pinned = PG_owner_priv_1, |
113 | PG_savepinned = PG_dirty, | 120 | PG_savepinned = PG_dirty, |
@@ -182,7 +189,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; } | |||
182 | 189 | ||
183 | struct page; /* forward declaration */ | 190 | struct page; /* forward declaration */ |
184 | 191 | ||
185 | TESTPAGEFLAG(Locked, locked) | 192 | TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked) |
186 | PAGEFLAG(Error, error) | 193 | PAGEFLAG(Error, error) |
187 | PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) | 194 | PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) |
188 | PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) | 195 | PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) |
@@ -194,8 +201,6 @@ PAGEFLAG(Checked, checked) /* Used by some filesystems */ | |||
194 | PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ | 201 | PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ |
195 | PAGEFLAG(SavePinned, savepinned); /* Xen */ | 202 | PAGEFLAG(SavePinned, savepinned); /* Xen */ |
196 | PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) | 203 | PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) |
197 | PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private) | ||
198 | __SETPAGEFLAG(Private, private) | ||
199 | PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) | 204 | PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) |
200 | 205 | ||
201 | __PAGEFLAG(SlobPage, slob_page) | 206 | __PAGEFLAG(SlobPage, slob_page) |
@@ -205,6 +210,16 @@ __PAGEFLAG(SlubFrozen, slub_frozen) | |||
205 | __PAGEFLAG(SlubDebug, slub_debug) | 210 | __PAGEFLAG(SlubDebug, slub_debug) |
206 | 211 | ||
207 | /* | 212 | /* |
213 | * Private page markings that may be used by the filesystem that owns the page | ||
214 | * for its own purposes. | ||
215 | * - PG_private and PG_private_2 cause releasepage() and co to be invoked | ||
216 | */ | ||
217 | PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private) | ||
218 | __CLEARPAGEFLAG(Private, private) | ||
219 | PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2) | ||
220 | PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1) | ||
221 | |||
222 | /* | ||
208 | * Only test-and-set exist for PG_writeback. The unconditional operators are | 223 | * Only test-and-set exist for PG_writeback. The unconditional operators are |
209 | * risky: they bypass page accounting. | 224 | * risky: they bypass page accounting. |
210 | */ | 225 | */ |
@@ -384,9 +399,10 @@ static inline void __ClearPageTail(struct page *page) | |||
384 | * these flags set. If they are, there is a problem. | 399 | * these flags set. If they are, there is a problem. |
385 | */ | 400 | */ |
386 | #define PAGE_FLAGS_CHECK_AT_FREE \ | 401 | #define PAGE_FLAGS_CHECK_AT_FREE \ |
387 | (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \ | 402 | (1 << PG_lru | 1 << PG_locked | \ |
388 | 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ | 403 | 1 << PG_private | 1 << PG_private_2 | \ |
389 | 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ | 404 | 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ |
405 | 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ | ||
390 | __PG_UNEVICTABLE | __PG_MLOCKED) | 406 | __PG_UNEVICTABLE | __PG_MLOCKED) |
391 | 407 | ||
392 | /* | 408 | /* |
@@ -397,4 +413,16 @@ static inline void __ClearPageTail(struct page *page) | |||
397 | #define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) | 413 | #define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) |
398 | 414 | ||
399 | #endif /* !__GENERATING_BOUNDS_H */ | 415 | #endif /* !__GENERATING_BOUNDS_H */ |
416 | |||
417 | /** | ||
418 | * page_has_private - Determine if page has private stuff | ||
419 | * @page: The page to be checked | ||
420 | * | ||
421 | * Determine if a page has private stuff, indicating that release routines | ||
422 | * should be invoked upon it. | ||
423 | */ | ||
424 | #define page_has_private(page) \ | ||
425 | ((page)->flags & ((1 << PG_private) | \ | ||
426 | (1 << PG_private_2))) | ||
427 | |||
400 | #endif /* PAGE_FLAGS_H */ | 428 | #endif /* PAGE_FLAGS_H */ |
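page_has_private() is needed because a bare PagePrivate() test no longer covers all fs-private state: either bit obliges the VM to run the release machinery. A sketch of the reclaim-side pattern (the function name is illustrative; the FS-Cache bit is reached through the generated PagePrivate2() helper):

    #include <linux/mm.h>
    #include <linux/page-flags.h>

    static int my_release_if_private(struct page *page, gfp_t gfp)
    {
            if (!page_has_private(page))
                    return 1;               /* nothing for the fs to do */

            if (PagePrivate2(page))         /* aka PG_fscache */
                    return 0;               /* local cache still busy */

            return try_to_release_page(page, gfp);
    }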
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 602cc1fdee90..7339c7bf7331 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h | |||
@@ -91,24 +91,23 @@ static inline void page_cgroup_init(void) | |||
91 | 91 | ||
92 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 92 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
93 | #include <linux/swap.h> | 93 | #include <linux/swap.h> |
94 | extern struct mem_cgroup * | 94 | extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); |
95 | swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem); | 95 | extern unsigned short lookup_swap_cgroup(swp_entry_t ent); |
96 | extern struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent); | ||
97 | extern int swap_cgroup_swapon(int type, unsigned long max_pages); | 96 | extern int swap_cgroup_swapon(int type, unsigned long max_pages); |
98 | extern void swap_cgroup_swapoff(int type); | 97 | extern void swap_cgroup_swapoff(int type); |
99 | #else | 98 | #else |
100 | #include <linux/swap.h> | 99 | #include <linux/swap.h> |
101 | 100 | ||
102 | static inline | 101 | static inline |
103 | struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem) | 102 | unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) |
104 | { | 103 | { |
105 | return NULL; | 104 | return 0; |
106 | } | 105 | } |
107 | 106 | ||
108 | static inline | 107 | static inline |
109 | struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent) | 108 | unsigned short lookup_swap_cgroup(swp_entry_t ent) |
110 | { | 109 | { |
111 | return NULL; | 110 | return 0; |
112 | } | 111 | } |
113 | 112 | ||
114 | static inline int | 113 | static inline int |
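Recording a 16-bit id per swap entry instead of a mem_cgroup pointer shrinks the swap_cgroup array considerably and sidesteps a pointer-lifetime problem. A sketch of the intended round trip; using css_id() on the memcg's css, as memcontrol.c does internally, is an assumption of this example:

    static void charge_swap_entry(struct mem_cgroup *memcg, swp_entry_t ent)
    {
            unsigned short id = css_id(&memcg->css);   /* 0 == "none" */
            unsigned short old = swap_cgroup_record(ent, id);

            /* old is whatever id was previously recorded (0 if the
             * entry was unaccounted); lookup_swap_cgroup(ent) now
             * returns id until someone re-records the entry. */
            (void)old;
    }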
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 01ca0856caff..34da5230faab 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -18,9 +18,14 @@ | |||
18 | * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page | 18 | * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page |
19 | * allocation mode flags. | 19 | * allocation mode flags. |
20 | */ | 20 | */ |
21 | #define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ | 21 | enum mapping_flags { |
22 | #define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ | 22 | AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */ |
23 | #define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */ | 23 | AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ |
24 | AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ | ||
25 | #ifdef CONFIG_UNEVICTABLE_LRU | ||
26 | AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ | ||
27 | #endif | ||
28 | }; | ||
24 | 29 | ||
25 | static inline void mapping_set_error(struct address_space *mapping, int error) | 30 | static inline void mapping_set_error(struct address_space *mapping, int error) |
26 | { | 31 | { |
@@ -33,7 +38,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error) | |||
33 | } | 38 | } |
34 | 39 | ||
35 | #ifdef CONFIG_UNEVICTABLE_LRU | 40 | #ifdef CONFIG_UNEVICTABLE_LRU |
36 | #define AS_UNEVICTABLE (__GFP_BITS_SHIFT + 2) /* e.g., ramdisk, SHM_LOCK */ | ||
37 | 41 | ||
38 | static inline void mapping_set_unevictable(struct address_space *mapping) | 42 | static inline void mapping_set_unevictable(struct address_space *mapping) |
39 | { | 43 | { |
@@ -380,6 +384,11 @@ static inline void wait_on_page_writeback(struct page *page) | |||
380 | extern void end_page_writeback(struct page *page); | 384 | extern void end_page_writeback(struct page *page); |
381 | 385 | ||
382 | /* | 386 | /* |
387 | * Add an arbitrary waiter to a page's wait queue | ||
388 | */ | ||
389 | extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter); | ||
390 | |||
391 | /* | ||
383 | * Fault a userspace page into pagetables. Return non-zero on a fault. | 392 | * Fault a userspace page into pagetables. Return non-zero on a fault. |
384 | * | 393 | * |
385 | * This assumes that two userspace pages are always sufficient. That's | 394 | * This assumes that two userspace pages are always sufficient. That's |
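add_page_wait_queue() lets a caller park a custom waiter on a page's shared wait queue instead of sleeping in wait_on_page_locked(); it was exported for FS-Cache's deferred page monitoring. A hedged sketch of the shape:

    #include <linux/pagemap.h>
    #include <linux/wait.h>

    struct page_monitor {
            wait_queue_t wait;
            struct page *page;
    };

    /* Wake function: runs when the page's bit-waiters are woken, e.g.
     * by unlock_page(); defer real work, this is wakeup context. */
    static int monitor_func(wait_queue_t *wait, unsigned mode, int sync,
                            void *key)
    {
            struct page_monitor *m =
                    container_of(wait, struct page_monitor, wait);

            list_del_init(&wait->task_list);        /* one-shot */
            /* queue_work() against m->page would go here */
            return 0;
    }

    static void monitor_page(struct page_monitor *m, struct page *page)
    {
            m->page = page;
            init_waitqueue_func_entry(&m->wait, monitor_func);
            add_page_wait_queue(page, &m->wait);
    }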
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index cb14fd260837..170f8b1f22db 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -526,6 +526,7 @@ | |||
526 | #define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443 | 526 | #define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443 |
527 | #define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443 | 527 | #define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443 |
528 | #define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445 | 528 | #define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445 |
529 | #define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 | ||
529 | #define PCI_DEVICE_ID_AMD_8111_LPC 0x7468 | 530 | #define PCI_DEVICE_ID_AMD_8111_LPC 0x7468 |
530 | #define PCI_DEVICE_ID_AMD_8111_IDE 0x7469 | 531 | #define PCI_DEVICE_ID_AMD_8111_IDE 0x7469 |
531 | #define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a | 532 | #define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 98b93ca4db06..67c15653fc23 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -94,6 +94,7 @@ extern void ptrace_notify(int exit_code); | |||
94 | extern void __ptrace_link(struct task_struct *child, | 94 | extern void __ptrace_link(struct task_struct *child, |
95 | struct task_struct *new_parent); | 95 | struct task_struct *new_parent); |
96 | extern void __ptrace_unlink(struct task_struct *child); | 96 | extern void __ptrace_unlink(struct task_struct *child); |
97 | extern void exit_ptrace(struct task_struct *tracer); | ||
97 | extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags); | 98 | extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags); |
98 | #define PTRACE_MODE_READ 1 | 99 | #define PTRACE_MODE_READ 1 |
99 | #define PTRACE_MODE_ATTACH 2 | 100 | #define PTRACE_MODE_ATTACH 2 |
diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 3945f803d514..7c775751392c 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h | |||
@@ -28,4 +28,4 @@ int pwm_enable(struct pwm_device *pwm); | |||
28 | */ | 28 | */ |
29 | void pwm_disable(struct pwm_device *pwm); | 29 | void pwm_disable(struct pwm_device *pwm); |
30 | 30 | ||
31 | #endif /* __ASM_ARCH_PWM_H */ | 31 | #endif /* __LINUX_PWM_H */ |
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h deleted file mode 100644 index e98900671ca9..000000000000 --- a/include/linux/raid/bitmap.h +++ /dev/null | |||
@@ -1,288 +0,0 @@ | |||
1 | /* | ||
2 | * bitmap.h: Copyright (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003 | ||
3 | * | ||
4 | * additions: Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc. | ||
5 | */ | ||
6 | #ifndef BITMAP_H | ||
7 | #define BITMAP_H 1 | ||
8 | |||
9 | #define BITMAP_MAJOR_LO 3 | ||
10 | /* version 4 insists the bitmap is in little-endian order | ||
11 | * with version 3, it is host-endian which is non-portable | ||
12 | */ | ||
13 | #define BITMAP_MAJOR_HI 4 | ||
14 | #define BITMAP_MAJOR_HOSTENDIAN 3 | ||
15 | |||
16 | #define BITMAP_MINOR 39 | ||
17 | |||
18 | /* | ||
19 | * in-memory bitmap: | ||
20 | * | ||
21 | * Use 16 bit block counters to track pending writes to each "chunk". | ||
22 | * The 2 high order bits are special-purpose, the first is a flag indicating | ||
23 | * whether a resync is needed. The second is a flag indicating whether a | ||
24 | * resync is active. | ||
25 | * This means that the counter is actually 14 bits: | ||
26 | * | ||
27 | * +--------+--------+------------------------------------------------+ | ||
28 | * | resync | resync | counter | | ||
29 | * | needed | active | | | ||
30 | * | (0-1) | (0-1) | (0-16383) | | ||
31 | * +--------+--------+------------------------------------------------+ | ||
32 | * | ||
33 | * The "resync needed" bit is set when: | ||
34 | * a '1' bit is read from storage at startup. | ||
35 | * a write request fails on some drives | ||
36 | * a resync is aborted on a chunk with 'resync active' set | ||
37 | * It is cleared (and resync-active set) when a resync starts across all drives | ||
38 | * of the chunk. | ||
39 | * | ||
40 | * | ||
41 | * The "resync active" bit is set when: | ||
42 | * a resync is started on all drives, and resync_needed is set. | ||
43 | * resync_needed will be cleared (as long as resync_active wasn't already set). | ||
44 | * It is cleared when a resync completes. | ||
45 | * | ||
46 | * The counter counts pending write requests, plus the on-disk bit. | ||
47 | * When the counter is '1' and the resync bits are clear, the on-disk | ||
48 | * bit can be cleared as well, thus setting the counter to 0. | ||
49 | * When we set a bit, or in the counter (to start a write), if the field is | ||
50 | * 0, we first set the disk bit and set the counter to 1. | ||
51 | * | ||
52 | * If the counter is 0, the on-disk bit is clear and the stripe is clean | ||
53 | * Anything that dirties the stripe pushes the counter to 2 (at least) | ||
54 | * and sets the on-disk bit (lazily). | ||
55 | * If a periodic sweep finds the counter at 2, it is decremented to 1. | ||
56 | * If the sweep finds the counter at 1, the on-disk bit is cleared and the | ||
57 | * counter goes to zero. | ||
58 | * | ||
59 | * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block | ||
60 | * counters as a fallback when "page" memory cannot be allocated: | ||
61 | * | ||
62 | * Normal case (page memory allocated): | ||
63 | * | ||
64 | * page pointer (32-bit) | ||
65 | * | ||
66 | * [ ] ------+ | ||
67 | * | | ||
68 | * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters) | ||
69 | * c1 c2 c2048 | ||
70 | * | ||
71 | * Hijacked case (page memory allocation failed): | ||
72 | * | ||
73 | * hijacked page pointer (32-bit) | ||
74 | * | ||
75 | * [ ][ ] (no page memory allocated) | ||
76 | * counter #1 (16-bit) counter #2 (16-bit) | ||
77 | * | ||
78 | */ | ||
79 | |||
80 | #ifdef __KERNEL__ | ||
81 | |||
82 | #define PAGE_BITS (PAGE_SIZE << 3) | ||
83 | #define PAGE_BIT_SHIFT (PAGE_SHIFT + 3) | ||
84 | |||
85 | typedef __u16 bitmap_counter_t; | ||
86 | #define COUNTER_BITS 16 | ||
87 | #define COUNTER_BIT_SHIFT 4 | ||
88 | #define COUNTER_BYTE_RATIO (COUNTER_BITS / 8) | ||
89 | #define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3) | ||
90 | |||
91 | #define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1))) | ||
92 | #define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2))) | ||
93 | #define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1) | ||
94 | #define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK) | ||
95 | #define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK) | ||
96 | #define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX) | ||
97 | |||
98 | /* how many counters per page? */ | ||
99 | #define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS) | ||
100 | /* same, except a shift value for more efficient bitops */ | ||
101 | #define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT) | ||
102 | /* same, except a mask value for more efficient bitops */ | ||
103 | #define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1) | ||
104 | |||
105 | #define BITMAP_BLOCK_SIZE 512 | ||
106 | #define BITMAP_BLOCK_SHIFT 9 | ||
107 | |||
108 | /* how many blocks per chunk? (this is variable) */ | ||
109 | #define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->chunksize >> BITMAP_BLOCK_SHIFT) | ||
110 | #define CHUNK_BLOCK_SHIFT(bitmap) ((bitmap)->chunkshift - BITMAP_BLOCK_SHIFT) | ||
111 | #define CHUNK_BLOCK_MASK(bitmap) (CHUNK_BLOCK_RATIO(bitmap) - 1) | ||
112 | |||
113 | /* when hijacked, the counters and bits represent even larger "chunks" */ | ||
114 | /* there will be 1024 chunks represented by each counter in the page pointers */ | ||
115 | #define PAGEPTR_BLOCK_RATIO(bitmap) \ | ||
116 | (CHUNK_BLOCK_RATIO(bitmap) << PAGE_COUNTER_SHIFT >> 1) | ||
117 | #define PAGEPTR_BLOCK_SHIFT(bitmap) \ | ||
118 | (CHUNK_BLOCK_SHIFT(bitmap) + PAGE_COUNTER_SHIFT - 1) | ||
119 | #define PAGEPTR_BLOCK_MASK(bitmap) (PAGEPTR_BLOCK_RATIO(bitmap) - 1) | ||
120 | |||
121 | /* | ||
122 | * on-disk bitmap: | ||
123 | * | ||
124 | * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap | ||
125 | * file a page at a time. There's a superblock at the start of the file. | ||
126 | */ | ||
127 | |||
128 | /* map chunks (bits) to file pages - offset by the size of the superblock */ | ||
129 | #define CHUNK_BIT_OFFSET(chunk) ((chunk) + (sizeof(bitmap_super_t) << 3)) | ||
130 | |||
131 | #endif | ||
132 | |||
133 | /* | ||
134 | * bitmap structures: | ||
135 | */ | ||
136 | |||
137 | #define BITMAP_MAGIC 0x6d746962 | ||
138 | |||
139 | /* use these for bitmap->flags and bitmap->sb->state bit-fields */ | ||
140 | enum bitmap_state { | ||
141 | BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */ | ||
142 | BITMAP_WRITE_ERROR = 0x004, /* A write error has occurred */ | ||
143 | BITMAP_HOSTENDIAN = 0x8000, | ||
144 | }; | ||
145 | |||
146 | /* the superblock at the front of the bitmap file -- little endian */ | ||
147 | typedef struct bitmap_super_s { | ||
148 | __le32 magic; /* 0 BITMAP_MAGIC */ | ||
149 | __le32 version; /* 4 the bitmap major for now, could change... */ | ||
150 | __u8 uuid[16]; /* 8 128 bit uuid - must match md device uuid */ | ||
151 | __le64 events; /* 24 event counter for the bitmap (1)*/ | ||
152 | __le64 events_cleared;/*32 event counter when last bit cleared (2) */ | ||
153 | __le64 sync_size; /* 40 the size of the md device's sync range(3) */ | ||
154 | __le32 state; /* 48 bitmap state information */ | ||
155 | __le32 chunksize; /* 52 the bitmap chunk size in bytes */ | ||
156 | __le32 daemon_sleep; /* 56 seconds between disk flushes */ | ||
157 | __le32 write_behind; /* 60 number of outstanding write-behind writes */ | ||
158 | |||
159 | __u8 pad[256 - 64]; /* set to zero */ | ||
160 | } bitmap_super_t; | ||
161 | |||
162 | /* notes: | ||
163 | * (1) This event counter is updated before the event counter in the md superblock | ||
164 | * When a bitmap is loaded, it is only accepted if this event counter is equal | ||
165 | * to, or one greater than, the event counter in the superblock. | ||
166 | * (2) This event counter is updated when the other one is *if*and*only*if* the | ||
167 | * array is not degraded. As bits are not cleared when the array is degraded, | ||
168 | * this represents the last time that any bits were cleared. | ||
169 | * If a device is being added that has an event count with this value or | ||
170 | * higher, it is accepted as conforming to the bitmap. | ||
171 | * (3) This is the number of sectors represented by the bitmap, and is the range that | ||
172 | * resync happens across. For raid1 and raid5/6 it is the size of individual | ||
173 | * devices. For raid10 it is the size of the array. | ||
174 | */ | ||
175 | |||
176 | #ifdef __KERNEL__ | ||
177 | |||
178 | /* the in-memory bitmap is represented by bitmap_pages */ | ||
179 | struct bitmap_page { | ||
180 | /* | ||
181 | * map points to the actual memory page | ||
182 | */ | ||
183 | char *map; | ||
184 | /* | ||
185 | * in emergencies (when map cannot be alloced), hijack the map | ||
186 | * pointer and use it as two counters itself | ||
187 | */ | ||
188 | unsigned int hijacked:1; | ||
189 | /* | ||
190 | * count of dirty bits on the page | ||
191 | */ | ||
192 | unsigned int count:31; | ||
193 | }; | ||
194 | |||
195 | /* keep track of bitmap file pages that have pending writes on them */ | ||
196 | struct page_list { | ||
197 | struct list_head list; | ||
198 | struct page *page; | ||
199 | }; | ||
200 | |||
201 | /* the main bitmap structure - one per mddev */ | ||
202 | struct bitmap { | ||
203 | struct bitmap_page *bp; | ||
204 | unsigned long pages; /* total number of pages in the bitmap */ | ||
205 | unsigned long missing_pages; /* number of pages not yet allocated */ | ||
206 | |||
207 | mddev_t *mddev; /* the md device that the bitmap is for */ | ||
208 | |||
209 | int counter_bits; /* how many bits per block counter */ | ||
210 | |||
211 | /* bitmap chunksize -- how much data does each bit represent? */ | ||
212 | unsigned long chunksize; | ||
213 | unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */ | ||
214 | unsigned long chunks; /* total number of data chunks for the array */ | ||
215 | |||
216 | /* We hold a count on the chunk currently being synced, and drop | ||
217 | * it when the last block is started. If the resync is aborted | ||
218 | * midway, we need to be able to drop that count, so we remember | ||
219 | * the counted chunk.. | ||
220 | */ | ||
221 | unsigned long syncchunk; | ||
222 | |||
223 | __u64 events_cleared; | ||
224 | int need_sync; | ||
225 | |||
226 | /* bitmap spinlock */ | ||
227 | spinlock_t lock; | ||
228 | |||
229 | long offset; /* offset from superblock if file is NULL */ | ||
230 | struct file *file; /* backing disk file */ | ||
231 | struct page *sb_page; /* cached copy of the bitmap file superblock */ | ||
232 | struct page **filemap; /* list of cache pages for the file */ | ||
233 | unsigned long *filemap_attr; /* attributes associated w/ filemap pages */ | ||
234 | unsigned long file_pages; /* number of pages in the file */ | ||
235 | int last_page_size; /* bytes in the last page */ | ||
236 | |||
237 | unsigned long flags; | ||
238 | |||
239 | int allclean; | ||
240 | |||
241 | unsigned long max_write_behind; /* write-behind mode */ | ||
242 | atomic_t behind_writes; | ||
243 | |||
244 | /* | ||
245 | * the bitmap daemon - periodically wakes up and sweeps the bitmap | ||
246 | * file, cleaning up bits and flushing out pages to disk as necessary | ||
247 | */ | ||
248 | unsigned long daemon_lastrun; /* jiffies of last run */ | ||
249 | unsigned long daemon_sleep; /* how many seconds between updates? */ | ||
250 | unsigned long last_end_sync; /* when we lasted called end_sync to | ||
251 | * update bitmap with resync progress */ | ||
252 | |||
253 | atomic_t pending_writes; /* pending writes to the bitmap file */ | ||
254 | wait_queue_head_t write_wait; | ||
255 | wait_queue_head_t overflow_wait; | ||
256 | |||
257 | }; | ||
258 | |||
259 | /* the bitmap API */ | ||
260 | |||
261 | /* these are used only by md/bitmap */ | ||
262 | int bitmap_create(mddev_t *mddev); | ||
263 | void bitmap_flush(mddev_t *mddev); | ||
264 | void bitmap_destroy(mddev_t *mddev); | ||
265 | |||
266 | void bitmap_print_sb(struct bitmap *bitmap); | ||
267 | void bitmap_update_sb(struct bitmap *bitmap); | ||
268 | |||
269 | int bitmap_setallbits(struct bitmap *bitmap); | ||
270 | void bitmap_write_all(struct bitmap *bitmap); | ||
271 | |||
272 | void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e); | ||
273 | |||
274 | /* these are exported */ | ||
275 | int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, | ||
276 | unsigned long sectors, int behind); | ||
277 | void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, | ||
278 | unsigned long sectors, int success, int behind); | ||
279 | int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int degraded); | ||
280 | void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted); | ||
281 | void bitmap_close_sync(struct bitmap *bitmap); | ||
282 | void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector); | ||
283 | |||
284 | void bitmap_unplug(struct bitmap *bitmap); | ||
285 | void bitmap_daemon_work(struct bitmap *bitmap); | ||
286 | #endif | ||
287 | |||
288 | #endif | ||
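For reference, the 14+2 bit counter layout described in the long comment at the top of this deleted header decodes via the NEEDED()/RESYNC()/COUNTER() macros it defined; roughly (the helper names here are hypothetical):

    /* Classify one chunk's in-memory counter. */
    static void classify_chunk(bitmap_counter_t c)
    {
            if (NEEDED(c))                  /* bit 15: resync required */
                    queue_chunk_for_resync();
            else if (RESYNC(c))             /* bit 14: resync in flight */
                    ;                       /* leave it until end_sync */
            else if (COUNTER(c) == 1)       /* no writers, disk bit set */
                    lazily_clear_disk_bit();/* counter -> 0: chunk clean */
    }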
diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h deleted file mode 100644 index f38b9c586afb..000000000000 --- a/include/linux/raid/linear.h +++ /dev/null | |||
@@ -1,31 +0,0 @@ | |||
1 | #ifndef _LINEAR_H | ||
2 | #define _LINEAR_H | ||
3 | |||
4 | #include <linux/raid/md.h> | ||
5 | |||
6 | struct dev_info { | ||
7 | mdk_rdev_t *rdev; | ||
8 | sector_t num_sectors; | ||
9 | sector_t start_sector; | ||
10 | }; | ||
11 | |||
12 | typedef struct dev_info dev_info_t; | ||
13 | |||
14 | struct linear_private_data | ||
15 | { | ||
16 | struct linear_private_data *prev; /* earlier version */ | ||
17 | dev_info_t **hash_table; | ||
18 | sector_t spacing; | ||
19 | sector_t array_sectors; | ||
20 | int sector_shift; /* shift before dividing | ||
21 | * by spacing | ||
22 | */ | ||
23 | dev_info_t disks[0]; | ||
24 | }; | ||
25 | |||
26 | |||
27 | typedef struct linear_private_data linear_conf_t; | ||
28 | |||
29 | #define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private) | ||
30 | |||
31 | #endif | ||
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h deleted file mode 100644 index 82bea14cae1a..000000000000 --- a/include/linux/raid/md.h +++ /dev/null | |||
@@ -1,81 +0,0 @@ | |||
1 | /* | ||
2 | md.h : Multiple Devices driver for Linux | ||
3 | Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman | ||
4 | Copyright (C) 1994-96 Marc ZYNGIER | ||
5 | <zyngier@ufr-info-p7.ibp.fr> or | ||
6 | <maz@gloups.fdn.fr> | ||
7 | |||
8 | This program is free software; you can redistribute it and/or modify | ||
9 | it under the terms of the GNU General Public License as published by | ||
10 | the Free Software Foundation; either version 2, or (at your option) | ||
11 | any later version. | ||
12 | |||
13 | You should have received a copy of the GNU General Public License | ||
14 | (for example /usr/src/linux/COPYING); if not, write to the Free | ||
15 | Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
16 | */ | ||
17 | |||
18 | #ifndef _MD_H | ||
19 | #define _MD_H | ||
20 | |||
21 | #include <linux/blkdev.h> | ||
22 | #include <linux/seq_file.h> | ||
23 | |||
24 | /* | ||
25 | * 'md_p.h' holds the 'physical' layout of RAID devices | ||
26 | * 'md_u.h' holds the user <=> kernel API | ||
27 | * | ||
28 | * 'md_k.h' holds kernel internal definitions | ||
29 | */ | ||
30 | |||
31 | #include <linux/raid/md_p.h> | ||
32 | #include <linux/raid/md_u.h> | ||
33 | #include <linux/raid/md_k.h> | ||
34 | |||
35 | #ifdef CONFIG_MD | ||
36 | |||
37 | /* | ||
38 | * Different major versions are not compatible. | ||
39 | * Different minor versions are only downward compatible. | ||
40 | * Different patchlevel versions are downward and upward compatible. | ||
41 | */ | ||
42 | #define MD_MAJOR_VERSION 0 | ||
43 | #define MD_MINOR_VERSION 90 | ||
44 | /* | ||
45 | * MD_PATCHLEVEL_VERSION indicates kernel functionality. | ||
46 | * >=1 means different superblock formats are selectable using SET_ARRAY_INFO | ||
47 | * and major_version/minor_version accordingly | ||
48 | * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT | ||
49 | * in the super status byte | ||
50 | * >=3 means that bitmap superblock version 4 is supported, which uses | ||
51 | * little-endian representation rather than host-endian | ||
52 | */ | ||
53 | #define MD_PATCHLEVEL_VERSION 3 | ||
54 | |||
55 | extern int mdp_major; | ||
56 | |||
57 | extern int register_md_personality(struct mdk_personality *p); | ||
58 | extern int unregister_md_personality(struct mdk_personality *p); | ||
59 | extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), | ||
60 | mddev_t *mddev, const char *name); | ||
61 | extern void md_unregister_thread(mdk_thread_t *thread); | ||
62 | extern void md_wakeup_thread(mdk_thread_t *thread); | ||
63 | extern void md_check_recovery(mddev_t *mddev); | ||
64 | extern void md_write_start(mddev_t *mddev, struct bio *bi); | ||
65 | extern void md_write_end(mddev_t *mddev); | ||
66 | extern void md_done_sync(mddev_t *mddev, int blocks, int ok); | ||
67 | extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev); | ||
68 | |||
69 | extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, | ||
70 | sector_t sector, int size, struct page *page); | ||
71 | extern void md_super_wait(mddev_t *mddev); | ||
72 | extern int sync_page_io(struct block_device *bdev, sector_t sector, int size, | ||
73 | struct page *page, int rw); | ||
74 | extern void md_do_sync(mddev_t *mddev); | ||
75 | extern void md_new_event(mddev_t *mddev); | ||
76 | extern int md_allow_write(mddev_t *mddev); | ||
77 | extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); | ||
78 | |||
79 | #endif /* CONFIG_MD */ | ||
80 | #endif | ||
81 | |||
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h deleted file mode 100644 index 9743e4dbc918..000000000000 --- a/include/linux/raid/md_k.h +++ /dev/null | |||
@@ -1,402 +0,0 @@ | |||
1 | /* | ||
2 | md_k.h : kernel internal structure of the Linux MD driver | ||
3 | Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman | ||
4 | |||
5 | This program is free software; you can redistribute it and/or modify | ||
6 | it under the terms of the GNU General Public License as published by | ||
7 | the Free Software Foundation; either version 2, or (at your option) | ||
8 | any later version. | ||
9 | |||
10 | You should have received a copy of the GNU General Public License | ||
11 | (for example /usr/src/linux/COPYING); if not, write to the Free | ||
12 | Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
13 | */ | ||
14 | |||
15 | #ifndef _MD_K_H | ||
16 | #define _MD_K_H | ||
17 | |||
18 | /* and dm-bio-list.h is not under include/linux because.... ??? */ | ||
19 | #include "../../../drivers/md/dm-bio-list.h" | ||
20 | |||
21 | #ifdef CONFIG_BLOCK | ||
22 | |||
23 | #define LEVEL_MULTIPATH (-4) | ||
24 | #define LEVEL_LINEAR (-1) | ||
25 | #define LEVEL_FAULTY (-5) | ||
26 | |||
27 | /* we need a value for 'no level specified' and 0 | ||
28 | * means 'raid0', so we need something else. This is | ||
29 | * for internal use only | ||
30 | */ | ||
31 | #define LEVEL_NONE (-1000000) | ||
32 | |||
33 | #define MaxSector (~(sector_t)0) | ||
34 | |||
35 | typedef struct mddev_s mddev_t; | ||
36 | typedef struct mdk_rdev_s mdk_rdev_t; | ||
37 | |||
38 | /* | ||
39 | * options passed in raidrun: | ||
40 | */ | ||
41 | |||
42 | /* Currently this must fit in an 'int' */ | ||
43 | #define MAX_CHUNK_SIZE (1<<30) | ||
44 | |||
45 | /* | ||
46 | * MD's 'extended' device | ||
47 | */ | ||
48 | struct mdk_rdev_s | ||
49 | { | ||
50 | struct list_head same_set; /* RAID devices within the same set */ | ||
51 | |||
52 | sector_t size; /* Device size (in blocks) */ | ||
53 | mddev_t *mddev; /* RAID array if running */ | ||
54 | long last_events; /* IO event timestamp */ | ||
55 | |||
56 | struct block_device *bdev; /* block device handle */ | ||
57 | |||
58 | struct page *sb_page; | ||
59 | int sb_loaded; | ||
60 | __u64 sb_events; | ||
61 | sector_t data_offset; /* start of data in array */ | ||
62 | sector_t sb_start; /* offset of the super block (in 512byte sectors) */ | ||
63 | int sb_size; /* bytes in the superblock */ | ||
64 | int preferred_minor; /* autorun support */ | ||
65 | |||
66 | struct kobject kobj; | ||
67 | |||
68 | /* A device can be in one of three states based on two flags: | ||
69 | * Not working: faulty==1 in_sync==0 | ||
70 | * Fully working: faulty==0 in_sync==1 | ||
71 | * Working, but not | ||
72 | * in sync with array | ||
73 | * faulty==0 in_sync==0 | ||
74 | * | ||
75 | * It can never have faulty==1, in_sync==1 | ||
76 | * This reduces the burden of testing multiple flags in many cases | ||
77 | */ | ||
78 | |||
79 | unsigned long flags; | ||
80 | #define Faulty 1 /* device is known to have a fault */ | ||
81 | #define In_sync 2 /* device is in_sync with rest of array */ | ||
82 | #define WriteMostly 4 /* Avoid reading if at all possible */ | ||
83 | #define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */ | ||
84 | #define AllReserved 6 /* If whole device is reserved for | ||
85 | * one array */ | ||
86 | #define AutoDetected 7 /* added by auto-detect */ | ||
87 | #define Blocked 8 /* An error occurred on an externally | ||
88 | * managed array, don't allow writes | ||
89 | * until it is cleared */ | ||
90 | #define StateChanged 9 /* Faulty or Blocked has changed during | ||
91 | * interrupt, so it needs to be | ||
92 | * notified by the thread */ | ||
93 | wait_queue_head_t blocked_wait; | ||
94 | |||
95 | int desc_nr; /* descriptor index in the superblock */ | ||
96 | int raid_disk; /* role of device in array */ | ||
97 | int saved_raid_disk; /* role that device used to have in the | ||
98 | * array and could again if we did a partial | ||
99 | * resync from the bitmap | ||
100 | */ | ||
101 | sector_t recovery_offset;/* If this device has been partially | ||
102 | * recovered, this is where we were | ||
103 | * up to. | ||
104 | */ | ||
105 | |||
106 | atomic_t nr_pending; /* number of pending requests. | ||
107 | * only maintained for arrays that | ||
108 | * support hot removal | ||
109 | */ | ||
110 | atomic_t read_errors; /* number of consecutive read errors that | ||
111 | * we have tried to ignore. | ||
112 | */ | ||
113 | atomic_t corrected_errors; /* number of corrected read errors, | ||
114 | * for reporting to userspace and storing | ||
115 | * in superblock. | ||
116 | */ | ||
117 | struct work_struct del_work; /* used for delayed sysfs removal */ | ||
118 | |||
119 | struct sysfs_dirent *sysfs_state; /* handle for 'state' | ||
120 | * sysfs entry */ | ||
121 | }; | ||
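
The two-flag encoding described in the comment above yields exactly three legal device states. A minimal sketch of how a caller could decode them, assuming only test_bit() from <linux/bitops.h> and the Faulty/In_sync bit numbers defined inside the struct; the helper name rdev_state_name is hypothetical:

#include <linux/bitops.h>

/* Decode the (Faulty, In_sync) pair into the three states listed in
 * the struct comment; faulty==1 && in_sync==1 cannot occur. */
static inline const char *rdev_state_name(mdk_rdev_t *rdev)
{
	if (test_bit(Faulty, &rdev->flags))
		return "not working";
	return test_bit(In_sync, &rdev->flags) ? "fully working"
					       : "working, not in sync";
}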
122 | |||
123 | struct mddev_s | ||
124 | { | ||
125 | void *private; | ||
126 | struct mdk_personality *pers; | ||
127 | dev_t unit; | ||
128 | int md_minor; | ||
129 | struct list_head disks; | ||
130 | unsigned long flags; | ||
131 | #define MD_CHANGE_DEVS 0 /* Some device status has changed */ | ||
132 | #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ | ||
133 | #define MD_CHANGE_PENDING 2 /* superblock update in progress */ | ||
134 | |||
135 | int ro; | ||
136 | |||
137 | struct gendisk *gendisk; | ||
138 | |||
139 | struct kobject kobj; | ||
140 | int hold_active; | ||
141 | #define UNTIL_IOCTL 1 | ||
142 | #define UNTIL_STOP 2 | ||
143 | |||
144 | /* Superblock information */ | ||
145 | int major_version, | ||
146 | minor_version, | ||
147 | patch_version; | ||
148 | int persistent; | ||
149 | int external; /* metadata is | ||
150 | * managed externally */ | ||
151 | char metadata_type[17]; /* externally set*/ | ||
152 | int chunk_size; | ||
153 | time_t ctime, utime; | ||
154 | int level, layout; | ||
155 | char clevel[16]; | ||
156 | int raid_disks; | ||
157 | int max_disks; | ||
158 | sector_t size; /* used size of component devices */ | ||
159 | sector_t array_sectors; /* exported array size */ | ||
160 | __u64 events; | ||
161 | |||
162 | char uuid[16]; | ||
163 | |||
164 | /* If the array is being reshaped, we need to record the | ||
165 | * new shape and an indication of where we are up to. | ||
166 | * This is written to the superblock. | ||
167 | * If reshape_position is MaxSector, then no reshape is happening (yet). | ||
168 | */ | ||
169 | sector_t reshape_position; | ||
170 | int delta_disks, new_level, new_layout, new_chunk; | ||
171 | |||
172 | struct mdk_thread_s *thread; /* management thread */ | ||
173 | struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ | ||
174 | sector_t curr_resync; /* last block scheduled */ | ||
175 | unsigned long resync_mark; /* a recent timestamp */ | ||
176 | sector_t resync_mark_cnt;/* blocks written at resync_mark */ | ||
177 | sector_t curr_mark_cnt; /* blocks scheduled now */ | ||
178 | |||
179 | sector_t resync_max_sectors; /* may be set by personality */ | ||
180 | |||
181 | sector_t resync_mismatches; /* count of sectors where | ||
182 | * parity/replica mismatch found | ||
183 | */ | ||
184 | |||
185 | /* allow user-space to request suspension of IO to regions of the array */ | ||
186 | sector_t suspend_lo; | ||
187 | sector_t suspend_hi; | ||
188 | /* if zero, use the system-wide default */ | ||
189 | int sync_speed_min; | ||
190 | int sync_speed_max; | ||
191 | |||
192 | /* resync even though the same disks are shared among md-devices */ | ||
193 | int parallel_resync; | ||
194 | |||
195 | int ok_start_degraded; | ||
196 | /* recovery/resync flags | ||
197 | * NEEDED: we might need to start a resync/recover | ||
198 | * RUNNING: a thread is running, or about to be started | ||
199 | * SYNC: actually doing a resync, not a recovery | ||
200 | * RECOVER: doing recovery, or need to try it. | ||
201 | * INTR: resync needs to be aborted for some reason | ||
202 | * DONE: thread is done and is waiting to be reaped | ||
203 | * REQUEST: user-space has requested a sync (used with SYNC) | ||
204 | * CHECK: user-space request for check-only, no repair | ||
205 | * RESHAPE: A reshape is happening | ||
206 | * | ||
207 | * If neither SYNC nor RESHAPE is set, then it is a recovery. | ||
208 | */ | ||
209 | #define MD_RECOVERY_RUNNING 0 | ||
210 | #define MD_RECOVERY_SYNC 1 | ||
211 | #define MD_RECOVERY_RECOVER 2 | ||
212 | #define MD_RECOVERY_INTR 3 | ||
213 | #define MD_RECOVERY_DONE 4 | ||
214 | #define MD_RECOVERY_NEEDED 5 | ||
215 | #define MD_RECOVERY_REQUESTED 6 | ||
216 | #define MD_RECOVERY_CHECK 7 | ||
217 | #define MD_RECOVERY_RESHAPE 8 | ||
218 | #define MD_RECOVERY_FROZEN 9 | ||
219 | |||
220 | unsigned long recovery; | ||
221 | int recovery_disabled; /* if we detect that recovery | ||
222 | * will always fail, set this | ||
223 | * so we don't loop trying */ | ||
224 | |||
225 | int in_sync; /* know to not need resync */ | ||
226 | struct mutex reconfig_mutex; | ||
227 | atomic_t active; /* general refcount */ | ||
228 | atomic_t openers; /* number of active opens */ | ||
229 | |||
230 | int changed; /* true if we might need to reread partition info */ | ||
231 | int degraded; /* whether md should consider | ||
232 | * adding a spare | ||
233 | */ | ||
234 | int barriers_work; /* initialised to true, cleared as soon | ||
235 | * as a barrier request to slave | ||
236 | * fails. Only supported | ||
237 | */ | ||
238 | struct bio *biolist; /* bios that need to be retried | ||
239 | * because BIO_RW_BARRIER is not supported | ||
240 | */ | ||
241 | |||
242 | atomic_t recovery_active; /* blocks scheduled, but not written */ | ||
243 | wait_queue_head_t recovery_wait; | ||
244 | sector_t recovery_cp; | ||
245 | sector_t resync_min; /* user requested sync | ||
246 | * starts here */ | ||
247 | sector_t resync_max; /* resync should pause | ||
248 | * when it gets here */ | ||
249 | |||
250 | struct sysfs_dirent *sysfs_state; /* handle for 'array_state' | ||
251 | * file in sysfs. | ||
252 | */ | ||
253 | struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */ | ||
254 | |||
255 | struct work_struct del_work; /* used for delayed sysfs removal */ | ||
256 | |||
257 | spinlock_t write_lock; | ||
258 | wait_queue_head_t sb_wait; /* for waiting on superblock updates */ | ||
259 | atomic_t pending_writes; /* number of active superblock writes */ | ||
260 | |||
261 | unsigned int safemode; /* if set, update "clean" superblock | ||
262 | * when no writes pending. | ||
263 | */ | ||
264 | unsigned int safemode_delay; | ||
265 | struct timer_list safemode_timer; | ||
266 | atomic_t writes_pending; | ||
267 | struct request_queue *queue; /* for plugging ... */ | ||
268 | |||
269 | atomic_t write_behind; /* outstanding async IO */ | ||
270 | unsigned int max_write_behind; /* 0 = sync */ | ||
271 | |||
272 | struct bitmap *bitmap; /* the bitmap for the device */ | ||
273 | struct file *bitmap_file; /* the bitmap file */ | ||
274 | long bitmap_offset; /* offset from superblock of | ||
275 | * start of bitmap. May be | ||
276 | * negative, but not '0' | ||
277 | */ | ||
278 | long default_bitmap_offset; /* this is the offset to use when | ||
279 | * hot-adding a bitmap. It should | ||
280 | * eventually be settable by sysfs. | ||
281 | */ | ||
282 | |||
283 | struct list_head all_mddevs; | ||
284 | }; | ||
285 | |||
286 | |||
287 | static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev) | ||
288 | { | ||
289 | int faulty = test_bit(Faulty, &rdev->flags); | ||
290 | if (atomic_dec_and_test(&rdev->nr_pending) && faulty) | ||
291 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | ||
292 | } | ||
293 | |||
294 | static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) | ||
295 | { | ||
296 | atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); | ||
297 | } | ||
298 | |||
299 | struct mdk_personality | ||
300 | { | ||
301 | char *name; | ||
302 | int level; | ||
303 | struct list_head list; | ||
304 | struct module *owner; | ||
305 | int (*make_request)(struct request_queue *q, struct bio *bio); | ||
306 | int (*run)(mddev_t *mddev); | ||
307 | int (*stop)(mddev_t *mddev); | ||
308 | void (*status)(struct seq_file *seq, mddev_t *mddev); | ||
309 | /* error_handler must set ->faulty and clear ->in_sync | ||
310 | * if appropriate, and should abort recovery if needed | ||
311 | */ | ||
312 | void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev); | ||
313 | int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev); | ||
314 | int (*hot_remove_disk) (mddev_t *mddev, int number); | ||
315 | int (*spare_active) (mddev_t *mddev); | ||
316 | sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster); | ||
317 | int (*resize) (mddev_t *mddev, sector_t sectors); | ||
318 | int (*check_reshape) (mddev_t *mddev); | ||
319 | int (*start_reshape) (mddev_t *mddev); | ||
320 | int (*reconfig) (mddev_t *mddev, int layout, int chunk_size); | ||
321 | /* quiesce moves between quiescence states | ||
322 | * 0 - fully active | ||
323 | * 1 - no new requests allowed | ||
324 | * others - reserved | ||
325 | */ | ||
326 | void (*quiesce) (mddev_t *mddev, int state); | ||
327 | }; | ||
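
A personality is plugged into md by filling this table and registering it. The skeleton below is a sketch only: the demo_* callbacks are hypothetical stand-ins, and it assumes the register_md_personality() helper that md exports for this purpose.

static int demo_make_request(struct request_queue *q, struct bio *bio)
{
	bio_endio(bio, -EOPNOTSUPP);	/* this sketch refuses all I/O */
	return 0;
}

static int demo_run(mddev_t *mddev)  { return 0; }
static int demo_stop(mddev_t *mddev) { return 0; }

static void demo_status(struct seq_file *seq, mddev_t *mddev)
{
	seq_printf(seq, " demo");
}

static struct mdk_personality demo_personality = {
	.name		= "demo",
	.level		= LEVEL_NONE,	/* not a real RAID level */
	.owner		= THIS_MODULE,
	.make_request	= demo_make_request,
	.run		= demo_run,
	.stop		= demo_stop,
	.status		= demo_status,
};

static int __init demo_init(void)
{
	return register_md_personality(&demo_personality);
}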
328 | |||
329 | |||
330 | struct md_sysfs_entry { | ||
331 | struct attribute attr; | ||
332 | ssize_t (*show)(mddev_t *, char *); | ||
333 | ssize_t (*store)(mddev_t *, const char *, size_t); | ||
334 | }; | ||
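
md builds its sysfs attributes from this struct with the generic __ATTR() initializer, since the field names match. A sketch of a read-only attribute; the demo_level name is hypothetical:

static ssize_t demo_level_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->level);
}

static struct md_sysfs_entry demo_level =
__ATTR(demo_level, S_IRUGO, demo_level_show, NULL);	/* no store: read-only */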
335 | |||
336 | |||
337 | static inline char * mdname (mddev_t * mddev) | ||
338 | { | ||
339 | return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; | ||
340 | } | ||
341 | |||
342 | /* | ||
343 | * iterates through some rdev ringlist. It's safe to remove the | ||
344 | * current 'rdev'. Don't touch 'tmp', though. | ||
345 | */ | ||
346 | #define rdev_for_each_list(rdev, tmp, head) \ | ||
347 | list_for_each_entry_safe(rdev, tmp, head, same_set) | ||
348 | |||
349 | /* | ||
350 | * iterates through the 'same array disks' ringlist | ||
351 | */ | ||
352 | #define rdev_for_each(rdev, tmp, mddev) \ | ||
353 | list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set) | ||
354 | |||
355 | #define rdev_for_each_rcu(rdev, mddev) \ | ||
356 | list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) | ||
357 | |||
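
A usage sketch for the list-safe iterator above; demo_count_working is a hypothetical helper, and the Faulty bit is the one defined alongside the rdev structure. Because rdev_for_each() is built on list_for_each_entry_safe(), the current rdev may even be unlinked inside the loop body:

static int demo_count_working(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *tmp;	/* 'tmp' is the iterator's scratch slot */
	int working = 0;

	rdev_for_each(rdev, tmp, mddev)
		if (!test_bit(Faulty, &rdev->flags))
			working++;
	return working;
}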
358 | typedef struct mdk_thread_s { | ||
359 | void (*run) (mddev_t *mddev); | ||
360 | mddev_t *mddev; | ||
361 | wait_queue_head_t wqueue; | ||
362 | unsigned long flags; | ||
363 | struct task_struct *tsk; | ||
364 | unsigned long timeout; | ||
365 | } mdk_thread_t; | ||
366 | |||
367 | #define THREAD_WAKEUP 0 | ||
368 | |||
369 | #define __wait_event_lock_irq(wq, condition, lock, cmd) \ | ||
370 | do { \ | ||
371 | wait_queue_t __wait; \ | ||
372 | init_waitqueue_entry(&__wait, current); \ | ||
373 | \ | ||
374 | add_wait_queue(&wq, &__wait); \ | ||
375 | for (;;) { \ | ||
376 | set_current_state(TASK_UNINTERRUPTIBLE); \ | ||
377 | if (condition) \ | ||
378 | break; \ | ||
379 | spin_unlock_irq(&lock); \ | ||
380 | cmd; \ | ||
381 | schedule(); \ | ||
382 | spin_lock_irq(&lock); \ | ||
383 | } \ | ||
384 | current->state = TASK_RUNNING; \ | ||
385 | remove_wait_queue(&wq, &__wait); \ | ||
386 | } while (0) | ||
387 | |||
388 | #define wait_event_lock_irq(wq, condition, lock, cmd) \ | ||
389 | do { \ | ||
390 | if (condition) \ | ||
391 | break; \ | ||
392 | __wait_event_lock_irq(wq, condition, lock, cmd); \ | ||
393 | } while (0) | ||
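
The caller must already hold 'lock' with interrupts disabled; the macro drops it around schedule() and re-acquires it, running 'cmd' each time before sleeping. A sketch in the style of raid1's barrier code, where conf, wait_barrier, resync_lock and raid1_unplug() stand in for whatever the personality actually uses:

static void demo_wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->barrier,		/* wake condition */
			    conf->resync_lock,		/* dropped across schedule() */
			    raid1_unplug(conf->mddev->queue)); /* flush pending I/O */
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}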
394 | |||
395 | static inline void safe_put_page(struct page *p) | ||
396 | { | ||
397 | if (p) put_page(p); | ||
398 | } | ||
399 | |||
400 | #endif /* CONFIG_BLOCK */ | ||
401 | #endif | ||
402 | |||
diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h index 7192035fc4b0..fb1abb3367e9 100644 --- a/include/linux/raid/md_u.h +++ b/include/linux/raid/md_u.h | |||
@@ -15,6 +15,24 @@ | |||
15 | #ifndef _MD_U_H | 15 | #ifndef _MD_U_H |
16 | #define _MD_U_H | 16 | #define _MD_U_H |
17 | 17 | ||
18 | /* | ||
19 | * Different major versions are not compatible. | ||
20 | * Different minor versions are only downward compatible. | ||
21 | * Different patchlevel versions are downward and upward compatible. | ||
22 | */ | ||
23 | #define MD_MAJOR_VERSION 0 | ||
24 | #define MD_MINOR_VERSION 90 | ||
25 | /* | ||
26 | * MD_PATCHLEVEL_VERSION indicates kernel functionality. | ||
27 | * >=1 means different superblock formats are selectable using SET_ARRAY_INFO | ||
28 | * and major_version/minor_version accordingly | ||
29 | * >=2 means that internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT | ||
30 | * in the super status byte | ||
31 | * >=3 means that bitmap superblock version 4 is supported, which uses | ||
32 | * little-endian representation rather than host-endian | ||
33 | */ | ||
34 | #define MD_PATCHLEVEL_VERSION 3 | ||
35 | |||
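
In user space these rules are applied against the version the kernel reports. A sketch, assuming the RAID_VERSION ioctl defined in the elided "status" block of this header and ignoring error handling beyond the ioctl itself:

#include <sys/ioctl.h>

static int md_is_compatible(int fd)
{
	mdu_version_t ver;

	if (ioctl(fd, RAID_VERSION, &ver) < 0)
		return 0;
	if (ver.major != MD_MAJOR_VERSION)	/* majors must match exactly */
		return 0;
	/* minors are only downward compatible: the kernel's must not be older */
	return ver.minor >= MD_MINOR_VERSION;	/* patchlevel never matters */
}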
18 | /* ioctls */ | 36 | /* ioctls */ |
19 | 37 | ||
20 | /* status */ | 38 | /* status */ |
@@ -46,6 +64,12 @@ | |||
46 | #define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33) | 64 | #define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33) |
47 | #define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34) | 65 | #define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34) |
48 | 66 | ||
67 | /* 63 partitions with the alternate major number (mdp) */ | ||
68 | #define MdpMinorShift 6 | ||
69 | #ifdef __KERNEL__ | ||
70 | extern int mdp_major; | ||
71 | #endif | ||
72 | |||
49 | typedef struct mdu_version_s { | 73 | typedef struct mdu_version_s { |
50 | int major; | 74 | int major; |
51 | int minor; | 75 | int minor; |
@@ -85,6 +109,17 @@ typedef struct mdu_array_info_s { | |||
85 | 109 | ||
86 | } mdu_array_info_t; | 110 | } mdu_array_info_t; |
87 | 111 | ||
112 | /* non-obvious values for 'level' */ | ||
113 | #define LEVEL_MULTIPATH (-4) | ||
114 | #define LEVEL_LINEAR (-1) | ||
115 | #define LEVEL_FAULTY (-5) | ||
116 | |||
117 | /* we need a value for 'no level specified' and 0 | ||
118 | * means 'raid0', so we need something else. This is | ||
119 | * for internal use only | ||
120 | */ | ||
121 | #define LEVEL_NONE (-1000000) | ||
122 | |||
88 | typedef struct mdu_disk_info_s { | 123 | typedef struct mdu_disk_info_s { |
89 | /* | 124 | /* |
90 | * configuration/status of one particular disk | 125 | * configuration/status of one particular disk |
diff --git a/include/linux/raid/multipath.h b/include/linux/raid/multipath.h deleted file mode 100644 index 6f53fc177a47..000000000000 --- a/include/linux/raid/multipath.h +++ /dev/null | |||
@@ -1,42 +0,0 @@ | |||
1 | #ifndef _MULTIPATH_H | ||
2 | #define _MULTIPATH_H | ||
3 | |||
4 | #include <linux/raid/md.h> | ||
5 | |||
6 | struct multipath_info { | ||
7 | mdk_rdev_t *rdev; | ||
8 | }; | ||
9 | |||
10 | struct multipath_private_data { | ||
11 | mddev_t *mddev; | ||
12 | struct multipath_info *multipaths; | ||
13 | int raid_disks; | ||
14 | int working_disks; | ||
15 | spinlock_t device_lock; | ||
16 | struct list_head retry_list; | ||
17 | |||
18 | mempool_t *pool; | ||
19 | }; | ||
20 | |||
21 | typedef struct multipath_private_data multipath_conf_t; | ||
22 | |||
23 | /* | ||
24 | * this is the only point in the RAID code where we violate | ||
25 | * C type safety. mddev->private is an 'opaque' pointer. | ||
26 | */ | ||
27 | #define mddev_to_conf(mddev) ((multipath_conf_t *) mddev->private) | ||
28 | |||
29 | /* | ||
30 | * this is our 'private' 'collective' MULTIPATH buffer head. | ||
31 | * it contains information about what kind of IO operations were started | ||
32 | * for this MULTIPATH operation, and about their status: | ||
33 | */ | ||
34 | |||
35 | struct multipath_bh { | ||
36 | mddev_t *mddev; | ||
37 | struct bio *master_bio; | ||
38 | struct bio bio; | ||
39 | int path; | ||
40 | struct list_head retry_list; | ||
41 | }; | ||
42 | #endif | ||
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h new file mode 100644 index 000000000000..d92480f8285c --- /dev/null +++ b/include/linux/raid/pq.h | |||
@@ -0,0 +1,132 @@ | |||
1 | /* -*- linux-c -*- ------------------------------------------------------- * | ||
2 | * | ||
3 | * Copyright 2003 H. Peter Anvin - All Rights Reserved | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation, Inc., 53 Temple Place Ste 330, | ||
8 | * Boston MA 02111-1307, USA; either version 2 of the License, or | ||
9 | * (at your option) any later version; incorporated herein by reference. | ||
10 | * | ||
11 | * ----------------------------------------------------------------------- */ | ||
12 | |||
13 | #ifndef LINUX_RAID_RAID6_H | ||
14 | #define LINUX_RAID_RAID6_H | ||
15 | |||
16 | #ifdef __KERNEL__ | ||
17 | |||
18 | /* Set to 1 to use kernel-wide empty_zero_page */ | ||
19 | #define RAID6_USE_EMPTY_ZERO_PAGE 0 | ||
20 | #include <linux/blkdev.h> | ||
21 | |||
22 | /* We need a pre-zeroed page... if we don't want to use the kernel-provided | ||
23 | one, define it here */ | ||
24 | #if RAID6_USE_EMPTY_ZERO_PAGE | ||
25 | # define raid6_empty_zero_page empty_zero_page | ||
26 | #else | ||
27 | extern const char raid6_empty_zero_page[PAGE_SIZE]; | ||
28 | #endif | ||
29 | |||
30 | #else /* ! __KERNEL__ */ | ||
31 | /* Used for testing in user space */ | ||
32 | |||
33 | #include <errno.h> | ||
34 | #include <inttypes.h> | ||
35 | #include <limits.h> | ||
36 | #include <stddef.h> | ||
37 | #include <sys/mman.h> | ||
38 | #include <sys/types.h> | ||
39 | |||
40 | /* Not standard, but glibc defines it */ | ||
41 | #define BITS_PER_LONG __WORDSIZE | ||
42 | |||
43 | typedef uint8_t u8; | ||
44 | typedef uint16_t u16; | ||
45 | typedef uint32_t u32; | ||
46 | typedef uint64_t u64; | ||
47 | |||
48 | #ifndef PAGE_SIZE | ||
49 | # define PAGE_SIZE 4096 | ||
50 | #endif | ||
51 | extern const char raid6_empty_zero_page[PAGE_SIZE]; | ||
52 | |||
53 | #define __init | ||
54 | #define __exit | ||
55 | #define __attribute_const__ __attribute__((const)) | ||
56 | #define noinline __attribute__((noinline)) | ||
57 | |||
58 | #define preempt_enable() | ||
59 | #define preempt_disable() | ||
60 | #define cpu_has_feature(x) 1 | ||
61 | #define enable_kernel_altivec() | ||
62 | #define disable_kernel_altivec() | ||
63 | |||
64 | #define EXPORT_SYMBOL(sym) | ||
65 | #define MODULE_LICENSE(licence) | ||
66 | #define subsys_initcall(x) | ||
67 | #define module_exit(x) | ||
68 | #endif /* __KERNEL__ */ | ||
69 | |||
70 | /* Routine choices */ | ||
71 | struct raid6_calls { | ||
72 | void (*gen_syndrome)(int, size_t, void **); | ||
73 | int (*valid)(void); /* Returns 1 if this routine set is usable */ | ||
74 | const char *name; /* Name of this routine set */ | ||
75 | int prefer; /* Has special performance attribute */ | ||
76 | }; | ||
77 | |||
78 | /* Selected algorithm */ | ||
79 | extern struct raid6_calls raid6_call; | ||
80 | |||
81 | /* Algorithm list */ | ||
82 | extern const struct raid6_calls * const raid6_algos[]; | ||
83 | int raid6_select_algo(void); | ||
84 | |||
85 | /* Return values from chk_syndrome */ | ||
86 | #define RAID6_OK 0 | ||
87 | #define RAID6_P_BAD 1 | ||
88 | #define RAID6_Q_BAD 2 | ||
89 | #define RAID6_PQ_BAD 3 | ||
90 | |||
91 | /* Galois field tables */ | ||
92 | extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256))); | ||
93 | extern const u8 raid6_gfexp[256] __attribute__((aligned(256))); | ||
94 | extern const u8 raid6_gfinv[256] __attribute__((aligned(256))); | ||
95 | extern const u8 raid6_gfexi[256] __attribute__((aligned(256))); | ||
96 | |||
97 | /* Recovery routines */ | ||
98 | void raid6_2data_recov(int disks, size_t bytes, int faila, int failb, | ||
99 | void **ptrs); | ||
100 | void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs); | ||
101 | void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, | ||
102 | void **ptrs); | ||
103 | |||
104 | /* Some definitions to allow code to be compiled for testing in userspace */ | ||
105 | #ifndef __KERNEL__ | ||
106 | |||
107 | # define jiffies raid6_jiffies() | ||
108 | # define printk printf | ||
109 | # define GFP_KERNEL 0 | ||
110 | # define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \ | ||
111 | PROT_READ|PROT_WRITE, \ | ||
112 | MAP_PRIVATE|MAP_ANONYMOUS,\ | ||
113 | 0, 0)) | ||
114 | # define free_pages(x, y) munmap((void *)(x), (y)*PAGE_SIZE) | ||
115 | |||
116 | static inline void cpu_relax(void) | ||
117 | { | ||
118 | /* Nothing */ | ||
119 | } | ||
120 | |||
121 | #undef HZ | ||
122 | #define HZ 1000 | ||
123 | static inline uint32_t raid6_jiffies(void) | ||
124 | { | ||
125 | struct timeval tv; | ||
126 | gettimeofday(&tv, NULL); | ||
127 | return tv.tv_sec*1000 + tv.tv_usec/1000; | ||
128 | } | ||
129 | |||
130 | #endif /* ! __KERNEL__ */ | ||
131 | |||
132 | #endif /* LINUX_RAID_RAID6_H */ | ||
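
Because this header also compiles in the user-space test harness, the GF(2^8) tables can be exercised directly there. A sketch that checks the multiplicative-inverse table against the multiplication table: every non-zero field element times its inverse must equal 1.

#include <assert.h>

static void check_gf_tables(void)
{
	int a;

	for (a = 1; a < 256; a++)	/* 0 has no inverse */
		assert(raid6_gfmul[a][raid6_gfinv[a]] == 1);
}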
diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h deleted file mode 100644 index fd42aa87c391..000000000000 --- a/include/linux/raid/raid0.h +++ /dev/null | |||
@@ -1,30 +0,0 @@ | |||
1 | #ifndef _RAID0_H | ||
2 | #define _RAID0_H | ||
3 | |||
4 | #include <linux/raid/md.h> | ||
5 | |||
6 | struct strip_zone | ||
7 | { | ||
8 | sector_t zone_start; /* Zone offset in md_dev (in sectors) */ | ||
9 | sector_t dev_start; /* Zone offset in real dev (in sectors) */ | ||
10 | sector_t sectors; /* Zone size in sectors */ | ||
11 | int nb_dev; /* # of devices attached to the zone */ | ||
12 | mdk_rdev_t **dev; /* Devices attached to the zone */ | ||
13 | }; | ||
14 | |||
15 | struct raid0_private_data | ||
16 | { | ||
17 | struct strip_zone **hash_table; /* Table of indexes into strip_zone */ | ||
18 | struct strip_zone *strip_zone; | ||
19 | mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ | ||
20 | int nr_strip_zones; | ||
21 | |||
22 | sector_t spacing; | ||
23 | int sector_shift; /* shift this before divide by spacing */ | ||
24 | }; | ||
25 | |||
26 | typedef struct raid0_private_data raid0_conf_t; | ||
27 | |||
28 | #define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private) | ||
29 | |||
30 | #endif | ||
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h deleted file mode 100644 index 0a9ba7c3302e..000000000000 --- a/include/linux/raid/raid1.h +++ /dev/null | |||
@@ -1,134 +0,0 @@ | |||
1 | #ifndef _RAID1_H | ||
2 | #define _RAID1_H | ||
3 | |||
4 | #include <linux/raid/md.h> | ||
5 | |||
6 | typedef struct mirror_info mirror_info_t; | ||
7 | |||
8 | struct mirror_info { | ||
9 | mdk_rdev_t *rdev; | ||
10 | sector_t head_position; | ||
11 | }; | ||
12 | |||
13 | /* | ||
14 | * memory pools need a pointer to the mddev, so they can force an unplug | ||
15 | * when memory is tight, and a count of the number of drives that the | ||
16 | * pool was allocated for, so they know how much to allocate and free. | ||
17 | * mddev->raid_disks cannot be used, as it can change while a pool is active. | ||
18 | * These two datums are stored in a kmalloced struct. | ||
19 | */ | ||
20 | |||
21 | struct pool_info { | ||
22 | mddev_t *mddev; | ||
23 | int raid_disks; | ||
24 | }; | ||
25 | |||
26 | |||
27 | typedef struct r1bio_s r1bio_t; | ||
28 | |||
29 | struct r1_private_data_s { | ||
30 | mddev_t *mddev; | ||
31 | mirror_info_t *mirrors; | ||
32 | int raid_disks; | ||
33 | int last_used; | ||
34 | sector_t next_seq_sect; | ||
35 | spinlock_t device_lock; | ||
36 | |||
37 | struct list_head retry_list; | ||
38 | /* queue pending writes and submit them on unplug */ | ||
39 | struct bio_list pending_bio_list; | ||
40 | /* queue of writes that have been unplugged */ | ||
41 | struct bio_list flushing_bio_list; | ||
42 | |||
43 | /* for use when syncing mirrors: */ | ||
44 | |||
45 | spinlock_t resync_lock; | ||
46 | int nr_pending; | ||
47 | int nr_waiting; | ||
48 | int nr_queued; | ||
49 | int barrier; | ||
50 | sector_t next_resync; | ||
51 | int fullsync; /* set to 1 if a full sync is needed, | ||
52 | * (fresh device added). | ||
53 | * Cleared when a sync completes. | ||
54 | */ | ||
55 | |||
56 | wait_queue_head_t wait_barrier; | ||
57 | |||
58 | struct pool_info *poolinfo; | ||
59 | |||
60 | struct page *tmppage; | ||
61 | |||
62 | mempool_t *r1bio_pool; | ||
63 | mempool_t *r1buf_pool; | ||
64 | }; | ||
65 | |||
66 | typedef struct r1_private_data_s conf_t; | ||
67 | |||
68 | /* | ||
69 | * this is the only point in the RAID code where we violate | ||
70 | * C type safety. mddev->private is an 'opaque' pointer. | ||
71 | */ | ||
72 | #define mddev_to_conf(mddev) ((conf_t *) mddev->private) | ||
73 | |||
74 | /* | ||
75 | * this is our 'private' RAID1 bio. | ||
76 | * | ||
77 | * it contains information about what kind of IO operations were started | ||
78 | * for this RAID1 operation, and about their status: | ||
79 | */ | ||
80 | |||
81 | struct r1bio_s { | ||
82 | atomic_t remaining; /* 'have we finished' count, | ||
83 | * used from IRQ handlers | ||
84 | */ | ||
85 | atomic_t behind_remaining; /* number of write-behind ios remaining | ||
86 | * in this BehindIO request | ||
87 | */ | ||
88 | sector_t sector; | ||
89 | int sectors; | ||
90 | unsigned long state; | ||
91 | mddev_t *mddev; | ||
92 | /* | ||
93 | * original bio going to /dev/mdx | ||
94 | */ | ||
95 | struct bio *master_bio; | ||
96 | /* | ||
97 | * if the IO is in READ direction, then this is where we read | ||
98 | */ | ||
99 | int read_disk; | ||
100 | |||
101 | struct list_head retry_list; | ||
102 | struct bitmap_update *bitmap_update; | ||
103 | /* | ||
104 | * if the IO is in WRITE direction, then multiple bios are used. | ||
105 | * We choose the number when they are allocated. | ||
106 | */ | ||
107 | struct bio *bios[0]; | ||
108 | /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously allocated */ | ||
109 | }; | ||
110 | |||
111 | /* when we get a read error on a read-only array, we redirect to another | ||
112 | * device without failing the first device, or trying to over-write to | ||
113 | * correct the read error. To keep track of bad blocks on a per-bio | ||
114 | * level, we store IO_BLOCKED in the appropriate 'bios' pointer | ||
115 | */ | ||
116 | #define IO_BLOCKED ((struct bio*)1) | ||
117 | |||
118 | /* bits for r1bio.state */ | ||
119 | #define R1BIO_Uptodate 0 | ||
120 | #define R1BIO_IsSync 1 | ||
121 | #define R1BIO_Degraded 2 | ||
122 | #define R1BIO_BehindIO 3 | ||
123 | #define R1BIO_Barrier 4 | ||
124 | #define R1BIO_BarrierRetry 5 | ||
125 | /* For write-behind requests, we call bi_end_io when | ||
126 | * the last non-write-behind device completes, provided | ||
127 | * any write was successful. Otherwise we call when | ||
128 | * any write-behind write succeeds; failing that, we call | ||
129 | * with failure when the last write completes (and all have failed). | ||
130 | * Record that bi_end_io was called with this flag... | ||
131 | */ | ||
132 | #define R1BIO_Returned 6 | ||
133 | |||
134 | #endif | ||
diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h deleted file mode 100644 index e9091cfeb286..000000000000 --- a/include/linux/raid/raid10.h +++ /dev/null | |||
@@ -1,123 +0,0 @@ | |||
1 | #ifndef _RAID10_H | ||
2 | #define _RAID10_H | ||
3 | |||
4 | #include <linux/raid/md.h> | ||
5 | |||
6 | typedef struct mirror_info mirror_info_t; | ||
7 | |||
8 | struct mirror_info { | ||
9 | mdk_rdev_t *rdev; | ||
10 | sector_t head_position; | ||
11 | }; | ||
12 | |||
13 | typedef struct r10bio_s r10bio_t; | ||
14 | |||
15 | struct r10_private_data_s { | ||
16 | mddev_t *mddev; | ||
17 | mirror_info_t *mirrors; | ||
18 | int raid_disks; | ||
19 | spinlock_t device_lock; | ||
20 | |||
21 | /* geometry */ | ||
22 | int near_copies; /* number of copies laid out raid0 style */ | ||
23 | int far_copies; /* number of copies laid out | ||
24 | * at large strides across drives | ||
25 | */ | ||
26 | int far_offset; /* far_copies are offset by 1 stripe | ||
27 | * instead of many | ||
28 | */ | ||
29 | int copies; /* near_copies * far_copies. | ||
30 | * must be <= raid_disks | ||
31 | */ | ||
32 | sector_t stride; /* distance between far copies. | ||
33 | * This is size / far_copies unless | ||
34 | * far_offset, in which case it is | ||
35 | * 1 stripe. | ||
36 | */ | ||
37 | |||
38 | int chunk_shift; /* shift from chunks to sectors */ | ||
39 | sector_t chunk_mask; | ||
40 | |||
41 | struct list_head retry_list; | ||
42 | /* queue pending writes and submit them on unplug */ | ||
43 | struct bio_list pending_bio_list; | ||
44 | |||
45 | |||
46 | spinlock_t resync_lock; | ||
47 | int nr_pending; | ||
48 | int nr_waiting; | ||
49 | int nr_queued; | ||
50 | int barrier; | ||
51 | sector_t next_resync; | ||
52 | int fullsync; /* set to 1 if a full sync is needed, | ||
53 | * (fresh device added). | ||
54 | * Cleared when a sync completes. | ||
55 | */ | ||
56 | |||
57 | wait_queue_head_t wait_barrier; | ||
58 | |||
59 | mempool_t *r10bio_pool; | ||
60 | mempool_t *r10buf_pool; | ||
61 | struct page *tmppage; | ||
62 | }; | ||
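
A worked example of how the geometry fields relate, using illustrative numbers only and not the actual setup code in raid10.c: a 4-drive "far 2" layout has near_copies=1, far_copies=2 and far_offset=0.

static sector_t demo_far2_stride(sector_t dev_sectors, sector_t stripe_sectors)
{
	const int near_copies = 1, far_copies = 2, far_offset = 0;
	const int copies = near_copies * far_copies; /* = 2; must be <= raid_disks */

	(void)copies;
	/* Without far_offset, far copies sit size/far_copies sectors apart
	 * on each drive; with far_offset they would be one stripe apart. */
	return far_offset ? stripe_sectors : dev_sectors / far_copies;
}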
63 | |||
64 | typedef struct r10_private_data_s conf_t; | ||
65 | |||
66 | /* | ||
67 | * this is the only point in the RAID code where we violate | ||
68 | * C type safety. mddev->private is an 'opaque' pointer. | ||
69 | */ | ||
70 | #define mddev_to_conf(mddev) ((conf_t *) mddev->private) | ||
71 | |||
72 | /* | ||
73 | * this is our 'private' RAID10 bio. | ||
74 | * | ||
75 | * it contains information about what kind of IO operations were started | ||
76 | * for this RAID10 operation, and about their status: | ||
77 | */ | ||
78 | |||
79 | struct r10bio_s { | ||
80 | atomic_t remaining; /* 'have we finished' count, | ||
81 | * used from IRQ handlers | ||
82 | */ | ||
83 | sector_t sector; /* virtual sector number */ | ||
84 | int sectors; | ||
85 | unsigned long state; | ||
86 | mddev_t *mddev; | ||
87 | /* | ||
88 | * original bio going to /dev/mdx | ||
89 | */ | ||
90 | struct bio *master_bio; | ||
91 | /* | ||
92 | * if the IO is in READ direction, then this is where we read | ||
93 | */ | ||
94 | int read_slot; | ||
95 | |||
96 | struct list_head retry_list; | ||
97 | /* | ||
98 | * if the IO is in WRITE direction, then multiple bios are used, | ||
99 | * one for each copy. | ||
100 | * When resyncing we also use one for each copy. | ||
101 | * When reconstructing, we use 2 bios, one for read, one for write. | ||
102 | * We choose the number when they are allocated. | ||
103 | */ | ||
104 | struct { | ||
105 | struct bio *bio; | ||
106 | sector_t addr; | ||
107 | int devnum; | ||
108 | } devs[0]; | ||
109 | }; | ||
110 | |||
111 | /* when we get a read error on a read-only array, we redirect to another | ||
112 | * device without failing the first device, or trying to over-write to | ||
113 | * correct the read error. To keep track of bad blocks on a per-bio | ||
114 | * level, we store IO_BLOCKED in the appropriate 'bios' pointer | ||
115 | */ | ||
116 | #define IO_BLOCKED ((struct bio*)1) | ||
117 | |||
118 | /* bits for r10bio.state */ | ||
119 | #define R10BIO_Uptodate 0 | ||
120 | #define R10BIO_IsSync 1 | ||
121 | #define R10BIO_IsRecover 2 | ||
122 | #define R10BIO_Degraded 3 | ||
123 | #endif | ||
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h deleted file mode 100644 index 3b2672792457..000000000000 --- a/include/linux/raid/raid5.h +++ /dev/null | |||
@@ -1,402 +0,0 @@ | |||
1 | #ifndef _RAID5_H | ||
2 | #define _RAID5_H | ||
3 | |||
4 | #include <linux/raid/md.h> | ||
5 | #include <linux/raid/xor.h> | ||
6 | |||
7 | /* | ||
8 | * | ||
9 | * Each stripe contains one buffer per disc. Each buffer can be in | ||
10 | * one of a number of states stored in "flags". Changes between | ||
11 | * these states happen *almost* exclusively under a per-stripe | ||
12 | * spinlock. Some very specific changes can happen in bi_end_io, and | ||
13 | * these are not protected by the spin lock. | ||
14 | * | ||
15 | * The flag bits that are used to represent these states are: | ||
16 | * R5_UPTODATE and R5_LOCKED | ||
17 | * | ||
18 | * State Empty == !UPTODATE, !LOCK | ||
19 | * We have no data, and there is no active request | ||
20 | * State Want == !UPTODATE, LOCK | ||
21 | * A read request is being submitted for this block | ||
22 | * State Dirty == UPTODATE, LOCK | ||
23 | * Some new data is in this buffer, and it is being written out | ||
24 | * State Clean == UPTODATE, !LOCK | ||
25 | * We have valid data which is the same as on disc | ||
26 | * | ||
27 | * The possible state transitions are: | ||
28 | * | ||
29 | * Empty -> Want - on read or write to get old data for parity calc | ||
30 | * Empty -> Dirty - on compute_parity to satisfy write/sync request.(RECONSTRUCT_WRITE) | ||
31 | * Empty -> Clean - on compute_block when computing a block for failed drive | ||
32 | * Want -> Empty - on failed read | ||
33 | * Want -> Clean - on successful completion of read request | ||
34 | * Dirty -> Clean - on successful completion of write request | ||
35 | * Dirty -> Clean - on failed write | ||
36 | * Clean -> Dirty - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW) | ||
37 | * | ||
38 | * The Want->Empty, Want->Clean, Dirty->Clean transitions | ||
39 | * all happen in b_end_io at interrupt time. | ||
40 | * Each sets the Uptodate bit before releasing the Lock bit. | ||
41 | * This leaves one multi-stage transition: | ||
42 | * Want->Dirty->Clean | ||
43 | * This is safe because thinking that a Clean buffer is actually dirty | ||
44 | * will at worst delay some action, and the stripe will be scheduled | ||
45 | * for attention after the transition is complete. | ||
46 | * | ||
47 | * There is one possibility that is not covered by these states. That | ||
48 | * is if one drive has failed and there is a spare being rebuilt. We | ||
49 | * can't distinguish between a clean block that has been generated | ||
50 | * from parity calculations, and a clean block that has been | ||
51 | * successfully written to the spare (or to parity when resyncing). | ||
52 | * To distinguish these states we have a stripe bit STRIPE_INSYNC that | ||
53 | * is set whenever a write is scheduled to the spare, or to the parity | ||
54 | * disc if there is no spare. A sync request clears this bit, and | ||
55 | * when we find it set with no buffers locked, we know the sync is | ||
56 | * complete. | ||
57 | * | ||
58 | * Buffers for the md device that arrive via make_request are attached | ||
59 | * to the appropriate stripe in one of two lists linked on b_reqnext. | ||
60 | * One list (bh_read) for read requests, one (bh_write) for write. | ||
61 | * There should never be more than one buffer on the two lists | ||
62 | * together, but that is not guaranteed, so we allow for more. | ||
63 | * | ||
64 | * If a buffer is on the read list when the associated cache buffer is | ||
65 | * Uptodate, the data is copied into the read buffer and its b_end_io | ||
66 | * routine is called. This may happen in the end_request routine only | ||
67 | * if the buffer has just successfully been read. end_request should | ||
68 | * remove the buffers from the list and then set the Uptodate bit on | ||
69 | * the buffer. Other threads may do this only if they first check | ||
70 | * that the Uptodate bit is set. Once they have checked that, they may | ||
71 | * take buffers off the read queue. | ||
72 | * | ||
73 | * When a buffer on the write list is committed for write it is copied | ||
74 | * into the cache buffer, which is then marked dirty, and moved onto a | ||
75 | * third list, the written list (bh_written). Once both the parity | ||
76 | * block and the cached buffer are successfully written, any buffer on | ||
77 | * a written list can be returned with b_end_io. | ||
78 | * | ||
79 | * The write list and read list both act as fifos. The read list is | ||
80 | * protected by the device_lock. The write and written lists are | ||
81 | * protected by the stripe lock. The device_lock, which can be | ||
83 | * claimed while the stripe lock is held, is only for list | ||
83 | * manipulations and will only be held for a very short time. It can | ||
84 | * be claimed from interrupts. | ||
85 | * | ||
86 | * | ||
87 | * Stripes in the stripe cache can be on one of two lists (or on | ||
88 | * neither). The "inactive_list" contains stripes which are not | ||
89 | * currently being used for any request. They can freely be reused | ||
90 | * for another stripe. The "handle_list" contains stripes that need | ||
91 | * to be handled in some way. Both of these are fifo queues. Each | ||
92 | * stripe is also (potentially) linked to a hash bucket in the hash | ||
93 | * table so that it can be found by sector number. Stripes that are | ||
94 | * not hashed must be on the inactive_list, and will normally be at | ||
95 | * the front. All stripes start life this way. | ||
96 | * | ||
97 | * The inactive_list, handle_list and hash bucket lists are all protected by the | ||
98 | * device_lock. | ||
99 | * - stripes on the inactive_list never have their stripe_lock held. | ||
100 | * - stripes have a reference counter. If count==0, they are on a list. | ||
101 | * - If a stripe might need handling, STRIPE_HANDLE is set. | ||
102 | * - When refcount reaches zero, then if STRIPE_HANDLE it is put on | ||
103 | * handle_list else inactive_list | ||
104 | * | ||
105 | * This, combined with the fact that STRIPE_HANDLE is only ever | ||
106 | * cleared while a stripe has a non-zero count means that if the | ||
107 | * refcount is 0 and STRIPE_HANDLE is set, then it is on the | ||
108 | * handle_list and if recount is 0 and STRIPE_HANDLE is not set, then | ||
109 | * the stripe is on inactive_list. | ||
110 | * | ||
111 | * The possible transitions are: | ||
112 | * activate an unhashed/inactive stripe (get_active_stripe()) | ||
113 | * lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev | ||
114 | * activate a hashed, possibly active stripe (get_active_stripe()) | ||
115 | * lockdev check-hash if(!cnt++)unlink-stripe unlockdev | ||
116 | * attach a request to an active stripe (add_stripe_bh()) | ||
117 | * lockdev attach-buffer unlockdev | ||
118 | * handle a stripe (handle_stripe()) | ||
119 | * lockstripe clrSTRIPE_HANDLE ... | ||
120 | * (lockdev check-buffers unlockdev) .. | ||
121 | * change-state .. | ||
122 | * record io/ops needed unlockstripe schedule io/ops | ||
123 | * release an active stripe (release_stripe()) | ||
124 | * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev | ||
125 | * | ||
126 | * The refcount counts each thread that has activated the stripe, | ||
127 | * plus raid5d if it is handling it, plus one for each active request | ||
128 | * on a cached buffer, and plus one if the stripe is undergoing stripe | ||
129 | * operations. | ||
130 | * | ||
131 | * Stripe operations are performed outside the stripe lock; | ||
132 | * the stripe operations are: | ||
133 | * -copying data between the stripe cache and user application buffers | ||
134 | * -computing blocks to save a disk access, or to recover a missing block | ||
135 | * -updating the parity on a write operation (reconstruct write and | ||
136 | * read-modify-write) | ||
137 | * -checking parity correctness | ||
138 | * -running i/o to disk | ||
139 | * These operations are carried out by raid5_run_ops which uses the async_tx | ||
140 | * api to (optionally) offload operations to dedicated hardware engines. | ||
141 | * When requesting an operation, handle_stripe sets the pending bit for the | ||
142 | * operation and increments the count. raid5_run_ops is then run whenever | ||
143 | * the count is non-zero. | ||
144 | * There are some critical dependencies between the operations that prevent some | ||
145 | * from being requested while another is in flight. | ||
146 | * 1/ Parity check operations destroy the in cache version of the parity block, | ||
147 | * so we prevent parity dependent operations like writes and compute_blocks | ||
148 | * from starting while a check is in progress. Some dma engines can perform | ||
149 | * the check without damaging the parity block, in these cases the parity | ||
150 | * block is re-marked up to date (assuming the check was successful) and is | ||
151 | * not re-read from disk. | ||
152 | * 2/ When a write operation is requested, we immediately lock the affected | ||
153 | * blocks, and mark them as not up to date. This causes new read requests | ||
154 | * to be held off, as well as parity checks and compute block operations. | ||
155 | * 3/ Once a compute block operation has been requested, handle_stripe treats | ||
156 | * that block as if it is up to date. raid5_run_ops guarantees that any | ||
157 | * operation that is dependent on the compute block result is initiated after | ||
158 | * the compute block completes. | ||
159 | */ | ||
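
A sketch that maps the R5_UPTODATE/R5_LOCKED bit pair (the flag bits are defined further down in this header) back to the four named states above; the helper name is hypothetical:

/* Decode a per-device buffer's state from its flags word. */
static inline const char *r5dev_state(struct r5dev *dev)
{
	int uptodate = test_bit(R5_UPTODATE, &dev->flags);
	int locked   = test_bit(R5_LOCKED, &dev->flags);

	if (!uptodate)
		return locked ? "Want" : "Empty";
	return locked ? "Dirty" : "Clean";
}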
160 | |||
161 | /* | ||
162 | * Operations state - intermediate states that are visible outside of sh->lock | ||
163 | * In general _idle indicates nothing is running, _run indicates a data | ||
164 | * processing operation is active, and _result means the data processing result | ||
165 | * is stable and can be acted upon. For simple operations like biofill and | ||
166 | * compute that only have an _idle and _run state they are indicated with | ||
167 | * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN) | ||
168 | */ | ||
169 | /** | ||
170 | * enum check_states - handles syncing / repairing a stripe | ||
171 | * @check_state_idle - check operations are quiesced | ||
172 | * @check_state_run - check operation is running | ||
173 | * @check_state_check_result - set outside lock when check result is valid | ||
174 | * @check_state_compute_run - check failed and we are repairing | ||
175 | * @check_state_compute_result - set outside lock when compute result is valid | ||
176 | */ | ||
177 | enum check_states { | ||
178 | check_state_idle = 0, | ||
179 | check_state_run, /* parity check */ | ||
180 | check_state_check_result, | ||
181 | check_state_compute_run, /* parity repair */ | ||
182 | check_state_compute_result, | ||
183 | }; | ||
184 | |||
185 | /** | ||
186 | * enum reconstruct_states - handles writing or expanding a stripe | ||
187 | */ | ||
188 | enum reconstruct_states { | ||
189 | reconstruct_state_idle = 0, | ||
190 | reconstruct_state_prexor_drain_run, /* prexor-write */ | ||
191 | reconstruct_state_drain_run, /* write */ | ||
192 | reconstruct_state_run, /* expand */ | ||
193 | reconstruct_state_prexor_drain_result, | ||
194 | reconstruct_state_drain_result, | ||
195 | reconstruct_state_result, | ||
196 | }; | ||
197 | |||
198 | struct stripe_head { | ||
199 | struct hlist_node hash; | ||
200 | struct list_head lru; /* inactive_list or handle_list */ | ||
201 | struct raid5_private_data *raid_conf; | ||
202 | sector_t sector; /* sector of this row */ | ||
203 | int pd_idx; /* parity disk index */ | ||
204 | unsigned long state; /* state flags */ | ||
205 | atomic_t count; /* nr of active thread/requests */ | ||
206 | spinlock_t lock; | ||
207 | int bm_seq; /* sequence number for bitmap flushes */ | ||
208 | int disks; /* disks in stripe */ | ||
209 | enum check_states check_state; | ||
210 | enum reconstruct_states reconstruct_state; | ||
211 | /* stripe_operations | ||
212 | * @target - STRIPE_OP_COMPUTE_BLK target | ||
213 | */ | ||
214 | struct stripe_operations { | ||
215 | int target; | ||
216 | u32 zero_sum_result; | ||
217 | } ops; | ||
218 | struct r5dev { | ||
219 | struct bio req; | ||
220 | struct bio_vec vec; | ||
221 | struct page *page; | ||
222 | struct bio *toread, *read, *towrite, *written; | ||
223 | sector_t sector; /* sector of this page */ | ||
224 | unsigned long flags; | ||
225 | } dev[1]; /* allocated with extra space depending on RAID geometry */ | ||
226 | }; | ||
227 | |||
228 | /* stripe_head_state - collects and tracks the dynamic state of a stripe_head | ||
229 | * for handle_stripe. It is only valid under spin_lock(sh->lock); | ||
230 | */ | ||
231 | struct stripe_head_state { | ||
232 | int syncing, expanding, expanded; | ||
233 | int locked, uptodate, to_read, to_write, failed, written; | ||
234 | int to_fill, compute, req_compute, non_overwrite; | ||
235 | int failed_num; | ||
236 | unsigned long ops_request; | ||
237 | }; | ||
238 | |||
239 | /* r6_state - extra state data only relevant to r6 */ | ||
240 | struct r6_state { | ||
241 | int p_failed, q_failed, qd_idx, failed_num[2]; | ||
242 | }; | ||
243 | |||
244 | /* Flags */ | ||
245 | #define R5_UPTODATE 0 /* page contains current data */ | ||
246 | #define R5_LOCKED 1 /* IO has been submitted on "req" */ | ||
247 | #define R5_OVERWRITE 2 /* towrite covers whole page */ | ||
248 | /* and some that are internal to handle_stripe */ | ||
249 | #define R5_Insync 3 /* rdev && rdev->in_sync at start */ | ||
250 | #define R5_Wantread 4 /* want to schedule a read */ | ||
251 | #define R5_Wantwrite 5 | ||
252 | #define R5_Overlap 7 /* There is a pending overlapping request on this block */ | ||
253 | #define R5_ReadError 8 /* seen a read error here recently */ | ||
254 | #define R5_ReWrite 9 /* have tried to over-write the readerror */ | ||
255 | |||
256 | #define R5_Expanded 10 /* This block now has post-expand data */ | ||
257 | #define R5_Wantcompute 11 /* compute_block in progress treat as | ||
258 | * uptodate | ||
259 | */ | ||
260 | #define R5_Wantfill 12 /* dev->toread contains a bio that needs | ||
261 | * filling | ||
262 | */ | ||
263 | #define R5_Wantdrain 13 /* dev->towrite needs to be drained */ | ||
264 | /* | ||
265 | * Write method | ||
266 | */ | ||
267 | #define RECONSTRUCT_WRITE 1 | ||
268 | #define READ_MODIFY_WRITE 2 | ||
269 | /* not a write method, but a compute_parity mode */ | ||
270 | #define CHECK_PARITY 3 | ||
271 | |||
272 | /* | ||
273 | * Stripe state | ||
274 | */ | ||
275 | #define STRIPE_HANDLE 2 | ||
276 | #define STRIPE_SYNCING 3 | ||
277 | #define STRIPE_INSYNC 4 | ||
278 | #define STRIPE_PREREAD_ACTIVE 5 | ||
279 | #define STRIPE_DELAYED 6 | ||
280 | #define STRIPE_DEGRADED 7 | ||
281 | #define STRIPE_BIT_DELAY 8 | ||
282 | #define STRIPE_EXPANDING 9 | ||
283 | #define STRIPE_EXPAND_SOURCE 10 | ||
284 | #define STRIPE_EXPAND_READY 11 | ||
285 | #define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */ | ||
286 | #define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */ | ||
287 | #define STRIPE_BIOFILL_RUN 14 | ||
288 | #define STRIPE_COMPUTE_RUN 15 | ||
289 | /* | ||
290 | * Operation request flags | ||
291 | */ | ||
292 | #define STRIPE_OP_BIOFILL 0 | ||
293 | #define STRIPE_OP_COMPUTE_BLK 1 | ||
294 | #define STRIPE_OP_PREXOR 2 | ||
295 | #define STRIPE_OP_BIODRAIN 3 | ||
296 | #define STRIPE_OP_POSTXOR 4 | ||
297 | #define STRIPE_OP_CHECK 5 | ||
298 | |||
299 | /* | ||
300 | * Plugging: | ||
301 | * | ||
302 | * To improve write throughput, we need to delay the handling of some | ||
303 | * stripes until there has been a chance that several write requests | ||
304 | * for the one stripe have all been collected. | ||
305 | * In particular, any write request that would require pre-reading | ||
306 | * is put on a "delayed" queue until there are no stripes currently | ||
307 | * in a pre-read phase. Further, if the "delayed" queue is empty when | ||
308 | * a stripe is put on it then we "plug" the queue and do not process it | ||
309 | * until an unplug call is made (i.e. the queue's unplug_io_fn() is called). | ||
310 | * | ||
311 | * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add | ||
312 | * it to the count of prereading stripes. | ||
313 | * When write is initiated, or the stripe refcnt == 0 (just in case) we | ||
314 | * clear the PREREAD_ACTIVE flag and decrement the count | ||
315 | * Whenever the 'handle' queue is empty and the device is not plugged, we | ||
316 | * move any stripes from delayed to handle, clear the DELAYED flag and set | ||
317 | * PREREAD_ACTIVE. | ||
318 | * In stripe_handle, if we find pre-reading is necessary, we do it if | ||
319 | * PREREAD_ACTIVE is set, else we set DELAYED, which will send it to the delayed queue. | ||
320 | * HANDLE gets cleared if stripe_handle leaves nothing locked. | ||
321 | */ | ||
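
The per-stripe decision above reduces to a few lines. This is a paraphrase only, not the actual handle_stripe() code; needs_preread() and schedule_preread() are hypothetical stand-ins:

static void demo_preread_policy(struct stripe_head *sh)
{
	if (!needs_preread(sh))
		return;
	if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		schedule_preread(sh);	/* a preread window is already open */
	else
		set_bit(STRIPE_DELAYED, &sh->state); /* park on the delayed queue */
}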
322 | |||
323 | |||
324 | struct disk_info { | ||
325 | mdk_rdev_t *rdev; | ||
326 | }; | ||
327 | |||
328 | struct raid5_private_data { | ||
329 | struct hlist_head *stripe_hashtbl; | ||
330 | mddev_t *mddev; | ||
331 | struct disk_info *spare; | ||
332 | int chunk_size, level, algorithm; | ||
333 | int max_degraded; | ||
334 | int raid_disks; | ||
335 | int max_nr_stripes; | ||
336 | |||
337 | /* used during an expand */ | ||
338 | sector_t expand_progress; /* MaxSector when no expand happening */ | ||
339 | sector_t expand_lo; /* from here up to expand_progress is out-of-bounds | ||
340 | * as we haven't flushed the metadata yet | ||
341 | */ | ||
342 | int previous_raid_disks; | ||
343 | |||
344 | struct list_head handle_list; /* stripes needing handling */ | ||
345 | struct list_head hold_list; /* preread ready stripes */ | ||
346 | struct list_head delayed_list; /* stripes that have plugged requests */ | ||
347 | struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */ | ||
348 | struct bio *retry_read_aligned; /* currently retrying aligned bios */ | ||
349 | struct bio *retry_read_aligned_list; /* aligned bios retry list */ | ||
350 | atomic_t preread_active_stripes; /* stripes with scheduled io */ | ||
351 | atomic_t active_aligned_reads; | ||
352 | atomic_t pending_full_writes; /* full write backlog */ | ||
353 | int bypass_count; /* bypassed prereads */ | ||
354 | int bypass_threshold; /* preread nice */ | ||
355 | struct list_head *last_hold; /* detect hold_list promotions */ | ||
356 | |||
357 | atomic_t reshape_stripes; /* stripes with pending writes for reshape */ | ||
358 | /* unfortunately we need two cache names as we temporarily have | ||
359 | * two caches. | ||
360 | */ | ||
361 | int active_name; | ||
362 | char cache_name[2][20]; | ||
363 | struct kmem_cache *slab_cache; /* for allocating stripes */ | ||
364 | |||
365 | int seq_flush, seq_write; | ||
366 | int quiesce; | ||
367 | |||
368 | int fullsync; /* set to 1 if a full sync is needed, | ||
369 | * (fresh device added). | ||
370 | * Cleared when a sync completes. | ||
371 | */ | ||
372 | |||
373 | struct page *spare_page; /* Used when checking P/Q in raid6 */ | ||
374 | |||
375 | /* | ||
376 | * Free stripes pool | ||
377 | */ | ||
378 | atomic_t active_stripes; | ||
379 | struct list_head inactive_list; | ||
380 | wait_queue_head_t wait_for_stripe; | ||
381 | wait_queue_head_t wait_for_overlap; | ||
382 | int inactive_blocked; /* release of inactive stripes blocked, | ||
383 | * waiting for 25% to be free | ||
384 | */ | ||
385 | int pool_size; /* number of disks in stripeheads in pool */ | ||
386 | spinlock_t device_lock; | ||
387 | struct disk_info *disks; | ||
388 | }; | ||
389 | |||
390 | typedef struct raid5_private_data raid5_conf_t; | ||
391 | |||
392 | #define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private) | ||
393 | |||
394 | /* | ||
395 | * Our supported algorithms | ||
396 | */ | ||
397 | #define ALGORITHM_LEFT_ASYMMETRIC 0 | ||
398 | #define ALGORITHM_RIGHT_ASYMMETRIC 1 | ||
399 | #define ALGORITHM_LEFT_SYMMETRIC 2 | ||
400 | #define ALGORITHM_RIGHT_SYMMETRIC 3 | ||
401 | |||
402 | #endif | ||
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h index 3e120587eada..5a210959e3f8 100644 --- a/include/linux/raid/xor.h +++ b/include/linux/raid/xor.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _XOR_H | 1 | #ifndef _XOR_H |
2 | #define _XOR_H | 2 | #define _XOR_H |
3 | 3 | ||
4 | #include <linux/raid/md.h> | ||
5 | |||
6 | #define MAX_XOR_BLOCKS 4 | 4 | #define MAX_XOR_BLOCKS 4 |
7 | 5 | ||
8 | extern void xor_blocks(unsigned int count, unsigned int bytes, | 6 | extern void xor_blocks(unsigned int count, unsigned int bytes, |
diff --git a/include/linux/regulator/bq24022.h b/include/linux/regulator/bq24022.h index e84b0a9feda5..a6d014005d49 100644 --- a/include/linux/regulator/bq24022.h +++ b/include/linux/regulator/bq24022.h | |||
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | struct regulator_init_data; | ||
14 | |||
13 | /** | 15 | /** |
14 | * bq24022_mach_info - platform data for bq24022 | 16 | * bq24022_mach_info - platform data for bq24022 |
15 | * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging | 17 | * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging |
@@ -18,4 +20,5 @@ | |||
18 | struct bq24022_mach_info { | 20 | struct bq24022_mach_info { |
19 | int gpio_nce; | 21 | int gpio_nce; |
20 | int gpio_iset2; | 22 | int gpio_iset2; |
23 | struct regulator_init_data *init_data; | ||
21 | }; | 24 | }; |
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 801bf77ff4e2..277f4b964df5 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. | 4 | * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. |
5 | * | 5 | * |
6 | * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> | 6 | * Author: Liam Girdwood <lrg@slimlogic.co.uk> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
@@ -88,6 +88,7 @@ | |||
88 | * FAIL Regulator output has failed. | 88 | * FAIL Regulator output has failed. |
89 | * OVER_TEMP Regulator over temp. | 89 | * OVER_TEMP Regulator over temp. |
90 | * FORCE_DISABLE Regulator shut down by software. | 90 | * FORCE_DISABLE Regulator shut down by software. |
91 | * VOLTAGE_CHANGE Regulator voltage changed. | ||
91 | * | 92 | * |
92 | * NOTE: These events can be OR'ed together when passed into handler. | 93 | * NOTE: These events can be OR'ed together when passed into handler. |
93 | */ | 94 | */ |
@@ -98,6 +99,7 @@ | |||
98 | #define REGULATOR_EVENT_FAIL 0x08 | 99 | #define REGULATOR_EVENT_FAIL 0x08 |
99 | #define REGULATOR_EVENT_OVER_TEMP 0x10 | 100 | #define REGULATOR_EVENT_OVER_TEMP 0x10 |
100 | #define REGULATOR_EVENT_FORCE_DISABLE 0x20 | 101 | #define REGULATOR_EVENT_FORCE_DISABLE 0x20 |
102 | #define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40 | ||
101 | 103 | ||
102 | struct regulator; | 104 | struct regulator; |
103 | 105 | ||
@@ -140,6 +142,8 @@ int regulator_bulk_disable(int num_consumers, | |||
140 | void regulator_bulk_free(int num_consumers, | 142 | void regulator_bulk_free(int num_consumers, |
141 | struct regulator_bulk_data *consumers); | 143 | struct regulator_bulk_data *consumers); |
142 | 144 | ||
145 | int regulator_count_voltages(struct regulator *regulator); | ||
146 | int regulator_list_voltage(struct regulator *regulator, unsigned selector); | ||
143 | int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); | 147 | int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); |
144 | int regulator_get_voltage(struct regulator *regulator); | 148 | int regulator_get_voltage(struct regulator *regulator); |
145 | int regulator_set_current_limit(struct regulator *regulator, | 149 | int regulator_set_current_limit(struct regulator *regulator, |
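
A consumer-side sketch of the two new enumeration calls, assuming a regulator handle already obtained with regulator_get(); per the driver.h documentation below, a selector that reads back as zero is unusable on this system:

static void demo_dump_voltages(struct regulator *reg)
{
	int i, n = regulator_count_voltages(reg);

	for (i = 0; i < n; i++) {
		int uV = regulator_list_voltage(reg, i);

		if (uV > 0)
			pr_info("selector %d -> %d uV\n", i, uV);
		/* 0: unusable on this system; negative: error */
	}
}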
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 2dae05705f13..4848d8dacd90 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. | 4 | * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. |
5 | * | 5 | * |
6 | * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> | 6 | * Author: Liam Girdwood <lrg@slimlogic.co.uk> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
@@ -21,25 +21,38 @@ | |||
21 | struct regulator_dev; | 21 | struct regulator_dev; |
22 | struct regulator_init_data; | 22 | struct regulator_init_data; |
23 | 23 | ||
24 | enum regulator_status { | ||
25 | REGULATOR_STATUS_OFF, | ||
26 | REGULATOR_STATUS_ON, | ||
27 | REGULATOR_STATUS_ERROR, | ||
28 | /* fast/normal/idle/standby are flavors of "on" */ | ||
29 | REGULATOR_STATUS_FAST, | ||
30 | REGULATOR_STATUS_NORMAL, | ||
31 | REGULATOR_STATUS_IDLE, | ||
32 | REGULATOR_STATUS_STANDBY, | ||
33 | }; | ||
34 | |||
24 | /** | 35 | /** |
25 | * struct regulator_ops - regulator operations. | 36 | * struct regulator_ops - regulator operations. |
26 | * | 37 | * |
27 | * This struct describes regulator operations which can be implemented by | 38 | * @enable: Configure the regulator as enabled. |
28 | * regulator chip drivers. | 39 | * @disable: Configure the regulator as disabled. |
29 | * | ||
30 | * @enable: Enable the regulator. | ||
31 | * @disable: Disable the regulator. | ||
32 | * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise. | 40 | * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise. |
33 | * | 41 | * |
34 | * @set_voltage: Set the voltage for the regulator within the range specified. | 42 | * @set_voltage: Set the voltage for the regulator within the range specified. |
35 | * The driver should select the voltage closest to min_uV. | 43 | * The driver should select the voltage closest to min_uV. |
36 | * @get_voltage: Return the currently configured voltage for the regulator. | 44 | * @get_voltage: Return the currently configured voltage for the regulator. |
45 | * @list_voltage: Return one of the supported voltages, in microvolts; zero | ||
46 | * if the selector indicates a voltage that is unusable on this system; | ||
47 | * or negative errno. Selectors range from zero to one less than | ||
48 | * regulator_desc.n_voltages. Voltages may be reported in any order. | ||
37 | * | 49 | * |
38 | * @set_current_limit: Configure a limit for a current-limited regulator. | 50 | * @set_current_limit: Configure a limit for a current-limited regulator. |
39 | * @get_current_limit: Get the limit for a current-limited regulator. | 51 | * @get_current_limit: Get the configured limit for a current-limited regulator. |
40 | * | 52 | * |
41 | * @set_mode: Set the operating mode for the regulator. | 53 | * @get_mode: Get the configured operating mode for the regulator. |
42 | * @get_mode: Get the current operating mode for the regulator. | 54 | * @get_status: Return actual (not as-configured) status of regulator, as a |
55 | * REGULATOR_STATUS value (or negative errno) | ||
43 | * @get_optimum_mode: Get the most efficient operating mode for the regulator | 56 | * @get_optimum_mode: Get the most efficient operating mode for the regulator |
44 | * when running with the specified parameters. | 57 | * when running with the specified parameters. |
45 | * | 58 | * |
@@ -51,9 +64,15 @@ struct regulator_init_data; | |||
51 | * suspended. | 64 | * suspended. |
52 | * @set_suspend_mode: Set the operating mode for the regulator when the | 65 | * @set_suspend_mode: Set the operating mode for the regulator when the |
53 | * system is suspended. | 66 | * system is suspended. |
67 | * | ||
68 | * This struct describes regulator operations which can be implemented by | ||
69 | * regulator chip drivers. | ||
54 | */ | 70 | */ |
55 | struct regulator_ops { | 71 | struct regulator_ops { |
56 | 72 | ||
73 | /* enumerate supported voltages */ | ||
74 | int (*list_voltage) (struct regulator_dev *, unsigned selector); | ||
75 | |||
57 | /* get/set regulator voltage */ | 76 | /* get/set regulator voltage */ |
58 | int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV); | 77 | int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV); |
59 | int (*get_voltage) (struct regulator_dev *); | 78 | int (*get_voltage) (struct regulator_dev *); |
@@ -72,6 +91,13 @@ struct regulator_ops { | |||
72 | int (*set_mode) (struct regulator_dev *, unsigned int mode); | 91 | int (*set_mode) (struct regulator_dev *, unsigned int mode); |
73 | unsigned int (*get_mode) (struct regulator_dev *); | 92 | unsigned int (*get_mode) (struct regulator_dev *); |
74 | 93 | ||
94 | /* report regulator status ... most other accessors report | ||
95 | * control inputs, this reports results of combining inputs | ||
96 | * from Linux (and other sources) with the actual load. | ||
97 | * returns REGULATOR_STATUS_* or negative errno. | ||
98 | */ | ||
99 | int (*get_status)(struct regulator_dev *); | ||
100 | |||
75 | /* get most efficient regulator operating mode for load */ | 101 | /* get most efficient regulator operating mode for load */ |
76 | unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, | 102 | unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, |
77 | int output_uV, int load_uA); | 103 | int output_uV, int load_uA); |
@@ -106,6 +132,7 @@ enum regulator_type { | |||
106 | * | 132 | * |
107 | * @name: Identifying name for the regulator. | 133 | * @name: Identifying name for the regulator. |
108 | * @id: Numerical identifier for the regulator. | 134 | * @id: Numerical identifier for the regulator. |
135 | * @n_voltages: Number of selectors available for ops.list_voltage(). | ||
109 | * @ops: Regulator operations table. | 136 | * @ops: Regulator operations table. |
110 | * @irq: Interrupt number for the regulator. | 137 | * @irq: Interrupt number for the regulator. |
111 | * @type: Indicates if the regulator is a voltage or current regulator. | 138 | * @type: Indicates if the regulator is a voltage or current regulator. |
@@ -114,14 +141,48 @@ enum regulator_type { | |||
114 | struct regulator_desc { | 141 | struct regulator_desc { |
115 | const char *name; | 142 | const char *name; |
116 | int id; | 143 | int id; |
144 | unsigned n_voltages; | ||
117 | struct regulator_ops *ops; | 145 | struct regulator_ops *ops; |
118 | int irq; | 146 | int irq; |
119 | enum regulator_type type; | 147 | enum regulator_type type; |
120 | struct module *owner; | 148 | struct module *owner; |
121 | }; | 149 | }; |
122 | 150 | ||
151 | /* | ||
152 | * struct regulator_dev | ||
153 | * | ||
154 | * Voltage / Current regulator class device. One for each | ||
155 | * regulator. | ||
156 | * | ||
157 | * This should *not* be used directly by anything except the regulator | ||
158 | * core and notification injection (which should take the mutex and do | ||
159 | * no other direct access). | ||
160 | */ | ||
161 | struct regulator_dev { | ||
162 | struct regulator_desc *desc; | ||
163 | int use_count; | ||
164 | |||
165 | /* lists we belong to */ | ||
166 | struct list_head list; /* list of all regulators */ | ||
167 | struct list_head slist; /* list of supplied regulators */ | ||
168 | |||
169 | /* lists we own */ | ||
170 | struct list_head consumer_list; /* consumers we supply */ | ||
171 | struct list_head supply_list; /* regulators we supply */ | ||
172 | |||
173 | struct blocking_notifier_head notifier; | ||
174 | struct mutex mutex; /* consumer lock */ | ||
175 | struct module *owner; | ||
176 | struct device dev; | ||
177 | struct regulation_constraints *constraints; | ||
178 | struct regulator_dev *supply; /* for tree */ | ||
179 | |||
180 | void *reg_data; /* regulator_dev data */ | ||
181 | }; | ||
182 | |||
123 | struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, | 183 | struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, |
124 | struct device *dev, void *driver_data); | 184 | struct device *dev, struct regulator_init_data *init_data, |
185 | void *driver_data); | ||
125 | void regulator_unregister(struct regulator_dev *rdev); | 186 | void regulator_unregister(struct regulator_dev *rdev); |
126 | 187 | ||
127 | int regulator_notifier_call_chain(struct regulator_dev *rdev, | 188 | int regulator_notifier_call_chain(struct regulator_dev *rdev, |
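A sketch of how a chip driver might use the new hooks: a voltage table exported through list_voltage()/n_voltages, a get_status() reporting actual hardware state, and regulator_register() with its new init_data argument. All "myldo" names and the status read are hypothetical:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>

static const int myldo_voltages[] = { 1800000, 2500000, 3300000 };

static int myldo_list_voltage(struct regulator_dev *rdev, unsigned selector)
{
	if (selector >= ARRAY_SIZE(myldo_voltages))
		return -EINVAL;
	return myldo_voltages[selector];
}

static bool myldo_read_power_good(struct regulator_dev *rdev)
{
	return true;	/* placeholder for a real I2C/SPI status read */
}

static int myldo_get_status(struct regulator_dev *rdev)
{
	return myldo_read_power_good(rdev) ? REGULATOR_STATUS_NORMAL
					   : REGULATOR_STATUS_OFF;
}

static struct regulator_ops myldo_ops = {
	.list_voltage	= myldo_list_voltage,
	.get_status	= myldo_get_status,
	/* enable/disable/set_voltage/get_voltage elided */
};

static struct regulator_desc myldo_desc = {
	.name		= "MYLDO",
	.id		= 0,
	.n_voltages	= ARRAY_SIZE(myldo_voltages),
	.ops		= &myldo_ops,
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
};

static int myldo_probe(struct platform_device *pdev)
{
	struct regulator_init_data *init = pdev->dev.platform_data;
	struct regulator_dev *rdev;

	/* init_data now travels through registration, not driver glue */
	rdev = regulator_register(&myldo_desc, &pdev->dev, init, NULL);
	return IS_ERR(rdev) ? PTR_ERR(rdev) : 0;
}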
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h index 1387a5d2190e..91b4da31f1b5 100644 --- a/include/linux/regulator/fixed.h +++ b/include/linux/regulator/fixed.h | |||
@@ -14,9 +14,12 @@ | |||
14 | #ifndef __REGULATOR_FIXED_H | 14 | #ifndef __REGULATOR_FIXED_H |
15 | #define __REGULATOR_FIXED_H | 15 | #define __REGULATOR_FIXED_H |
16 | 16 | ||
17 | struct regulator_init_data; | ||
18 | |||
17 | struct fixed_voltage_config { | 19 | struct fixed_voltage_config { |
18 | const char *supply_name; | 20 | const char *supply_name; |
19 | int microvolts; | 21 | int microvolts; |
22 | struct regulator_init_data *init_data; | ||
20 | }; | 23 | }; |
21 | 24 | ||
22 | #endif | 25 | #endif |
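Board code for the fixed-voltage driver now hands constraints through the same init_data member. A hedged sketch with a made-up camera supply; "reg-fixed-voltage" is the device name drivers/regulator/fixed.c binds to:

static struct regulator_init_data camvdd_init_data = {
	.constraints = {
		.always_on = 1,		/* a fixed rail with no control */
	},
};

static struct fixed_voltage_config camvdd_config = {
	.supply_name	= "cam_vdd",	/* hypothetical */
	.microvolts	= 2800000,
	.init_data	= &camvdd_init_data,
};

static struct platform_device camvdd_device = {
	.name	= "reg-fixed-voltage",
	.id	= 0,
	.dev	= {
		.platform_data = &camvdd_config,
	},
};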
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 3794773b23d2..bac64fa390f2 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. | 4 | * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. |
5 | * | 5 | * |
6 | * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> | 6 | * Author: Liam Girdwood <lrg@slimlogic.co.uk> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
@@ -73,7 +73,9 @@ struct regulator_state { | |||
73 | * | 73 | * |
74 | * @always_on: Set if the regulator should never be disabled. | 74 | * @always_on: Set if the regulator should never be disabled. |
75 | * @boot_on: Set if the regulator is enabled when the system is initially | 75 | * @boot_on: Set if the regulator is enabled when the system is initially |
76 | * started. | 76 | * started. If the regulator is not enabled by the hardware or |
77 | * bootloader then it will be enabled when the constraints are | ||
78 | * applied. | ||
77 | * @apply_uV: Apply the voltage constraint when initialising. | 79 | * @apply_uV: Apply the voltage constraint when initialising. |
78 | * | 80 | * |
79 | * @input_uV: Input voltage for regulator when supplied by another regulator. | 81 | * @input_uV: Input voltage for regulator when supplied by another regulator. |
@@ -83,6 +85,7 @@ struct regulator_state { | |||
83 | * @state_standby: State for regulator when system is suspended in standby | 85 | * @state_standby: State for regulator when system is suspended in standby |
84 | * mode. | 86 | * mode. |
85 | * @initial_state: Suspend state to set by default. | 87 | * @initial_state: Suspend state to set by default. |
88 | * @initial_mode: Mode to set at startup. | ||
86 | */ | 89 | */ |
87 | struct regulation_constraints { | 90 | struct regulation_constraints { |
88 | 91 | ||
@@ -111,6 +114,9 @@ struct regulation_constraints { | |||
111 | struct regulator_state state_standby; | 114 | struct regulator_state state_standby; |
112 | suspend_state_t initial_state; /* suspend state to set at init */ | 115 | suspend_state_t initial_state; /* suspend state to set at init */ |
113 | 116 | ||
117 | /* mode to set on startup */ | ||
118 | unsigned int initial_mode; | ||
119 | |||
114 | /* constraint flags */ | 120 | /* constraint flags */
115 | unsigned always_on:1; /* regulator never off when system is on */ | 121 | unsigned always_on:1; /* regulator never off when system is on */ |
116 | unsigned boot_on:1; /* bootloader/firmware enabled regulator */ | 122 | unsigned boot_on:1; /* bootloader/firmware enabled regulator */ |
@@ -160,4 +166,6 @@ struct regulator_init_data { | |||
160 | 166 | ||
161 | int regulator_suspend_prepare(suspend_state_t state); | 167 | int regulator_suspend_prepare(suspend_state_t state); |
162 | 168 | ||
169 | void regulator_has_full_constraints(void); | ||
170 | |||
163 | #endif | 171 | #endif |
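Together with boot_on's clarified semantics, initial_mode and regulator_has_full_constraints() are machine-level knobs. A hedged sketch of constraints using them; the rail, values and init hook are illustrative:

static struct regulator_init_data ldo2_init_data = {
	.constraints = {
		.min_uV			= 1800000,
		.max_uV			= 1800000,
		.apply_uV		= 1,
		.boot_on		= 1,	/* enabled at constraint-apply
						 * time if firmware left it off */
		.initial_mode		= REGULATOR_MODE_STANDBY,
		.valid_modes_mask	= REGULATOR_MODE_NORMAL |
					  REGULATOR_MODE_STANDBY,
		.valid_ops_mask		= REGULATOR_CHANGE_MODE,
	},
};

static void __init my_board_regulator_init(void)	/* hypothetical */
{
	/* ... register regulators ... */

	/* every supply on the board is described, so the core may treat
	 * the picture as complete */
	regulator_has_full_constraints();
}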
diff --git a/include/linux/rtc-v3020.h b/include/linux/rtc-v3020.h index bf74e63c98fe..8ba646e610d9 100644 --- a/include/linux/rtc-v3020.h +++ b/include/linux/rtc-v3020.h | |||
@@ -14,6 +14,12 @@ | |||
14 | * is used depends on the board. */ | 14 | * is used depends on the board. */ |
15 | struct v3020_platform_data { | 15 | struct v3020_platform_data { |
16 | int leftshift; /* (1<<(leftshift)) & readl() */ | 16 | int leftshift; /* (1<<(leftshift)) & readl() */ |
17 | |||
18 | int use_gpio:1; | ||
19 | unsigned int gpio_cs; | ||
20 | unsigned int gpio_wr; | ||
21 | unsigned int gpio_rd; | ||
22 | unsigned int gpio_io; | ||
17 | }; | 23 | }; |
18 | 24 | ||
19 | #define V3020_STATUS_0 0x00 | 25 | #define V3020_STATUS_0 0x00 |
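A board wiring the V3020 over four GPIO lines instead of the memory-mapped bus would fill the new fields roughly as below; the GPIO numbers are made up, and leftshift only matters for the memory-mapped case:

static struct v3020_platform_data v3020_pdata = {
	.use_gpio	= 1,
	.gpio_cs	= 10,
	.gpio_wr	= 11,
	.gpio_rd	= 12,
	.gpio_io	= 13,
};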
diff --git a/include/linux/sched.h b/include/linux/sched.h index 5a50fdef5be5..b94f3541f67b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -68,7 +68,7 @@ struct sched_param { | |||
68 | #include <linux/smp.h> | 68 | #include <linux/smp.h> |
69 | #include <linux/sem.h> | 69 | #include <linux/sem.h> |
70 | #include <linux/signal.h> | 70 | #include <linux/signal.h> |
71 | #include <linux/fs_struct.h> | 71 | #include <linux/path.h> |
72 | #include <linux/compiler.h> | 72 | #include <linux/compiler.h> |
73 | #include <linux/completion.h> | 73 | #include <linux/completion.h> |
74 | #include <linux/pid.h> | 74 | #include <linux/pid.h> |
@@ -97,6 +97,7 @@ struct futex_pi_state; | |||
97 | struct robust_list_head; | 97 | struct robust_list_head; |
98 | struct bio; | 98 | struct bio; |
99 | struct bts_tracer; | 99 | struct bts_tracer; |
100 | struct fs_struct; | ||
100 | 101 | ||
101 | /* | 102 | /* |
102 | * List of flags we want to share for kernel threads, | 103 | * List of flags we want to share for kernel threads, |
@@ -549,25 +550,8 @@ struct signal_struct { | |||
549 | 550 | ||
550 | struct list_head cpu_timers[3]; | 551 | struct list_head cpu_timers[3]; |
551 | 552 | ||
552 | /* job control IDs */ | ||
553 | |||
554 | /* | ||
555 | * pgrp and session fields are deprecated. | ||
556 | * use the task_session_Xnr and task_pgrp_Xnr routines below | ||
557 | */ | ||
558 | |||
559 | union { | ||
560 | pid_t pgrp __deprecated; | ||
561 | pid_t __pgrp; | ||
562 | }; | ||
563 | |||
564 | struct pid *tty_old_pgrp; | 553 | struct pid *tty_old_pgrp; |
565 | 554 | ||
566 | union { | ||
567 | pid_t session __deprecated; | ||
568 | pid_t __session; | ||
569 | }; | ||
570 | |||
571 | /* boolean value for session group leader */ | 555 | /* boolean value for session group leader */ |
572 | int leader; | 556 | int leader; |
573 | 557 | ||
@@ -1473,16 +1457,6 @@ static inline int rt_task(struct task_struct *p) | |||
1473 | return rt_prio(p->prio); | 1457 | return rt_prio(p->prio); |
1474 | } | 1458 | } |
1475 | 1459 | ||
1476 | static inline void set_task_session(struct task_struct *tsk, pid_t session) | ||
1477 | { | ||
1478 | tsk->signal->__session = session; | ||
1479 | } | ||
1480 | |||
1481 | static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp) | ||
1482 | { | ||
1483 | tsk->signal->__pgrp = pgrp; | ||
1484 | } | ||
1485 | |||
1486 | static inline struct pid *task_pid(struct task_struct *task) | 1460 | static inline struct pid *task_pid(struct task_struct *task) |
1487 | { | 1461 | { |
1488 | return task->pids[PIDTYPE_PID].pid; | 1462 | return task->pids[PIDTYPE_PID].pid; |
@@ -1493,6 +1467,11 @@ static inline struct pid *task_tgid(struct task_struct *task) | |||
1493 | return task->group_leader->pids[PIDTYPE_PID].pid; | 1467 | return task->group_leader->pids[PIDTYPE_PID].pid; |
1494 | } | 1468 | } |
1495 | 1469 | ||
1470 | /* | ||
1471 | * Without tasklist or rcu lock it is not safe to dereference | ||
1472 | * the result of task_pgrp/task_session even if task == current; | ||
1473 | * we can race with another thread doing sys_setsid/sys_setpgid. | ||
1474 | */ | ||
1496 | static inline struct pid *task_pgrp(struct task_struct *task) | 1475 | static inline struct pid *task_pgrp(struct task_struct *task) |
1497 | { | 1476 | { |
1498 | return task->group_leader->pids[PIDTYPE_PGID].pid; | 1477 | return task->group_leader->pids[PIDTYPE_PGID].pid; |
@@ -1518,17 +1497,23 @@ struct pid_namespace; | |||
1518 | * | 1497 | * |
1519 | * see also pid_nr() etc in include/linux/pid.h | 1498 | * see also pid_nr() etc in include/linux/pid.h |
1520 | */ | 1499 | */ |
1500 | pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, | ||
1501 | struct pid_namespace *ns); | ||
1521 | 1502 | ||
1522 | static inline pid_t task_pid_nr(struct task_struct *tsk) | 1503 | static inline pid_t task_pid_nr(struct task_struct *tsk) |
1523 | { | 1504 | { |
1524 | return tsk->pid; | 1505 | return tsk->pid; |
1525 | } | 1506 | } |
1526 | 1507 | ||
1527 | pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); | 1508 | static inline pid_t task_pid_nr_ns(struct task_struct *tsk, |
1509 | struct pid_namespace *ns) | ||
1510 | { | ||
1511 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); | ||
1512 | } | ||
1528 | 1513 | ||
1529 | static inline pid_t task_pid_vnr(struct task_struct *tsk) | 1514 | static inline pid_t task_pid_vnr(struct task_struct *tsk) |
1530 | { | 1515 | { |
1531 | return pid_vnr(task_pid(tsk)); | 1516 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); |
1532 | } | 1517 | } |
1533 | 1518 | ||
1534 | 1519 | ||
@@ -1545,31 +1530,34 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk) | |||
1545 | } | 1530 | } |
1546 | 1531 | ||
1547 | 1532 | ||
1548 | static inline pid_t task_pgrp_nr(struct task_struct *tsk) | 1533 | static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, |
1534 | struct pid_namespace *ns) | ||
1549 | { | 1535 | { |
1550 | return tsk->signal->__pgrp; | 1536 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); |
1551 | } | 1537 | } |
1552 | 1538 | ||
1553 | pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); | ||
1554 | |||
1555 | static inline pid_t task_pgrp_vnr(struct task_struct *tsk) | 1539 | static inline pid_t task_pgrp_vnr(struct task_struct *tsk) |
1556 | { | 1540 | { |
1557 | return pid_vnr(task_pgrp(tsk)); | 1541 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); |
1558 | } | 1542 | } |
1559 | 1543 | ||
1560 | 1544 | ||
1561 | static inline pid_t task_session_nr(struct task_struct *tsk) | 1545 | static inline pid_t task_session_nr_ns(struct task_struct *tsk, |
1546 | struct pid_namespace *ns) | ||
1562 | { | 1547 | { |
1563 | return tsk->signal->__session; | 1548 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); |
1564 | } | 1549 | } |
1565 | 1550 | ||
1566 | pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); | ||
1567 | |||
1568 | static inline pid_t task_session_vnr(struct task_struct *tsk) | 1551 | static inline pid_t task_session_vnr(struct task_struct *tsk) |
1569 | { | 1552 | { |
1570 | return pid_vnr(task_session(tsk)); | 1553 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); |
1571 | } | 1554 | } |
1572 | 1555 | ||
1556 | /* obsolete, do not use */ | ||
1557 | static inline pid_t task_pgrp_nr(struct task_struct *tsk) | ||
1558 | { | ||
1559 | return task_pgrp_nr_ns(tsk, &init_pid_ns); | ||
1560 | } | ||
1573 | 1561 | ||
1574 | /** | 1562 | /** |
1575 | * pid_alive - check that a task structure is not stale | 1563 | * pid_alive - check that a task structure is not stale |
@@ -1979,7 +1967,8 @@ extern void mm_release(struct task_struct *, struct mm_struct *); | |||
1979 | /* Allocate a new mm structure and copy contents from tsk->mm */ | 1967 | /* Allocate a new mm structure and copy contents from tsk->mm */ |
1980 | extern struct mm_struct *dup_mm(struct task_struct *tsk); | 1968 | extern struct mm_struct *dup_mm(struct task_struct *tsk); |
1981 | 1969 | ||
1982 | extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); | 1970 | extern int copy_thread(unsigned long, unsigned long, unsigned long, |
1971 | struct task_struct *, struct pt_regs *); | ||
1983 | extern void flush_thread(void); | 1972 | extern void flush_thread(void); |
1984 | extern void exit_thread(void); | 1973 | extern void exit_thread(void); |
1985 | 1974 | ||
@@ -2064,6 +2053,11 @@ static inline int thread_group_empty(struct task_struct *p) | |||
2064 | #define delay_group_leader(p) \ | 2053 | #define delay_group_leader(p) \ |
2065 | (thread_group_leader(p) && !thread_group_empty(p)) | 2054 | (thread_group_leader(p) && !thread_group_empty(p)) |
2066 | 2055 | ||
2056 | static inline int task_detached(struct task_struct *p) | ||
2057 | { | ||
2058 | return p->exit_signal == -1; | ||
2059 | } | ||
2060 | |||
2067 | /* | 2061 | /* |
2068 | * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring | 2062 | * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring |
2069 | * subscriptions and synchronises with wait4(). Also used in procfs. Also | 2063 | * subscriptions and synchronises with wait4(). Also used in procfs. Also |
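The new comment on task_pgrp() is the crux of the pid rework above: the struct pid pointers can change underfoot via sys_setsid()/sys_setpgid(), so raw callers must bracket the dereference, while the *_nr_ns()/*_vnr() helpers now do it internally through __task_pid_nr_ns(). A sketch of the safe open-coded pattern (the wrapper name is invented):

static pid_t read_pgrp_vnr(struct task_struct *tsk)
{
	pid_t nr;

	rcu_read_lock();	/* pin the struct pid against setpgid() */
	nr = pid_vnr(task_pgrp(tsk));
	rcu_read_unlock();

	return nr;		/* equivalent to task_pgrp_vnr(tsk) */
}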
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h new file mode 100644 index 000000000000..85958277f83d --- /dev/null +++ b/include/linux/slow-work.h | |||
@@ -0,0 +1,95 @@ | |||
1 | /* Worker thread pool for slow items, such as filesystem lookups or mkdirs | ||
2 | * | ||
3 | * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | * | ||
11 | * See Documentation/slow-work.txt | ||
12 | */ | ||
13 | |||
14 | #ifndef _LINUX_SLOW_WORK_H | ||
15 | #define _LINUX_SLOW_WORK_H | ||
16 | |||
17 | #ifdef CONFIG_SLOW_WORK | ||
18 | |||
19 | #include <linux/sysctl.h> | ||
20 | |||
21 | struct slow_work; | ||
22 | |||
23 | /* | ||
24 | * The operations used to support slow work items | ||
25 | */ | ||
26 | struct slow_work_ops { | ||
27 | /* get a ref on a work item | ||
28 | * - return 0 if successful, -ve if not | ||
29 | */ | ||
30 | int (*get_ref)(struct slow_work *work); | ||
31 | |||
32 | /* discard a ref to a work item */ | ||
33 | void (*put_ref)(struct slow_work *work); | ||
34 | |||
35 | /* execute a work item */ | ||
36 | void (*execute)(struct slow_work *work); | ||
37 | }; | ||
38 | |||
39 | /* | ||
40 | * A slow work item | ||
41 | * - A reference is held on the parent object by the thread pool when it is | ||
42 | * queued | ||
43 | */ | ||
44 | struct slow_work { | ||
45 | unsigned long flags; | ||
46 | #define SLOW_WORK_PENDING 0 /* item pending (further) execution */ | ||
47 | #define SLOW_WORK_EXECUTING 1 /* item currently executing */ | ||
48 | #define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ | ||
49 | #define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ | ||
50 | const struct slow_work_ops *ops; /* operations table for this item */ | ||
51 | struct list_head link; /* link in queue */ | ||
52 | }; | ||
53 | |||
54 | /** | ||
55 | * slow_work_init - Initialise a slow work item | ||
56 | * @work: The work item to initialise | ||
57 | * @ops: The operations to use to handle the slow work item | ||
58 | * | ||
59 | * Initialise a slow work item. | ||
60 | */ | ||
61 | static inline void slow_work_init(struct slow_work *work, | ||
62 | const struct slow_work_ops *ops) | ||
63 | { | ||
64 | work->flags = 0; | ||
65 | work->ops = ops; | ||
66 | INIT_LIST_HEAD(&work->link); | ||
67 | } | ||
68 | |||
69 | /** | ||
70 | * vslow_work_init - Initialise a very slow work item | ||
71 | * @work: The work item to initialise | ||
72 | * @ops: The operations to use to handle the slow work item | ||
73 | * | ||
74 | * Initialise a very slow work item. This item will be restricted such that | ||
75 | * only a certain number of the pool threads will be able to execute items of | ||
76 | * this type. | ||
77 | */ | ||
78 | static inline void vslow_work_init(struct slow_work *work, | ||
79 | const struct slow_work_ops *ops) | ||
80 | { | ||
81 | work->flags = 1 << SLOW_WORK_VERY_SLOW; | ||
82 | work->ops = ops; | ||
83 | INIT_LIST_HEAD(&work->link); | ||
84 | } | ||
85 | |||
86 | extern int slow_work_enqueue(struct slow_work *work); | ||
87 | extern int slow_work_register_user(void); | ||
88 | extern void slow_work_unregister_user(void); | ||
89 | |||
90 | #ifdef CONFIG_SYSCTL | ||
91 | extern ctl_table slow_work_sysctls[]; | ||
92 | #endif | ||
93 | |||
94 | #endif /* CONFIG_SLOW_WORK */ | ||
95 | #endif /* _LINUX_SLOW_WORK_H */ | ||
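A hedged sketch of a slow-work user following the ops contract above; the object, its refcount and the lookup body are hypothetical:

struct my_lookup {
	struct slow_work work;
	atomic_t usage;
};

static int my_lookup_get_ref(struct slow_work *work)
{
	struct my_lookup *l = container_of(work, struct my_lookup, work);

	atomic_inc(&l->usage);
	return 0;
}

static void my_lookup_put_ref(struct slow_work *work)
{
	struct my_lookup *l = container_of(work, struct my_lookup, work);

	if (atomic_dec_and_test(&l->usage))
		kfree(l);
}

static void my_lookup_execute(struct slow_work *work)
{
	/* the long-running part: filesystem lookup, mkdir, ... */
}

static const struct slow_work_ops my_lookup_ops = {
	.get_ref	= my_lookup_get_ref,
	.put_ref	= my_lookup_put_ref,
	.execute	= my_lookup_execute,
};

A user calls slow_work_register_user() once, then slow_work_init() and slow_work_enqueue() per item; vslow_work_init() instead marks the item for the rationed very-slow subset of the pool.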
diff --git a/include/linux/smp.h b/include/linux/smp.h index bbacb7baa446..a69db820eed6 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
@@ -38,7 +38,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, | |||
38 | /* | 38 | /* |
39 | * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc. | 39 | * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc. |
40 | * (defined in asm header): | 40 | * (defined in asm header): |
41 | */ | 41 | */ |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * stops all CPUs but the current one: | 44 | * stops all CPUs but the current one: |
@@ -82,7 +82,8 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, | |||
82 | return 0; | 82 | return 0; |
83 | } | 83 | } |
84 | 84 | ||
85 | void __smp_call_function_single(int cpuid, struct call_single_data *data); | 85 | void __smp_call_function_single(int cpuid, struct call_single_data *data, |
86 | int wait); | ||
86 | 87 | ||
87 | /* | 88 | /* |
88 | * Generic and arch helpers | 89 | * Generic and arch helpers |
@@ -121,6 +122,8 @@ extern unsigned int setup_max_cpus; | |||
121 | 122 | ||
122 | #else /* !SMP */ | 123 | #else /* !SMP */ |
123 | 124 | ||
125 | static inline void smp_send_stop(void) { } | ||
126 | |||
124 | /* | 127 | /* |
125 | * These macros fold the SMP functionality into a single CPU system | 128 | * These macros fold the SMP functionality into a single CPU system |
126 | */ | 129 | */ |
diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h index 1085212c446e..306e7b1c69ed 100644 --- a/include/linux/spi/eeprom.h +++ b/include/linux/spi/eeprom.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __LINUX_SPI_EEPROM_H | 1 | #ifndef __LINUX_SPI_EEPROM_H |
2 | #define __LINUX_SPI_EEPROM_H | 2 | #define __LINUX_SPI_EEPROM_H |
3 | 3 | ||
4 | #include <linux/memory.h> | ||
5 | |||
4 | /* | 6 | /* |
5 | * Put one of these structures in platform_data for SPI EEPROMS handled | 7 | * Put one of these structures in platform_data for SPI EEPROMS handled |
6 | * by the "at25" driver. On SPI, most EEPROMS understand the same core | 8 | * by the "at25" driver. On SPI, most EEPROMS understand the same core |
@@ -17,6 +19,10 @@ struct spi_eeprom { | |||
17 | #define EE_ADDR2 0x0002 /* 16 bit addrs */ | 19 | #define EE_ADDR2 0x0002 /* 16 bit addrs */ |
18 | #define EE_ADDR3 0x0004 /* 24 bit addrs */ | 20 | #define EE_ADDR3 0x0004 /* 24 bit addrs */ |
19 | #define EE_READONLY 0x0008 /* disallow writes */ | 21 | #define EE_READONLY 0x0008 /* disallow writes */ |
22 | |||
23 | /* for exporting this chip's data to other kernel code */ | ||
24 | void (*setup)(struct memory_accessor *mem, void *context); | ||
25 | void *context; | ||
20 | }; | 26 | }; |
21 | 27 | ||
22 | #endif /* __LINUX_SPI_EEPROM_H */ | 28 | #endif /* __LINUX_SPI_EEPROM_H */ |
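The setup hook lets board code pull data out of the EEPROM as soon as the at25 driver binds, through the memory_accessor it is handed. A hedged sketch; the offset, sizes and the consumer helper are invented:

static void my_eeprom_setup(struct memory_accessor *mem, void *context)
{
	char mac[6];

	/* read a MAC address stored at offset 0; a real board would
	 * validate it before handing it on */
	if (mem->read(mem, mac, 0, sizeof(mac)) == sizeof(mac))
		my_board_store_mac(mac);	/* hypothetical helper */
}

static struct spi_eeprom my_at25_pdata = {
	.name		= "at25128",
	.byte_len	= 16384,
	.page_size	= 64,
	.flags		= EE_ADDR2,
	.setup		= my_eeprom_setup,
};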
diff --git a/include/linux/spi/spi_gpio.h b/include/linux/spi/spi_gpio.h index 0f01a0f1f40c..ca6782ee4b9f 100644 --- a/include/linux/spi/spi_gpio.h +++ b/include/linux/spi/spi_gpio.h | |||
@@ -25,10 +25,16 @@ | |||
25 | * ... | 25 | * ... |
26 | * }; | 26 | * }; |
27 | * | 27 | * |
28 | * If chipselect is not used (there's only one device on the bus), assign | ||
29 | * SPI_GPIO_NO_CHIPSELECT to the controller_data: | ||
30 | * .controller_data = (void *) SPI_GPIO_NO_CHIPSELECT; | ||
31 | * | ||
28 | * If the bitbanged bus is later switched to a "native" controller, | 32 | * If the bitbanged bus is later switched to a "native" controller, |
29 | * that platform_device and controller_data should be removed. | 33 | * that platform_device and controller_data should be removed. |
30 | */ | 34 | */ |
31 | 35 | ||
36 | #define SPI_GPIO_NO_CHIPSELECT ((unsigned long)-1l) | ||
37 | |||
32 | /** | 38 | /** |
33 | * struct spi_gpio_platform_data - parameter for bitbanged SPI master | 39 | * struct spi_gpio_platform_data - parameter for bitbanged SPI master |
34 | * @sck: number of the GPIO used for clock output | 40 | * @sck: number of the GPIO used for clock output |
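A board-info sketch for the single-device case the new comment describes; the modalias, speed and bus number are invented:

static struct spi_board_info my_board_spi[] __initdata = {
	{
		.modalias	= "my-sensor",
		.max_speed_hz	= 1000000,
		.bus_num	= 2,
		/* sole device on the bitbanged bus: no CS line wired */
		.controller_data = (void *) SPI_GPIO_NO_CHIPSELECT,
	},
};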
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index a0c66a2e00ad..252b245cfcf4 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -153,9 +153,11 @@ do { \ | |||
153 | extern int _raw_spin_trylock(spinlock_t *lock); | 153 | extern int _raw_spin_trylock(spinlock_t *lock); |
154 | extern void _raw_spin_unlock(spinlock_t *lock); | 154 | extern void _raw_spin_unlock(spinlock_t *lock); |
155 | extern void _raw_read_lock(rwlock_t *lock); | 155 | extern void _raw_read_lock(rwlock_t *lock); |
156 | #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) | ||
156 | extern int _raw_read_trylock(rwlock_t *lock); | 157 | extern int _raw_read_trylock(rwlock_t *lock); |
157 | extern void _raw_read_unlock(rwlock_t *lock); | 158 | extern void _raw_read_unlock(rwlock_t *lock); |
158 | extern void _raw_write_lock(rwlock_t *lock); | 159 | extern void _raw_write_lock(rwlock_t *lock); |
160 | #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) | ||
159 | extern int _raw_write_trylock(rwlock_t *lock); | 161 | extern int _raw_write_trylock(rwlock_t *lock); |
160 | extern void _raw_write_unlock(rwlock_t *lock); | 162 | extern void _raw_write_unlock(rwlock_t *lock); |
161 | #else | 163 | #else |
@@ -165,9 +167,13 @@ do { \ | |||
165 | # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) | 167 | # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) |
166 | # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) | 168 | # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) |
167 | # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) | 169 | # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) |
170 | # define _raw_read_lock_flags(lock, flags) \ | ||
171 | __raw_read_lock_flags(&(lock)->raw_lock, *(flags)) | ||
168 | # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) | 172 | # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) |
169 | # define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) | 173 | # define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) |
170 | # define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) | 174 | # define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) |
175 | # define _raw_write_lock_flags(lock, flags) \ | ||
176 | __raw_write_lock_flags(&(lock)->raw_lock, *(flags)) | ||
171 | # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) | 177 | # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) |
172 | # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) | 178 | # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) |
173 | #endif | 179 | #endif |
diff --git a/include/linux/string.h b/include/linux/string.h index 3c877d686375..489019ef1694 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
@@ -122,5 +122,14 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4); | |||
122 | extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, | 122 | extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, |
123 | const void *from, size_t available); | 123 | const void *from, size_t available); |
124 | 124 | ||
125 | /** | ||
126 | * strstarts - does @str start with @prefix? | ||
127 | * @str: string to examine | ||
128 | * @prefix: prefix to look for. | ||
129 | */ | ||
130 | static inline bool strstarts(const char *str, const char *prefix) | ||
131 | { | ||
132 | return strncmp(str, prefix, strlen(prefix)) == 0; | ||
133 | } | ||
125 | #endif | 134 | #endif |
126 | #endif /* _LINUX_STRING_H_ */ | 135 | #endif /* _LINUX_STRING_H_ */ |
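strstarts() replaces the error-prone open-coded strncmp(str, prefix, strlen(prefix)) == 0 test; e.g., skipping a known prefix (the caller is illustrative):

	if (strstarts(opt, "console="))
		opt += strlen("console=");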
diff --git a/include/linux/synclink.h b/include/linux/synclink.h index 99b8bdb17b2b..0ff2779c44d0 100644 --- a/include/linux/synclink.h +++ b/include/linux/synclink.h | |||
@@ -125,6 +125,7 @@ | |||
125 | #define MGSL_MODE_MONOSYNC 3 | 125 | #define MGSL_MODE_MONOSYNC 3 |
126 | #define MGSL_MODE_BISYNC 4 | 126 | #define MGSL_MODE_BISYNC 4 |
127 | #define MGSL_MODE_RAW 6 | 127 | #define MGSL_MODE_RAW 6 |
128 | #define MGSL_MODE_BASE_CLOCK 7 | ||
128 | 129 | ||
129 | #define MGSL_BUS_TYPE_ISA 1 | 130 | #define MGSL_BUS_TYPE_ISA 1 |
130 | #define MGSL_BUS_TYPE_EISA 2 | 131 | #define MGSL_BUS_TYPE_EISA 2 |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 0cff9bb80b02..6470f74074af 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -517,6 +517,10 @@ asmlinkage long sys_pread64(unsigned int fd, char __user *buf, | |||
517 | size_t count, loff_t pos); | 517 | size_t count, loff_t pos); |
518 | asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf, | 518 | asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf, |
519 | size_t count, loff_t pos); | 519 | size_t count, loff_t pos); |
520 | asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec, | ||
521 | unsigned long vlen, unsigned long pos_l, unsigned long pos_h); | ||
522 | asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec, | ||
523 | unsigned long vlen, unsigned long pos_l, unsigned long pos_h); | ||
520 | asmlinkage long sys_getcwd(char __user *buf, unsigned long size); | 524 | asmlinkage long sys_getcwd(char __user *buf, unsigned long size); |
521 | asmlinkage long sys_mkdir(const char __user *pathname, int mode); | 525 | asmlinkage long sys_mkdir(const char __user *pathname, int mode); |
522 | asmlinkage long sys_chdir(const char __user *filename); | 526 | asmlinkage long sys_chdir(const char __user *filename); |
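sys_preadv()/sys_pwritev() take the 64-bit file position split into low/high unsigned longs so 32-bit ABIs can pass it in ordinary registers; the kernel reassembles the offset from the two halves (on 64-bit, pos_l alone carries it and pos_h is shifted away). A hedged userspace sketch of the raw call, assuming __NR_preadv is defined; later libc wrappers hide this split:

#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

static ssize_t raw_preadv(int fd, const struct iovec *iov, int cnt, off_t pos)
{
	unsigned long long p = (unsigned long long)pos;

	return syscall(__NR_preadv, fd, iov, cnt,
		       (unsigned long)p, (unsigned long)(p >> 32));
}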
diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h index dd253177f65f..3e08a1c86830 100644 --- a/include/linux/timeriomem-rng.h +++ b/include/linux/timeriomem-rng.h | |||
@@ -14,7 +14,7 @@ struct timeriomem_rng_data { | |||
14 | struct completion completion; | 14 | struct completion completion; |
15 | unsigned int present:1; | 15 | unsigned int present:1; |
16 | 16 | ||
17 | u32 __iomem *address; | 17 | void __iomem *address; |
18 | 18 | ||
19 | /* measures in usecs */ | 19 | /* measures in usecs */ |
20 | unsigned int period; | 20 | unsigned int period; |
diff --git a/include/linux/topology.h b/include/linux/topology.h index a16b9e06f2e5..7402c1a27c4f 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
@@ -38,11 +38,7 @@ | |||
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | #ifndef nr_cpus_node | 40 | #ifndef nr_cpus_node |
41 | #define nr_cpus_node(node) \ | 41 | #define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) |
42 | ({ \ | ||
43 | node_to_cpumask_ptr(__tmp__, node); \ | ||
44 | cpus_weight(*__tmp__); \ | ||
45 | }) | ||
46 | #endif | 42 | #endif |
47 | 43 | ||
48 | #define for_each_node_with_cpus(node) \ | 44 | #define for_each_node_with_cpus(node) \ |
@@ -200,4 +196,9 @@ int arch_update_cpu_topology(void); | |||
200 | #define topology_core_cpumask(cpu) cpumask_of(cpu) | 196 | #define topology_core_cpumask(cpu) cpumask_of(cpu) |
201 | #endif | 197 | #endif |
202 | 198 | ||
199 | /* Returns the number of the current Node. */ | ||
200 | #ifndef numa_node_id | ||
201 | #define numa_node_id() (cpu_to_node(raw_smp_processor_id())) | ||
202 | #endif | ||
203 | |||
203 | #endif /* _LINUX_TOPOLOGY_H */ | 204 | #endif /* _LINUX_TOPOLOGY_H */ |
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 6186a789d6c7..c7aa154f4bfc 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h | |||
@@ -388,17 +388,14 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info, | |||
388 | * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal | 388 | * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal |
389 | * @task: task receiving the signal | 389 | * @task: task receiving the signal |
390 | * @sig: signal number being sent | 390 | * @sig: signal number being sent |
391 | * @handler: %SIG_IGN or %SIG_DFL | ||
392 | * | 391 | * |
393 | * Return zero iff tracing doesn't care to examine this ignored signal, | 392 | * Return zero iff tracing doesn't care to examine this ignored signal, |
394 | * so it can short-circuit normal delivery and never even get queued. | 393 | * so it can short-circuit normal delivery and never even get queued. |
395 | * Either @handler is %SIG_DFL and @sig's default is ignore, or it's %SIG_IGN. | ||
396 | * | 394 | * |
397 | * Called with @task->sighand->siglock held. | 395 | * Called with @task->sighand->siglock held. |
398 | */ | 396 | */ |
399 | static inline int tracehook_consider_ignored_signal(struct task_struct *task, | 397 | static inline int tracehook_consider_ignored_signal(struct task_struct *task, |
400 | int sig, | 398 | int sig) |
401 | void __user *handler) | ||
402 | { | 399 | { |
403 | return (task_ptrace(task) & PT_PTRACED) != 0; | 400 | return (task_ptrace(task) & PT_PTRACED) != 0; |
404 | } | 401 | } |
@@ -407,19 +404,17 @@ static inline int tracehook_consider_ignored_signal(struct task_struct *task, | |||
407 | * tracehook_consider_fatal_signal - suppress special handling of fatal signal | 404 | * tracehook_consider_fatal_signal - suppress special handling of fatal signal |
408 | * @task: task receiving the signal | 405 | * @task: task receiving the signal |
409 | * @sig: signal number being sent | 406 | * @sig: signal number being sent |
410 | * @handler: %SIG_DFL or %SIG_IGN | ||
411 | * | 407 | * |
412 | * Return nonzero to prevent special handling of this termination signal. | 408 | * Return nonzero to prevent special handling of this termination signal. |
413 | * Normally @handler is %SIG_DFL. It can be %SIG_IGN if @sig is ignored, | 409 | * Normally the handler for the signal is %SIG_DFL. It can be %SIG_IGN if
414 | * in which case force_sig() is about to reset it to %SIG_DFL. | 410 | * @sig is ignored, in which case force_sig() is about to reset it to %SIG_DFL.
415 | * When this returns zero, this signal might cause a quick termination | 411 | * When this returns zero, this signal might cause a quick termination |
416 | * that does not give the debugger a chance to intercept the signal. | 412 | * that does not give the debugger a chance to intercept the signal. |
417 | * | 413 | * |
418 | * Called with or without @task->sighand->siglock held. | 414 | * Called with or without @task->sighand->siglock held. |
419 | */ | 415 | */ |
420 | static inline int tracehook_consider_fatal_signal(struct task_struct *task, | 416 | static inline int tracehook_consider_fatal_signal(struct task_struct *task, |
421 | int sig, | 417 | int sig) |
422 | void __user *handler) | ||
423 | { | 418 | { |
424 | return (task_ptrace(task) & PT_PTRACED) != 0; | 419 | return (task_ptrace(task) & PT_PTRACED) != 0; |
425 | } | 420 | } |
@@ -507,7 +502,7 @@ static inline int tracehook_notify_jctl(int notify, int why) | |||
507 | static inline int tracehook_notify_death(struct task_struct *task, | 502 | static inline int tracehook_notify_death(struct task_struct *task, |
508 | void **death_cookie, int group_dead) | 503 | void **death_cookie, int group_dead) |
509 | { | 504 | { |
510 | if (task->exit_signal == -1) | 505 | if (task_detached(task)) |
511 | return task->ptrace ? SIGCHLD : DEATH_REAP; | 506 | return task->ptrace ? SIGCHLD : DEATH_REAP; |
512 | 507 | ||
513 | /* | 508 | /* |
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h index 5f401b644ed5..429c631d2aad 100644 --- a/include/linux/usb/wusb.h +++ b/include/linux/usb/wusb.h | |||
@@ -80,8 +80,7 @@ struct wusb_ckhdid { | |||
80 | u8 data[16]; | 80 | u8 data[16]; |
81 | } __attribute__((packed)); | 81 | } __attribute__((packed)); |
82 | 82 | ||
83 | const static | 83 | static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } }; |
84 | struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } }; | ||
85 | 84 | ||
86 | #define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) | 85 | #define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) |
87 | 86 | ||
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 3cd51e579ab1..13e1adf55c4c 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -41,6 +41,11 @@ struct delayed_work { | |||
41 | struct timer_list timer; | 41 | struct timer_list timer; |
42 | }; | 42 | }; |
43 | 43 | ||
44 | static inline struct delayed_work *to_delayed_work(struct work_struct *work) | ||
45 | { | ||
46 | return container_of(work, struct delayed_work, work); | ||
47 | } | ||
48 | |||
44 | struct execute_work { | 49 | struct execute_work { |
45 | struct work_struct work; | 50 | struct work_struct work; |
46 | }; | 51 | }; |
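to_delayed_work() packages the container_of() step that every delayed-work callback used to open-code. A sketch; the driver struct is hypothetical:

struct my_driver {
	struct delayed_work poll_work;
	/* ... */
};

static void my_poll(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_driver *drv = container_of(dwork, struct my_driver,
					     poll_work);

	/* ... poll the hardware via drv ..., then re-arm */
	schedule_delayed_work(dwork, HZ);
}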