author    Linus Torvalds <torvalds@linux-foundation.org>    2013-09-11 19:08:54 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-09-11 19:08:54 -0400
commit    c2d95729e3094ecdd8c54e856bbe971adbbd7f48
tree      76cc5b551227d3d55d68a93105c1fe8080dfb812 /include
parent    bbda1baeeb2f4aff3addac3d086a1e56c3f2503e
parent    b34081f1cd59585451efaa69e1dff1b9507e6c89
Merge branch 'akpm' (patches from Andrew Morton)
Merge first patch-bomb from Andrew Morton:
- Some pidns/fork/exec tweaks
- OCFS2 updates
- Most of MM - there remain quite a few memcg parts which depend on
pending core cgroups changes. Which might have been already merged -
I'll check tomorrow...
- Various misc stuff all over the place
- A few block bits which I never got around to sending to Jens -
relatively minor things.
- MAINTAINERS maintenance
- A small number of lib/ updates
- checkpatch updates
- epoll
- firmware/dmi-scan
- Some kprobes work for S390
- drivers/rtc updates
- hfsplus feature work
- vmcore feature work
- rbtree upgrades
- AOE updates
- pktcdvd cleanups
- PPS
- memstick
- w1
- New "inittmpfs" feature, which does the obvious
- More IPC work from Davidlohr.
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (303 commits)
lz4: fix compression/decompression signedness mismatch
ipc: drop ipc_lock_check
ipc, shm: drop shm_lock_check
ipc: drop ipc_lock_by_ptr
ipc, shm: guard against non-existant vma in shmdt(2)
ipc: document general ipc locking scheme
ipc,msg: drop msg_unlock
ipc: rename ids->rw_mutex
ipc,shm: shorten critical region for shmat
ipc,shm: cleanup do_shmat pasta
ipc,shm: shorten critical region for shmctl
ipc,shm: make shmctl_nolock lockless
ipc,shm: introduce shmctl_nolock
ipc: drop ipcctl_pre_down
ipc,shm: shorten critical region in shmctl_down
ipc,shm: introduce lockless functions to obtain the ipc object
initmpfs: use initramfs if rootfstype= or root= specified
initmpfs: make rootfs use tmpfs when CONFIG_TMPFS enabled
initmpfs: move rootfs code from fs/ramfs/ to init/
initmpfs: move bdi setup from init_rootfs to init_ramfs
...
Diffstat (limited to 'include')
28 files changed, 252 insertions, 120 deletions
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c3881553f7d1..5f66d519a726 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -243,6 +243,8 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
  * BDI_CAP_EXEC_MAP:       Can be mapped for execution
  *
  * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
+ *
+ * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
  */
 #define BDI_CAP_NO_ACCT_DIRTY   0x00000001
 #define BDI_CAP_NO_WRITEBACK    0x00000002
@@ -254,6 +256,7 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
 #define BDI_CAP_NO_ACCT_WB      0x00000080
 #define BDI_CAP_SWAP_BACKED     0x00000100
 #define BDI_CAP_STABLE_WRITES   0x00000200
+#define BDI_CAP_STRICTLIMIT     0x00000400
 
 #define BDI_CAP_VMFLAGS \
         (BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 70cf138690e9..e8112ae50531 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -31,7 +31,7 @@ struct linux_binprm {
 #ifdef __alpha__
         unsigned int taso:1;
 #endif
-        unsigned int recursion_depth;
+        unsigned int recursion_depth; /* only for search_binary_handler() */
         struct file * file;
         struct cred *cred;      /* new credentials */
         int unsafe;             /* how unsafe this exec is (mask of LSM_UNSAFE_*) */
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
new file mode 100644
index 000000000000..98e892ef6d5a
--- /dev/null
+++ b/include/linux/cmdline-parser.h
@@ -0,0 +1,43 @@
+/*
+ * Parsing command line, get the partitions information.
+ *
+ * Written by Cai Zhiyong <caizhiyong@huawei.com>
+ *
+ */
+#ifndef CMDLINEPARSEH
+#define CMDLINEPARSEH
+
+#include <linux/blkdev.h>
+
+/* partition flags */
+#define PF_RDONLY                   0x01 /* Device is read only */
+#define PF_POWERUP_LOCK             0x02 /* Always locked after reset */
+
+struct cmdline_subpart {
+        char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
+        sector_t from;
+        sector_t size;
+        int flags;
+        struct cmdline_subpart *next_subpart;
+};
+
+struct cmdline_parts {
+        char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
+        unsigned int nr_subparts;
+        struct cmdline_subpart *subpart;
+        struct cmdline_parts *next_parts;
+};
+
+void cmdline_parts_free(struct cmdline_parts **parts);
+
+int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
+
+struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
+                                         const char *bdev);
+
+void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+                       int slot,
+                       int (*add_part)(int, struct cmdline_subpart *, void *),
+                       void *param);
+
+#endif /* CMDLINEPARSEH */
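The intended consumer of this API is a block driver that takes a partition layout from the kernel command line. A minimal sketch of that flow, based only on the declarations above; my_add_part(), my_apply_parts() and their behavior on return values are illustrative, not part of the patch:

    /* Sketch: walk a parsed command-line partition table.
     * my_add_part() and my_apply_parts() are illustrative names. */
    static int my_add_part(int slot, struct cmdline_subpart *subpart, void *param)
    {
            struct gendisk *disk = param;

            pr_info("%s: slot %d '%s' from %llu, %llu sectors%s\n",
                    disk->disk_name, slot, subpart->name,
                    (unsigned long long)subpart->from,
                    (unsigned long long)subpart->size,
                    (subpart->flags & PF_RDONLY) ? " (ro)" : "");
            return 0;       /* assumed: a non-zero return stops the walk */
    }

    static void my_apply_parts(struct gendisk *disk, const char *cmdline,
                               sector_t disk_size)
    {
            struct cmdline_parts *parts, *match;

            if (cmdline_parts_parse(&parts, cmdline))       /* builds the list */
                    return;

            match = cmdline_parts_find(parts, disk->disk_name);
            if (match)
                    cmdline_parts_set(match, disk_size, 1, my_add_part, disk);

            cmdline_parts_free(&parts);
    }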
diff --git a/include/linux/compat.h b/include/linux/compat.h
index ec1aee4aec9c..345da00a86e0 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -43,6 +43,7 @@
 #define COMPAT_SYSCALL_DEFINEx(x, name, ...)                            \
         asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
         static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+        asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));\
         asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
         {                                                               \
                 return C_SYSC##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 37e4f8da7cdf..fe68a5a98583 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -12,6 +12,15 @@
 extern unsigned long long elfcorehdr_addr;
 extern unsigned long long elfcorehdr_size;
 
+extern int __weak elfcorehdr_alloc(unsigned long long *addr,
+                                   unsigned long long *size);
+extern void __weak elfcorehdr_free(unsigned long long addr);
+extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos);
+extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
+extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
+                                         unsigned long from, unsigned long pfn,
+                                         unsigned long size, pgprot_t prot);
+
 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
                                                 unsigned long, int);
 
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 661d374aeb2d..f8d41cb1cbe0 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -66,8 +66,8 @@ struct gen_pool_chunk {
         struct list_head next_chunk;    /* next chunk in pool */
         atomic_t avail;
         phys_addr_t phys_addr;          /* physical starting address of memory chunk */
-        unsigned long start_addr;       /* starting address of memory chunk */
-        unsigned long end_addr;         /* ending address of memory chunk */
+        unsigned long start_addr;       /* start address of memory chunk */
+        unsigned long end_addr;         /* end address of memory chunk (inclusive) */
         unsigned long bits[0];          /* bitmap for allocating memory chunk */
 };
 
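The comment change records a real semantic shift: end_addr is now the last valid address of the chunk rather than one past it, so size arithmetic in gen_pool code gains a +1. A one-line illustration:

    /* end_addr is inclusive, so a chunk covering [0x1000, 0x1fff] has: */
    size = chunk->end_addr - chunk->start_addr + 1;     /* == 0x1000 bytes */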
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index c2b1801a160b..0393270466c3 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -66,6 +66,9 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                 vm_flags_t vm_flags);
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
 int dequeue_hwpoisoned_huge_page(struct page *page);
+bool isolate_huge_page(struct page *page, struct list_head *list);
+void putback_active_hugepage(struct page *page);
+bool is_hugepage_active(struct page *page);
 void copy_huge_page(struct page *dst, struct page *src);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
@@ -134,6 +137,9 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
         return 0;
 }
 
+#define isolate_huge_page(p, l) false
+#define putback_active_hugepage(p)      do {} while (0)
+#define is_hugepage_active(x)   false
 static inline void copy_huge_page(struct page *dst, struct page *src)
 {
 }
@@ -261,6 +267,8 @@ struct huge_bootmem_page {
 };
 
 struct page *alloc_huge_page_node(struct hstate *h, int nid);
+struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
+                                unsigned long addr, int avoid_reserve);
 
 /* arch callback */
 int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -371,9 +379,23 @@ static inline pgoff_t basepage_index(struct page *page)
         return __basepage_index(page);
 }
 
+extern void dissolve_free_huge_pages(unsigned long start_pfn,
+                                     unsigned long end_pfn);
+int pmd_huge_support(void);
+/*
+ * Currently hugepage migration is enabled only for pmd-based hugepage.
+ * This function will be updated when hugepage migration is more widely
+ * supported.
+ */
+static inline int hugepage_migration_support(struct hstate *h)
+{
+        return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
+}
+
 #else   /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
+#define alloc_huge_page_noerr(v, a, r) NULL
 #define alloc_bootmem_huge_page(h) NULL
 #define hstate_file(f) NULL
 #define hstate_sizelog(s) NULL
@@ -396,6 +418,9 @@ static inline pgoff_t basepage_index(struct page *page)
 {
         return page->index;
 }
+#define dissolve_free_huge_pages(s, e)  do {} while (0)
+#define pmd_huge_support()      0
+#define hugepage_migration_support(h)   0
 #endif  /* CONFIG_HUGETLB_PAGE */
 
 #endif /* _LINUX_HUGETLB_H */
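The new isolate/putback pair brackets hugepage migration much as isolate_lru_page()/putback_lru_page() do for normal pages. A hedged sketch of the intended calling pattern, using the migrate_pages() declaration from linux/migrate.h further below; alloc_target_page() is an illustrative new_page_t callback, not part of the patch:

    /* Sketch: isolate -> migrate -> putback on failure. */
    static int migrate_one_hugepage(struct page *page)
    {
            LIST_HEAD(pagelist);
            int err = -EBUSY;

            if (isolate_huge_page(page, &pagelist)) {       /* takes a reference */
                    err = migrate_pages(&pagelist, alloc_target_page,
                                        0, MIGRATE_SYNC, MR_MEMORY_FAILURE);
                    if (err)
                            putback_active_hugepage(page);  /* back to active list */
            }
            return err;
    }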
diff --git a/include/linux/init.h b/include/linux/init.h
index e73f2b708525..f1c27a71d03c 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -153,6 +153,7 @@ extern unsigned int reset_devices;
 void setup_arch(char **);
 void prepare_namespace(void);
 void __init load_default_modules(void);
+int __init init_rootfs(void);
 
 extern void (*late_time_init)(void);
 
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index c4d870b0d5e6..19c19a5eee29 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -22,7 +22,7 @@ struct ipc_ids {
         int in_use;
         unsigned short seq;
         unsigned short seq_max;
-        struct rw_semaphore rw_mutex;
+        struct rw_semaphore rwsem;
         struct idr ipcs_idr;
         int next_id;
 };
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index ca1d27a0d6a6..925eaf28fca9 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -264,10 +264,36 @@ extern void arch_arm_kprobe(struct kprobe *p);
 extern void arch_disarm_kprobe(struct kprobe *p);
 extern int arch_init_kprobes(void);
 extern void show_registers(struct pt_regs *regs);
-extern kprobe_opcode_t *get_insn_slot(void);
-extern void free_insn_slot(kprobe_opcode_t *slot, int dirty);
 extern void kprobes_inc_nmissed_count(struct kprobe *p);
 
+struct kprobe_insn_cache {
+        struct mutex mutex;
+        void *(*alloc)(void);   /* allocate insn page */
+        void (*free)(void *);   /* free insn page */
+        struct list_head pages; /* list of kprobe_insn_page */
+        size_t insn_size;       /* size of instruction slot */
+        int nr_garbage;
+};
+
+extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c);
+extern void __free_insn_slot(struct kprobe_insn_cache *c,
+                             kprobe_opcode_t *slot, int dirty);
+
+#define DEFINE_INSN_CACHE_OPS(__name)                                   \
+extern struct kprobe_insn_cache kprobe_##__name##_slots;                \
+                                                                        \
+static inline kprobe_opcode_t *get_##__name##_slot(void)                \
+{                                                                       \
+        return __get_insn_slot(&kprobe_##__name##_slots);               \
+}                                                                       \
+                                                                        \
+static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\
+{                                                                       \
+        __free_insn_slot(&kprobe_##__name##_slots, slot, dirty);        \
+}                                                                       \
+
+DEFINE_INSN_CACHE_OPS(insn);
+
 #ifdef CONFIG_OPTPROBES
 /*
  * Internal structure for direct jump optimized probe
@@ -287,13 +313,13 @@ extern void arch_optimize_kprobes(struct list_head *oplist);
 extern void arch_unoptimize_kprobes(struct list_head *oplist,
                                     struct list_head *done_list);
 extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
-extern kprobe_opcode_t *get_optinsn_slot(void);
-extern void free_optinsn_slot(kprobe_opcode_t *slot, int dirty);
 extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
                                         unsigned long addr);
 
 extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
 
+DEFINE_INSN_CACHE_OPS(optinsn);
+
 #ifdef CONFIG_SYSCTL
 extern int sysctl_kprobes_optimization;
 extern int proc_kprobes_optimization_handler(struct ctl_table *table,
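The DEFINE_INSN_CACHE_OPS() macro keeps the old entry points while routing them through the shared cache implementation; DEFINE_INSN_CACHE_OPS(insn) expands, mechanically, to the equivalent of:

    extern struct kprobe_insn_cache kprobe_insn_slots;

    static inline kprobe_opcode_t *get_insn_slot(void)
    {
            return __get_insn_slot(&kprobe_insn_slots);
    }

    static inline void free_insn_slot(kprobe_opcode_t *slot, int dirty)
    {
            __free_insn_slot(&kprobe_insn_slots, slot, dirty);
    }

so existing callers of get_insn_slot()/free_insn_slot() compile unchanged, and the optprobes variant below gets get_optinsn_slot()/free_optinsn_slot() from the same template.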
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
index d21c13f10a64..4356686b0a39 100644
--- a/include/linux/lz4.h
+++ b/include/linux/lz4.h
@@ -67,8 +67,8 @@ int lz4hc_compress(const unsigned char *src, size_t src_len,
  * note :  Destination buffer must be already allocated.
  *         slightly faster than lz4_decompress_unknownoutputsize()
  */
-int lz4_decompress(const char *src, size_t *src_len, char *dest,
-                size_t actual_dest_len);
+int lz4_decompress(const unsigned char *src, size_t *src_len,
+                unsigned char *dest, size_t actual_dest_len);
 
 /*
  * lz4_decompress_unknownoutputsize()
@@ -82,6 +82,6 @@ int lz4_decompress(const char *src, size_t *src_len, char *dest,
  *      Error if return (< 0)
  *      note :  Destination buffer must be already allocated.
  */
-int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
-                char *dest, size_t *dest_len);
+int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
+                unsigned char *dest, size_t *dest_len);
 #endif
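A usage sketch of the retyped lz4_decompress(); per the comments above, the destination must already be allocated, actual_dest_len is the known decompressed size, and src_len reports back how many compressed bytes were consumed. Buffer names here are illustrative:

    /* Sketch: decompress a buffer whose original size is known. */
    static int example_unlz4(const unsigned char *comp, size_t comp_len,
                             unsigned char *out, size_t out_len)
    {
            size_t src_len = comp_len;

            if (lz4_decompress(comp, &src_len, out, out_len) < 0)
                    return -EINVAL; /* corrupt or truncated input */
            /* src_len now holds the number of compressed bytes consumed */
            return 0;
    }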
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index f388203db7e8..31e95acddb4d 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -60,6 +60,8 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
 void memblock_trim_memory(phys_addr_t align);
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
+                            unsigned long *end_pfn);
 void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
                           unsigned long *out_end_pfn, int *out_nid);
 
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 0d7df39a5885..da6716b9e3fe 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -91,7 +91,6 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
 }
 
 #define vma_policy(vma) ((vma)->vm_policy)
-#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))
 
 static inline void mpol_get(struct mempolicy *pol)
 {
@@ -126,6 +125,7 @@ struct shared_policy {
         spinlock_t lock;
 };
 
+int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
 int mpol_set_shared_policy(struct shared_policy *info,
                                 struct vm_area_struct *vma,
@@ -173,7 +173,7 @@ extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
 /* Check if a vma is migratable */
 static inline int vma_migratable(struct vm_area_struct *vma)
 {
-        if (vma->vm_flags & (VM_IO | VM_HUGETLB | VM_PFNMAP))
+        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                 return 0;
         /*
          * Migration allocates pages in the highest zone. If we cannot
@@ -240,7 +240,12 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
 }
 
 #define vma_policy(vma) NULL
-#define vma_set_policy(vma, pol) do {} while(0)
+
+static inline int
+vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
+{
+        return 0;
+}
 
 static inline void numa_policy_init(void)
 {
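The removed vma_set_policy() forced every caller to open-code the mpol_dup() + error-check + assign sequence; vma_dup_policy() folds that into one call. A plausible implementation in terms of helpers already in this header (a sketch, not the patch's actual mm/mempolicy.c body):

    /* Sketch of what vma_dup_policy() plausibly does. */
    int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
    {
            struct mempolicy *pol = mpol_dup(vma_policy(src));

            if (IS_ERR(pol))
                    return PTR_ERR(pol);
            dst->vm_policy = pol;
            return 0;
    }

which also explains the !NUMA stub above: with no policies to duplicate, returning 0 is always correct.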
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index a405d3dc0f61..6fe521420631 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -41,8 +41,6 @@ extern int migrate_page(struct address_space *,
                         struct page *, struct page *, enum migrate_mode);
 extern int migrate_pages(struct list_head *l, new_page_t x,
                 unsigned long private, enum migrate_mode mode, int reason);
-extern int migrate_huge_page(struct page *, new_page_t x,
-                unsigned long private, enum migrate_mode mode);
 
 extern int fail_migrate_page(struct address_space *,
                         struct page *, struct page *);
@@ -62,9 +60,6 @@ static inline void putback_movable_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
                 unsigned long private, enum migrate_mode mode, int reason)
         { return -ENOSYS; }
-static inline int migrate_huge_page(struct page *page, new_page_t x,
-                unsigned long private, enum migrate_mode mode)
-        { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d2d59b4149d0..caf543c7eaa7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -115,6 +115,12 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_ARCH_1       0x01000000      /* Architecture-specific flag */
 #define VM_DONTDUMP     0x04000000      /* Do not include in the core dump */
 
+#ifdef CONFIG_MEM_SOFT_DIRTY
+# define VM_SOFTDIRTY   0x08000000      /* Not soft dirty clean area */
+#else
+# define VM_SOFTDIRTY   0
+#endif
+
 #define VM_MIXEDMAP     0x10000000      /* Can contain "struct page" and pure PFN pages */
 #define VM_HUGEPAGE     0x20000000      /* MADV_HUGEPAGE marked this vma */
 #define VM_NOHUGEPAGE   0x40000000      /* MADV_NOHUGEPAGE marked this vma */
@@ -489,20 +495,6 @@ static inline int compound_order(struct page *page)
         return (unsigned long)page[1].lru.prev;
 }
 
-static inline int compound_trans_order(struct page *page)
-{
-        int order;
-        unsigned long flags;
-
-        if (!PageHead(page))
-                return 0;
-
-        flags = compound_lock_irqsave(page);
-        order = compound_order(page);
-        compound_unlock_irqrestore(page, flags);
-        return order;
-}
-
 static inline void set_compound_order(struct page *page, unsigned long order)
 {
         page[1].lru.prev = (void *)order;
@@ -637,12 +629,12 @@ static inline enum zone_type page_zonenum(const struct page *page)
 #endif
 
 /*
- * The identification function is only used by the buddy allocator for
- * determining if two pages could be buddies. We are not really
- * identifying a zone since we could be using a the section number
- * id if we have not node id available in page flags.
- * We guarantee only that it will return the same value for two
- * combinable pages in a zone.
+ * The identification function is mainly used by the buddy allocator for
+ * determining if two pages could be buddies. We are not really identifying
+ * the zone since we could be using the section number id if we do not have
+ * node id available in page flags.
+ * We only guarantee that it will return the same value for two combinable
+ * pages in a zone.
  */
 static inline int page_zone_id(struct page *page)
 {
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 1397ccf81e91..cf55945c83fb 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -2,6 +2,7 @@
 #define LINUX_MM_INLINE_H
 
 #include <linux/huge_mm.h>
+#include <linux/swap.h>
 
 /**
  * page_is_file_cache - should the page be on a file LRU or anon LRU?
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index af4a3b77a8de..bd791e452ad7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -105,6 +105,7 @@ struct zone_padding {
 enum zone_stat_item {
         /* First 128 byte cacheline (assuming 64 bit words) */
         NR_FREE_PAGES,
+        NR_ALLOC_BATCH,
         NR_LRU_BASE,
         NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
         NR_ACTIVE_ANON,         /*  "     "     "   "       "         */
@@ -352,7 +353,6 @@
          * free areas of different sizes
          */
         spinlock_t              lock;
-        int                     all_unreclaimable; /* All pages pinned */
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
         /* Set to true when the PG_migrate_skip bits should be cleared */
         bool                    compact_blockskip_flush;
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index ffc444c38b0a..403940787be1 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -231,6 +231,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
                                 unsigned long index, unsigned long max_scan);
 int radix_tree_preload(gfp_t gfp_mask);
+int radix_tree_maybe_preload(gfp_t gfp_mask);
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *root,
                         unsigned long index, unsigned int tag);
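radix_tree_maybe_preload() is the GFP-aware variant of radix_tree_preload(): it should only populate the per-cpu preload pool when the mask allows sleeping, but still disable preemption so callers can unconditionally pair it with radix_tree_preload_end(). A sketch of the likely shape, assuming the 2013-era __GFP_WAIT flag:

    /* Sketch: preload only if the allocation may sleep. */
    int radix_tree_maybe_preload(gfp_t gfp_mask)
    {
            if (gfp_mask & __GFP_WAIT)
                    return radix_tree_preload(gfp_mask);
            /* Preloading cannot help atomic callers; just match
             * radix_tree_preload_end()'s preempt_enable(). */
            preempt_disable();
            return 0;
    }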
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 69e37c2d1ea5..753207c8ce20 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -25,7 +25,7 @@ extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
 
 extern const struct file_operations ramfs_file_operations;
 extern const struct vm_operations_struct generic_file_vm_ops;
-extern int __init init_rootfs(void);
+extern int __init init_ramfs_fs(void);
 
 int ramfs_fill_super(struct super_block *sb, void *data, int silent);
 
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 0022c1bb1e26..aa870a4ddf54 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -68,6 +68,10 @@ extern struct rb_node *rb_prev(const struct rb_node *);
 extern struct rb_node *rb_first(const struct rb_root *);
 extern struct rb_node *rb_last(const struct rb_root *);
 
+/* Postorder iteration - always visit the parent after its children */
+extern struct rb_node *rb_first_postorder(const struct rb_root *);
+extern struct rb_node *rb_next_postorder(const struct rb_node *);
+
 /* Fast replacement of a single node without remove/rebalance/add/rebalance */
 extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
                             struct rb_root *root);
@@ -81,4 +85,22 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
         *rb_link = node;
 }
 
+/**
+ * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
+ * given type safe against removal of rb_node entry
+ *
+ * @pos:        the 'type *' to use as a loop cursor.
+ * @n:          another 'type *' to use as temporary storage
+ * @root:       'rb_root *' of the rbtree.
+ * @field:      the name of the rb_node field within 'type'.
+ */
+#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
+        for (pos = rb_entry(rb_first_postorder(root), typeof(*pos), field),\
+                n = rb_entry(rb_next_postorder(&pos->field), \
+                        typeof(*pos), field); \
+             &pos->field; \
+             pos = n, \
+                n = rb_entry(rb_next_postorder(&pos->field), \
+                        typeof(*pos), field))
+
 #endif  /* _LINUX_RBTREE_H */
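The canonical use of the postorder iterator is tearing down an entire tree: because children are visited before their parent, freeing the cursor never touches a node that still has to be walked, and no per-node rb_erase() rebalancing is needed. A sketch with an illustrative entry type:

    /* Sketch: free every entry of an rbtree of illustrative 'struct thing'. */
    struct thing {
            struct rb_node node;
            int key;
    };

    static void free_all_things(struct rb_root *root)
    {
            struct thing *pos, *n;

            rbtree_postorder_for_each_entry_safe(pos, n, root, node)
                    kfree(pos);
            *root = RB_ROOT;
    }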
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ce1e1c0aaa33..45f254dddafc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2169,15 +2169,15 @@ static inline bool thread_group_leader(struct task_struct *p)
  * all we care about is that we have a task with the appropriate
  * pid, we don't actually care if we have the right task.
  */
-static inline int has_group_leader_pid(struct task_struct *p)
+static inline bool has_group_leader_pid(struct task_struct *p)
 {
-        return p->pid == p->tgid;
+        return task_pid(p) == p->signal->leader_pid;
 }
 
 static inline
-int same_thread_group(struct task_struct *p1, struct task_struct *p2)
+bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
 {
-        return p1->tgid == p2->tgid;
+        return p1->signal == p2->signal;
 }
 
 static inline struct task_struct *next_thread(const struct task_struct *p)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c181399f2c20..cfb7ca094b38 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -28,6 +28,27 @@ extern unsigned int total_cpus;
 int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
                              int wait);
 
+/*
+ * Call a function on all processors
+ */
+int on_each_cpu(smp_call_func_t func, void *info, int wait);
+
+/*
+ * Call a function on processors specified by mask, which might include
+ * the local one.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+                void *info, bool wait);
+
+/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+                smp_call_func_t func, void *info, bool wait,
+                gfp_t gfp_flags);
+
 #ifdef CONFIG_SMP
 
 #include <linux/preempt.h>
@@ -95,27 +116,6 @@ static inline void call_function_init(void) { }
 #endif
 
 /*
- * Call a function on all processors
- */
-int on_each_cpu(smp_call_func_t func, void *info, int wait);
-
-/*
- * Call a function on processors specified by mask, which might include
- * the local one.
- */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
-                void *info, bool wait);
-
-/*
- * Call a function on each processor for which the supplied function
- * cond_func returns a positive value. This may include the local
- * processor.
- */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-                smp_call_func_t func, void *info, bool wait,
-                gfp_t gfp_flags);
-
-/*
  * Mark the boot cpu "online" so that it can call console drivers in
  * printk() and can access its per-cpu storage.
  */
@@ -139,43 +139,6 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
 }
 #define smp_call_function(func, info, wait) \
                         (up_smp_call_function(func, info))
-#define on_each_cpu(func, info, wait)           \
-        ({                                      \
-                unsigned long __flags;          \
-                local_irq_save(__flags);        \
-                func(info);                     \
-                local_irq_restore(__flags);     \
-                0;                              \
-        })
-/*
- * Note we still need to test the mask even for UP
- * because we actually can get an empty mask from
- * code that on SMP might call us without the local
- * CPU in the mask.
- */
-#define on_each_cpu_mask(mask, func, info, wait) \
-        do {                                            \
-                if (cpumask_test_cpu(0, (mask))) {      \
-                        local_irq_disable();            \
-                        (func)(info);                   \
-                        local_irq_enable();             \
-                }                                       \
-        } while (0)
-/*
- * Preemption is disabled here to make sure the cond_func is called under the
- * same condtions in UP and SMP.
- */
-#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\
-        do {                                            \
-                void *__info = (info);                  \
-                preempt_disable();                      \
-                if ((cond_func)(0, __info)) {           \
-                        local_irq_disable();            \
-                        (func)(__info);                 \
-                        local_irq_enable();             \
-                }                                       \
-                preempt_enable();                       \
-        } while (0)
 
 static inline void smp_send_reschedule(int cpu) { }
 #define smp_prepare_boot_cpu()                  do {} while (0)
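With the declarations hoisted out of #ifdef CONFIG_SMP and the UP #defines replaced by real out-of-line functions, callers see one calling convention on both configurations. A hedged usage sketch of on_each_cpu_cond(); the per-cpu counter and flush helper are illustrative:

    /* Sketch: run work only on CPUs that report something pending.
     * pending_count and do_local_flush() are illustrative. */
    static DEFINE_PER_CPU(unsigned int, pending_count);

    static bool cpu_has_pending(int cpu, void *info)
    {
            return per_cpu(pending_count, cpu) != 0;
    }

    static void do_local_flush(void *info)
    {
            this_cpu_write(pending_count, 0);
    }

    static void flush_where_needed(void)
    {
            /* wait=true: return only after every selected CPU ran func */
            on_each_cpu_cond(cpu_has_pending, do_local_flush, NULL,
                             true, GFP_KERNEL);
    }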
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d95cde5e257d..c03c139219c9 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -182,6 +182,33 @@ enum {
 #define SWAP_MAP_SHMEM  0xbf    /* Owned by shmem/tmpfs, in first swap_map */
 
 /*
+ * We use this to track usage of a cluster. A cluster is a block of swap disk
+ * space with SWAPFILE_CLUSTER pages long and naturally aligns in disk. All
+ * free clusters are organized into a list. We fetch an entry from the list to
+ * get a free cluster.
+ *
+ * The data field stores next cluster if the cluster is free or cluster usage
+ * counter otherwise. The flags field determines if a cluster is free. This is
+ * protected by swap_info_struct.lock.
+ */
+struct swap_cluster_info {
+        unsigned int data:24;
+        unsigned int flags:8;
+};
+#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
+#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
+
+/*
+ * We assign a cluster to each CPU, so each CPU can allocate swap entry from
+ * its own cluster and swapout sequentially. The purpose is to optimize swapout
+ * throughput.
+ */
+struct percpu_cluster {
+        struct swap_cluster_info index; /* Current cluster index */
+        unsigned int next; /* Likely next allocation offset */
+};
+
+/*
  * The in-memory structure used to track swap areas.
  */
 struct swap_info_struct {
@@ -191,14 +218,16 @@ struct swap_info_struct {
         signed char     next;           /* next type on the swap list */
         unsigned int    max;            /* extent of the swap_map */
         unsigned char *swap_map;        /* vmalloc'ed array of usage counts */
+        struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
+        struct swap_cluster_info free_cluster_head; /* free cluster list head */
+        struct swap_cluster_info free_cluster_tail; /* free cluster list tail */
         unsigned int lowest_bit;        /* index of first free in swap_map */
         unsigned int highest_bit;       /* index of last free in swap_map */
         unsigned int pages;             /* total of usable pages of swap */
         unsigned int inuse_pages;       /* number of those currently in use */
         unsigned int cluster_next;      /* likely index for next allocation */
         unsigned int cluster_nr;        /* countdown to next cluster search */
-        unsigned int lowest_alloc;      /* while preparing discard cluster */
-        unsigned int highest_alloc;     /* while preparing discard cluster */
+        struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
         struct swap_extent *curr_swap_extent;
         struct swap_extent first_swap_extent;
         struct block_device *bdev;      /* swap device or bdev of swap file */
@@ -212,14 +241,18 @@ struct swap_info_struct {
                                          * protect map scan related fields like
                                          * swap_map, lowest_bit, highest_bit,
                                          * inuse_pages, cluster_next,
-                                         * cluster_nr, lowest_alloc and
-                                         * highest_alloc. other fields are only
-                                         * changed at swapon/swapoff, so are
-                                         * protected by swap_lock. changing
-                                         * flags need hold this lock and
-                                         * swap_lock. If both locks need hold,
-                                         * hold swap_lock first.
+                                         * cluster_nr, lowest_alloc,
+                                         * highest_alloc, free/discard cluster
+                                         * list. other fields are only changed
+                                         * at swapon/swapoff, so are protected
+                                         * by swap_lock. changing flags need
+                                         * hold this lock and swap_lock. If
+                                         * both locks need hold, hold swap_lock
+                                         * first.
                                          */
+        struct work_struct discard_work; /* discard worker */
+        struct swap_cluster_info discard_cluster_head; /* list head of discard clusters */
+        struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
 };
 
 struct swap_list_t {
@@ -414,6 +447,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
 
 #else /* CONFIG_SWAP */
 
+#define swap_address_space(entry)               (NULL)
 #define get_nr_swap_pages()                     0L
 #define total_swap_pages                        0L
 #define total_swapcache_pages()                 0UL
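The 24/8 split keeps struct swap_cluster_info to a single 32-bit word, so the cluster array costs 4 bytes per SWAPFILE_CLUSTER pages of swap. The data field is overloaded according to flags, roughly as these illustrative accessors show:

    /* Sketch: 'data' is a next-free-cluster index for free clusters and a
     * usage counter for in-use ones, as the comment above describes. */
    static unsigned int cluster_next_free(struct swap_cluster_info *ci)
    {
            return (ci->flags & CLUSTER_FLAG_FREE) ? ci->data : 0;
    }

    static unsigned int cluster_usage(struct swap_cluster_info *ci)
    {
            return (ci->flags & CLUSTER_FLAG_FREE) ? 0 : ci->data;
    }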
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 84662ecc7b51..7fac04e7ff6e 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -186,6 +186,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 #define __SYSCALL_DEFINEx(x, name, ...)                                 \
         asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));      \
         static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+        asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));     \
         asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))      \
         {                                                               \
                 long ret = SYSC##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
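The one added line forward-declares SyS##name immediately before its definition, which is enough to silence -Wmissing-prototypes builds (the compat.h hunk above does the same for compat_SyS##name). For a concrete two-argument syscall, SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) now expands to roughly:

    asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd);
    static inline long SYSC_dup2(unsigned int oldfd, unsigned int newfd);
    asmlinkage long SyS_dup2(long oldfd, long newfd);   /* the new line */
    asmlinkage long SyS_dup2(long oldfd, long newfd)
    {
            long ret = SYSC_dup2((unsigned int)oldfd, (unsigned int)newfd);
            /* ... argument checks elided ... */
            return ret;
    }
    static inline long SYSC_dup2(unsigned int oldfd, unsigned int newfd)
    {
            /* syscall body */
    }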
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index bd6cf61142be..1855f0a22add 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -70,6 +70,12 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                 THP_ZERO_PAGE_ALLOC,
                 THP_ZERO_PAGE_ALLOC_FAILED,
 #endif
+#ifdef CONFIG_SMP
+                NR_TLB_REMOTE_FLUSH,    /* cpu tried to flush others' tlbs */
+                NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
+#endif
+                NR_TLB_LOCAL_FLUSH_ALL,
+                NR_TLB_LOCAL_FLUSH_ONE,
                 NR_VM_EVENT_ITEMS
 };
 
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index c586679b6fef..e4b948080d20 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -143,7 +143,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
 }
 
 extern unsigned long global_reclaimable_pages(void);
-extern unsigned long zone_reclaimable_pages(struct zone *zone);
 
 #ifdef CONFIG_NUMA
 /*
@@ -198,7 +197,7 @@ extern void __inc_zone_state(struct zone *, enum zone_stat_item);
 extern void dec_zone_state(struct zone *, enum zone_stat_item);
 extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 
-void refresh_cpu_vm_stats(int);
+void cpu_vm_stats_fold(int cpu);
 void refresh_zone_stat_thresholds(void);
 
 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
@@ -255,6 +254,7 @@ static inline void __dec_zone_page_state(struct page *page,
 
 static inline void refresh_cpu_vm_stats(int cpu) { }
 static inline void refresh_zone_stat_thresholds(void) { }
+static inline void cpu_vm_stats_fold(int cpu) { }
 
 static inline void drain_zonestat(struct zone *zone,
                         struct per_cpu_pageset *pset) { }
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 4e198ca1f685..021b8a319b9e 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -98,8 +98,6 @@ int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
 int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
                                   enum wb_reason reason);
 void sync_inodes_sb(struct super_block *);
-long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
-                                enum wb_reason reason);
 void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
 void inode_wait_for_writeback(struct inode *inode);
 
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 6bc943ecb841..d0c613476620 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -268,11 +268,13 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 
         TP_PROTO(struct page *page,
                 int alloc_order, int fallback_order,
-                int alloc_migratetype, int fallback_migratetype),
+                int alloc_migratetype, int fallback_migratetype,
+                int change_ownership),
 
         TP_ARGS(page,
                 alloc_order, fallback_order,
-                alloc_migratetype, fallback_migratetype),
+                alloc_migratetype, fallback_migratetype,
+                change_ownership),
 
         TP_STRUCT__entry(
                 __field(        struct page *,  page                    )
@@ -280,6 +282,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
                 __field(        int,            fallback_order          )
                 __field(        int,            alloc_migratetype       )
                 __field(        int,            fallback_migratetype    )
+                __field(        int,            change_ownership        )
         ),
 
         TP_fast_assign(
@@ -288,6 +291,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
                 __entry->fallback_order         = fallback_order;
                 __entry->alloc_migratetype      = alloc_migratetype;
                 __entry->fallback_migratetype   = fallback_migratetype;
+                __entry->change_ownership       = change_ownership;
         ),
 
         TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
@@ -299,7 +303,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
                 __entry->alloc_migratetype,
                 __entry->fallback_migratetype,
                 __entry->fallback_order < pageblock_order,
-                __entry->alloc_migratetype == __entry->fallback_migratetype)
+                __entry->change_ownership)
 );
 
 #endif /* _TRACE_KMEM_H */
