 MAINTAINERS                      |  7
 arch/mn10300/Kconfig             |  1
 drivers/rtc/rtc-da9055.c         |  2
 drivers/video/ssd1307fb.c        |  4
 fs/exec.c                        |  3
 include/linux/audit.h            |  4
 include/linux/cpu_rmap.h         | 13
 include/linux/interrupt.h        |  5
 include/linux/lockdep.h          |  3
 include/linux/rbtree_augmented.h | 14
 include/linux/rwsem.h            |  9
 include/uapi/linux/audit.h       |  2
 kernel/audit.c                   | 40
 kernel/audit_tree.c              | 26
 kernel/audit_watch.c             |  2
 kernel/auditsc.c                 | 20
 kernel/rwsem.c                   | 10
 lib/cpu_rmap.c                   | 54
 lib/rbtree.c                     | 20
 mm/bootmem.c                     | 24
 mm/compaction.c                  |  6
 mm/huge_memory.c                 | 15
 mm/memblock.c                    |  3
 mm/migrate.c                     | 14
 mm/mmap.c                        |  2
 mm/page_alloc.c                  |  2
 26 files changed, 227 insertions(+), 78 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 3ab0949599cd..51ff2aea8a32 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -648,7 +648,7 @@ F: arch/arm/
 
 ARM SUB-ARCHITECTURES
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: MAINTAINED
+S: Maintained
 F: arch/arm/mach-*/
 F: arch/arm/plat-*/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
@@ -5507,8 +5507,7 @@ M: Benoît Cousson <b-cousson@ti.com>
 M: Paul Walmsley <paul@pwsan.com>
 L: linux-omap@vger.kernel.org
 S: Maintained
-F: arch/arm/mach-omap2/omap_hwmod.c
-F: arch/arm/plat-omap/include/plat/omap_hwmod.h
+F: arch/arm/mach-omap2/omap_hwmod.*
 
 OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
 M: Benoît Cousson <b-cousson@ti.com>
@@ -7334,7 +7333,7 @@ S: Odd Fixes
 F: drivers/staging/speakup/
 
 STAGING - TI DSP BRIDGE DRIVERS
-M: Omar Ramirez Luna <omar.ramirez@ti.com>
+M: Omar Ramirez Luna <omar.ramirez@copitl.com>
 S: Odd Fixes
 F: drivers/staging/tidspbridge/
 
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index aa03f2e13385..e70001cfa05b 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -6,6 +6,7 @@ config MN10300
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_KGDB
+	select GENERIC_ATOMIC64
 	select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
 	select GENERIC_CLOCKEVENTS
 	select MODULES_USE_ELF_RELA
diff --git a/drivers/rtc/rtc-da9055.c b/drivers/rtc/rtc-da9055.c
index 96bafc5c3bf8..8f0dcfedb83c 100644
--- a/drivers/rtc/rtc-da9055.c
+++ b/drivers/rtc/rtc-da9055.c
@@ -227,7 +227,7 @@ static const struct rtc_class_ops da9055_rtc_ops = {
 	.alarm_irq_enable = da9055_rtc_alarm_irq_enable,
 };
 
-static int __init da9055_rtc_device_init(struct da9055 *da9055,
+static int da9055_rtc_device_init(struct da9055 *da9055,
 					struct da9055_pdata *pdata)
 {
 	int ret;
diff --git a/drivers/video/ssd1307fb.c b/drivers/video/ssd1307fb.c
index 4d99dd7a6831..395cb6a8d8f3 100644
--- a/drivers/video/ssd1307fb.c
+++ b/drivers/video/ssd1307fb.c
@@ -145,8 +145,8 @@ static void ssd1307fb_update_display(struct ssd1307fb_par *par)
 				u32 page_length = SSD1307FB_WIDTH * i;
 				u32 index = page_length + (SSD1307FB_WIDTH * k + j) / 8;
 				u8 byte = *(vmem + index);
-				u8 bit = byte & (1 << (7 - (j % 8)));
-				bit = bit >> (7 - (j % 8));
+				u8 bit = byte & (1 << (j % 8));
+				bit = bit >> (j % 8);
 				buf |= bit << k;
 			}
 			ssd1307fb_write_data(par->client, buf);
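
For illustration only, not part of the patch above: the two expressions select different bits from each packed byte. The old code read pixel j MSB-first within a byte, the new code reads it LSB-first. The standalone user-space sketch below (made-up byte value) simply prints both interpretations so the difference is visible.

    #include <stdio.h>
    #include <stdint.h>

    /* old indexing: bit (7 - (j % 8)), i.e. MSB-first within the byte */
    static int pixel_msb_first(uint8_t byte, int j)
    {
            return (byte >> (7 - (j % 8))) & 1;
    }

    /* new indexing: bit (j % 8), i.e. LSB-first within the byte */
    static int pixel_lsb_first(uint8_t byte, int j)
    {
            return (byte >> (j % 8)) & 1;
    }

    int main(void)
    {
            uint8_t byte = 0x01;    /* only bit 0 set */
            int j;

            for (j = 0; j < 8; j++)
                    printf("j=%d old=%d new=%d\n", j,
                           pixel_msb_first(byte, j), pixel_lsb_first(byte, j));
            return 0;
    }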
diff --git a/fs/exec.c b/fs/exec.c
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -434,8 +434,9 @@ static int count(struct user_arg_ptr argv, int max)
 		if (IS_ERR(p))
 			return -EFAULT;
 
-		if (i++ >= max)
+		if (i >= max)
 			return -E2BIG;
+		++i;
 
 		if (fatal_signal_pending(current))
 			return -ERESTARTNOHAND;
diff --git a/include/linux/audit.h b/include/linux/audit.h
index bce729afbcf9..5a6d718adf34 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -24,6 +24,7 @@
 #define _LINUX_AUDIT_H_
 
 #include <linux/sched.h>
+#include <linux/ptrace.h>
 #include <uapi/linux/audit.h>
 
 struct audit_sig_info {
@@ -157,7 +158,8 @@ void audit_core_dumps(long signr);
 
 static inline void audit_seccomp(unsigned long syscall, long signr, int code)
 {
-	if (unlikely(!audit_dummy_context()))
+	/* Force a record to be reported if a signal was delivered. */
+	if (signr || unlikely(!audit_dummy_context()))
 		__audit_seccomp(syscall, signr, code);
 }
 
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
index ac3bbb5b9502..1739510d8994 100644
--- a/include/linux/cpu_rmap.h
+++ b/include/linux/cpu_rmap.h
@@ -13,9 +13,11 @@
 #include <linux/cpumask.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/kref.h>
 
 /**
  * struct cpu_rmap - CPU affinity reverse-map
+ * @refcount: kref for object
  * @size: Number of objects to be reverse-mapped
  * @used: Number of objects added
  * @obj: Pointer to array of object pointers
@@ -23,6 +25,7 @@
  * based on affinity masks
  */
 struct cpu_rmap {
+	struct kref refcount;
 	u16 size, used;
 	void **obj;
 	struct {
@@ -33,15 +36,7 @@ struct cpu_rmap {
 #define CPU_RMAP_DIST_INF 0xffff
 
 extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
-
-/**
- * free_cpu_rmap - free CPU affinity reverse-map
- * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL
- */
-static inline void free_cpu_rmap(struct cpu_rmap *rmap)
-{
-	kfree(rmap);
-}
+extern int cpu_rmap_put(struct cpu_rmap *rmap);
 
 extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
 extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5e4e6170f43a..5fa5afeeb759 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -268,11 +268,6 @@ struct irq_affinity_notify {
 extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
 
-static inline void irq_run_affinity_notifiers(void)
-{
-	flush_scheduled_work();
-}
-
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 00e46376e28f..2bca44b0893c 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -524,14 +524,17 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
 # define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
+# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
 # define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i)
 # else
 # define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
+# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
 # define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i)
 # endif
 # define rwsem_release(l, n, i) lock_release(l, n, i)
 #else
 # define rwsem_acquire(l, s, t, i) do { } while (0)
+# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0)
 # define rwsem_acquire_read(l, s, t, i) do { } while (0)
 # define rwsem_release(l, n, i) do { } while (0)
 #endif
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 2ac60c9cf644..fea49b5da12a 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -123,9 +123,9 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
 extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
 	void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
 
-static __always_inline void
-rb_erase_augmented(struct rb_node *node, struct rb_root *root,
-		   const struct rb_augment_callbacks *augment)
+static __always_inline struct rb_node *
+__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+		     const struct rb_augment_callbacks *augment)
 {
 	struct rb_node *child = node->rb_right, *tmp = node->rb_left;
 	struct rb_node *parent, *rebalance;
@@ -217,6 +217,14 @@ rb_erase_augmented(struct rb_node *node, struct rb_root *root,
 	}
 
 	augment->propagate(tmp, NULL);
+	return rebalance;
+}
+
+static __always_inline void
+rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+		   const struct rb_augment_callbacks *augment)
+{
+	struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
 	if (rebalance)
 		__rb_erase_color(rebalance, root, augment->rotate);
 }
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 54bd7cd7ecbd..413cc11e414a 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -125,8 +125,17 @@ extern void downgrade_write(struct rw_semaphore *sem);
  */
 extern void down_read_nested(struct rw_semaphore *sem, int subclass);
 extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
+
+# define down_write_nest_lock(sem, nest_lock) \
+do { \
+	typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+	_down_write_nest_lock(sem, &(nest_lock)->dep_map); \
+} while (0);
+
 #else
 # define down_read_nested(sem, subclass) down_read(sem)
+# define down_write_nest_lock(sem, nest_lock) down_read(sem)
 # define down_write_nested(sem, subclass) down_write(sem)
 #endif
 
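
For illustration only, not part of the patch: the new down_write_nest_lock() macro leans on the kernel's typecheck() helper so that nest_lock must really be a lock carrying a struct lockdep_map dep_map member. A minimal user-space rendition of that compile-time check follows (GNU C statement expressions and typeof, as in include/linux/typecheck.h; the struct names are stand-ins, not kernel types).

    #include <stdio.h>

    #define typecheck(type, x) \
    ({      type __dummy; \
            typeof(x) __dummy2; \
            (void)(&__dummy == &__dummy2); \
            1; \
    })

    struct lockdep_map { int dummy; };
    struct fake_rwsem { struct lockdep_map dep_map; };

    /* mirrors the typecheck() line inside down_write_nest_lock() */
    #define nest_lock_typecheck(nest_lock) \
            typecheck(struct lockdep_map *, &(nest_lock)->dep_map)

    int main(void)
    {
            struct fake_rwsem outer_lock;

            /*
             * Compiles cleanly; a nest_lock whose dep_map is not a
             * struct lockdep_map would trigger a distinct-pointer-types
             * warning at build time instead of failing at run time.
             */
            printf("typecheck ok: %d\n", nest_lock_typecheck(&outer_lock));
            return 0;
    }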
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 76352ac45f24..9f096f1c0907 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -26,7 +26,6 @@
 
 #include <linux/types.h>
 #include <linux/elf-em.h>
-#include <linux/ptrace.h>
 
 /* The netlink messages for the audit system is divided into blocks:
  * 1000 - 1099 are for commanding the audit system
@@ -106,6 +105,7 @@
 #define AUDIT_MMAP		1323	/* Record showing descriptor and flags in mmap */
 #define AUDIT_NETFILTER_PKT	1324	/* Packets traversing netfilter chains */
 #define AUDIT_NETFILTER_CFG	1325	/* Netfilter chain modifications */
+#define AUDIT_SECCOMP		1326	/* Secure Computing event */
 
 #define AUDIT_AVC		1400	/* SE Linux avc denial or grant */
 #define AUDIT_SELINUX_ERR	1401	/* Internal SE Linux Errors */
diff --git a/kernel/audit.c b/kernel/audit.c
index 40414e9143db..d596e5355f15 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -272,6 +272,8 @@ static int audit_log_config_change(char *function_name, int new, int old,
 	int rc = 0;
 
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+	if (unlikely(!ab))
+		return rc;
 	audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new,
 			 old, from_kuid(&init_user_ns, loginuid), sessionid);
 	if (sid) {
@@ -619,6 +621,8 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
 	}
 
 	*ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
+	if (unlikely(!*ab))
+		return rc;
 	audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u",
 			 task_tgid_vnr(current),
 			 from_kuid(&init_user_ns, current_uid()),
@@ -1097,6 +1101,23 @@ static inline void audit_get_stamp(struct audit_context *ctx,
 	}
 }
 
+/*
+ * Wait for auditd to drain the queue a little
+ */
+static void wait_for_auditd(unsigned long sleep_time)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	set_current_state(TASK_INTERRUPTIBLE);
+	add_wait_queue(&audit_backlog_wait, &wait);
+
+	if (audit_backlog_limit &&
+	    skb_queue_len(&audit_skb_queue) > audit_backlog_limit)
+		schedule_timeout(sleep_time);
+
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&audit_backlog_wait, &wait);
+}
+
 /* Obtain an audit buffer. This routine does locking to obtain the
  * audit buffer, but then no locking is required for calls to
  * audit_log_*format. If the tsk is a task that is currently in a
@@ -1142,20 +1163,13 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 
 	while (audit_backlog_limit
 	       && skb_queue_len(&audit_skb_queue) > audit_backlog_limit + reserve) {
-		if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time
-		    && time_before(jiffies, timeout_start + audit_backlog_wait_time)) {
+		if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time) {
+			unsigned long sleep_time;
 
-			/* Wait for auditd to drain the queue a little */
-			DECLARE_WAITQUEUE(wait, current);
-			set_current_state(TASK_INTERRUPTIBLE);
-			add_wait_queue(&audit_backlog_wait, &wait);
-
-			if (audit_backlog_limit &&
-			    skb_queue_len(&audit_skb_queue) > audit_backlog_limit)
-				schedule_timeout(timeout_start + audit_backlog_wait_time - jiffies);
-
-			__set_current_state(TASK_RUNNING);
-			remove_wait_queue(&audit_backlog_wait, &wait);
+			sleep_time = timeout_start + audit_backlog_wait_time -
+					jiffies;
+			if ((long)sleep_time > 0)
+				wait_for_auditd(sleep_time);
 			continue;
 		}
 		if (audit_rate_check() && printk_ratelimit())
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index e81175ef25f8..642a89c4f3d6 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -449,11 +449,26 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	return 0;
 }
 
+static void audit_log_remove_rule(struct audit_krule *rule)
+{
+	struct audit_buffer *ab;
+
+	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+	if (unlikely(!ab))
+		return;
+	audit_log_format(ab, "op=");
+	audit_log_string(ab, "remove rule");
+	audit_log_format(ab, " dir=");
+	audit_log_untrustedstring(ab, rule->tree->pathname);
+	audit_log_key(ab, rule->filterkey);
+	audit_log_format(ab, " list=%d res=1", rule->listnr);
+	audit_log_end(ab);
+}
+
 static void kill_rules(struct audit_tree *tree)
 {
 	struct audit_krule *rule, *next;
 	struct audit_entry *entry;
-	struct audit_buffer *ab;
 
 	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
 		entry = container_of(rule, struct audit_entry, rule);
@@ -461,14 +476,7 @@ static void kill_rules(struct audit_tree *tree)
 		list_del_init(&rule->rlist);
 		if (rule->tree) {
 			/* not a half-baked one */
-			ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
-			audit_log_format(ab, "op=");
-			audit_log_string(ab, "remove rule");
-			audit_log_format(ab, " dir=");
-			audit_log_untrustedstring(ab, rule->tree->pathname);
-			audit_log_key(ab, rule->filterkey);
-			audit_log_format(ab, " list=%d res=1", rule->listnr);
-			audit_log_end(ab);
+			audit_log_remove_rule(rule);
 			rule->tree = NULL;
 			list_del_rcu(&entry->list);
 			list_del(&entry->rule.list);
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 4a599f699adc..22831c4d369c 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -240,6 +240,8 @@ static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watc
 	if (audit_enabled) {
 		struct audit_buffer *ab;
 		ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
+		if (unlikely(!ab))
+			return;
 		audit_log_format(ab, "auid=%u ses=%u op=",
 				 from_kuid(&init_user_ns, audit_get_loginuid(current)),
 				 audit_get_sessionid(current));
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index e37e6a12c5e3..a371f857a0a9 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1464,14 +1464,14 @@ static void show_special(struct audit_context *context, int *call_panic)
 			audit_log_end(ab);
 			ab = audit_log_start(context, GFP_KERNEL,
 					     AUDIT_IPC_SET_PERM);
+			if (unlikely(!ab))
+				return;
 			audit_log_format(ab,
 				"qbytes=%lx ouid=%u ogid=%u mode=%#ho",
 				context->ipc.qbytes,
 				context->ipc.perm_uid,
 				context->ipc.perm_gid,
 				context->ipc.perm_mode);
-			if (!ab)
-				return;
 		}
 		break; }
 	case AUDIT_MQ_OPEN: {
@@ -2675,7 +2675,7 @@ void __audit_mmap_fd(int fd, int flags)
 	context->type = AUDIT_MMAP;
 }
 
-static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
+static void audit_log_task(struct audit_buffer *ab)
 {
 	kuid_t auid, uid;
 	kgid_t gid;
@@ -2693,6 +2693,11 @@ static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
 	audit_log_task_context(ab);
 	audit_log_format(ab, " pid=%d comm=", current->pid);
 	audit_log_untrustedstring(ab, current->comm);
+}
+
+static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
+{
+	audit_log_task(ab);
 	audit_log_format(ab, " reason=");
 	audit_log_string(ab, reason);
 	audit_log_format(ab, " sig=%ld", signr);
@@ -2715,6 +2720,8 @@ void audit_core_dumps(long signr)
 		return;
 
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
+	if (unlikely(!ab))
+		return;
 	audit_log_abend(ab, "memory violation", signr);
 	audit_log_end(ab);
 }
@@ -2723,8 +2730,11 @@ void __audit_seccomp(unsigned long syscall, long signr, int code)
 {
 	struct audit_buffer *ab;
 
-	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
-	audit_log_abend(ab, "seccomp", signr);
+	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_SECCOMP);
+	if (unlikely(!ab))
+		return;
+	audit_log_task(ab);
+	audit_log_format(ab, " sig=%ld", signr);
 	audit_log_format(ab, " syscall=%ld", syscall);
 	audit_log_format(ab, " compat=%d", is_compat_task());
 	audit_log_format(ab, " ip=0x%lx", KSTK_EIP(current));
diff --git a/kernel/rwsem.c b/kernel/rwsem.c
index 6850f53e02d8..b3c6c3fcd847 100644
--- a/kernel/rwsem.c
+++ b/kernel/rwsem.c
@@ -116,6 +116,16 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
 
 EXPORT_SYMBOL(down_read_nested);
 
+void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
+{
+	might_sleep();
+	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
+
+	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
+}
+
+EXPORT_SYMBOL(_down_write_nest_lock);
+
 void down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	might_sleep();
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index 145dec5267c9..5fbed5caba6e 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -45,6 +45,7 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
 	if (!rmap)
 		return NULL;
 
+	kref_init(&rmap->refcount);
 	rmap->obj = (void **)((char *)rmap + obj_offset);
 
 	/* Initially assign CPUs to objects on a rota, since we have
@@ -63,6 +64,35 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
 }
 EXPORT_SYMBOL(alloc_cpu_rmap);
 
+/**
+ * cpu_rmap_release - internal reclaiming helper called from kref_put
+ * @ref: kref to struct cpu_rmap
+ */
+static void cpu_rmap_release(struct kref *ref)
+{
+	struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount);
+	kfree(rmap);
+}
+
+/**
+ * cpu_rmap_get - internal helper to get new ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+static inline void cpu_rmap_get(struct cpu_rmap *rmap)
+{
+	kref_get(&rmap->refcount);
+}
+
+/**
+ * cpu_rmap_put - release ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+int cpu_rmap_put(struct cpu_rmap *rmap)
+{
+	return kref_put(&rmap->refcount, cpu_rmap_release);
+}
+EXPORT_SYMBOL(cpu_rmap_put);
+
 /* Reevaluate nearest object for given CPU, comparing with the given
  * neighbours at the given distance.
  */
@@ -197,8 +227,7 @@ struct irq_glue {
 * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
 * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL
 *
- * Must be called in process context, before freeing the IRQs, and
- * without holding any locks required by global workqueue items.
+ * Must be called in process context, before freeing the IRQs.
 */
 void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 {
@@ -212,12 +241,18 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 		glue = rmap->obj[index];
 		irq_set_affinity_notifier(glue->notify.irq, NULL);
 	}
-	irq_run_affinity_notifiers();
 
-	kfree(rmap);
+	cpu_rmap_put(rmap);
 }
 EXPORT_SYMBOL(free_irq_cpu_rmap);
 
+/**
+ * irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated
+ * @notify: struct irq_affinity_notify passed by irq/manage.c
+ * @mask: cpu mask for new SMP affinity
+ *
+ * This is executed in workqueue context.
+ */
 static void
 irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
 {
@@ -230,10 +265,16 @@ irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
 		pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
 }
 
+/**
+ * irq_cpu_rmap_release - reclaiming callback for IRQ subsystem
+ * @ref: kref to struct irq_affinity_notify passed by irq/manage.c
+ */
 static void irq_cpu_rmap_release(struct kref *ref)
 {
 	struct irq_glue *glue =
 		container_of(ref, struct irq_glue, notify.kref);
+
+	cpu_rmap_put(glue->rmap);
 	kfree(glue);
 }
 
@@ -258,10 +299,13 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
 	glue->notify.notify = irq_cpu_rmap_notify;
 	glue->notify.release = irq_cpu_rmap_release;
 	glue->rmap = rmap;
+	cpu_rmap_get(rmap);
 	glue->index = cpu_rmap_add(rmap, glue);
 	rc = irq_set_affinity_notifier(irq, &glue->notify);
-	if (rc)
+	if (rc) {
+		cpu_rmap_put(glue->rmap);
 		kfree(glue);
+	}
 	return rc;
 }
 EXPORT_SYMBOL(irq_cpu_rmap_add);
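
For illustration only, not part of the patch: the shape of the lifetime rule the kref conversion gives lib/cpu_rmap.c. Every irq_glue now pins the rmap, so the final kfree() happens in whichever cpu_rmap_put() drops the last reference, and free_irq_cpu_rmap() no longer needs to flush workqueues while waiting for outstanding notifiers. A standalone user-space sketch of the same counting (C11 atomics standing in for kref):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdatomic.h>

    struct fake_rmap {
            atomic_int refcount;
    };

    static struct fake_rmap *rmap_alloc(void)
    {
            struct fake_rmap *r = malloc(sizeof(*r));

            atomic_init(&r->refcount, 1);   /* the allocator's reference */
            return r;
    }

    static void rmap_get(struct fake_rmap *r)
    {
            atomic_fetch_add(&r->refcount, 1);
    }

    static void rmap_put(struct fake_rmap *r)
    {
            if (atomic_fetch_sub(&r->refcount, 1) == 1) {
                    printf("last reference dropped, freeing\n");
                    free(r);
            }
    }

    int main(void)
    {
            struct fake_rmap *r = rmap_alloc();

            rmap_get(r);    /* like irq_cpu_rmap_add(): one ref per glue   */
            rmap_put(r);    /* like free_irq_cpu_rmap(): drop allocator ref */
            rmap_put(r);    /* like irq_cpu_rmap_release(): last ref, free  */
            return 0;
    }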
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 4f56a11d67fa..c0e31fe2fabf 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -194,8 +194,12 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
 	}
 }
 
-__always_inline void
-__rb_erase_color(struct rb_node *parent, struct rb_root *root,
+/*
+ * Inline version for rb_erase() use - we want to be able to inline
+ * and eliminate the dummy_rotate callback there
+ */
+static __always_inline void
+____rb_erase_color(struct rb_node *parent, struct rb_root *root,
 	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
 	struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
@@ -355,6 +359,13 @@ __rb_erase_color(struct rb_node *parent, struct rb_root *root,
 		}
 	}
 }
+
+/* Non-inline version for rb_erase_augmented() use */
+void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+	____rb_erase_color(parent, root, augment_rotate);
+}
 EXPORT_SYMBOL(__rb_erase_color);
 
 /*
@@ -380,7 +391,10 @@ EXPORT_SYMBOL(rb_insert_color);
 
 void rb_erase(struct rb_node *node, struct rb_root *root)
 {
-	rb_erase_augmented(node, root, &dummy_callbacks);
+	struct rb_node *rebalance;
+	rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+	if (rebalance)
+		____rb_erase_color(rebalance, root, dummy_rotate);
 }
 EXPORT_SYMBOL(rb_erase);
 
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 1324cd74faec..b93376c39b61 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -185,10 +185,23 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 
 	while (start < end) {
 		unsigned long *map, idx, vec;
+		unsigned shift;
 
 		map = bdata->node_bootmem_map;
 		idx = start - bdata->node_min_pfn;
+		shift = idx & (BITS_PER_LONG - 1);
+		/*
+		 * vec holds at most BITS_PER_LONG map bits,
+		 * bit 0 corresponds to start.
+		 */
 		vec = ~map[idx / BITS_PER_LONG];
+
+		if (shift) {
+			vec >>= shift;
+			if (end - start >= BITS_PER_LONG)
+				vec |= ~map[idx / BITS_PER_LONG + 1] <<
+					(BITS_PER_LONG - shift);
+		}
 		/*
 		 * If we have a properly aligned and fully unreserved
 		 * BITS_PER_LONG block of pages in front of us, free
@@ -201,19 +214,18 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 			count += BITS_PER_LONG;
 			start += BITS_PER_LONG;
 		} else {
-			unsigned long off = 0;
+			unsigned long cur = start;
 
-			vec >>= start & (BITS_PER_LONG - 1);
-			while (vec) {
+			start = ALIGN(start + 1, BITS_PER_LONG);
+			while (vec && cur != start) {
 				if (vec & 1) {
-					page = pfn_to_page(start + off);
+					page = pfn_to_page(cur);
 					__free_pages_bootmem(page, 0);
 					count++;
 				}
 				vec >>= 1;
-				off++;
+				++cur;
 			}
-			start = ALIGN(start + 1, BITS_PER_LONG);
 		}
 	}
 
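
For illustration only, not part of the patch: when start is not BITS_PER_LONG-aligned, the map bits describing it straddle two bitmap words, and the added shift/OR stitches them together so that bit 0 of vec always corresponds to start. A small user-space rendition with made-up, already-inverted map words:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

    int main(void)
    {
            /* already-inverted map words: a 1 bit means "page is free" */
            unsigned long inv_map[2] = { 0xff00UL, 0x00ffUL };
            unsigned long idx = 4;                  /* start - node_min_pfn */
            unsigned shift = idx & (BITS_PER_LONG - 1);
            unsigned long vec = inv_map[idx / BITS_PER_LONG];

            if (shift) {
                    vec >>= shift;
                    /* pull the missing high bits from the next word */
                    vec |= inv_map[idx / BITS_PER_LONG + 1] <<
                                    (BITS_PER_LONG - shift);
            }
            printf("vec = %#lx, bit 0 now describes pfn 'start'\n", vec);
            return 0;
    }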
diff --git a/mm/compaction.c b/mm/compaction.c
index 2c570432aa56..c62bd063d766 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1144,7 +1144,7 @@ static int compact_node(int nid)
 }
 
 /* Compact all nodes in the system */
-static int compact_nodes(void)
+static void compact_nodes(void)
 {
 	int nid;
 
@@ -1153,8 +1153,6 @@ static int compact_nodes(void)
 
 	for_each_online_node(nid)
 		compact_node(nid);
-
-	return COMPACT_COMPLETE;
 }
 
 /* The written value is actually unused, all memory is compacted */
@@ -1165,7 +1163,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
 	if (write)
-		return compact_nodes();
+		compact_nodes();
 
 	return 0;
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9e894edc7811..6001ee6347a9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1819,9 +1819,19 @@ int split_huge_page(struct page *page)
 
 	BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
 	BUG_ON(!PageAnon(page));
-	anon_vma = page_lock_anon_vma_read(page);
+
+	/*
+	 * The caller does not necessarily hold an mmap_sem that would prevent
+	 * the anon_vma disappearing so we first we take a reference to it
+	 * and then lock the anon_vma for write. This is similar to
+	 * page_lock_anon_vma_read except the write lock is taken to serialise
+	 * against parallel split or collapse operations.
+	 */
+	anon_vma = page_get_anon_vma(page);
 	if (!anon_vma)
 		goto out;
+	anon_vma_lock_write(anon_vma);
+
 	ret = 0;
 	if (!PageCompound(page))
 		goto out_unlock;
@@ -1832,7 +1842,8 @@ int split_huge_page(struct page *page)
 
 	BUG_ON(PageCompound(page));
 out_unlock:
-	page_unlock_anon_vma_read(anon_vma);
+	anon_vma_unlock(anon_vma);
+	put_anon_vma(anon_vma);
 out:
 	return ret;
 }
diff --git a/mm/memblock.c b/mm/memblock.c
index 625905523c2a..88adc8afb610 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -314,7 +314,8 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
 		}
 
 		this->size += next->size;
-		memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
+		/* move forward from next + 1, index of which is i + 2 */
+		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
 		type->cnt--;
 	}
 }
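
For illustration only, not part of the patch: with cnt entries and the entry at index i + 1 being absorbed into index i, the elements that still need to move start at index i + 2, so only cnt - (i + 2) of them remain; the old cnt - (i + 1) count copied one element too many and read past the end of the array. A small stand-alone check:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* sizes of five fake memblock regions */
            unsigned long size[5] = { 10, 20, 30, 40, 50 };
            int cnt = 5, i = 1;

            /* merge region i + 1 into region i, as memblock_merge_regions() does */
            size[i] += size[i + 1];
            memmove(&size[i + 1], &size[i + 2],
                    (cnt - (i + 2)) * sizeof(size[0]));     /* not (i + 1) */
            cnt--;

            for (int k = 0; k < cnt; k++)
                    printf("%lu ", size[k]);
            printf("\n");   /* prints: 10 50 40 50 */
            return 0;
    }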
diff --git a/mm/migrate.c b/mm/migrate.c
index 3b676b0c5c3e..c38778610aa8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1679,9 +1679,21 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	page_xchg_last_nid(new_page, page_last_nid(page));
 
 	isolated = numamigrate_isolate_page(pgdat, page);
-	if (!isolated) {
+
+	/*
+	 * Failing to isolate or a GUP pin prevents migration. The expected
+	 * page count is 2. 1 for anonymous pages without a mapping and 1
+	 * for the callers pin. If the page was isolated, the page will
+	 * need to be put back on the LRU.
+	 */
+	if (!isolated || page_count(page) != 2) {
 		count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
 		put_page(new_page);
+		if (isolated) {
+			putback_lru_page(page);
+			isolated = 0;
+			goto out;
+		}
 		goto out_keep_locked;
 	}
 
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2886,7 +2886,7 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		down_write(&anon_vma->root->rwsem);
+		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
 		/*
 		 * We can safely modify head.next after taking the
 		 * anon_vma->root->rwsem. If some other vma in this mm shares
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ece7b8e8a5b6..df2022ff0c8a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5585,7 +5585,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
 	pfn &= (PAGES_PER_SECTION-1);
 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #else
-	pfn = pfn - zone->zone_start_pfn;
+	pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #endif /* CONFIG_SPARSEMEM */
 }
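
For illustration only, not part of the patch, and with made-up numbers: if zone_start_pfn is not pageblock aligned, subtracting it directly can make pfns from two different pageblocks land on the same bitmap slot. Rounding it down first keeps every pageblock on its own NR_PAGEBLOCK_BITS-sized slot. A stand-alone arithmetic check (pageblock order 9 and 4 bits per pageblock are assumed values, not taken from this patch):

    #include <stdio.h>

    #define PAGEBLOCK_ORDER         9
    #define PAGEBLOCK_NR_PAGES      (1UL << PAGEBLOCK_ORDER)
    #define NR_PAGEBLOCK_BITS       4

    static unsigned long round_down_ul(unsigned long x, unsigned long a)
    {
            return x & ~(a - 1);
    }

    static unsigned long bitidx(unsigned long pfn, unsigned long base)
    {
            return ((pfn - base) >> PAGEBLOCK_ORDER) * NR_PAGEBLOCK_BITS;
    }

    int main(void)
    {
            unsigned long zone_start_pfn = 1000;    /* not pageblock aligned   */
            unsigned long pfn = 1024;               /* start of next pageblock */

            /* prints "old index 0, new index 4": without rounding, pfn 1024
             * shares slot 0 with pfns 1000-1023 from the previous pageblock */
            printf("old index %lu, new index %lu\n",
                   bitidx(pfn, zone_start_pfn),
                   bitidx(pfn, round_down_ul(zone_start_pfn, PAGEBLOCK_NR_PAGES)));
            return 0;
    }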
