Diffstat (limited to 'kernel')
 kernel/events/core.c     |  1
 kernel/events/uprobes.c  |  4
 kernel/extable.c         |  2
 kernel/fork.c            | 33
 kernel/kexec.c           |  5
 kernel/kprobes.c         | 95
 kernel/modsign_pubkey.c  |  6
 kernel/panic.c           |  8
 kernel/power/hibernate.c | 45
 kernel/power/snapshot.c  | 12
 kernel/power/user.c      | 24
 kernel/ptrace.c          |  2
 kernel/sched/fair.c      |  2
 kernel/signal.c          |  4
 kernel/smp.c             | 16
 kernel/spinlock.c        | 14
 kernel/sysctl.c          |  2
 kernel/task_work.c       | 40
 kernel/up.c              | 58
 19 files changed, 230 insertions(+), 143 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2207efc941d1..dd236b66ca3a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5039,6 +5039,7 @@ static void perf_event_mmap_output(struct perf_event *event,
 		mmap_event->event_id.header.size += sizeof(mmap_event->maj);
 		mmap_event->event_id.header.size += sizeof(mmap_event->min);
 		mmap_event->event_id.header.size += sizeof(mmap_event->ino);
+		mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
 	}
 
 	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index f3569747d629..ad8e1bdca70e 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1682,12 +1682,10 @@ static bool handle_trampoline(struct pt_regs *regs)
 			tmp = ri;
 			ri = ri->next;
 			kfree(tmp);
+			utask->depth--;
 
 			if (!chained)
 				break;
-
-			utask->depth--;
-
 			BUG_ON(!ri);
 		}
 
diff --git a/kernel/extable.c b/kernel/extable.c
index 67460b93b1a1..832cb28105bb 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -41,7 +41,7 @@ u32 __initdata main_extable_sort_needed = 1;
 /* Sort the kernel's built-in exception table */
 void __init sort_main_extable(void)
 {
-	if (main_extable_sort_needed) {
+	if (main_extable_sort_needed && __stop___ex_table > __start___ex_table) {
 		pr_notice("Sorting __ex_table...\n");
 		sort_extable(__start___ex_table, __stop___ex_table);
 	}
diff --git a/kernel/fork.c b/kernel/fork.c
index c9eaf2013002..81ccb4f010c2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -351,7 +351,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	struct rb_node **rb_link, *rb_parent;
 	int retval;
 	unsigned long charge;
-	struct mempolicy *pol;
 
 	uprobe_start_dup_mmap();
 	down_write(&oldmm->mmap_sem);
@@ -400,11 +399,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 			goto fail_nomem;
 		*tmp = *mpnt;
 		INIT_LIST_HEAD(&tmp->anon_vma_chain);
-		pol = mpol_dup(vma_policy(mpnt));
-		retval = PTR_ERR(pol);
-		if (IS_ERR(pol))
+		retval = vma_dup_policy(mpnt, tmp);
+		if (retval)
 			goto fail_nomem_policy;
-		vma_set_policy(tmp, pol);
 		tmp->vm_mm = mm;
 		if (anon_vma_fork(tmp, mpnt))
 			goto fail_nomem_anon_vma_fork;
@@ -472,7 +469,7 @@ out:
 	uprobe_end_dup_mmap();
 	return retval;
 fail_nomem_anon_vma_fork:
-	mpol_put(pol);
+	mpol_put(vma_policy(tmp));
 fail_nomem_policy:
 	kmem_cache_free(vm_area_cachep, tmp);
 fail_nomem:
@@ -1173,13 +1170,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		return ERR_PTR(-EINVAL);
 
 	/*
-	 * If the new process will be in a different pid namespace
-	 * don't allow the creation of threads.
+	 * If the new process will be in a different pid or user namespace
+	 * do not allow it to share a thread group or signal handlers or
+	 * parent with the forking task.
 	 */
-	if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
-	    (task_active_pid_ns(current) !=
-	     current->nsproxy->pid_ns_for_children))
-		return ERR_PTR(-EINVAL);
+	if (clone_flags & (CLONE_SIGHAND | CLONE_PARENT)) {
+		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
+		    (task_active_pid_ns(current) !=
+		     current->nsproxy->pid_ns_for_children))
+			return ERR_PTR(-EINVAL);
+	}
 
 	retval = security_task_create(clone_flags);
 	if (retval)
@@ -1576,15 +1576,6 @@ long do_fork(unsigned long clone_flags,
 	long nr;
 
 	/*
-	 * Do some preliminary argument and permissions checking before we
-	 * actually start allocating stuff
-	 */
-	if (clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) {
-		if (clone_flags & (CLONE_THREAD|CLONE_PARENT))
-			return -EINVAL;
-	}
-
-	/*
 	 * Determine whether and which event to report to ptracer. When
 	 * called from kernel_thread or CLONE_UNTRACED is explicitly
 	 * requested, no event is reported; otherwise, report if the event
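
The vma_dup_policy() helper the dup_mmap() hunk switches to folds the old mpol_dup()/IS_ERR()/vma_set_policy() sequence into a single call. A plausible shape for it, as a sketch (the exact mm/mempolicy.c definition introduced by this series may differ in detail):

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	struct mempolicy *pol = mpol_dup(vma_policy(src));	/* may be ERR_PTR */

	if (IS_ERR(pol))
		return PTR_ERR(pol);
	dst->vm_policy = pol;	/* assumes ->vm_policy exists under CONFIG_NUMA */
	return 0;
}

This also explains the error-path change above: on failure the new policy was never attached, so fail_nomem_anon_vma_fork now drops whatever vma_policy(tmp) holds instead of a local pol.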
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 59f7b55ba745..2a74f307c5ec 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1474,11 +1474,8 @@ static int __init __parse_crashkernel(char *cmdline,
 	if (first_colon && (!first_space || first_colon < first_space))
 		return parse_crashkernel_mem(ck_cmdline, system_ram,
 				crash_size, crash_base);
-	else
-		return parse_crashkernel_simple(ck_cmdline, crash_size,
-				crash_base);
 
-	return 0;
+	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
 }
 
 /*
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 6e33498d665c..a0d367a49122 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -112,6 +112,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
 struct kprobe_insn_page {
 	struct list_head list;
 	kprobe_opcode_t *insns;		/* Page of instruction slots */
+	struct kprobe_insn_cache *cache;
 	int nused;
 	int ngarbage;
 	char slot_used[];
@@ -121,12 +122,6 @@ struct kprobe_insn_page {
 	(offsetof(struct kprobe_insn_page, slot_used) +	\
 	 (sizeof(char) * (slots)))
 
-struct kprobe_insn_cache {
-	struct list_head pages;	/* list of kprobe_insn_page */
-	size_t insn_size;	/* size of instruction slot */
-	int nr_garbage;
-};
-
 static int slots_per_page(struct kprobe_insn_cache *c)
 {
 	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
@@ -138,8 +133,20 @@ enum kprobe_slot_state {
 	SLOT_USED = 2,
 };
 
-static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_slots */
-static struct kprobe_insn_cache kprobe_insn_slots = {
+static void *alloc_insn_page(void)
+{
+	return module_alloc(PAGE_SIZE);
+}
+
+static void free_insn_page(void *page)
+{
+	module_free(NULL, page);
+}
+
+struct kprobe_insn_cache kprobe_insn_slots = {
+	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
+	.alloc = alloc_insn_page,
+	.free = free_insn_page,
 	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
 	.insn_size = MAX_INSN_SIZE,
 	.nr_garbage = 0,
@@ -150,10 +157,12 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
  * __get_insn_slot() - Find a slot on an executable page for an instruction.
  * We allocate an executable page if there's no room on existing ones.
  */
-static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
+kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 {
 	struct kprobe_insn_page *kip;
+	kprobe_opcode_t *slot = NULL;
 
+	mutex_lock(&c->mutex);
  retry:
 	list_for_each_entry(kip, &c->pages, list) {
 		if (kip->nused < slots_per_page(c)) {
@@ -162,7 +171,8 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 				if (kip->slot_used[i] == SLOT_CLEAN) {
 					kip->slot_used[i] = SLOT_USED;
 					kip->nused++;
-					return kip->insns + (i * c->insn_size);
+					slot = kip->insns + (i * c->insn_size);
+					goto out;
 				}
 			}
 			/* kip->nused is broken. Fix it. */
@@ -178,37 +188,29 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 	/* All out of space. Need to allocate a new page. */
 	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
 	if (!kip)
-		return NULL;
+		goto out;
 
 	/*
 	 * Use module_alloc so this page is within +/- 2GB of where the
 	 * kernel image and loaded module images reside. This is required
 	 * so x86_64 can correctly handle the %rip-relative fixups.
 	 */
-	kip->insns = module_alloc(PAGE_SIZE);
+	kip->insns = c->alloc();
 	if (!kip->insns) {
 		kfree(kip);
-		return NULL;
+		goto out;
 	}
 	INIT_LIST_HEAD(&kip->list);
 	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
 	kip->slot_used[0] = SLOT_USED;
 	kip->nused = 1;
 	kip->ngarbage = 0;
+	kip->cache = c;
 	list_add(&kip->list, &c->pages);
-	return kip->insns;
-}
-
-
-kprobe_opcode_t __kprobes *get_insn_slot(void)
-{
-	kprobe_opcode_t *ret = NULL;
-
-	mutex_lock(&kprobe_insn_mutex);
-	ret = __get_insn_slot(&kprobe_insn_slots);
-	mutex_unlock(&kprobe_insn_mutex);
-
-	return ret;
-}
+	slot = kip->insns;
+out:
+	mutex_unlock(&c->mutex);
+	return slot;
 }
 
 /* Return 1 if all garbages are collected, otherwise 0. */
@@ -225,7 +227,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
 		 */
 		if (!list_is_singular(&kip->list)) {
 			list_del(&kip->list);
-			module_free(NULL, kip->insns);
+			kip->cache->free(kip->insns);
 			kfree(kip);
 		}
 		return 1;
@@ -255,11 +257,12 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
 	return 0;
 }
 
-static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
+void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
 				kprobe_opcode_t *slot, int dirty)
 {
 	struct kprobe_insn_page *kip;
 
+	mutex_lock(&c->mutex);
 	list_for_each_entry(kip, &c->pages, list) {
 		long idx = ((long)slot - (long)kip->insns) /
 			(c->insn_size * sizeof(kprobe_opcode_t));
@@ -272,45 +275,25 @@ static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
 					collect_garbage_slots(c);
 			} else
 				collect_one_slot(kip, idx);
-			return;
+			goto out;
 		}
 	}
 	/* Could not free this slot. */
 	WARN_ON(1);
+out:
+	mutex_unlock(&c->mutex);
 }
 
-void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
-{
-	mutex_lock(&kprobe_insn_mutex);
-	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
-	mutex_unlock(&kprobe_insn_mutex);
-}
 #ifdef CONFIG_OPTPROBES
 /* For optimized_kprobe buffer */
-static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
-static struct kprobe_insn_cache kprobe_optinsn_slots = {
+struct kprobe_insn_cache kprobe_optinsn_slots = {
+	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
+	.alloc = alloc_insn_page,
+	.free = free_insn_page,
 	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
 	/* .insn_size is initialized later */
 	.nr_garbage = 0,
 };
-/* Get a slot for optimized_kprobe buffer */
-kprobe_opcode_t __kprobes *get_optinsn_slot(void)
-{
-	kprobe_opcode_t *ret = NULL;
-
-	mutex_lock(&kprobe_optinsn_mutex);
-	ret = __get_insn_slot(&kprobe_optinsn_slots);
-	mutex_unlock(&kprobe_optinsn_mutex);
-
-	return ret;
-}
-
-void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
-{
-	mutex_lock(&kprobe_optinsn_mutex);
-	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
-	mutex_unlock(&kprobe_optinsn_mutex);
-}
 #endif
 #endif
 
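
The struct kprobe_insn_cache definition removed above presumably moved to include/linux/kprobes.h, grown by the mutex and the alloc/free hooks that the initializers here fill in, and the dropped get_insn_slot()/free_insn_slot() wrappers likely became thin inlines over the now-exported __get_insn_slot()/__free_insn_slot(). A sketch of the header side under those assumptions:

struct kprobe_insn_cache {
	struct mutex mutex;
	void *(*alloc)(void);	/* allocate an insn page */
	void (*free)(void *);	/* free an insn page */
	struct list_head pages;	/* list of kprobe_insn_page */
	size_t insn_size;	/* size of an instruction slot */
	int nr_garbage;
};

extern struct kprobe_insn_cache kprobe_insn_slots;

static inline kprobe_opcode_t *get_insn_slot(void)
{
	return __get_insn_slot(&kprobe_insn_slots);
}

static inline void free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
}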
diff --git a/kernel/modsign_pubkey.c b/kernel/modsign_pubkey.c
index 2b6e69909c39..7cbd4507a7e6 100644
--- a/kernel/modsign_pubkey.c
+++ b/kernel/modsign_pubkey.c
@@ -18,14 +18,14 @@
 
 struct key *modsign_keyring;
 
-extern __initdata const u8 modsign_certificate_list[];
-extern __initdata const u8 modsign_certificate_list_end[];
+extern __initconst const u8 modsign_certificate_list[];
+extern __initconst const u8 modsign_certificate_list_end[];
 
 /*
  * We need to make sure ccache doesn't cache the .o file as it doesn't notice
  * if modsign.pub changes.
  */
-static __initdata const char annoy_ccache[] = __TIME__ "foo";
+static __initconst const char annoy_ccache[] = __TIME__ "foo";
 
 /*
  * Load the compiled-in keys
diff --git a/kernel/panic.c b/kernel/panic.c
index 801864600514..b6c482ccc5db 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -123,10 +123,14 @@ void panic(const char *fmt, ...)
 	 */
 	smp_send_stop();
 
-	kmsg_dump(KMSG_DUMP_PANIC);
-
+	/*
+	 * Run any panic handlers, including those that might need to
+	 * add information to the kmsg dump output.
+	 */
 	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
+	kmsg_dump(KMSG_DUMP_PANIC);
+
 	bust_spinlocks(0);
 
 	if (!panic_blink)
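
With the notifier chain now run before kmsg_dump(), a panic notifier can contribute messages to the dump. Registration uses the standard atomic notifier API; a minimal sketch (my_panic_notify and my_panic_nb are illustrative names):

static int my_panic_notify(struct notifier_block *nb,
			   unsigned long event, void *buf)
{
	pr_emerg("mydrv: device state at panic ...\n");	/* now lands in the dump */
	return NOTIFY_DONE;
}

static struct notifier_block my_panic_nb = {
	.notifier_call = my_panic_notify,
};

/* at init time: */
atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);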
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 3085e62a80a5..c9c759d5a15c 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -644,22 +644,23 @@ int hibernate(void)
 	if (error)
 		goto Exit;
 
-	/* Allocate memory management structures */
-	error = create_basic_memory_bitmaps();
-	if (error)
-		goto Exit;
-
 	printk(KERN_INFO "PM: Syncing filesystems ... ");
 	sys_sync();
 	printk("done.\n");
 
 	error = freeze_processes();
 	if (error)
-		goto Free_bitmaps;
+		goto Exit;
+
+	lock_device_hotplug();
+	/* Allocate memory management structures */
+	error = create_basic_memory_bitmaps();
+	if (error)
+		goto Thaw;
 
 	error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
 	if (error || freezer_test_done)
-		goto Thaw;
+		goto Free_bitmaps;
 
 	if (in_suspend) {
 		unsigned int flags = 0;
@@ -682,14 +683,14 @@ int hibernate(void)
 		pr_debug("PM: Image restored successfully.\n");
 	}
 
+ Free_bitmaps:
+	free_basic_memory_bitmaps();
  Thaw:
+	unlock_device_hotplug();
 	thaw_processes();
 
 	/* Don't bother checking whether freezer_test_done is true */
 	freezer_test_done = false;
-
- Free_bitmaps:
-	free_basic_memory_bitmaps();
  Exit:
 	pm_notifier_call_chain(PM_POST_HIBERNATION);
 	pm_restore_console();
@@ -806,21 +807,20 @@ static int software_resume(void)
 	pm_prepare_console();
 	error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
 	if (error)
-		goto close_finish;
-
-	error = create_basic_memory_bitmaps();
-	if (error)
-		goto close_finish;
+		goto Close_Finish;
 
 	pr_debug("PM: Preparing processes for restore.\n");
 	error = freeze_processes();
-	if (error) {
-		swsusp_close(FMODE_READ);
-		goto Done;
-	}
+	if (error)
+		goto Close_Finish;
 
 	pr_debug("PM: Loading hibernation image.\n");
 
+	lock_device_hotplug();
+	error = create_basic_memory_bitmaps();
+	if (error)
+		goto Thaw;
+
 	error = swsusp_read(&flags);
 	swsusp_close(FMODE_READ);
 	if (!error)
@@ -828,9 +828,10 @@ static int software_resume(void)
 
 	printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n");
 	swsusp_free();
-	thaw_processes();
- Done:
 	free_basic_memory_bitmaps();
+ Thaw:
+	unlock_device_hotplug();
+	thaw_processes();
  Finish:
 	pm_notifier_call_chain(PM_POST_RESTORE);
 	pm_restore_console();
@@ -840,7 +841,7 @@ static int software_resume(void)
 	mutex_unlock(&pm_mutex);
 	pr_debug("PM: Hibernation image not present or could not be loaded.\n");
 	return error;
-close_finish:
+ Close_Finish:
 	swsusp_close(FMODE_READ);
 	goto Finish;
 }
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 349587bb03e1..358a146fd4da 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -352,7 +352,7 @@ static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
 	struct mem_extent *ext, *cur, *aux;
 
 	zone_start = zone->zone_start_pfn;
-	zone_end = zone->zone_start_pfn + zone->spanned_pages;
+	zone_end = zone_end_pfn(zone);
 
 	list_for_each_entry(ext, list, hook)
 		if (zone_start <= ext->end)
@@ -884,7 +884,7 @@ static unsigned int count_highmem_pages(void)
 			continue;
 
 		mark_free_pages(zone);
-		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+		max_zone_pfn = zone_end_pfn(zone);
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 			if (saveable_highmem_page(zone, pfn))
 				n++;
@@ -948,7 +948,7 @@ static unsigned int count_data_pages(void)
 			continue;
 
 		mark_free_pages(zone);
-		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+		max_zone_pfn = zone_end_pfn(zone);
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 			if (saveable_page(zone, pfn))
 				n++;
@@ -1041,7 +1041,7 @@ copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
 		unsigned long max_zone_pfn;
 
 		mark_free_pages(zone);
-		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+		max_zone_pfn = zone_end_pfn(zone);
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 			if (page_is_saveable(zone, pfn))
 				memory_bm_set_bit(orig_bm, pfn);
@@ -1093,7 +1093,7 @@ void swsusp_free(void)
 	unsigned long pfn, max_zone_pfn;
 
 	for_each_populated_zone(zone) {
-		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+		max_zone_pfn = zone_end_pfn(zone);
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 			if (pfn_valid(pfn)) {
 				struct page *page = pfn_to_page(pfn);
@@ -1755,7 +1755,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm)
 
 	/* Clear page flags */
 	for_each_populated_zone(zone) {
-		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+		max_zone_pfn = zone_end_pfn(zone);
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 			if (pfn_valid(pfn))
 				swsusp_unset_page_free(pfn_to_page(pfn));
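
zone_end_pfn() computes exactly the expression these hunks replace; using the helper avoids re-deriving the zone bounds by hand. Roughly, per include/linux/mmzone.h of this era:

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}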
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 4ed81e74f86f..72e8f4fd616d 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -60,11 +60,6 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 		error = -ENOSYS;
 		goto Unlock;
 	}
-	if(create_basic_memory_bitmaps()) {
-		atomic_inc(&snapshot_device_available);
-		error = -ENOMEM;
-		goto Unlock;
-	}
 	nonseekable_open(inode, filp);
 	data = &snapshot_state;
 	filp->private_data = data;
@@ -90,10 +85,9 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 		if (error)
 			pm_notifier_call_chain(PM_POST_RESTORE);
 	}
-	if (error) {
-		free_basic_memory_bitmaps();
+	if (error)
 		atomic_inc(&snapshot_device_available);
-	}
+
 	data->frozen = 0;
 	data->ready = 0;
 	data->platform_support = 0;
@@ -111,11 +105,11 @@ static int snapshot_release(struct inode *inode, struct file *filp)
 	lock_system_sleep();
 
 	swsusp_free();
-	free_basic_memory_bitmaps();
 	data = filp->private_data;
 	free_all_swap_pages(data->swap);
 	if (data->frozen) {
 		pm_restore_gfp_mask();
+		free_basic_memory_bitmaps();
 		thaw_processes();
 	}
 	pm_notifier_call_chain(data->mode == O_RDONLY ?
@@ -207,6 +201,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 	if (!mutex_trylock(&pm_mutex))
 		return -EBUSY;
 
+	lock_device_hotplug();
 	data = filp->private_data;
 
 	switch (cmd) {
@@ -220,14 +215,22 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		printk("done.\n");
 
 		error = freeze_processes();
-		if (!error)
+		if (error)
+			break;
+
+		error = create_basic_memory_bitmaps();
+		if (error)
+			thaw_processes();
+		else
 			data->frozen = 1;
+
 		break;
 
 	case SNAPSHOT_UNFREEZE:
 		if (!data->frozen || data->ready)
 			break;
 		pm_restore_gfp_mask();
+		free_basic_memory_bitmaps();
 		thaw_processes();
 		data->frozen = 0;
 		break;
@@ -371,6 +374,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 
 	}
 
+	unlock_device_hotplug();
 	mutex_unlock(&pm_mutex);
 
 	return error;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index a146ee327f6a..dd562e9aa2c8 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -236,7 +236,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 	 */
 	int dumpable = 0;
 	/* Don't let security modules deny introspection */
-	if (task == current)
+	if (same_thread_group(task, current))
 		return 0;
 	rcu_read_lock();
 	tcred = __task_cred(task);
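
This widens the "don't let security modules deny introspection" exemption from the calling task alone to any thread in its group. same_thread_group() is a group-identity comparison; roughly (the precise include/linux/sched.h definition has varied between comparing ->tgid and ->signal):

static inline int same_thread_group(struct task_struct *p1,
				    struct task_struct *p2)
{
	return p1->tgid == p2->tgid;	/* threads share one group leader */
}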
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7f0a5e6cdae0..9b3fe1cd8f40 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5151,7 +5151,7 @@ static int should_we_balance(struct lb_env *env)
 	 * First idle cpu or the first cpu(busiest) in this sched group
 	 * is eligible for doing load balancing at this and above domains.
 	 */
-	return balance_cpu != env->dst_cpu;
+	return balance_cpu == env->dst_cpu;
 }
 
 /*
diff --git a/kernel/signal.c b/kernel/signal.c
index 50e41075ac77..ded28b91fa53 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3394,7 +3394,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
 		new_ka.sa.sa_restorer = compat_ptr(restorer);
 #endif
 		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
-		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
 		if (ret)
 			return -EFAULT;
 		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
@@ -3406,7 +3406,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
 			       &oact->sa_handler);
 		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
-		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
 #ifdef __ARCH_HAS_SA_RESTORER
 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
 				&oact->sa_restorer);
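
The change matters because the double-underscore variants skip pointer-range validation: __get_user()/__put_user() assume the caller already ran access_ok(), while act/oact here arrive straight from userspace. Conceptually (a sketch of the relationship only, not the real arch uaccess.h macro):

/* checked form == range check + unchecked form; likewise for put_user() */
#define get_user(x, ptr)						\
	(access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ?		\
		__get_user((x), (ptr)) : -EFAULT)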
diff --git a/kernel/smp.c b/kernel/smp.c
index 449b707fc20d..0564571dcdf7 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -48,10 +48,13 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 				cpu_to_node(cpu)))
 			return notifier_from_errno(-ENOMEM);
 		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
-				cpu_to_node(cpu)))
+				cpu_to_node(cpu))) {
+			free_cpumask_var(cfd->cpumask);
 			return notifier_from_errno(-ENOMEM);
+		}
 		cfd->csd = alloc_percpu(struct call_single_data);
 		if (!cfd->csd) {
+			free_cpumask_var(cfd->cpumask_ipi);
 			free_cpumask_var(cfd->cpumask);
 			return notifier_from_errno(-ENOMEM);
 		}
@@ -572,8 +575,10 @@ EXPORT_SYMBOL(on_each_cpu);
  *
  * If @wait is true, then returns once @func has returned.
  *
- * You must not call this function with disabled interrupts or
- * from a hardware interrupt handler or from a bottom half handler.
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.  The
+ * exception is that it may be used during early boot while
+ * early_boot_irqs_disabled is set.
  */
 void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 			void *info, bool wait)
@@ -582,9 +587,10 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 
 	smp_call_function_many(mask, func, info, wait);
 	if (cpumask_test_cpu(cpu, mask)) {
-		local_irq_disable();
+		unsigned long flags;
+		local_irq_save(flags);
 		func(info);
-		local_irq_enable();
+		local_irq_restore(flags);
 	}
 	put_cpu();
 }
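
The save/restore pair keeps on_each_cpu_mask() correct when the caller already runs with interrupts off (the early-boot case the updated comment permits): restore puts the IRQ flag back to its saved state, whereas local_irq_enable() would have forced interrupts on. The pattern, in isolation:

	unsigned long flags;

	local_irq_save(flags);		/* harmless if IRQs were already off */
	func(info);
	local_irq_restore(flags);	/* restores prior state, never forces on */

The kernel/up.c changes below apply the same conversion to the UP stubs.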
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5cdd8065a3ce..4b082b5cac9e 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -34,6 +34,20 @@
 #else
 #define raw_read_can_lock(l)	read_can_lock(l)
 #define raw_write_can_lock(l)	write_can_lock(l)
+
+/*
+ * Some architectures can relax in favour of the CPU owning the lock.
+ */
+#ifndef arch_read_relax
+# define arch_read_relax(l)	cpu_relax()
+#endif
+#ifndef arch_write_relax
+# define arch_write_relax(l)	cpu_relax()
+#endif
+#ifndef arch_spin_relax
+# define arch_spin_relax(l)	cpu_relax()
+#endif
+
 /*
  * We build the __lock_function inlines here. They are too large for
  * inlining all over the place, but here is only one user per function
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7822cd88a95c..b2f06f3c6a3f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1225,7 +1225,7 @@ static struct ctl_table vm_table[] = {
 		.data		= &hugepages_treat_as_movable,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= hugetlb_treat_movable_handler,
+		.proc_handler	= proc_dointvec,
 	},
 	{
 		.procname	= "nr_overcommit_hugepages",
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 65bd3c92d6f3..8727032e3a6f 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -4,6 +4,23 @@
 
 static struct callback_head work_exited; /* all we need is ->next == NULL */
 
+/**
+ * task_work_add - ask the @task to execute @work->func()
+ * @task: the task which should run the callback
+ * @work: the callback to run
+ * @notify: send the notification if true
+ *
+ * Queue @work for task_work_run() below and notify the @task if @notify.
+ * Fails if the @task is exiting/exited and thus it can't process this @work.
+ * Otherwise @work->func() will be called when the @task returns from kernel
+ * mode or exits.
+ *
+ * This is like the signal handler which runs in kernel mode, but it doesn't
+ * try to wake up the @task.
+ *
+ * RETURNS:
+ * 0 if succeeds or -ESRCH.
+ */
 int
 task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
 {
@@ -21,11 +38,22 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
 	return 0;
 }
 
+/**
+ * task_work_cancel - cancel a pending work added by task_work_add()
+ * @task: the task which should execute the work
+ * @func: identifies the work to remove
+ *
+ * Find the last queued pending work with ->func == @func and remove
+ * it from queue.
+ *
+ * RETURNS:
+ * The found work or NULL if not found.
+ */
 struct callback_head *
 task_work_cancel(struct task_struct *task, task_work_func_t func)
 {
 	struct callback_head **pprev = &task->task_works;
-	struct callback_head *work = NULL;
+	struct callback_head *work;
 	unsigned long flags;
 	/*
 	 * If cmpxchg() fails we continue without updating pprev.
@@ -35,7 +63,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
 	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 	while ((work = ACCESS_ONCE(*pprev))) {
-		read_barrier_depends();
+		smp_read_barrier_depends();
 		if (work->func != func)
 			pprev = &work->next;
 		else if (cmpxchg(pprev, work, work->next) == work)
@@ -46,6 +74,14 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
 	return work;
 }
 
+/**
+ * task_work_run - execute the works added by task_work_add()
+ *
+ * Flush the pending works. Should be used by the core kernel code.
+ * Called before the task returns to the user-mode or stops, or when
+ * it exits. In the latter case task_work_add() can no longer add the
+ * new work after task_work_run() returns.
+ */
 void task_work_run(void)
 {
 	struct task_struct *task = current;
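
A minimal usage sketch of the now-documented API (my_twork_func and the surrounding names are illustrative; init_task_work() and task_work_add() are the real entry points):

#include <linux/task_work.h>

static void my_twork_func(struct callback_head *head)
{
	/* runs in the target task's context, e.g. on its return to user mode */
}

static struct callback_head my_twork;

static int queue_my_twork(struct task_struct *task)
{
	init_task_work(&my_twork, my_twork_func);
	return task_work_add(task, &my_twork, true);	/* 0, or -ESRCH if exiting */
}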
diff --git a/kernel/up.c b/kernel/up.c
index c54c75e9faf7..630d72bf7e41 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -10,12 +10,64 @@
 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 				int wait)
 {
+	unsigned long flags;
+
 	WARN_ON(cpu != 0);
 
-	local_irq_disable();
-	(func)(info);
-	local_irq_enable();
+	local_irq_save(flags);
+	func(info);
+	local_irq_restore(flags);
 
 	return 0;
 }
 EXPORT_SYMBOL(smp_call_function_single);
+
+int on_each_cpu(smp_call_func_t func, void *info, int wait)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	func(info);
+	local_irq_restore(flags);
+	return 0;
+}
+EXPORT_SYMBOL(on_each_cpu);
+
+/*
+ * Note we still need to test the mask even for UP
+ * because we actually can get an empty mask from
+ * code that on SMP might call us without the local
+ * CPU in the mask.
+ */
+void on_each_cpu_mask(const struct cpumask *mask,
+		      smp_call_func_t func, void *info, bool wait)
+{
+	unsigned long flags;
+
+	if (cpumask_test_cpu(0, mask)) {
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
+	}
+}
+EXPORT_SYMBOL(on_each_cpu_mask);
+
+/*
+ * Preemption is disabled here to make sure the cond_func is called under the
+ * same conditions in UP and SMP.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+		      smp_call_func_t func, void *info, bool wait,
+		      gfp_t gfp_flags)
+{
+	unsigned long flags;
+
+	preempt_disable();
+	if (cond_func(0, info)) {
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
+	}
+	preempt_enable();
+}
+EXPORT_SYMBOL(on_each_cpu_cond);
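
Callers use these UP stubs exactly like the SMP versions; the condition is simply evaluated once, for CPU 0. A usage sketch (need_work and do_work are illustrative names):

static bool need_work(int cpu, void *info)
{
	return true;	/* per-cpu predicate; only CPU 0 exists on UP */
}

static void do_work(void *info)
{
	/* runs with IRQs disabled on each selected CPU */
}

static void run_on_matching_cpus(void)
{
	on_each_cpu_cond(need_work, do_work, NULL, true, GFP_KERNEL);
}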