Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/Makefile | 1 |
| -rw-r--r-- | kernel/cgroup.c | 4 |
| -rw-r--r-- | kernel/configs.c | 1 |
| -rw-r--r-- | kernel/debug/debug_core.c | 139 |
| -rw-r--r-- | kernel/debug/debug_core.h | 1 |
| -rw-r--r-- | kernel/debug/kdb/kdb_debugger.c | 3 |
| -rw-r--r-- | kernel/debug/kdb/kdb_io.c | 2 |
| -rw-r--r-- | kernel/debug/kdb/kdb_main.c | 18 |
| -rw-r--r-- | kernel/debug/kdb/kdb_private.h | 48 |
| -rw-r--r-- | kernel/early_res.c | 590 |
| -rw-r--r-- | kernel/gcov/fs.c | 1 |
| -rw-r--r-- | kernel/kprobes.c | 1 |
| -rw-r--r-- | kernel/pm_qos_params.c | 3 |
| -rw-r--r-- | kernel/profile.c | 1 |
| -rw-r--r-- | kernel/rtmutex-tester.c | 6 |
| -rw-r--r-- | kernel/softirq.c | 2 |
| -rw-r--r-- | kernel/sys_ni.c | 1 |
| -rw-r--r-- | kernel/trace/blktrace.c | 16 |
| -rw-r--r-- | kernel/trace/ftrace.c | 2 |
| -rw-r--r-- | kernel/trace/ring_buffer.c | 1 |
| -rw-r--r-- | kernel/trace/trace_events.c | 6 |
| -rw-r--r-- | kernel/trace/trace_kdb.c | 1 |
| -rw-r--r-- | kernel/trace/trace_kprobe.c | 2 |
| -rw-r--r-- | kernel/trace/trace_stack.c | 1 |
| -rw-r--r-- | kernel/workqueue.c | 314 |
25 files changed, 312 insertions, 853 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index e2c9d52cfe9e..0b5ff083fa22 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
| @@ -11,7 +11,6 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \ | |||
| 11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ | 11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ |
| 12 | notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ | 12 | notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ |
| 13 | async.o range.o jump_label.o | 13 | async.o range.o jump_label.o |
| 14 | obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o | ||
| 15 | obj-y += groups.o | 14 | obj-y += groups.o |
| 16 | 15 | ||
| 17 | ifdef CONFIG_FUNCTION_TRACER | 16 | ifdef CONFIG_FUNCTION_TRACER |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 291ba3d04bea..7b69b8d0313d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
| @@ -52,7 +52,6 @@ | |||
| 52 | #include <linux/cgroupstats.h> | 52 | #include <linux/cgroupstats.h> |
| 53 | #include <linux/hash.h> | 53 | #include <linux/hash.h> |
| 54 | #include <linux/namei.h> | 54 | #include <linux/namei.h> |
| 55 | #include <linux/smp_lock.h> | ||
| 56 | #include <linux/pid_namespace.h> | 55 | #include <linux/pid_namespace.h> |
| 57 | #include <linux/idr.h> | 56 | #include <linux/idr.h> |
| 58 | #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */ | 57 | #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */ |
| @@ -1222,7 +1221,6 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) | |||
| 1222 | struct cgroup *cgrp = &root->top_cgroup; | 1221 | struct cgroup *cgrp = &root->top_cgroup; |
| 1223 | struct cgroup_sb_opts opts; | 1222 | struct cgroup_sb_opts opts; |
| 1224 | 1223 | ||
| 1225 | lock_kernel(); | ||
| 1226 | mutex_lock(&cgrp->dentry->d_inode->i_mutex); | 1224 | mutex_lock(&cgrp->dentry->d_inode->i_mutex); |
| 1227 | mutex_lock(&cgroup_mutex); | 1225 | mutex_lock(&cgroup_mutex); |
| 1228 | 1226 | ||
| @@ -1255,7 +1253,6 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) | |||
| 1255 | kfree(opts.name); | 1253 | kfree(opts.name); |
| 1256 | mutex_unlock(&cgroup_mutex); | 1254 | mutex_unlock(&cgroup_mutex); |
| 1257 | mutex_unlock(&cgrp->dentry->d_inode->i_mutex); | 1255 | mutex_unlock(&cgrp->dentry->d_inode->i_mutex); |
| 1258 | unlock_kernel(); | ||
| 1259 | return ret; | 1256 | return ret; |
| 1260 | } | 1257 | } |
| 1261 | 1258 | ||
| @@ -1568,7 +1565,6 @@ static int cgroup_get_sb(struct file_system_type *fs_type, | |||
| 1568 | out_err: | 1565 | out_err: |
| 1569 | kfree(opts.release_agent); | 1566 | kfree(opts.release_agent); |
| 1570 | kfree(opts.name); | 1567 | kfree(opts.name); |
| 1571 | |||
| 1572 | return ret; | 1568 | return ret; |
| 1573 | } | 1569 | } |
| 1574 | 1570 | ||
diff --git a/kernel/configs.c b/kernel/configs.c
index abaee684ecbf..b4066b44a99d 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
| @@ -66,6 +66,7 @@ ikconfig_read_current(struct file *file, char __user *buf, | |||
| 66 | static const struct file_operations ikconfig_file_ops = { | 66 | static const struct file_operations ikconfig_file_ops = { |
| 67 | .owner = THIS_MODULE, | 67 | .owner = THIS_MODULE, |
| 68 | .read = ikconfig_read_current, | 68 | .read = ikconfig_read_current, |
| 69 | .llseek = default_llseek, | ||
| 69 | }; | 70 | }; |
| 70 | 71 | ||
| 71 | static int __init ikconfig_init(void) | 72 | static int __init ikconfig_init(void) |
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index de407c78178d..fec596da9bd0 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
| @@ -47,6 +47,7 @@ | |||
| 47 | #include <linux/pid.h> | 47 | #include <linux/pid.h> |
| 48 | #include <linux/smp.h> | 48 | #include <linux/smp.h> |
| 49 | #include <linux/mm.h> | 49 | #include <linux/mm.h> |
| 50 | #include <linux/rcupdate.h> | ||
| 50 | 51 | ||
| 51 | #include <asm/cacheflush.h> | 52 | #include <asm/cacheflush.h> |
| 52 | #include <asm/byteorder.h> | 53 | #include <asm/byteorder.h> |
| @@ -109,13 +110,15 @@ static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = { | |||
| 109 | */ | 110 | */ |
| 110 | atomic_t kgdb_active = ATOMIC_INIT(-1); | 111 | atomic_t kgdb_active = ATOMIC_INIT(-1); |
| 111 | EXPORT_SYMBOL_GPL(kgdb_active); | 112 | EXPORT_SYMBOL_GPL(kgdb_active); |
| 113 | static DEFINE_RAW_SPINLOCK(dbg_master_lock); | ||
| 114 | static DEFINE_RAW_SPINLOCK(dbg_slave_lock); | ||
| 112 | 115 | ||
| 113 | /* | 116 | /* |
| 114 | * We use NR_CPUs not PERCPU, in case kgdb is used to debug early | 117 | * We use NR_CPUs not PERCPU, in case kgdb is used to debug early |
| 115 | * bootup code (which might not have percpu set up yet): | 118 | * bootup code (which might not have percpu set up yet): |
| 116 | */ | 119 | */ |
| 117 | static atomic_t passive_cpu_wait[NR_CPUS]; | 120 | static atomic_t masters_in_kgdb; |
| 118 | static atomic_t cpu_in_kgdb[NR_CPUS]; | 121 | static atomic_t slaves_in_kgdb; |
| 119 | static atomic_t kgdb_break_tasklet_var; | 122 | static atomic_t kgdb_break_tasklet_var; |
| 120 | atomic_t kgdb_setting_breakpoint; | 123 | atomic_t kgdb_setting_breakpoint; |
| 121 | 124 | ||
| @@ -457,26 +460,32 @@ static int kgdb_reenter_check(struct kgdb_state *ks) | |||
| 457 | return 1; | 460 | return 1; |
| 458 | } | 461 | } |
| 459 | 462 | ||
| 460 | static void dbg_cpu_switch(int cpu, int next_cpu) | 463 | static void dbg_touch_watchdogs(void) |
| 461 | { | 464 | { |
| 462 | /* Mark the cpu we are switching away from as a slave when it | 465 | touch_softlockup_watchdog_sync(); |
| 463 | * holds the kgdb_active token. This must be done so that the | 466 | clocksource_touch_watchdog(); |
| 464 | * that all the cpus wait in for the debug core will not enter | 467 | rcu_cpu_stall_reset(); |
| 465 | * again as the master. */ | ||
| 466 | if (cpu == atomic_read(&kgdb_active)) { | ||
| 467 | kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE; | ||
| 468 | kgdb_info[cpu].exception_state &= ~DCPU_WANT_MASTER; | ||
| 469 | } | ||
| 470 | kgdb_info[next_cpu].exception_state |= DCPU_NEXT_MASTER; | ||
| 471 | } | 468 | } |
| 472 | 469 | ||
| 473 | static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs) | 470 | static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, |
| 471 | int exception_state) | ||
| 474 | { | 472 | { |
| 475 | unsigned long flags; | 473 | unsigned long flags; |
| 476 | int sstep_tries = 100; | 474 | int sstep_tries = 100; |
| 477 | int error; | 475 | int error; |
| 478 | int i, cpu; | 476 | int cpu; |
| 479 | int trace_on = 0; | 477 | int trace_on = 0; |
| 478 | int online_cpus = num_online_cpus(); | ||
| 479 | |||
| 480 | kgdb_info[ks->cpu].enter_kgdb++; | ||
| 481 | kgdb_info[ks->cpu].exception_state |= exception_state; | ||
| 482 | |||
| 483 | if (exception_state == DCPU_WANT_MASTER) | ||
| 484 | atomic_inc(&masters_in_kgdb); | ||
| 485 | else | ||
| 486 | atomic_inc(&slaves_in_kgdb); | ||
| 487 | kgdb_disable_hw_debug(ks->linux_regs); | ||
| 488 | |||
| 480 | acquirelock: | 489 | acquirelock: |
| 481 | /* | 490 | /* |
| 482 | * Interrupts will be restored by the 'trap return' code, except when | 491 | * Interrupts will be restored by the 'trap return' code, except when |
| @@ -489,14 +498,15 @@ acquirelock: | |||
| 489 | kgdb_info[cpu].task = current; | 498 | kgdb_info[cpu].task = current; |
| 490 | kgdb_info[cpu].ret_state = 0; | 499 | kgdb_info[cpu].ret_state = 0; |
| 491 | kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT; | 500 | kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT; |
| 492 | /* | ||
| 493 | * Make sure the above info reaches the primary CPU before | ||
| 494 | * our cpu_in_kgdb[] flag setting does: | ||
| 495 | */ | ||
| 496 | atomic_inc(&cpu_in_kgdb[cpu]); | ||
| 497 | 501 | ||
| 498 | if (exception_level == 1) | 502 | /* Make sure the above info reaches the primary CPU */ |
| 503 | smp_mb(); | ||
| 504 | |||
| 505 | if (exception_level == 1) { | ||
| 506 | if (raw_spin_trylock(&dbg_master_lock)) | ||
| 507 | atomic_xchg(&kgdb_active, cpu); | ||
| 499 | goto cpu_master_loop; | 508 | goto cpu_master_loop; |
| 509 | } | ||
| 500 | 510 | ||
| 501 | /* | 511 | /* |
| 502 | * CPU will loop if it is a slave or request to become a kgdb | 512 | * CPU will loop if it is a slave or request to become a kgdb |
| @@ -508,10 +518,12 @@ cpu_loop: | |||
| 508 | kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER; | 518 | kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER; |
| 509 | goto cpu_master_loop; | 519 | goto cpu_master_loop; |
| 510 | } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) { | 520 | } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) { |
| 511 | if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu) | 521 | if (raw_spin_trylock(&dbg_master_lock)) { |
| 522 | atomic_xchg(&kgdb_active, cpu); | ||
| 512 | break; | 523 | break; |
| 524 | } | ||
| 513 | } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) { | 525 | } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) { |
| 514 | if (!atomic_read(&passive_cpu_wait[cpu])) | 526 | if (!raw_spin_is_locked(&dbg_slave_lock)) |
| 515 | goto return_normal; | 527 | goto return_normal; |
| 516 | } else { | 528 | } else { |
| 517 | return_normal: | 529 | return_normal: |
| @@ -522,9 +534,12 @@ return_normal: | |||
| 522 | arch_kgdb_ops.correct_hw_break(); | 534 | arch_kgdb_ops.correct_hw_break(); |
| 523 | if (trace_on) | 535 | if (trace_on) |
| 524 | tracing_on(); | 536 | tracing_on(); |
| 525 | atomic_dec(&cpu_in_kgdb[cpu]); | 537 | kgdb_info[cpu].exception_state &= |
| 526 | touch_softlockup_watchdog_sync(); | 538 | ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE); |
| 527 | clocksource_touch_watchdog(); | 539 | kgdb_info[cpu].enter_kgdb--; |
| 540 | smp_mb__before_atomic_dec(); | ||
| 541 | atomic_dec(&slaves_in_kgdb); | ||
| 542 | dbg_touch_watchdogs(); | ||
| 528 | local_irq_restore(flags); | 543 | local_irq_restore(flags); |
| 529 | return 0; | 544 | return 0; |
| 530 | } | 545 | } |
| @@ -541,8 +556,8 @@ return_normal: | |||
| 541 | (kgdb_info[cpu].task && | 556 | (kgdb_info[cpu].task && |
| 542 | kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) { | 557 | kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) { |
| 543 | atomic_set(&kgdb_active, -1); | 558 | atomic_set(&kgdb_active, -1); |
| 544 | touch_softlockup_watchdog_sync(); | 559 | raw_spin_unlock(&dbg_master_lock); |
| 545 | clocksource_touch_watchdog(); | 560 | dbg_touch_watchdogs(); |
| 546 | local_irq_restore(flags); | 561 | local_irq_restore(flags); |
| 547 | 562 | ||
| 548 | goto acquirelock; | 563 | goto acquirelock; |
| @@ -563,16 +578,12 @@ return_normal: | |||
| 563 | if (dbg_io_ops->pre_exception) | 578 | if (dbg_io_ops->pre_exception) |
| 564 | dbg_io_ops->pre_exception(); | 579 | dbg_io_ops->pre_exception(); |
| 565 | 580 | ||
| 566 | kgdb_disable_hw_debug(ks->linux_regs); | ||
| 567 | |||
| 568 | /* | 581 | /* |
| 569 | * Get the passive CPU lock which will hold all the non-primary | 582 | * Get the passive CPU lock which will hold all the non-primary |
| 570 | * CPU in a spin state while the debugger is active | 583 | * CPU in a spin state while the debugger is active |
| 571 | */ | 584 | */ |
| 572 | if (!kgdb_single_step) { | 585 | if (!kgdb_single_step) |
| 573 | for (i = 0; i < NR_CPUS; i++) | 586 | raw_spin_lock(&dbg_slave_lock); |
| 574 | atomic_inc(&passive_cpu_wait[i]); | ||
| 575 | } | ||
| 576 | 587 | ||
| 577 | #ifdef CONFIG_SMP | 588 | #ifdef CONFIG_SMP |
| 578 | /* Signal the other CPUs to enter kgdb_wait() */ | 589 | /* Signal the other CPUs to enter kgdb_wait() */ |
| @@ -583,10 +594,9 @@ return_normal: | |||
| 583 | /* | 594 | /* |
| 584 | * Wait for the other CPUs to be notified and be waiting for us: | 595 | * Wait for the other CPUs to be notified and be waiting for us: |
| 585 | */ | 596 | */ |
| 586 | for_each_online_cpu(i) { | 597 | while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) + |
| 587 | while (kgdb_do_roundup && !atomic_read(&cpu_in_kgdb[i])) | 598 | atomic_read(&slaves_in_kgdb)) != online_cpus) |
| 588 | cpu_relax(); | 599 | cpu_relax(); |
| 589 | } | ||
| 590 | 600 | ||
| 591 | /* | 601 | /* |
| 592 | * At this point the primary processor is completely | 602 | * At this point the primary processor is completely |
| @@ -615,7 +625,8 @@ cpu_master_loop: | |||
| 615 | if (error == DBG_PASS_EVENT) { | 625 | if (error == DBG_PASS_EVENT) { |
| 616 | dbg_kdb_mode = !dbg_kdb_mode; | 626 | dbg_kdb_mode = !dbg_kdb_mode; |
| 617 | } else if (error == DBG_SWITCH_CPU_EVENT) { | 627 | } else if (error == DBG_SWITCH_CPU_EVENT) { |
| 618 | dbg_cpu_switch(cpu, dbg_switch_cpu); | 628 | kgdb_info[dbg_switch_cpu].exception_state |= |
| 629 | DCPU_NEXT_MASTER; | ||
| 619 | goto cpu_loop; | 630 | goto cpu_loop; |
| 620 | } else { | 631 | } else { |
| 621 | kgdb_info[cpu].ret_state = error; | 632 | kgdb_info[cpu].ret_state = error; |
| @@ -627,24 +638,11 @@ cpu_master_loop: | |||
| 627 | if (dbg_io_ops->post_exception) | 638 | if (dbg_io_ops->post_exception) |
| 628 | dbg_io_ops->post_exception(); | 639 | dbg_io_ops->post_exception(); |
| 629 | 640 | ||
| 630 | atomic_dec(&cpu_in_kgdb[ks->cpu]); | ||
| 631 | |||
| 632 | if (!kgdb_single_step) { | 641 | if (!kgdb_single_step) { |
| 633 | for (i = NR_CPUS-1; i >= 0; i--) | 642 | raw_spin_unlock(&dbg_slave_lock); |
| 634 | atomic_dec(&passive_cpu_wait[i]); | 643 | /* Wait till all the CPUs have quit from the debugger. */ |
| 635 | /* | 644 | while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb)) |
| 636 | * Wait till all the CPUs have quit from the debugger, | 645 | cpu_relax(); |
| 637 | * but allow a CPU that hit an exception and is | ||
| 638 | * waiting to become the master to remain in the debug | ||
| 639 | * core. | ||
| 640 | */ | ||
| 641 | for_each_online_cpu(i) { | ||
| 642 | while (kgdb_do_roundup && | ||
| 643 | atomic_read(&cpu_in_kgdb[i]) && | ||
| 644 | !(kgdb_info[i].exception_state & | ||
| 645 | DCPU_WANT_MASTER)) | ||
| 646 | cpu_relax(); | ||
| 647 | } | ||
| 648 | } | 646 | } |
| 649 | 647 | ||
| 650 | kgdb_restore: | 648 | kgdb_restore: |
| @@ -655,12 +653,20 @@ kgdb_restore: | |||
| 655 | else | 653 | else |
| 656 | kgdb_sstep_pid = 0; | 654 | kgdb_sstep_pid = 0; |
| 657 | } | 655 | } |
| 656 | if (arch_kgdb_ops.correct_hw_break) | ||
| 657 | arch_kgdb_ops.correct_hw_break(); | ||
| 658 | if (trace_on) | 658 | if (trace_on) |
| 659 | tracing_on(); | 659 | tracing_on(); |
| 660 | |||
| 661 | kgdb_info[cpu].exception_state &= | ||
| 662 | ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE); | ||
| 663 | kgdb_info[cpu].enter_kgdb--; | ||
| 664 | smp_mb__before_atomic_dec(); | ||
| 665 | atomic_dec(&masters_in_kgdb); | ||
| 660 | /* Free kgdb_active */ | 666 | /* Free kgdb_active */ |
| 661 | atomic_set(&kgdb_active, -1); | 667 | atomic_set(&kgdb_active, -1); |
| 662 | touch_softlockup_watchdog_sync(); | 668 | raw_spin_unlock(&dbg_master_lock); |
| 663 | clocksource_touch_watchdog(); | 669 | dbg_touch_watchdogs(); |
| 664 | local_irq_restore(flags); | 670 | local_irq_restore(flags); |
| 665 | 671 | ||
| 666 | return kgdb_info[cpu].ret_state; | 672 | return kgdb_info[cpu].ret_state; |
| @@ -678,7 +684,6 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) | |||
| 678 | { | 684 | { |
| 679 | struct kgdb_state kgdb_var; | 685 | struct kgdb_state kgdb_var; |
| 680 | struct kgdb_state *ks = &kgdb_var; | 686 | struct kgdb_state *ks = &kgdb_var; |
| 681 | int ret; | ||
| 682 | 687 | ||
| 683 | ks->cpu = raw_smp_processor_id(); | 688 | ks->cpu = raw_smp_processor_id(); |
| 684 | ks->ex_vector = evector; | 689 | ks->ex_vector = evector; |
| @@ -689,11 +694,10 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) | |||
| 689 | 694 | ||
| 690 | if (kgdb_reenter_check(ks)) | 695 | if (kgdb_reenter_check(ks)) |
| 691 | return 0; /* Ouch, double exception ! */ | 696 | return 0; /* Ouch, double exception ! */ |
| 692 | kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER; | 697 | if (kgdb_info[ks->cpu].enter_kgdb != 0) |
| 693 | ret = kgdb_cpu_enter(ks, regs); | 698 | return 0; |
| 694 | kgdb_info[ks->cpu].exception_state &= ~(DCPU_WANT_MASTER | | 699 | |
| 695 | DCPU_IS_SLAVE); | 700 | return kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER); |
| 696 | return ret; | ||
| 697 | } | 701 | } |
| 698 | 702 | ||
| 699 | int kgdb_nmicallback(int cpu, void *regs) | 703 | int kgdb_nmicallback(int cpu, void *regs) |
| @@ -706,12 +710,9 @@ int kgdb_nmicallback(int cpu, void *regs) | |||
| 706 | ks->cpu = cpu; | 710 | ks->cpu = cpu; |
| 707 | ks->linux_regs = regs; | 711 | ks->linux_regs = regs; |
| 708 | 712 | ||
| 709 | if (!atomic_read(&cpu_in_kgdb[cpu]) && | 713 | if (kgdb_info[ks->cpu].enter_kgdb == 0 && |
| 710 | atomic_read(&kgdb_active) != -1 && | 714 | raw_spin_is_locked(&dbg_master_lock)) { |
| 711 | atomic_read(&kgdb_active) != cpu) { | 715 | kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE); |
| 712 | kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE; | ||
| 713 | kgdb_cpu_enter(ks, regs); | ||
| 714 | kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE; | ||
| 715 | return 0; | 716 | return 0; |
| 716 | } | 717 | } |
| 717 | #endif | 718 | #endif |
diff --git a/kernel/debug/debug_core.h b/kernel/debug/debug_core.h
index c5d753d80f67..3494c28a7e7a 100644
--- a/kernel/debug/debug_core.h
+++ b/kernel/debug/debug_core.h
| @@ -40,6 +40,7 @@ struct debuggerinfo_struct { | |||
| 40 | int exception_state; | 40 | int exception_state; |
| 41 | int ret_state; | 41 | int ret_state; |
| 42 | int irq_depth; | 42 | int irq_depth; |
| 43 | int enter_kgdb; | ||
| 43 | }; | 44 | }; |
| 44 | 45 | ||
| 45 | extern struct debuggerinfo_struct kgdb_info[]; | 46 | extern struct debuggerinfo_struct kgdb_info[]; |
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index bf6e8270e957..dd0b1b7dd02c 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
| @@ -86,7 +86,7 @@ int kdb_stub(struct kgdb_state *ks) | |||
| 86 | } | 86 | } |
| 87 | /* Set initial kdb state variables */ | 87 | /* Set initial kdb state variables */ |
| 88 | KDB_STATE_CLEAR(KGDB_TRANS); | 88 | KDB_STATE_CLEAR(KGDB_TRANS); |
| 89 | kdb_initial_cpu = ks->cpu; | 89 | kdb_initial_cpu = atomic_read(&kgdb_active); |
| 90 | kdb_current_task = kgdb_info[ks->cpu].task; | 90 | kdb_current_task = kgdb_info[ks->cpu].task; |
| 91 | kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo; | 91 | kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo; |
| 92 | /* Remove any breakpoints as needed by kdb and clear single step */ | 92 | /* Remove any breakpoints as needed by kdb and clear single step */ |
| @@ -105,7 +105,6 @@ int kdb_stub(struct kgdb_state *ks) | |||
| 105 | ks->pass_exception = 1; | 105 | ks->pass_exception = 1; |
| 106 | KDB_FLAG_SET(CATASTROPHIC); | 106 | KDB_FLAG_SET(CATASTROPHIC); |
| 107 | } | 107 | } |
| 108 | kdb_initial_cpu = ks->cpu; | ||
| 109 | if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) { | 108 | if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) { |
| 110 | KDB_STATE_CLEAR(SSBPT); | 109 | KDB_STATE_CLEAR(SSBPT); |
| 111 | KDB_STATE_CLEAR(DOING_SS); | 110 | KDB_STATE_CLEAR(DOING_SS); |
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index c9b7f4f90bba..96fdaac46a80 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
| @@ -823,4 +823,4 @@ int kdb_printf(const char *fmt, ...) | |||
| 823 | 823 | ||
| 824 | return r; | 824 | return r; |
| 825 | } | 825 | } |
| 826 | 826 | EXPORT_SYMBOL_GPL(kdb_printf); | |
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index caf057a3de0e..d7bda21a106b 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
| @@ -1749,13 +1749,13 @@ static int kdb_go(int argc, const char **argv) | |||
| 1749 | int nextarg; | 1749 | int nextarg; |
| 1750 | long offset; | 1750 | long offset; |
| 1751 | 1751 | ||
| 1752 | if (raw_smp_processor_id() != kdb_initial_cpu) { | ||
| 1753 | kdb_printf("go must execute on the entry cpu, " | ||
| 1754 | "please use \"cpu %d\" and then execute go\n", | ||
| 1755 | kdb_initial_cpu); | ||
| 1756 | return KDB_BADCPUNUM; | ||
| 1757 | } | ||
| 1752 | if (argc == 1) { | 1758 | if (argc == 1) { |
| 1753 | if (raw_smp_processor_id() != kdb_initial_cpu) { | ||
| 1754 | kdb_printf("go <address> must be issued from the " | ||
| 1755 | "initial cpu, do cpu %d first\n", | ||
| 1756 | kdb_initial_cpu); | ||
| 1757 | return KDB_ARGCOUNT; | ||
| 1758 | } | ||
| 1759 | nextarg = 1; | 1759 | nextarg = 1; |
| 1760 | diag = kdbgetaddrarg(argc, argv, &nextarg, | 1760 | diag = kdbgetaddrarg(argc, argv, &nextarg, |
| 1761 | &addr, &offset, NULL); | 1761 | &addr, &offset, NULL); |
| @@ -2783,6 +2783,8 @@ int kdb_register_repeat(char *cmd, | |||
| 2783 | 2783 | ||
| 2784 | return 0; | 2784 | return 0; |
| 2785 | } | 2785 | } |
| 2786 | EXPORT_SYMBOL_GPL(kdb_register_repeat); | ||
| 2787 | |||
| 2786 | 2788 | ||
| 2787 | /* | 2789 | /* |
| 2788 | * kdb_register - Compatibility register function for commands that do | 2790 | * kdb_register - Compatibility register function for commands that do |
| @@ -2805,6 +2807,7 @@ int kdb_register(char *cmd, | |||
| 2805 | return kdb_register_repeat(cmd, func, usage, help, minlen, | 2807 | return kdb_register_repeat(cmd, func, usage, help, minlen, |
| 2806 | KDB_REPEAT_NONE); | 2808 | KDB_REPEAT_NONE); |
| 2807 | } | 2809 | } |
| 2810 | EXPORT_SYMBOL_GPL(kdb_register); | ||
| 2808 | 2811 | ||
| 2809 | /* | 2812 | /* |
| 2810 | * kdb_unregister - This function is used to unregister a kernel | 2813 | * kdb_unregister - This function is used to unregister a kernel |
| @@ -2823,7 +2826,7 @@ int kdb_unregister(char *cmd) | |||
| 2823 | /* | 2826 | /* |
| 2824 | * find the command. | 2827 | * find the command. |
| 2825 | */ | 2828 | */ |
| 2826 | for (i = 0, kp = kdb_commands; i < kdb_max_commands; i++, kp++) { | 2829 | for_each_kdbcmd(kp, i) { |
| 2827 | if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) { | 2830 | if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) { |
| 2828 | kp->cmd_name = NULL; | 2831 | kp->cmd_name = NULL; |
| 2829 | return 0; | 2832 | return 0; |
| @@ -2833,6 +2836,7 @@ int kdb_unregister(char *cmd) | |||
| 2833 | /* Couldn't find it. */ | 2836 | /* Couldn't find it. */ |
| 2834 | return 1; | 2837 | return 1; |
| 2835 | } | 2838 | } |
| 2839 | EXPORT_SYMBOL_GPL(kdb_unregister); | ||
| 2836 | 2840 | ||
| 2837 | /* Initialize the kdb command table. */ | 2841 | /* Initialize the kdb command table. */ |
| 2838 | static void __init kdb_inittab(void) | 2842 | static void __init kdb_inittab(void) |
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index be775f7e81e0..35d69ed1dfb5 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
| @@ -15,29 +15,6 @@ | |||
| 15 | #include <linux/kgdb.h> | 15 | #include <linux/kgdb.h> |
| 16 | #include "../debug_core.h" | 16 | #include "../debug_core.h" |
| 17 | 17 | ||
| 18 | /* Kernel Debugger Error codes. Must not overlap with command codes. */ | ||
| 19 | #define KDB_NOTFOUND (-1) | ||
| 20 | #define KDB_ARGCOUNT (-2) | ||
| 21 | #define KDB_BADWIDTH (-3) | ||
| 22 | #define KDB_BADRADIX (-4) | ||
| 23 | #define KDB_NOTENV (-5) | ||
| 24 | #define KDB_NOENVVALUE (-6) | ||
| 25 | #define KDB_NOTIMP (-7) | ||
| 26 | #define KDB_ENVFULL (-8) | ||
| 27 | #define KDB_ENVBUFFULL (-9) | ||
| 28 | #define KDB_TOOMANYBPT (-10) | ||
| 29 | #define KDB_TOOMANYDBREGS (-11) | ||
| 30 | #define KDB_DUPBPT (-12) | ||
| 31 | #define KDB_BPTNOTFOUND (-13) | ||
| 32 | #define KDB_BADMODE (-14) | ||
| 33 | #define KDB_BADINT (-15) | ||
| 34 | #define KDB_INVADDRFMT (-16) | ||
| 35 | #define KDB_BADREG (-17) | ||
| 36 | #define KDB_BADCPUNUM (-18) | ||
| 37 | #define KDB_BADLENGTH (-19) | ||
| 38 | #define KDB_NOBP (-20) | ||
| 39 | #define KDB_BADADDR (-21) | ||
| 40 | |||
| 41 | /* Kernel Debugger Command codes. Must not overlap with error codes. */ | 18 | /* Kernel Debugger Command codes. Must not overlap with error codes. */ |
| 42 | #define KDB_CMD_GO (-1001) | 19 | #define KDB_CMD_GO (-1001) |
| 43 | #define KDB_CMD_CPU (-1002) | 20 | #define KDB_CMD_CPU (-1002) |
| @@ -93,17 +70,6 @@ | |||
| 93 | */ | 70 | */ |
| 94 | #define KDB_MAXBPT 16 | 71 | #define KDB_MAXBPT 16 |
| 95 | 72 | ||
| 96 | /* Maximum number of arguments to a function */ | ||
| 97 | #define KDB_MAXARGS 16 | ||
| 98 | |||
| 99 | typedef enum { | ||
| 100 | KDB_REPEAT_NONE = 0, /* Do not repeat this command */ | ||
| 101 | KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ | ||
| 102 | KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ | ||
| 103 | } kdb_repeat_t; | ||
| 104 | |||
| 105 | typedef int (*kdb_func_t)(int, const char **); | ||
| 106 | |||
| 107 | /* Symbol table format returned by kallsyms. */ | 73 | /* Symbol table format returned by kallsyms. */ |
| 108 | typedef struct __ksymtab { | 74 | typedef struct __ksymtab { |
| 109 | unsigned long value; /* Address of symbol */ | 75 | unsigned long value; /* Address of symbol */ |
| @@ -123,11 +89,6 @@ extern int kallsyms_symbol_next(char *prefix_name, int flag); | |||
| 123 | extern int kallsyms_symbol_complete(char *prefix_name, int max_len); | 89 | extern int kallsyms_symbol_complete(char *prefix_name, int max_len); |
| 124 | 90 | ||
| 125 | /* Exported Symbols for kernel loadable modules to use. */ | 91 | /* Exported Symbols for kernel loadable modules to use. */ |
| 126 | extern int kdb_register(char *, kdb_func_t, char *, char *, short); | ||
| 127 | extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, | ||
| 128 | short, kdb_repeat_t); | ||
| 129 | extern int kdb_unregister(char *); | ||
| 130 | |||
| 131 | extern int kdb_getarea_size(void *, unsigned long, size_t); | 92 | extern int kdb_getarea_size(void *, unsigned long, size_t); |
| 132 | extern int kdb_putarea_size(unsigned long, void *, size_t); | 93 | extern int kdb_putarea_size(unsigned long, void *, size_t); |
| 133 | 94 | ||
| @@ -144,6 +105,7 @@ extern int kdb_getword(unsigned long *, unsigned long, size_t); | |||
| 144 | extern int kdb_putword(unsigned long, unsigned long, size_t); | 105 | extern int kdb_putword(unsigned long, unsigned long, size_t); |
| 145 | 106 | ||
| 146 | extern int kdbgetularg(const char *, unsigned long *); | 107 | extern int kdbgetularg(const char *, unsigned long *); |
| 108 | extern int kdbgetu64arg(const char *, u64 *); | ||
| 147 | extern char *kdbgetenv(const char *); | 109 | extern char *kdbgetenv(const char *); |
| 148 | extern int kdbgetaddrarg(int, const char **, int*, unsigned long *, | 110 | extern int kdbgetaddrarg(int, const char **, int*, unsigned long *, |
| 149 | long *, char **); | 111 | long *, char **); |
| @@ -255,14 +217,6 @@ extern void kdb_ps1(const struct task_struct *p); | |||
| 255 | extern void kdb_print_nameval(const char *name, unsigned long val); | 217 | extern void kdb_print_nameval(const char *name, unsigned long val); |
| 256 | extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info); | 218 | extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info); |
| 257 | extern void kdb_meminfo_proc_show(void); | 219 | extern void kdb_meminfo_proc_show(void); |
| 258 | #ifdef CONFIG_KALLSYMS | ||
| 259 | extern const char *kdb_walk_kallsyms(loff_t *pos); | ||
| 260 | #else /* ! CONFIG_KALLSYMS */ | ||
| 261 | static inline const char *kdb_walk_kallsyms(loff_t *pos) | ||
| 262 | { | ||
| 263 | return NULL; | ||
| 264 | } | ||
| 265 | #endif /* ! CONFIG_KALLSYMS */ | ||
| 266 | extern char *kdb_getstr(char *, size_t, char *); | 220 | extern char *kdb_getstr(char *, size_t, char *); |
| 267 | 221 | ||
| 268 | /* Defines for kdb_symbol_print */ | 222 | /* Defines for kdb_symbol_print */ |
diff --git a/kernel/early_res.c b/kernel/early_res.c
deleted file mode 100644
index 7bfae887f211..000000000000
--- a/kernel/early_res.c
+++ /dev/null
| @@ -1,590 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * early_res, could be used to replace bootmem | ||
| 3 | */ | ||
| 4 | #include <linux/kernel.h> | ||
| 5 | #include <linux/types.h> | ||
| 6 | #include <linux/init.h> | ||
| 7 | #include <linux/bootmem.h> | ||
| 8 | #include <linux/mm.h> | ||
| 9 | #include <linux/early_res.h> | ||
| 10 | #include <linux/slab.h> | ||
| 11 | #include <linux/kmemleak.h> | ||
| 12 | |||
| 13 | /* | ||
| 14 | * Early reserved memory areas. | ||
| 15 | */ | ||
| 16 | /* | ||
| 17 | * need to make sure this one is bigger enough before | ||
| 18 | * find_fw_memmap_area could be used | ||
| 19 | */ | ||
| 20 | #define MAX_EARLY_RES_X 32 | ||
| 21 | |||
| 22 | struct early_res { | ||
| 23 | u64 start, end; | ||
| 24 | char name[15]; | ||
| 25 | char overlap_ok; | ||
| 26 | }; | ||
| 27 | static struct early_res early_res_x[MAX_EARLY_RES_X] __initdata; | ||
| 28 | |||
| 29 | static int max_early_res __initdata = MAX_EARLY_RES_X; | ||
| 30 | static struct early_res *early_res __initdata = &early_res_x[0]; | ||
| 31 | static int early_res_count __initdata; | ||
| 32 | |||
| 33 | static int __init find_overlapped_early(u64 start, u64 end) | ||
| 34 | { | ||
| 35 | int i; | ||
| 36 | struct early_res *r; | ||
| 37 | |||
| 38 | for (i = 0; i < max_early_res && early_res[i].end; i++) { | ||
| 39 | r = &early_res[i]; | ||
| 40 | if (end > r->start && start < r->end) | ||
| 41 | break; | ||
| 42 | } | ||
| 43 | |||
| 44 | return i; | ||
| 45 | } | ||
| 46 | |||
| 47 | /* | ||
| 48 | * Drop the i-th range from the early reservation map, | ||
| 49 | * by copying any higher ranges down one over it, and | ||
| 50 | * clearing what had been the last slot. | ||
| 51 | */ | ||
| 52 | static void __init drop_range(int i) | ||
| 53 | { | ||
| 54 | int j; | ||
| 55 | |||
| 56 | for (j = i + 1; j < max_early_res && early_res[j].end; j++) | ||
| 57 | ; | ||
| 58 | |||
| 59 | memmove(&early_res[i], &early_res[i + 1], | ||
| 60 | (j - 1 - i) * sizeof(struct early_res)); | ||
| 61 | |||
| 62 | early_res[j - 1].end = 0; | ||
| 63 | early_res_count--; | ||
| 64 | } | ||
| 65 | |||
| 66 | static void __init drop_range_partial(int i, u64 start, u64 end) | ||
| 67 | { | ||
| 68 | u64 common_start, common_end; | ||
| 69 | u64 old_start, old_end; | ||
| 70 | |||
| 71 | old_start = early_res[i].start; | ||
| 72 | old_end = early_res[i].end; | ||
| 73 | common_start = max(old_start, start); | ||
| 74 | common_end = min(old_end, end); | ||
| 75 | |||
| 76 | /* no overlap ? */ | ||
| 77 | if (common_start >= common_end) | ||
| 78 | return; | ||
| 79 | |||
| 80 | if (old_start < common_start) { | ||
| 81 | /* make head segment */ | ||
| 82 | early_res[i].end = common_start; | ||
| 83 | if (old_end > common_end) { | ||
| 84 | char name[15]; | ||
| 85 | |||
| 86 | /* | ||
| 87 | * Save a local copy of the name, since the | ||
| 88 | * early_res array could get resized inside | ||
| 89 | * reserve_early_without_check() -> | ||
| 90 | * __check_and_double_early_res(), which would | ||
| 91 | * make the current name pointer invalid. | ||
| 92 | */ | ||
| 93 | strncpy(name, early_res[i].name, | ||
| 94 | sizeof(early_res[i].name) - 1); | ||
| 95 | /* add another for left over on tail */ | ||
| 96 | reserve_early_without_check(common_end, old_end, name); | ||
| 97 | } | ||
| 98 | return; | ||
| 99 | } else { | ||
| 100 | if (old_end > common_end) { | ||
| 101 | /* reuse the entry for tail left */ | ||
| 102 | early_res[i].start = common_end; | ||
| 103 | return; | ||
| 104 | } | ||
| 105 | /* all covered */ | ||
| 106 | drop_range(i); | ||
| 107 | } | ||
| 108 | } | ||
| 109 | |||
| 110 | /* | ||
| 111 | * Split any existing ranges that: | ||
| 112 | * 1) are marked 'overlap_ok', and | ||
| 113 | * 2) overlap with the stated range [start, end) | ||
| 114 | * into whatever portion (if any) of the existing range is entirely | ||
| 115 | * below or entirely above the stated range. Drop the portion | ||
| 116 | * of the existing range that overlaps with the stated range, | ||
| 117 | * which will allow the caller of this routine to then add that | ||
| 118 | * stated range without conflicting with any existing range. | ||
| 119 | */ | ||
| 120 | static void __init drop_overlaps_that_are_ok(u64 start, u64 end) | ||
| 121 | { | ||
| 122 | int i; | ||
| 123 | struct early_res *r; | ||
| 124 | u64 lower_start, lower_end; | ||
| 125 | u64 upper_start, upper_end; | ||
| 126 | char name[15]; | ||
| 127 | |||
| 128 | for (i = 0; i < max_early_res && early_res[i].end; i++) { | ||
| 129 | r = &early_res[i]; | ||
| 130 | |||
| 131 | /* Continue past non-overlapping ranges */ | ||
| 132 | if (end <= r->start || start >= r->end) | ||
| 133 | continue; | ||
| 134 | |||
| 135 | /* | ||
| 136 | * Leave non-ok overlaps as is; let caller | ||
| 137 | * panic "Overlapping early reservations" | ||
| 138 | * when it hits this overlap. | ||
| 139 | */ | ||
| 140 | if (!r->overlap_ok) | ||
| 141 | return; | ||
| 142 | |||
| 143 | /* | ||
| 144 | * We have an ok overlap. We will drop it from the early | ||
| 145 | * reservation map, and add back in any non-overlapping | ||
| 146 | * portions (lower or upper) as separate, overlap_ok, | ||
| 147 | * non-overlapping ranges. | ||
| 148 | */ | ||
| 149 | |||
| 150 | /* 1. Note any non-overlapping (lower or upper) ranges. */ | ||
| 151 | strncpy(name, r->name, sizeof(name) - 1); | ||
| 152 | |||
| 153 | lower_start = lower_end = 0; | ||
| 154 | upper_start = upper_end = 0; | ||
| 155 | if (r->start < start) { | ||
| 156 | lower_start = r->start; | ||
| 157 | lower_end = start; | ||
| 158 | } | ||
| 159 | if (r->end > end) { | ||
| 160 | upper_start = end; | ||
| 161 | upper_end = r->end; | ||
| 162 | } | ||
| 163 | |||
| 164 | /* 2. Drop the original ok overlapping range */ | ||
| 165 | drop_range(i); | ||
| 166 | |||
| 167 | i--; /* resume for-loop on copied down entry */ | ||
| 168 | |||
| 169 | /* 3. Add back in any non-overlapping ranges. */ | ||
| 170 | if (lower_end) | ||
| 171 | reserve_early_overlap_ok(lower_start, lower_end, name); | ||
| 172 | if (upper_end) | ||
| 173 | reserve_early_overlap_ok(upper_start, upper_end, name); | ||
| 174 | } | ||
| 175 | } | ||
| 176 | |||
| 177 | static void __init __reserve_early(u64 start, u64 end, char *name, | ||
| 178 | int overlap_ok) | ||
| 179 | { | ||
| 180 | int i; | ||
| 181 | struct early_res *r; | ||
| 182 | |||
| 183 | i = find_overlapped_early(start, end); | ||
| 184 | if (i >= max_early_res) | ||
| 185 | panic("Too many early reservations"); | ||
| 186 | r = &early_res[i]; | ||
| 187 | if (r->end) | ||
| 188 | panic("Overlapping early reservations " | ||
| 189 | "%llx-%llx %s to %llx-%llx %s\n", | ||
| 190 | start, end - 1, name ? name : "", r->start, | ||
| 191 | r->end - 1, r->name); | ||
| 192 | r->start = start; | ||
| 193 | r->end = end; | ||
| 194 | r->overlap_ok = overlap_ok; | ||
| 195 | if (name) | ||
| 196 | strncpy(r->name, name, sizeof(r->name) - 1); | ||
| 197 | early_res_count++; | ||
| 198 | } | ||
| 199 | |||
| 200 | /* | ||
| 201 | * A few early reservtations come here. | ||
| 202 | * | ||
| 203 | * The 'overlap_ok' in the name of this routine does -not- mean it | ||
| 204 | * is ok for these reservations to overlap an earlier reservation. | ||
| 205 | * Rather it means that it is ok for subsequent reservations to | ||
| 206 | * overlap this one. | ||
| 207 | * | ||
| 208 | * Use this entry point to reserve early ranges when you are doing | ||
| 209 | * so out of "Paranoia", reserving perhaps more memory than you need, | ||
| 210 | * just in case, and don't mind a subsequent overlapping reservation | ||
| 211 | * that is known to be needed. | ||
| 212 | * | ||
| 213 | * The drop_overlaps_that_are_ok() call here isn't really needed. | ||
| 214 | * It would be needed if we had two colliding 'overlap_ok' | ||
| 215 | * reservations, so that the second such would not panic on the | ||
| 216 | * overlap with the first. We don't have any such as of this | ||
| 217 | * writing, but might as well tolerate such if it happens in | ||
| 218 | * the future. | ||
| 219 | */ | ||
| 220 | void __init reserve_early_overlap_ok(u64 start, u64 end, char *name) | ||
| 221 | { | ||
| 222 | drop_overlaps_that_are_ok(start, end); | ||
| 223 | __reserve_early(start, end, name, 1); | ||
| 224 | } | ||
| 225 | |||
| 226 | static void __init __check_and_double_early_res(u64 ex_start, u64 ex_end) | ||
| 227 | { | ||
| 228 | u64 start, end, size, mem; | ||
| 229 | struct early_res *new; | ||
| 230 | |||
| 231 | /* do we have enough slots left ? */ | ||
| 232 | if ((max_early_res - early_res_count) > max(max_early_res/8, 2)) | ||
| 233 | return; | ||
| 234 | |||
| 235 | /* double it */ | ||
| 236 | mem = -1ULL; | ||
| 237 | size = sizeof(struct early_res) * max_early_res * 2; | ||
| 238 | if (early_res == early_res_x) | ||
| 239 | start = 0; | ||
| 240 | else | ||
| 241 | start = early_res[0].end; | ||
| 242 | end = ex_start; | ||
| 243 | if (start + size < end) | ||
| 244 | mem = find_fw_memmap_area(start, end, size, | ||
| 245 | sizeof(struct early_res)); | ||
| 246 | if (mem == -1ULL) { | ||
| 247 | start = ex_end; | ||
| 248 | end = get_max_mapped(); | ||
| 249 | if (start + size < end) | ||
| 250 | mem = find_fw_memmap_area(start, end, size, | ||
| 251 | sizeof(struct early_res)); | ||
| 252 | } | ||
| 253 | if (mem == -1ULL) | ||
| 254 | panic("can not find more space for early_res array"); | ||
| 255 | |||
| 256 | new = __va(mem); | ||
| 257 | /* save the first one for own */ | ||
| 258 | new[0].start = mem; | ||
| 259 | new[0].end = mem + size; | ||
| 260 | new[0].overlap_ok = 0; | ||
| 261 | /* copy old to new */ | ||
| 262 | if (early_res == early_res_x) { | ||
| 263 | memcpy(&new[1], &early_res[0], | ||
| 264 | sizeof(struct early_res) * max_early_res); | ||
| 265 | memset(&new[max_early_res+1], 0, | ||
| 266 | sizeof(struct early_res) * (max_early_res - 1)); | ||
| 267 | early_res_count++; | ||
| 268 | } else { | ||
| 269 | memcpy(&new[1], &early_res[1], | ||
| 270 | sizeof(struct early_res) * (max_early_res - 1)); | ||
| 271 | memset(&new[max_early_res], 0, | ||
| 272 | sizeof(struct early_res) * max_early_res); | ||
| 273 | } | ||
| 274 | memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res); | ||
| 275 | early_res = new; | ||
| 276 | max_early_res *= 2; | ||
| 277 | printk(KERN_DEBUG "early_res array is doubled to %d at [%llx - %llx]\n", | ||
| 278 | max_early_res, mem, mem + size - 1); | ||
| 279 | } | ||
| 280 | |||
| 281 | /* | ||
| 282 | * Most early reservations come here. | ||
| 283 | * | ||
| 284 | * We first have drop_overlaps_that_are_ok() drop any pre-existing | ||
| 285 | * 'overlap_ok' ranges, so that we can then reserve this memory | ||
| 286 | * range without risk of panic'ing on an overlapping overlap_ok | ||
| 287 | * early reservation. | ||
| 288 | */ | ||
| 289 | void __init reserve_early(u64 start, u64 end, char *name) | ||
| 290 | { | ||
| 291 | if (start >= end) | ||
| 292 | return; | ||
| 293 | |||
| 294 | __check_and_double_early_res(start, end); | ||
| 295 | |||
| 296 | drop_overlaps_that_are_ok(start, end); | ||
| 297 | __reserve_early(start, end, name, 0); | ||
| 298 | } | ||
| 299 | |||
| 300 | void __init reserve_early_without_check(u64 start, u64 end, char *name) | ||
| 301 | { | ||
| 302 | struct early_res *r; | ||
| 303 | |||
| 304 | if (start >= end) | ||
| 305 | return; | ||
| 306 | |||
| 307 | __check_and_double_early_res(start, end); | ||
| 308 | |||
| 309 | r = &early_res[early_res_count]; | ||
| 310 | |||
| 311 | r->start = start; | ||
| 312 | r->end = end; | ||
| 313 | r->overlap_ok = 0; | ||
| 314 | if (name) | ||
| 315 | strncpy(r->name, name, sizeof(r->name) - 1); | ||
| 316 | early_res_count++; | ||
| 317 | } | ||
| 318 | |||
| 319 | void __init free_early(u64 start, u64 end) | ||
| 320 | { | ||
| 321 | struct early_res *r; | ||
| 322 | int i; | ||
| 323 | |||
| 324 | kmemleak_free_part(__va(start), end - start); | ||
| 325 | |||
| 326 | i = find_overlapped_early(start, end); | ||
| 327 | r = &early_res[i]; | ||
| 328 | if (i >= max_early_res || r->end != end || r->start != start) | ||
| 329 | panic("free_early on not reserved area: %llx-%llx!", | ||
| 330 | start, end - 1); | ||
| 331 | |||
| 332 | drop_range(i); | ||
| 333 | } | ||
| 334 | |||
| 335 | void __init free_early_partial(u64 start, u64 end) | ||
| 336 | { | ||
| 337 | struct early_res *r; | ||
| 338 | int i; | ||
| 339 | |||
| 340 | kmemleak_free_part(__va(start), end - start); | ||
| 341 | |||
| 342 | if (start == end) | ||
| 343 | return; | ||
| 344 | |||
| 345 | if (WARN_ONCE(start > end, " wrong range [%#llx, %#llx]\n", start, end)) | ||
| 346 | return; | ||
| 347 | |||
| 348 | try_next: | ||
| 349 | i = find_overlapped_early(start, end); | ||
| 350 | if (i >= max_early_res) | ||
| 351 | return; | ||
| 352 | |||
| 353 | r = &early_res[i]; | ||
| 354 | /* hole ? */ | ||
| 355 | if (r->end >= end && r->start <= start) { | ||
| 356 | drop_range_partial(i, start, end); | ||
| 357 | return; | ||
| 358 | } | ||
| 359 | |||
| 360 | drop_range_partial(i, start, end); | ||
| 361 | goto try_next; | ||
| 362 | } | ||
| 363 | |||
| 364 | #ifdef CONFIG_NO_BOOTMEM | ||
| 365 | static void __init subtract_early_res(struct range *range, int az) | ||
| 366 | { | ||
| 367 | int i, count; | ||
| 368 | u64 final_start, final_end; | ||
| 369 | int idx = 0; | ||
| 370 | |||
| 371 | count = 0; | ||
| 372 | for (i = 0; i < max_early_res && early_res[i].end; i++) | ||
| 373 | count++; | ||
| 374 | |||
| 375 | /* need to skip first one ?*/ | ||
| 376 | if (early_res != early_res_x) | ||
| 377 | idx = 1; | ||
| 378 | |||
| 379 | #define DEBUG_PRINT_EARLY_RES 1 | ||
| 380 | |||
| 381 | #if DEBUG_PRINT_EARLY_RES | ||
| 382 | printk(KERN_INFO "Subtract (%d early reservations)\n", count); | ||
| 383 | #endif | ||
| 384 | for (i = idx; i < count; i++) { | ||
| 385 | struct early_res *r = &early_res[i]; | ||
| 386 | #if DEBUG_PRINT_EARLY_RES | ||
| 387 | printk(KERN_INFO " #%d [%010llx - %010llx] %15s\n", i, | ||
| 388 | r->start, r->end, r->name); | ||
| 389 | #endif | ||
| 390 | final_start = PFN_DOWN(r->start); | ||
| 391 | final_end = PFN_UP(r->end); | ||
| 392 | if (final_start >= final_end) | ||
| 393 | continue; | ||
| 394 | subtract_range(range, az, final_start, final_end); | ||
| 395 | } | ||
| 396 | |||
| 397 | } | ||
| 398 | |||
| 399 | int __init get_free_all_memory_range(struct range **rangep, int nodeid) | ||
| 400 | { | ||
| 401 | int i, count; | ||
| 402 | u64 start = 0, end; | ||
| 403 | u64 size; | ||
| 404 | u64 mem; | ||
| 405 | struct range *range; | ||
| 406 | int nr_range; | ||
| 407 | |||
| 408 | count = 0; | ||
| 409 | for (i = 0; i < max_early_res && early_res[i].end; i++) | ||
| 410 | count++; | ||
| 411 | |||
| 412 | count *= 2; | ||
| 413 | |||
| 414 | size = sizeof(struct range) * count; | ||
| 415 | end = get_max_mapped(); | ||
| 416 | #ifdef MAX_DMA32_PFN | ||
| 417 | if (end > (MAX_DMA32_PFN << PAGE_SHIFT)) | ||
| 418 | start = MAX_DMA32_PFN << PAGE_SHIFT; | ||
| 419 | #endif | ||
| 420 | mem = find_fw_memmap_area(start, end, size, sizeof(struct range)); | ||
| 421 | if (mem == -1ULL) | ||
| 422 | panic("can not find more space for range free"); | ||
| 423 | |||
| 424 | range = __va(mem); | ||
| 425 | /* use early_node_map[] and early_res to get range array at first */ | ||
| 426 | memset(range, 0, size); | ||
| 427 | nr_range = 0; | ||
| 428 | |||
| 429 | /* need to go over early_node_map to find out good range for node */ | ||
| 430 | nr_range = add_from_early_node_map(range, count, nr_range, nodeid); | ||
| 431 | #ifdef CONFIG_X86_32 | ||
| 432 | subtract_range(range, count, max_low_pfn, -1ULL); | ||
| 433 | #endif | ||
| 434 | subtract_early_res(range, count); | ||
| 435 | nr_range = clean_sort_range(range, count); | ||
| 436 | |||
| 437 | /* need to clear it ? */ | ||
| 438 | if (nodeid == MAX_NUMNODES) { | ||
| 439 | memset(&early_res[0], 0, | ||
| 440 | sizeof(struct early_res) * max_early_res); | ||
| 441 | early_res = NULL; | ||
| 442 | max_early_res = 0; | ||
| 443 | } | ||
| 444 | |||
| 445 | *rangep = range; | ||
| 446 | return nr_range; | ||
| 447 | } | ||
| 448 | #else | ||
| 449 | void __init early_res_to_bootmem(u64 start, u64 end) | ||
| 450 | { | ||
| 451 | int i, count; | ||
| 452 | u64 final_start, final_end; | ||
| 453 | int idx = 0; | ||
| 454 | |||
| 455 | count = 0; | ||
| 456 | for (i = 0; i < max_early_res && early_res[i].end; i++) | ||
| 457 | count++; | ||
| 458 | |||
| 459 | /* need to skip first one ?*/ | ||
| 460 | if (early_res != early_res_x) | ||
| 461 | idx = 1; | ||
| 462 | |||
| 463 | printk(KERN_INFO "(%d/%d early reservations) ==> bootmem [%010llx - %010llx]\n", | ||
| 464 | count - idx, max_early_res, start, end); | ||
| 465 | for (i = idx; i < count; i++) { | ||
| 466 | struct early_res *r = &early_res[i]; | ||
| 467 | printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i, | ||
| 468 | r->start, r->end, r->name); | ||
| 469 | final_start = max(start, r->start); | ||
| 470 | final_end = min(end, r->end); | ||
| 471 | if (final_start >= final_end) { | ||
| 472 | printk(KERN_CONT "\n"); | ||
| 473 | continue; | ||
| 474 | } | ||
| 475 | printk(KERN_CONT " ==> [%010llx - %010llx]\n", | ||
| 476 | final_start, final_end); | ||
| 477 | reserve_bootmem_generic(final_start, final_end - final_start, | ||
| 478 | BOOTMEM_DEFAULT); | ||
| 479 | } | ||
| 480 | /* clear them */ | ||
| 481 | memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res); | ||
| 482 | early_res = NULL; | ||
| 483 | max_early_res = 0; | ||
| 484 | early_res_count = 0; | ||
| 485 | } | ||
| 486 | #endif | ||
| 487 | |||
| 488 | /* Check for already reserved areas */ | ||
| 489 | static inline int __init bad_addr(u64 *addrp, u64 size, u64 align) | ||
| 490 | { | ||
| 491 | int i; | ||
| 492 | u64 addr = *addrp; | ||
| 493 | int changed = 0; | ||
| 494 | struct early_res *r; | ||
| 495 | again: | ||
| 496 | i = find_overlapped_early(addr, addr + size); | ||
| 497 | r = &early_res[i]; | ||
| 498 | if (i < max_early_res && r->end) { | ||
| 499 | *addrp = addr = round_up(r->end, align); | ||
| 500 | changed = 1; | ||
| 501 | goto again; | ||
| 502 | } | ||
| 503 | return changed; | ||
| 504 | } | ||
| 505 | |||
| 506 | /* Check for already reserved areas */ | ||
| 507 | static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align) | ||
| 508 | { | ||
| 509 | int i; | ||
| 510 | u64 addr = *addrp, last; | ||
| 511 | u64 size = *sizep; | ||
| 512 | int changed = 0; | ||
| 513 | again: | ||
| 514 | last = addr + size; | ||
| 515 | for (i = 0; i < max_early_res && early_res[i].end; i++) { | ||
| 516 | struct early_res *r = &early_res[i]; | ||
| 517 | if (last > r->start && addr < r->start) { | ||
| 518 | size = r->start - addr; | ||
| 519 | changed = 1; | ||
| 520 | goto again; | ||
| 521 | } | ||
| 522 | if (last > r->end && addr < r->end) { | ||
| 523 | addr = round_up(r->end, align); | ||
| 524 | size = last - addr; | ||
| 525 | changed = 1; | ||
| 526 | goto again; | ||
| 527 | } | ||
| 528 | if (last <= r->end && addr >= r->start) { | ||
| 529 | (*sizep)++; | ||
| 530 | return 0; | ||
| 531 | } | ||
| 532 | } | ||
| 533 | if (changed) { | ||
| 534 | *addrp = addr; | ||
| 535 | *sizep = size; | ||
| 536 | } | ||
| 537 | return changed; | ||
| 538 | } | ||
| 539 | |||
| 540 | /* | ||
| 541 | * Find a free area with specified alignment in a specific range. | ||
| 542 | * only with the area.between start to end is active range from early_node_map | ||
| 543 | * so they are good as RAM | ||
| 544 | */ | ||
| 545 | u64 __init find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end, | ||
| 546 | u64 size, u64 align) | ||
| 547 | { | ||
| 548 | u64 addr, last; | ||
| 549 | |||
| 550 | addr = round_up(ei_start, align); | ||
| 551 | if (addr < start) | ||
| 552 | addr = round_up(start, align); | ||
| 553 | if (addr >= ei_last) | ||
| 554 | goto out; | ||
| 555 | while (bad_addr(&addr, size, align) && addr+size <= ei_last) | ||
| 556 | ; | ||
| 557 | last = addr + size; | ||
| 558 | if (last > ei_last) | ||
| 559 | goto out; | ||
| 560 | if (last > end) | ||
| 561 | goto out; | ||
| 562 | |||
| 563 | return addr; | ||
| 564 | |||
| 565 | out: | ||
| 566 | return -1ULL; | ||
| 567 | } | ||
| 568 | |||
| 569 | u64 __init find_early_area_size(u64 ei_start, u64 ei_last, u64 start, | ||
| 570 | u64 *sizep, u64 align) | ||
| 571 | { | ||
| 572 | u64 addr, last; | ||
| 573 | |||
| 574 | addr = round_up(ei_start, align); | ||
| 575 | if (addr < start) | ||
| 576 | addr = round_up(start, align); | ||
| 577 | if (addr >= ei_last) | ||
| 578 | goto out; | ||
| 579 | *sizep = ei_last - addr; | ||
| 580 | while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last) | ||
| 581 | ; | ||
| 582 | last = addr + *sizep; | ||
| 583 | if (last > ei_last) | ||
| 584 | goto out; | ||
| 585 | |||
| 586 | return addr; | ||
| 587 | |||
| 588 | out: | ||
| 589 | return -1ULL; | ||
| 590 | } | ||
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index f83972b16564..9bd0934f6c33 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
| @@ -561,6 +561,7 @@ static ssize_t reset_read(struct file *file, char __user *addr, size_t len, | |||
| 561 | static const struct file_operations gcov_reset_fops = { | 561 | static const struct file_operations gcov_reset_fops = { |
| 562 | .write = reset_write, | 562 | .write = reset_write, |
| 563 | .read = reset_read, | 563 | .read = reset_read, |
| 564 | .llseek = noop_llseek, | ||
| 564 | }; | 565 | }; |
| 565 | 566 | ||
| 566 | /* | 567 | /* |
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ec4210c6501e..56a891914273 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
| @@ -2000,6 +2000,7 @@ static ssize_t write_enabled_file_bool(struct file *file, | |||
| 2000 | static const struct file_operations fops_kp = { | 2000 | static const struct file_operations fops_kp = { |
| 2001 | .read = read_enabled_file_bool, | 2001 | .read = read_enabled_file_bool, |
| 2002 | .write = write_enabled_file_bool, | 2002 | .write = write_enabled_file_bool, |
| 2003 | .llseek = default_llseek, | ||
| 2003 | }; | 2004 | }; |
| 2004 | 2005 | ||
| 2005 | static int __kprobes debugfs_kprobe_init(void) | 2006 | static int __kprobes debugfs_kprobe_init(void) |
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 645e541a45f6..c7a8f453919e 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
| @@ -110,6 +110,7 @@ static const struct file_operations pm_qos_power_fops = { | |||
| 110 | .write = pm_qos_power_write, | 110 | .write = pm_qos_power_write, |
| 111 | .open = pm_qos_power_open, | 111 | .open = pm_qos_power_open, |
| 112 | .release = pm_qos_power_release, | 112 | .release = pm_qos_power_release, |
| 113 | .llseek = noop_llseek, | ||
| 113 | }; | 114 | }; |
| 114 | 115 | ||
| 115 | /* unlocked internal variant */ | 116 | /* unlocked internal variant */ |
| @@ -398,7 +399,7 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, | |||
| 398 | } else | 399 | } else |
| 399 | return -EINVAL; | 400 | return -EINVAL; |
| 400 | 401 | ||
| 401 | pm_qos_req = (struct pm_qos_request_list *)filp->private_data; | 402 | pm_qos_req = filp->private_data; |
| 402 | pm_qos_update_request(pm_qos_req, value); | 403 | pm_qos_update_request(pm_qos_req, value); |
| 403 | 404 | ||
| 404 | return count; | 405 | return count; |
diff --git a/kernel/profile.c b/kernel/profile.c
index b22a899934cc..66f841b7fbd3 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
| @@ -555,6 +555,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf, | |||
| 555 | static const struct file_operations proc_profile_operations = { | 555 | static const struct file_operations proc_profile_operations = { |
| 556 | .read = read_profile, | 556 | .read = read_profile, |
| 557 | .write = write_profile, | 557 | .write = write_profile, |
| 558 | .llseek = default_llseek, | ||
| 558 | }; | 559 | }; |
| 559 | 560 | ||
| 560 | #ifdef CONFIG_SMP | 561 | #ifdef CONFIG_SMP |
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index a56f629b057a..66cb89bc5ef1 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
| @@ -76,7 +76,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) | |||
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | if (!lockwakeup && td->bkl == 4) { | 78 | if (!lockwakeup && td->bkl == 4) { |
| 79 | #ifdef CONFIG_LOCK_KERNEL | ||
| 79 | unlock_kernel(); | 80 | unlock_kernel(); |
| 81 | #endif | ||
| 80 | td->bkl = 0; | 82 | td->bkl = 0; |
| 81 | } | 83 | } |
| 82 | return 0; | 84 | return 0; |
| @@ -133,14 +135,18 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) | |||
| 133 | if (td->bkl) | 135 | if (td->bkl) |
| 134 | return 0; | 136 | return 0; |
| 135 | td->bkl = 1; | 137 | td->bkl = 1; |
| 138 | #ifdef CONFIG_LOCK_KERNEL | ||
| 136 | lock_kernel(); | 139 | lock_kernel(); |
| 140 | #endif | ||
| 137 | td->bkl = 4; | 141 | td->bkl = 4; |
| 138 | return 0; | 142 | return 0; |
| 139 | 143 | ||
| 140 | case RTTEST_UNLOCKBKL: | 144 | case RTTEST_UNLOCKBKL: |
| 141 | if (td->bkl != 4) | 145 | if (td->bkl != 4) |
| 142 | break; | 146 | break; |
| 147 | #ifdef CONFIG_LOCK_KERNEL | ||
| 143 | unlock_kernel(); | 148 | unlock_kernel(); |
| 149 | #endif | ||
| 144 | td->bkl = 0; | 150 | td->bkl = 0; |
| 145 | return 0; | 151 | return 0; |
| 146 | 152 | ||
diff --git a/kernel/softirq.c b/kernel/softirq.c
index fc978889b194..f02a9dfa19bc 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
| @@ -67,7 +67,7 @@ char *softirq_to_name[NR_SOFTIRQS] = { | |||
| 67 | * to the pending events, so lets the scheduler to balance | 67 | * to the pending events, so lets the scheduler to balance |
| 68 | * the softirq load for us. | 68 | * the softirq load for us. |
| 69 | */ | 69 | */ |
| 70 | void wakeup_softirqd(void) | 70 | static void wakeup_softirqd(void) |
| 71 | { | 71 | { |
| 72 | /* Interrupts are disabled: no need to stop preemption */ | 72 | /* Interrupts are disabled: no need to stop preemption */ |
| 73 | struct task_struct *tsk = __get_cpu_var(ksoftirqd); | 73 | struct task_struct *tsk = __get_cpu_var(ksoftirqd); |
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index bad369ec5403..c782fe9924c7 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
| @@ -50,6 +50,7 @@ cond_syscall(compat_sys_sendmsg); | |||
| 50 | cond_syscall(sys_recvmsg); | 50 | cond_syscall(sys_recvmsg); |
| 51 | cond_syscall(sys_recvmmsg); | 51 | cond_syscall(sys_recvmmsg); |
| 52 | cond_syscall(compat_sys_recvmsg); | 52 | cond_syscall(compat_sys_recvmsg); |
| 53 | cond_syscall(compat_sys_recv); | ||
| 53 | cond_syscall(compat_sys_recvfrom); | 54 | cond_syscall(compat_sys_recvfrom); |
| 54 | cond_syscall(compat_sys_recvmmsg); | 55 | cond_syscall(compat_sys_recvmmsg); |
| 55 | cond_syscall(sys_socketcall); | 56 | cond_syscall(sys_socketcall); |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 959f8d6c8cc1..bc251ed66724 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
| @@ -23,7 +23,6 @@ | |||
| 23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
| 24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
| 25 | #include <linux/debugfs.h> | 25 | #include <linux/debugfs.h> |
| 26 | #include <linux/smp_lock.h> | ||
| 27 | #include <linux/time.h> | 26 | #include <linux/time.h> |
| 28 | #include <linux/uaccess.h> | 27 | #include <linux/uaccess.h> |
| 29 | 28 | ||
| @@ -326,6 +325,7 @@ static const struct file_operations blk_dropped_fops = { | |||
| 326 | .owner = THIS_MODULE, | 325 | .owner = THIS_MODULE, |
| 327 | .open = blk_dropped_open, | 326 | .open = blk_dropped_open, |
| 328 | .read = blk_dropped_read, | 327 | .read = blk_dropped_read, |
| 328 | .llseek = default_llseek, | ||
| 329 | }; | 329 | }; |
| 330 | 330 | ||
| 331 | static int blk_msg_open(struct inode *inode, struct file *filp) | 331 | static int blk_msg_open(struct inode *inode, struct file *filp) |
| @@ -365,6 +365,7 @@ static const struct file_operations blk_msg_fops = { | |||
| 365 | .owner = THIS_MODULE, | 365 | .owner = THIS_MODULE, |
| 366 | .open = blk_msg_open, | 366 | .open = blk_msg_open, |
| 367 | .write = blk_msg_write, | 367 | .write = blk_msg_write, |
| 368 | .llseek = noop_llseek, | ||
| 368 | }; | 369 | }; |
| 369 | 370 | ||
| 370 | /* | 371 | /* |
| @@ -639,7 +640,6 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) | |||
| 639 | if (!q) | 640 | if (!q) |
| 640 | return -ENXIO; | 641 | return -ENXIO; |
| 641 | 642 | ||
| 642 | lock_kernel(); | ||
| 643 | mutex_lock(&bdev->bd_mutex); | 643 | mutex_lock(&bdev->bd_mutex); |
| 644 | 644 | ||
| 645 | switch (cmd) { | 645 | switch (cmd) { |
| @@ -667,7 +667,6 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) | |||
| 667 | } | 667 | } |
| 668 | 668 | ||
| 669 | mutex_unlock(&bdev->bd_mutex); | 669 | mutex_unlock(&bdev->bd_mutex); |
| 670 | unlock_kernel(); | ||
| 671 | return ret; | 670 | return ret; |
| 672 | } | 671 | } |
| 673 | 672 | ||
| @@ -1652,10 +1651,9 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, | |||
| 1652 | struct block_device *bdev; | 1651 | struct block_device *bdev; |
| 1653 | ssize_t ret = -ENXIO; | 1652 | ssize_t ret = -ENXIO; |
| 1654 | 1653 | ||
| 1655 | lock_kernel(); | ||
| 1656 | bdev = bdget(part_devt(p)); | 1654 | bdev = bdget(part_devt(p)); |
| 1657 | if (bdev == NULL) | 1655 | if (bdev == NULL) |
| 1658 | goto out_unlock_kernel; | 1656 | goto out; |
| 1659 | 1657 | ||
| 1660 | q = blk_trace_get_queue(bdev); | 1658 | q = blk_trace_get_queue(bdev); |
| 1661 | if (q == NULL) | 1659 | if (q == NULL) |
| @@ -1683,8 +1681,7 @@ out_unlock_bdev: | |||
| 1683 | mutex_unlock(&bdev->bd_mutex); | 1681 | mutex_unlock(&bdev->bd_mutex); |
| 1684 | out_bdput: | 1682 | out_bdput: |
| 1685 | bdput(bdev); | 1683 | bdput(bdev); |
| 1686 | out_unlock_kernel: | 1684 | out: |
| 1687 | unlock_kernel(); | ||
| 1688 | return ret; | 1685 | return ret; |
| 1689 | } | 1686 | } |
| 1690 | 1687 | ||
| @@ -1714,11 +1711,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, | |||
| 1714 | 1711 | ||
| 1715 | ret = -ENXIO; | 1712 | ret = -ENXIO; |
| 1716 | 1713 | ||
| 1717 | lock_kernel(); | ||
| 1718 | p = dev_to_part(dev); | 1714 | p = dev_to_part(dev); |
| 1719 | bdev = bdget(part_devt(p)); | 1715 | bdev = bdget(part_devt(p)); |
| 1720 | if (bdev == NULL) | 1716 | if (bdev == NULL) |
| 1721 | goto out_unlock_kernel; | 1717 | goto out; |
| 1722 | 1718 | ||
| 1723 | q = blk_trace_get_queue(bdev); | 1719 | q = blk_trace_get_queue(bdev); |
| 1724 | if (q == NULL) | 1720 | if (q == NULL) |
| @@ -1753,8 +1749,6 @@ out_unlock_bdev: | |||
| 1753 | mutex_unlock(&bdev->bd_mutex); | 1749 | mutex_unlock(&bdev->bd_mutex); |
| 1754 | out_bdput: | 1750 | out_bdput: |
| 1755 | bdput(bdev); | 1751 | bdput(bdev); |
| 1756 | out_unlock_kernel: | ||
| 1757 | unlock_kernel(); | ||
| 1758 | out: | 1752 | out: |
| 1759 | return ret ? ret : count; | 1753 | return ret ? ret : count; |
| 1760 | } | 1754 | } |
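Besides dropping the BKL from blk_trace_ioctl() and the sysfs attribute handlers, the hunks above give each blktrace debugfs file an explicit .llseek: default_llseek for the readable dropped-events counter and noop_llseek for the write-only message file, so neither relies on the old implicit default. A hedged sketch of the same convention for a hypothetical read-only debugfs attribute (the names and the dummy counter are illustrative):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>

static ssize_t example_count_read(struct file *filp, char __user *buf,
				  size_t count, loff_t *ppos)
{
	char tmp[16];
	int len = scnprintf(tmp, sizeof(tmp), "%u\n", 0);	/* sketch: no real counter */

	return simple_read_from_buffer(buf, count, ppos, tmp, len);
}

static const struct file_operations example_count_fops = {
	.owner	= THIS_MODULE,
	.read	= example_count_read,
	.llseek	= default_llseek,	/* state the seek semantics explicitly */
};

Such an attribute would be exposed with debugfs_create_file("example_count", 0444, parent, NULL, &example_count_fops); a write-only file would use noop_llseek instead.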
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index ebd80d50c474..f3dadae83883 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -800,6 +800,7 @@ static const struct file_operations ftrace_profile_fops = { | |||
| 800 | .open = tracing_open_generic, | 800 | .open = tracing_open_generic, |
| 801 | .read = ftrace_profile_read, | 801 | .read = ftrace_profile_read, |
| 802 | .write = ftrace_profile_write, | 802 | .write = ftrace_profile_write, |
| 803 | .llseek = default_llseek, | ||
| 803 | }; | 804 | }; |
| 804 | 805 | ||
| 805 | /* used to initialize the real stat files */ | 806 | /* used to initialize the real stat files */ |
| @@ -2669,6 +2670,7 @@ static const struct file_operations ftrace_graph_fops = { | |||
| 2669 | .read = seq_read, | 2670 | .read = seq_read, |
| 2670 | .write = ftrace_graph_write, | 2671 | .write = ftrace_graph_write, |
| 2671 | .release = ftrace_graph_release, | 2672 | .release = ftrace_graph_release, |
| 2673 | .llseek = seq_lseek, | ||
| 2672 | }; | 2674 | }; |
| 2673 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 2675 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
| 2674 | 2676 | ||
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index c5a632a669e1..c3dab054d18e 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -3974,6 +3974,7 @@ static const struct file_operations rb_simple_fops = { | |||
| 3974 | .open = tracing_open_generic, | 3974 | .open = tracing_open_generic, |
| 3975 | .read = rb_simple_read, | 3975 | .read = rb_simple_read, |
| 3976 | .write = rb_simple_write, | 3976 | .write = rb_simple_write, |
| 3977 | .llseek = default_llseek, | ||
| 3977 | }; | 3978 | }; |
| 3978 | 3979 | ||
| 3979 | 3980 | ||
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 398c0e8b332c..0725eeab1937 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
| @@ -932,6 +932,7 @@ static const struct file_operations ftrace_enable_fops = { | |||
| 932 | .open = tracing_open_generic, | 932 | .open = tracing_open_generic, |
| 933 | .read = event_enable_read, | 933 | .read = event_enable_read, |
| 934 | .write = event_enable_write, | 934 | .write = event_enable_write, |
| 935 | .llseek = default_llseek, | ||
| 935 | }; | 936 | }; |
| 936 | 937 | ||
| 937 | static const struct file_operations ftrace_event_format_fops = { | 938 | static const struct file_operations ftrace_event_format_fops = { |
| @@ -944,29 +945,34 @@ static const struct file_operations ftrace_event_format_fops = { | |||
| 944 | static const struct file_operations ftrace_event_id_fops = { | 945 | static const struct file_operations ftrace_event_id_fops = { |
| 945 | .open = tracing_open_generic, | 946 | .open = tracing_open_generic, |
| 946 | .read = event_id_read, | 947 | .read = event_id_read, |
| 948 | .llseek = default_llseek, | ||
| 947 | }; | 949 | }; |
| 948 | 950 | ||
| 949 | static const struct file_operations ftrace_event_filter_fops = { | 951 | static const struct file_operations ftrace_event_filter_fops = { |
| 950 | .open = tracing_open_generic, | 952 | .open = tracing_open_generic, |
| 951 | .read = event_filter_read, | 953 | .read = event_filter_read, |
| 952 | .write = event_filter_write, | 954 | .write = event_filter_write, |
| 955 | .llseek = default_llseek, | ||
| 953 | }; | 956 | }; |
| 954 | 957 | ||
| 955 | static const struct file_operations ftrace_subsystem_filter_fops = { | 958 | static const struct file_operations ftrace_subsystem_filter_fops = { |
| 956 | .open = tracing_open_generic, | 959 | .open = tracing_open_generic, |
| 957 | .read = subsystem_filter_read, | 960 | .read = subsystem_filter_read, |
| 958 | .write = subsystem_filter_write, | 961 | .write = subsystem_filter_write, |
| 962 | .llseek = default_llseek, | ||
| 959 | }; | 963 | }; |
| 960 | 964 | ||
| 961 | static const struct file_operations ftrace_system_enable_fops = { | 965 | static const struct file_operations ftrace_system_enable_fops = { |
| 962 | .open = tracing_open_generic, | 966 | .open = tracing_open_generic, |
| 963 | .read = system_enable_read, | 967 | .read = system_enable_read, |
| 964 | .write = system_enable_write, | 968 | .write = system_enable_write, |
| 969 | .llseek = default_llseek, | ||
| 965 | }; | 970 | }; |
| 966 | 971 | ||
| 967 | static const struct file_operations ftrace_show_header_fops = { | 972 | static const struct file_operations ftrace_show_header_fops = { |
| 968 | .open = tracing_open_generic, | 973 | .open = tracing_open_generic, |
| 969 | .read = show_header, | 974 | .read = show_header, |
| 975 | .llseek = default_llseek, | ||
| 970 | }; | 976 | }; |
| 971 | 977 | ||
| 972 | static struct dentry *event_trace_events_dir(void) | 978 | static struct dentry *event_trace_events_dir(void) |
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index 7b8ecd751d93..3c5c5dfea0b3 100644 --- a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c | |||
| @@ -13,7 +13,6 @@ | |||
| 13 | #include <linux/kdb.h> | 13 | #include <linux/kdb.h> |
| 14 | #include <linux/ftrace.h> | 14 | #include <linux/ftrace.h> |
| 15 | 15 | ||
| 16 | #include "../debug/kdb/kdb_private.h" | ||
| 17 | #include "trace.h" | 16 | #include "trace.h" |
| 18 | #include "trace_output.h" | 17 | #include "trace_output.h" |
| 19 | 18 | ||
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 544301d29dee..b8d2852baa4a 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -648,7 +648,7 @@ static int register_trace_probe(struct trace_probe *tp) | |||
| 648 | } | 648 | } |
| 649 | ret = register_probe_event(tp); | 649 | ret = register_probe_event(tp); |
| 650 | if (ret) { | 650 | if (ret) { |
| 651 | pr_warning("Faild to register probe event(%d)\n", ret); | 651 | pr_warning("Failed to register probe event(%d)\n", ret); |
| 652 | goto end; | 652 | goto end; |
| 653 | } | 653 | } |
| 654 | 654 | ||
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index a6b7e0e0f3eb..4c5dead0c239 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -195,6 +195,7 @@ static const struct file_operations stack_max_size_fops = { | |||
| 195 | .open = tracing_open_generic, | 195 | .open = tracing_open_generic, |
| 196 | .read = stack_max_size_read, | 196 | .read = stack_max_size_read, |
| 197 | .write = stack_max_size_write, | 197 | .write = stack_max_size_write, |
| 198 | .llseek = default_llseek, | ||
| 198 | }; | 199 | }; |
| 199 | 200 | ||
| 200 | static void * | 201 | static void * |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f77afd939229..e5ff2cbaadc2 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -42,9 +42,6 @@ | |||
| 42 | #include <linux/lockdep.h> | 42 | #include <linux/lockdep.h> |
| 43 | #include <linux/idr.h> | 43 | #include <linux/idr.h> |
| 44 | 44 | ||
| 45 | #define CREATE_TRACE_POINTS | ||
| 46 | #include <trace/events/workqueue.h> | ||
| 47 | |||
| 48 | #include "workqueue_sched.h" | 45 | #include "workqueue_sched.h" |
| 49 | 46 | ||
| 50 | enum { | 47 | enum { |
| @@ -257,6 +254,9 @@ EXPORT_SYMBOL_GPL(system_long_wq); | |||
| 257 | EXPORT_SYMBOL_GPL(system_nrt_wq); | 254 | EXPORT_SYMBOL_GPL(system_nrt_wq); |
| 258 | EXPORT_SYMBOL_GPL(system_unbound_wq); | 255 | EXPORT_SYMBOL_GPL(system_unbound_wq); |
| 259 | 256 | ||
| 257 | #define CREATE_TRACE_POINTS | ||
| 258 | #include <trace/events/workqueue.h> | ||
| 259 | |||
| 260 | #define for_each_busy_worker(worker, i, pos, gcwq) \ | 260 | #define for_each_busy_worker(worker, i, pos, gcwq) \ |
| 261 | for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \ | 261 | for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \ |
| 262 | hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry) | 262 | hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry) |
| @@ -310,21 +310,6 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask, | |||
| 310 | (cpu) < WORK_CPU_NONE; \ | 310 | (cpu) < WORK_CPU_NONE; \ |
| 311 | (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq))) | 311 | (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq))) |
| 312 | 312 | ||
| 313 | #ifdef CONFIG_LOCKDEP | ||
| 314 | /** | ||
| 315 | * in_workqueue_context() - in context of specified workqueue? | ||
| 316 | * @wq: the workqueue of interest | ||
| 317 | * | ||
| 318 | * Checks lockdep state to see if the current task is executing from | ||
| 319 | * within a workqueue item. This function exists only if lockdep is | ||
| 320 | * enabled. | ||
| 321 | */ | ||
| 322 | int in_workqueue_context(struct workqueue_struct *wq) | ||
| 323 | { | ||
| 324 | return lock_is_held(&wq->lockdep_map); | ||
| 325 | } | ||
| 326 | #endif | ||
| 327 | |||
| 328 | #ifdef CONFIG_DEBUG_OBJECTS_WORK | 313 | #ifdef CONFIG_DEBUG_OBJECTS_WORK |
| 329 | 314 | ||
| 330 | static struct debug_obj_descr work_debug_descr; | 315 | static struct debug_obj_descr work_debug_descr; |
| @@ -604,7 +589,9 @@ static bool keep_working(struct global_cwq *gcwq) | |||
| 604 | { | 589 | { |
| 605 | atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); | 590 | atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); |
| 606 | 591 | ||
| 607 | return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1; | 592 | return !list_empty(&gcwq->worklist) && |
| 593 | (atomic_read(nr_running) <= 1 || | ||
| 594 | gcwq->flags & GCWQ_HIGHPRI_PENDING); | ||
| 608 | } | 595 | } |
| 609 | 596 | ||
| 610 | /* Do we need a new worker? Called from manager. */ | 597 | /* Do we need a new worker? Called from manager. */ |
| @@ -997,6 +984,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
| 997 | 984 | ||
| 998 | /* gcwq determined, get cwq and queue */ | 985 | /* gcwq determined, get cwq and queue */ |
| 999 | cwq = get_cwq(gcwq->cpu, wq); | 986 | cwq = get_cwq(gcwq->cpu, wq); |
| 987 | trace_workqueue_queue_work(cpu, cwq, work); | ||
| 1000 | 988 | ||
| 1001 | BUG_ON(!list_empty(&work->entry)); | 989 | BUG_ON(!list_empty(&work->entry)); |
| 1002 | 990 | ||
| @@ -1004,6 +992,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
| 1004 | work_flags = work_color_to_flags(cwq->work_color); | 992 | work_flags = work_color_to_flags(cwq->work_color); |
| 1005 | 993 | ||
| 1006 | if (likely(cwq->nr_active < cwq->max_active)) { | 994 | if (likely(cwq->nr_active < cwq->max_active)) { |
| 995 | trace_workqueue_activate_work(work); | ||
| 1007 | cwq->nr_active++; | 996 | cwq->nr_active++; |
| 1008 | worklist = gcwq_determine_ins_pos(gcwq, cwq); | 997 | worklist = gcwq_determine_ins_pos(gcwq, cwq); |
| 1009 | } else { | 998 | } else { |
| @@ -1679,6 +1668,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
| 1679 | struct work_struct, entry); | 1668 | struct work_struct, entry); |
| 1680 | struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); | 1669 | struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); |
| 1681 | 1670 | ||
| 1671 | trace_workqueue_activate_work(work); | ||
| 1682 | move_linked_works(work, pos, NULL); | 1672 | move_linked_works(work, pos, NULL); |
| 1683 | __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); | 1673 | __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); |
| 1684 | cwq->nr_active++; | 1674 | cwq->nr_active++; |
| @@ -2326,27 +2316,17 @@ out_unlock: | |||
| 2326 | } | 2316 | } |
| 2327 | EXPORT_SYMBOL_GPL(flush_workqueue); | 2317 | EXPORT_SYMBOL_GPL(flush_workqueue); |
| 2328 | 2318 | ||
| 2329 | /** | 2319 | static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, |
| 2330 | * flush_work - block until a work_struct's callback has terminated | 2320 | bool wait_executing) |
| 2331 | * @work: the work which is to be flushed | ||
| 2332 | * | ||
| 2333 | * Returns false if @work has already terminated. | ||
| 2334 | * | ||
| 2335 | * It is expected that, prior to calling flush_work(), the caller has | ||
| 2336 | * arranged for the work to not be requeued, otherwise it doesn't make | ||
| 2337 | * sense to use this function. | ||
| 2338 | */ | ||
| 2339 | int flush_work(struct work_struct *work) | ||
| 2340 | { | 2321 | { |
| 2341 | struct worker *worker = NULL; | 2322 | struct worker *worker = NULL; |
| 2342 | struct global_cwq *gcwq; | 2323 | struct global_cwq *gcwq; |
| 2343 | struct cpu_workqueue_struct *cwq; | 2324 | struct cpu_workqueue_struct *cwq; |
| 2344 | struct wq_barrier barr; | ||
| 2345 | 2325 | ||
| 2346 | might_sleep(); | 2326 | might_sleep(); |
| 2347 | gcwq = get_work_gcwq(work); | 2327 | gcwq = get_work_gcwq(work); |
| 2348 | if (!gcwq) | 2328 | if (!gcwq) |
| 2349 | return 0; | 2329 | return false; |
| 2350 | 2330 | ||
| 2351 | spin_lock_irq(&gcwq->lock); | 2331 | spin_lock_irq(&gcwq->lock); |
| 2352 | if (!list_empty(&work->entry)) { | 2332 | if (!list_empty(&work->entry)) { |
| @@ -2359,28 +2339,127 @@ int flush_work(struct work_struct *work) | |||
| 2359 | cwq = get_work_cwq(work); | 2339 | cwq = get_work_cwq(work); |
| 2360 | if (unlikely(!cwq || gcwq != cwq->gcwq)) | 2340 | if (unlikely(!cwq || gcwq != cwq->gcwq)) |
| 2361 | goto already_gone; | 2341 | goto already_gone; |
| 2362 | } else { | 2342 | } else if (wait_executing) { |
| 2363 | worker = find_worker_executing_work(gcwq, work); | 2343 | worker = find_worker_executing_work(gcwq, work); |
| 2364 | if (!worker) | 2344 | if (!worker) |
| 2365 | goto already_gone; | 2345 | goto already_gone; |
| 2366 | cwq = worker->current_cwq; | 2346 | cwq = worker->current_cwq; |
| 2367 | } | 2347 | } else |
| 2348 | goto already_gone; | ||
| 2368 | 2349 | ||
| 2369 | insert_wq_barrier(cwq, &barr, work, worker); | 2350 | insert_wq_barrier(cwq, barr, work, worker); |
| 2370 | spin_unlock_irq(&gcwq->lock); | 2351 | spin_unlock_irq(&gcwq->lock); |
| 2371 | 2352 | ||
| 2372 | lock_map_acquire(&cwq->wq->lockdep_map); | 2353 | lock_map_acquire(&cwq->wq->lockdep_map); |
| 2373 | lock_map_release(&cwq->wq->lockdep_map); | 2354 | lock_map_release(&cwq->wq->lockdep_map); |
| 2374 | 2355 | return true; | |
| 2375 | wait_for_completion(&barr.done); | ||
| 2376 | destroy_work_on_stack(&barr.work); | ||
| 2377 | return 1; | ||
| 2378 | already_gone: | 2356 | already_gone: |
| 2379 | spin_unlock_irq(&gcwq->lock); | 2357 | spin_unlock_irq(&gcwq->lock); |
| 2380 | return 0; | 2358 | return false; |
| 2359 | } | ||
| 2360 | |||
| 2361 | /** | ||
| 2362 | * flush_work - wait for a work to finish executing the last queueing instance | ||
| 2363 | * @work: the work to flush | ||
| 2364 | * | ||
| 2365 | * Wait until @work has finished execution. This function considers | ||
| 2366 | * only the last queueing instance of @work. If @work has been | ||
| 2367 | * enqueued across different CPUs on a non-reentrant workqueue or on | ||
| 2368 | * multiple workqueues, @work might still be executing on return on | ||
| 2369 | * some of the CPUs from earlier queueing. | ||
| 2370 | * | ||
| 2371 | * If @work was queued only on a non-reentrant, ordered or unbound | ||
| 2372 | * workqueue, @work is guaranteed to be idle on return if it hasn't | ||
| 2373 | * been requeued since flush started. | ||
| 2374 | * | ||
| 2375 | * RETURNS: | ||
| 2376 | * %true if flush_work() waited for the work to finish execution, | ||
| 2377 | * %false if it was already idle. | ||
| 2378 | */ | ||
| 2379 | bool flush_work(struct work_struct *work) | ||
| 2380 | { | ||
| 2381 | struct wq_barrier barr; | ||
| 2382 | |||
| 2383 | if (start_flush_work(work, &barr, true)) { | ||
| 2384 | wait_for_completion(&barr.done); | ||
| 2385 | destroy_work_on_stack(&barr.work); | ||
| 2386 | return true; | ||
| 2387 | } else | ||
| 2388 | return false; | ||
| 2381 | } | 2389 | } |
| 2382 | EXPORT_SYMBOL_GPL(flush_work); | 2390 | EXPORT_SYMBOL_GPL(flush_work); |
| 2383 | 2391 | ||
| 2392 | static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work) | ||
| 2393 | { | ||
| 2394 | struct wq_barrier barr; | ||
| 2395 | struct worker *worker; | ||
| 2396 | |||
| 2397 | spin_lock_irq(&gcwq->lock); | ||
| 2398 | |||
| 2399 | worker = find_worker_executing_work(gcwq, work); | ||
| 2400 | if (unlikely(worker)) | ||
| 2401 | insert_wq_barrier(worker->current_cwq, &barr, work, worker); | ||
| 2402 | |||
| 2403 | spin_unlock_irq(&gcwq->lock); | ||
| 2404 | |||
| 2405 | if (unlikely(worker)) { | ||
| 2406 | wait_for_completion(&barr.done); | ||
| 2407 | destroy_work_on_stack(&barr.work); | ||
| 2408 | return true; | ||
| 2409 | } else | ||
| 2410 | return false; | ||
| 2411 | } | ||
| 2412 | |||
| 2413 | static bool wait_on_work(struct work_struct *work) | ||
| 2414 | { | ||
| 2415 | bool ret = false; | ||
| 2416 | int cpu; | ||
| 2417 | |||
| 2418 | might_sleep(); | ||
| 2419 | |||
| 2420 | lock_map_acquire(&work->lockdep_map); | ||
| 2421 | lock_map_release(&work->lockdep_map); | ||
| 2422 | |||
| 2423 | for_each_gcwq_cpu(cpu) | ||
| 2424 | ret |= wait_on_cpu_work(get_gcwq(cpu), work); | ||
| 2425 | return ret; | ||
| 2426 | } | ||
| 2427 | |||
| 2428 | /** | ||
| 2429 | * flush_work_sync - wait until a work has finished execution | ||
| 2430 | * @work: the work to flush | ||
| 2431 | * | ||
| 2432 | * Wait until @work has finished execution. On return, it's | ||
| 2433 | * guaranteed that all queueing instances of @work which happened | ||
| 2434 | * before this function is called are finished. In other words, if | ||
| 2435 | * @work hasn't been requeued since this function was called, @work is | ||
| 2436 | * guaranteed to be idle on return. | ||
| 2437 | * | ||
| 2438 | * RETURNS: | ||
| 2439 | * %true if flush_work_sync() waited for the work to finish execution, | ||
| 2440 | * %false if it was already idle. | ||
| 2441 | */ | ||
| 2442 | bool flush_work_sync(struct work_struct *work) | ||
| 2443 | { | ||
| 2444 | struct wq_barrier barr; | ||
| 2445 | bool pending, waited; | ||
| 2446 | |||
| 2447 | /* we'll wait for executions separately, queue barr only if pending */ | ||
| 2448 | pending = start_flush_work(work, &barr, false); | ||
| 2449 | |||
| 2450 | /* wait for executions to finish */ | ||
| 2451 | waited = wait_on_work(work); | ||
| 2452 | |||
| 2453 | /* wait for the pending one */ | ||
| 2454 | if (pending) { | ||
| 2455 | wait_for_completion(&barr.done); | ||
| 2456 | destroy_work_on_stack(&barr.work); | ||
| 2457 | } | ||
| 2458 | |||
| 2459 | return pending || waited; | ||
| 2460 | } | ||
| 2461 | EXPORT_SYMBOL_GPL(flush_work_sync); | ||
| 2462 | |||
| 2384 | /* | 2463 | /* |
| 2385 | * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, | 2464 | * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, |
| 2386 | * so this work can't be re-armed in any way. | 2465 | * so this work can't be re-armed in any way. |
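After this refactor, flush_work() shares its locking with the new start_flush_work() helper, returns bool, and waits only for the last queueing instance of the work. A hedged usage sketch under those semantics (the device structure and work item are hypothetical):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_dev {
	struct work_struct refresh_work;
};

/* Sketch: wait for the most recently queued refresh_work to finish.
 * If the work can be requeued concurrently, cancel_work_sync() or
 * flush_work_sync() give the stronger "idle on return" guarantee. */
static void example_quiesce(struct example_dev *dev)
{
	if (flush_work(&dev->refresh_work))
		pr_debug("refresh_work was pending or running; waited for it\n");
}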
| @@ -2423,39 +2502,7 @@ static int try_to_grab_pending(struct work_struct *work) | |||
| 2423 | return ret; | 2502 | return ret; |
| 2424 | } | 2503 | } |
| 2425 | 2504 | ||
| 2426 | static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work) | 2505 | static bool __cancel_work_timer(struct work_struct *work, |
| 2427 | { | ||
| 2428 | struct wq_barrier barr; | ||
| 2429 | struct worker *worker; | ||
| 2430 | |||
| 2431 | spin_lock_irq(&gcwq->lock); | ||
| 2432 | |||
| 2433 | worker = find_worker_executing_work(gcwq, work); | ||
| 2434 | if (unlikely(worker)) | ||
| 2435 | insert_wq_barrier(worker->current_cwq, &barr, work, worker); | ||
| 2436 | |||
| 2437 | spin_unlock_irq(&gcwq->lock); | ||
| 2438 | |||
| 2439 | if (unlikely(worker)) { | ||
| 2440 | wait_for_completion(&barr.done); | ||
| 2441 | destroy_work_on_stack(&barr.work); | ||
| 2442 | } | ||
| 2443 | } | ||
| 2444 | |||
| 2445 | static void wait_on_work(struct work_struct *work) | ||
| 2446 | { | ||
| 2447 | int cpu; | ||
| 2448 | |||
| 2449 | might_sleep(); | ||
| 2450 | |||
| 2451 | lock_map_acquire(&work->lockdep_map); | ||
| 2452 | lock_map_release(&work->lockdep_map); | ||
| 2453 | |||
| 2454 | for_each_gcwq_cpu(cpu) | ||
| 2455 | wait_on_cpu_work(get_gcwq(cpu), work); | ||
| 2456 | } | ||
| 2457 | |||
| 2458 | static int __cancel_work_timer(struct work_struct *work, | ||
| 2459 | struct timer_list* timer) | 2506 | struct timer_list* timer) |
| 2460 | { | 2507 | { |
| 2461 | int ret; | 2508 | int ret; |
| @@ -2472,42 +2519,81 @@ static int __cancel_work_timer(struct work_struct *work, | |||
| 2472 | } | 2519 | } |
| 2473 | 2520 | ||
| 2474 | /** | 2521 | /** |
| 2475 | * cancel_work_sync - block until a work_struct's callback has terminated | 2522 | * cancel_work_sync - cancel a work and wait for it to finish |
| 2476 | * @work: the work which is to be flushed | 2523 | * @work: the work to cancel |
| 2477 | * | ||
| 2478 | * Returns true if @work was pending. | ||
| 2479 | * | 2524 | * |
| 2480 | * cancel_work_sync() will cancel the work if it is queued. If the work's | 2525 | * Cancel @work and wait for its execution to finish. This function |
| 2481 | * callback appears to be running, cancel_work_sync() will block until it | 2526 | * can be used even if the work re-queues itself or migrates to |
| 2482 | * has completed. | 2527 | * another workqueue. On return from this function, @work is |
| 2528 | * guaranteed to be not pending or executing on any CPU. | ||
| 2483 | * | 2529 | * |
| 2484 | * It is possible to use this function if the work re-queues itself. It can | 2530 | * cancel_work_sync(&delayed_work->work) must not be used for |
| 2485 | * cancel the work even if it migrates to another workqueue, however in that | 2531 | * delayed_work's. Use cancel_delayed_work_sync() instead. |
| 2486 | * case it only guarantees that work->func() has completed on the last queued | ||
| 2487 | * workqueue. | ||
| 2488 | * | 2532 | * |
| 2489 | * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not | 2533 | * The caller must ensure that the workqueue on which @work was last |
| 2490 | * pending, otherwise it goes into a busy-wait loop until the timer expires. | ||
| 2491 | * | ||
| 2492 | * The caller must ensure that workqueue_struct on which this work was last | ||
| 2493 | * queued can't be destroyed before this function returns. | 2534 | * queued can't be destroyed before this function returns. |
| 2535 | * | ||
| 2536 | * RETURNS: | ||
| 2537 | * %true if @work was pending, %false otherwise. | ||
| 2494 | */ | 2538 | */ |
| 2495 | int cancel_work_sync(struct work_struct *work) | 2539 | bool cancel_work_sync(struct work_struct *work) |
| 2496 | { | 2540 | { |
| 2497 | return __cancel_work_timer(work, NULL); | 2541 | return __cancel_work_timer(work, NULL); |
| 2498 | } | 2542 | } |
| 2499 | EXPORT_SYMBOL_GPL(cancel_work_sync); | 2543 | EXPORT_SYMBOL_GPL(cancel_work_sync); |
| 2500 | 2544 | ||
| 2501 | /** | 2545 | /** |
| 2502 | * cancel_delayed_work_sync - reliably kill off a delayed work. | 2546 | * flush_delayed_work - wait for a dwork to finish executing the last queueing |
| 2503 | * @dwork: the delayed work struct | 2547 | * @dwork: the delayed work to flush |
| 2548 | * | ||
| 2549 | * Delayed timer is cancelled and the pending work is queued for | ||
| 2550 | * immediate execution. Like flush_work(), this function only | ||
| 2551 | * considers the last queueing instance of @dwork. | ||
| 2552 | * | ||
| 2553 | * RETURNS: | ||
| 2554 | * %true if flush_work() waited for the work to finish execution, | ||
| 2555 | * %false if it was already idle. | ||
| 2556 | */ | ||
| 2557 | bool flush_delayed_work(struct delayed_work *dwork) | ||
| 2558 | { | ||
| 2559 | if (del_timer_sync(&dwork->timer)) | ||
| 2560 | __queue_work(raw_smp_processor_id(), | ||
| 2561 | get_work_cwq(&dwork->work)->wq, &dwork->work); | ||
| 2562 | return flush_work(&dwork->work); | ||
| 2563 | } | ||
| 2564 | EXPORT_SYMBOL(flush_delayed_work); | ||
| 2565 | |||
| 2566 | /** | ||
| 2567 | * flush_delayed_work_sync - wait for a dwork to finish | ||
| 2568 | * @dwork: the delayed work to flush | ||
| 2504 | * | 2569 | * |
| 2505 | * Returns true if @dwork was pending. | 2570 | * Delayed timer is cancelled and the pending work is queued for |
| 2571 | * execution immediately. Other than timer handling, its behavior | ||
| 2572 | * is identical to flush_work_sync(). | ||
| 2506 | * | 2573 | * |
| 2507 | * It is possible to use this function if @dwork rearms itself via queue_work() | 2574 | * RETURNS: |
| 2508 | * or queue_delayed_work(). See also the comment for cancel_work_sync(). | 2575 | * %true if flush_work_sync() waited for the work to finish execution, |
| 2576 | * %false if it was already idle. | ||
| 2509 | */ | 2577 | */ |
| 2510 | int cancel_delayed_work_sync(struct delayed_work *dwork) | 2578 | bool flush_delayed_work_sync(struct delayed_work *dwork) |
| 2579 | { | ||
| 2580 | if (del_timer_sync(&dwork->timer)) | ||
| 2581 | __queue_work(raw_smp_processor_id(), | ||
| 2582 | get_work_cwq(&dwork->work)->wq, &dwork->work); | ||
| 2583 | return flush_work_sync(&dwork->work); | ||
| 2584 | } | ||
| 2585 | EXPORT_SYMBOL(flush_delayed_work_sync); | ||
| 2586 | |||
| 2587 | /** | ||
| 2588 | * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish | ||
| 2589 | * @dwork: the delayed work cancel | ||
| 2590 | * | ||
| 2591 | * This is cancel_work_sync() for delayed works. | ||
| 2592 | * | ||
| 2593 | * RETURNS: | ||
| 2594 | * %true if @dwork was pending, %false otherwise. | ||
| 2595 | */ | ||
| 2596 | bool cancel_delayed_work_sync(struct delayed_work *dwork) | ||
| 2511 | { | 2597 | { |
| 2512 | return __cancel_work_timer(&dwork->work, &dwork->timer); | 2598 | return __cancel_work_timer(&dwork->work, &dwork->timer); |
| 2513 | } | 2599 | } |
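The reworked kernel-doc above separates flushing (run and wait for the pending instance) from cancelling (remove it and wait for any execution). A hedged teardown sketch with hypothetical delayed-work members showing both choices:

#include <linux/workqueue.h>

struct example_dev {
	struct delayed_work poll_dwork;
	struct delayed_work stats_dwork;
};

static void example_shutdown(struct example_dev *dev)
{
	/* Run any pending poll immediately and wait for it to finish. */
	flush_delayed_work(&dev->poll_dwork);

	/* The stats work may re-arm itself, so cancel it and wait instead;
	 * on return it is neither pending nor executing on any CPU. */
	cancel_delayed_work_sync(&dev->stats_dwork);
}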
| @@ -2559,23 +2645,6 @@ int schedule_delayed_work(struct delayed_work *dwork, | |||
| 2559 | EXPORT_SYMBOL(schedule_delayed_work); | 2645 | EXPORT_SYMBOL(schedule_delayed_work); |
| 2560 | 2646 | ||
| 2561 | /** | 2647 | /** |
| 2562 | * flush_delayed_work - block until a dwork_struct's callback has terminated | ||
| 2563 | * @dwork: the delayed work which is to be flushed | ||
| 2564 | * | ||
| 2565 | * Any timeout is cancelled, and any pending work is run immediately. | ||
| 2566 | */ | ||
| 2567 | void flush_delayed_work(struct delayed_work *dwork) | ||
| 2568 | { | ||
| 2569 | if (del_timer_sync(&dwork->timer)) { | ||
| 2570 | __queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq, | ||
| 2571 | &dwork->work); | ||
| 2572 | put_cpu(); | ||
| 2573 | } | ||
| 2574 | flush_work(&dwork->work); | ||
| 2575 | } | ||
| 2576 | EXPORT_SYMBOL(flush_delayed_work); | ||
| 2577 | |||
| 2578 | /** | ||
| 2579 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay | 2648 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay |
| 2580 | * @cpu: cpu to use | 2649 | * @cpu: cpu to use |
| 2581 | * @dwork: job to be done | 2650 | * @dwork: job to be done |
| @@ -2592,13 +2661,15 @@ int schedule_delayed_work_on(int cpu, | |||
| 2592 | EXPORT_SYMBOL(schedule_delayed_work_on); | 2661 | EXPORT_SYMBOL(schedule_delayed_work_on); |
| 2593 | 2662 | ||
| 2594 | /** | 2663 | /** |
| 2595 | * schedule_on_each_cpu - call a function on each online CPU from keventd | 2664 | * schedule_on_each_cpu - execute a function synchronously on each online CPU |
| 2596 | * @func: the function to call | 2665 | * @func: the function to call |
| 2597 | * | 2666 | * |
| 2598 | * Returns zero on success. | 2667 | * schedule_on_each_cpu() executes @func on each online CPU using the |
| 2599 | * Returns -ve errno on failure. | 2668 | * system workqueue and blocks until all CPUs have completed. |
| 2600 | * | ||
| 2601 | * schedule_on_each_cpu() is very slow. | 2669 | * schedule_on_each_cpu() is very slow. |
| 2670 | * | ||
| 2671 | * RETURNS: | ||
| 2672 | * 0 on success, -errno on failure. | ||
| 2602 | */ | 2673 | */ |
| 2603 | int schedule_on_each_cpu(work_func_t func) | 2674 | int schedule_on_each_cpu(work_func_t func) |
| 2604 | { | 2675 | { |
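The clarified comment notes that schedule_on_each_cpu() runs the function synchronously on every online CPU through the system workqueue and is deliberately slow. A hedged sketch (the cache-drain callback is hypothetical):

#include <linux/workqueue.h>

/* Hypothetical per-CPU drain; executed once on each online CPU. */
static void example_drain_local(struct work_struct *unused)
{
	/* touch this CPU's private state here */
}

static int example_drain_all(void)
{
	return schedule_on_each_cpu(example_drain_local);	/* 0 or -errno */
}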
| @@ -2720,7 +2791,9 @@ static int alloc_cwqs(struct workqueue_struct *wq) | |||
| 2720 | } | 2791 | } |
| 2721 | } | 2792 | } |
| 2722 | 2793 | ||
| 2723 | /* just in case, make sure it's actually aligned */ | 2794 | /* just in case, make sure it's actually aligned |
| 2795 | * - this is affected by PERCPU() alignment in vmlinux.lds.S | ||
| 2796 | */ | ||
| 2724 | BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); | 2797 | BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); |
| 2725 | return wq->cpu_wq.v ? 0 : -ENOMEM; | 2798 | return wq->cpu_wq.v ? 0 : -ENOMEM; |
| 2726 | } | 2799 | } |
| @@ -2764,6 +2837,13 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, | |||
| 2764 | unsigned int cpu; | 2837 | unsigned int cpu; |
| 2765 | 2838 | ||
| 2766 | /* | 2839 | /* |
| 2840 | * Workqueues which may be used during memory reclaim should | ||
| 2841 | * have a rescuer to guarantee forward progress. | ||
| 2842 | */ | ||
| 2843 | if (flags & WQ_MEM_RECLAIM) | ||
| 2844 | flags |= WQ_RESCUER; | ||
| 2845 | |||
| 2846 | /* | ||
| 2767 | * Unbound workqueues aren't concurrency managed and should be | 2847 | * Unbound workqueues aren't concurrency managed and should be |
| 2768 | * dispatched to workers immediately. | 2848 | * dispatched to workers immediately. |
| 2769 | */ | 2849 | */ |
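The new check makes WQ_MEM_RECLAIM imply WQ_RESCUER, so any workqueue that may be flushed during memory reclaim always gets a rescuer thread to guarantee forward progress. A hedged allocation sketch (the workqueue name is illustrative):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_init(void)
{
	/* May run on the writeback/reclaim path, so request a rescuer via
	 * WQ_MEM_RECLAIM; max_active of 1 allows one item in flight per CPU. */
	example_wq = alloc_workqueue("example_reclaim", WQ_MEM_RECLAIM, 1);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}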
