Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bounds.c          23
-rw-r--r--  kernel/cpuset.c          22
-rw-r--r--  kernel/exit.c             2
-rw-r--r--  kernel/fork.c             6
-rw-r--r--  kernel/hrtimer.c         15
-rw-r--r--  kernel/kexec.c            3
-rw-r--r--  kernel/kprobes.c        349
-rw-r--r--  kernel/power/console.c   27
-rw-r--r--  kernel/sys.c             27
9 files changed, 361 insertions(+), 113 deletions(-)
diff --git a/kernel/bounds.c b/kernel/bounds.c
new file mode 100644
index 000000000000..c3c55544db2f
--- /dev/null
+++ b/kernel/bounds.c
@@ -0,0 +1,23 @@
+/*
+ * Generate definitions needed by the preprocessor.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+
+#define __GENERATING_BOUNDS_H
+/* Include headers that define the enum constants of interest */
+#include <linux/page-flags.h>
+#include <linux/mmzone.h>
+
+#define DEFINE(sym, val) \
+	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+void foo(void)
+{
+	/* The enum constants to put into include/linux/bounds.h */
+	DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
+	DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
+	/* End of constants */
+}
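Note: the "->" markers in the raw asm output are picked up by a Kbuild sed rule and turned into include/linux/bounds.h. The generated header ends up looking roughly like the sketch below; the values shown are illustrative only, since they depend on the configured page flags and zones.

/* Sketch of the generated include/linux/bounds.h (values illustrative). */
#ifndef __LINUX_BOUNDS_H__
#define __LINUX_BOUNDS_H__
/*
 * DO NOT MODIFY.
 * This file was generated by Kbuild
 */

#define NR_PAGEFLAGS 20		/* from __NR_PAGEFLAGS */
#define MAX_NR_ZONES 4		/* from __MAX_NR_ZONES */

#endif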
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 8b35fbd8292f..024888bb9814 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -941,7 +941,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	cs->mems_generation = cpuset_mems_generation++;
 	mutex_unlock(&callback_mutex);
 
-	cpuset_being_rebound = cs;		/* causes mpol_copy() rebind */
+	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
 
 	fudge = 10;				/* spare mmarray[] slots */
 	fudge += cpus_weight(cs->cpus_allowed);	/* imagine one fork-bomb/cpu */
@@ -992,7 +992,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	 * rebind the vma mempolicies of each mm in mmarray[] to their
 	 * new cpuset, and release that mm.  The mpol_rebind_mm()
 	 * call takes mmap_sem, which we couldn't take while holding
-	 * tasklist_lock.  Forks can happen again now - the mpol_copy()
+	 * tasklist_lock.  Forks can happen again now - the mpol_dup()
 	 * cpuset_being_rebound check will catch such forks, and rebind
 	 * their vma mempolicies too.  Because we still hold the global
 	 * cgroup_mutex, we know that no other rebind effort will
@@ -1958,22 +1958,14 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
 }
 
 /**
- * cpuset_zonelist_valid_mems_allowed - check zonelist vs. current mems_allowed
- * @zl: the zonelist to be checked
+ * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
+ * @nodemask: the nodemask to be checked
  *
- * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
+ * Are any of the nodes in the nodemask allowed in current->mems_allowed?
  */
-int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
+int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
 {
-	int i;
-
-	for (i = 0; zl->zones[i]; i++) {
-		int nid = zone_to_nid(zl->zones[i]);
-
-		if (node_isset(nid, current->mems_allowed))
-			return 1;
-	}
-	return 0;
+	return nodes_intersects(*nodemask, current->mems_allowed);
 }
 
 /*
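The rewritten helper reduces the old per-zone loop to a single bitmap intersection. For illustration, the hypothetical helper below (not part of the patch) computes the same answer that nodes_intersects() now returns in one operation:

/* Hypothetical equivalent of the new one-liner, for illustration only. */
static int any_node_allowed(nodemask_t *nodemask)
{
	int nid;

	for_each_node_mask(nid, *nodemask)	/* walk the set bits */
		if (node_isset(nid, current->mems_allowed))
			return 1;	/* at least one allowed node */
	return 0;
}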
diff --git a/kernel/exit.c b/kernel/exit.c
index 97f609f574b1..2a9d98c641ac 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -967,7 +967,7 @@ NORET_TYPE void do_exit(long code)
 	proc_exit_connector(tsk);
 	exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
-	mpol_free(tsk->mempolicy);
+	mpol_put(tsk->mempolicy);
 	tsk->mempolicy = NULL;
 #endif
 #ifdef CONFIG_FUTEX
diff --git a/kernel/fork.c b/kernel/fork.c
index c674aa8d3c31..6067e429f281 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -279,7 +279,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		if (!tmp)
 			goto fail_nomem;
 		*tmp = *mpnt;
-		pol = mpol_copy(vma_policy(mpnt));
+		pol = mpol_dup(vma_policy(mpnt));
 		retval = PTR_ERR(pol);
 		if (IS_ERR(pol))
 			goto fail_nomem_policy;
@@ -1116,7 +1116,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->audit_context = NULL;
 	cgroup_fork(p);
 #ifdef CONFIG_NUMA
-	p->mempolicy = mpol_copy(p->mempolicy);
+	p->mempolicy = mpol_dup(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
@@ -1374,7 +1374,7 @@ bad_fork_cleanup_security:
 	security_task_free(p);
 bad_fork_cleanup_policy:
 #ifdef CONFIG_NUMA
-	mpol_free(p->mempolicy);
+	mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
 #endif
 	cgroup_exit(p, cgroup_callbacks_done);
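mpol_copy()/mpol_free() are renamed to mpol_dup()/mpol_put(), matching the kernel's usual dup/put convention for reference-counted objects; the calling convention is unchanged. The pairing, sketched outside any particular call site (surrounding context assumed):

/* Sketch of the dup/put pairing used by copy_process() and do_exit(). */
struct mempolicy *pol;

pol = mpol_dup(current->mempolicy);	/* take our own copy/reference */
if (IS_ERR(pol))
	return PTR_ERR(pol);
/* ... use pol ... */
mpol_put(pol);				/* drop it when done */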
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f78777abe769..e379ef0e9c20 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1080,8 +1080,19 @@ static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
 			 * If the timer was rearmed on another CPU, reprogram
 			 * the event device.
 			 */
-			if (timer->base->first == &timer->node)
-				hrtimer_reprogram(timer, timer->base);
+			struct hrtimer_clock_base *base = timer->base;
+
+			if (base->first == &timer->node &&
+			    hrtimer_reprogram(timer, base)) {
+				/*
+				 * Timer is expired. Thus move it from tree to
+				 * pending list again.
+				 */
+				__remove_hrtimer(timer, base,
+						 HRTIMER_STATE_PENDING, 0);
+				list_add_tail(&timer->cb_entry,
+					      &base->cpu_base->cb_pending);
+			}
 		}
 	}
 	spin_unlock_irq(&cpu_base->lock);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 6782dce93d01..cb85c79989b4 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1405,6 +1405,9 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
 	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
 	VMCOREINFO_NUMBER(NR_FREE_PAGES);
+	VMCOREINFO_NUMBER(PG_lru);
+	VMCOREINFO_NUMBER(PG_private);
+	VMCOREINFO_NUMBER(PG_swapcache);
 
 	arch_crash_save_vmcoreinfo();
 
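VMCOREINFO_NUMBER() records an enum's runtime value as a "NUMBER(PG_lru)=<value>" style line in the vmcoreinfo ELF note, so dump filters such as makedumpfile can exclude free, LRU, or swap-cache pages without hardcoding page-flag values. Its definition in include/linux/kexec.h is approximately the following (quoted from memory, as an assumption):

/* Approximate definition of the macro used above. */
#define VMCOREINFO_NUMBER(name) \
	vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)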
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index fcfb580c3afc..1e0250cb9486 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -72,6 +72,18 @@ DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
+/*
+ * Normally, functions that we'd want to prohibit kprobes in, are marked
+ * __kprobes. But, there are cases where such functions already belong to
+ * a different section (__sched for preempt_schedule)
+ *
+ * For such cases, we now have a blacklist
+ */
+struct kprobe_blackpoint kprobe_blacklist[] = {
+	{"preempt_schedule",},
+	{NULL}    /* Terminator */
+};
+
 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -417,6 +429,21 @@ static inline void free_rp_inst(struct kretprobe *rp)
 	}
 }
 
+static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
+{
+	unsigned long flags;
+	struct kretprobe_instance *ri;
+	struct hlist_node *pos, *next;
+	/* No race here */
+	spin_lock_irqsave(&kretprobe_lock, flags);
+	hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
+		ri->rp = NULL;
+		hlist_del(&ri->uflist);
+	}
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
+	free_rp_inst(rp);
+}
+
 /*
  * Keep all fields in the kprobe consistent
  */
@@ -492,9 +519,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 
 static int __kprobes in_kprobes_functions(unsigned long addr)
 {
+	struct kprobe_blackpoint *kb;
+
 	if (addr >= (unsigned long)__kprobes_text_start &&
 	    addr < (unsigned long)__kprobes_text_end)
 		return -EINVAL;
+	/*
+	 * If there exists a kprobe_blacklist, verify and
+	 * fail any probe registration in the prohibited area
+	 */
+	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+		if (kb->start_addr) {
+			if (addr >= kb->start_addr &&
+			    addr < (kb->start_addr + kb->range))
+				return -EINVAL;
+		}
+	}
 	return 0;
 }
 
@@ -555,6 +595,7 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	}
 
 	p->nmissed = 0;
+	INIT_LIST_HEAD(&p->list);
 	mutex_lock(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
@@ -581,35 +622,28 @@ out:
 	return ret;
 }
 
-int __kprobes register_kprobe(struct kprobe *p)
-{
-	return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
-}
-
-void __kprobes unregister_kprobe(struct kprobe *p)
+/*
+ * Unregister a kprobe without a scheduler synchronization.
+ */
+static int __kprobes __unregister_kprobe_top(struct kprobe *p)
 {
-	struct module *mod;
 	struct kprobe *old_p, *list_p;
-	int cleanup_p;
 
-	mutex_lock(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
-	if (unlikely(!old_p)) {
-		mutex_unlock(&kprobe_mutex);
-		return;
-	}
+	if (unlikely(!old_p))
+		return -EINVAL;
+
 	if (p != old_p) {
 		list_for_each_entry_rcu(list_p, &old_p->list, list)
 			if (list_p == p)
 			/* kprobe p is a valid probe */
 				goto valid_p;
-		mutex_unlock(&kprobe_mutex);
-		return;
+		return -EINVAL;
 	}
 valid_p:
 	if (old_p == p ||
 	    (old_p->pre_handler == aggr_pre_handler &&
-	     p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
+	     list_is_singular(&old_p->list))) {
 		/*
 		 * Only probe on the hash list. Disarm only if kprobes are
 		 * enabled - otherwise, the breakpoint would already have
@@ -618,43 +652,97 @@ valid_p:
 		if (kprobe_enabled)
 			arch_disarm_kprobe(p);
 		hlist_del_rcu(&old_p->hlist);
-		cleanup_p = 1;
 	} else {
+		if (p->break_handler)
+			old_p->break_handler = NULL;
+		if (p->post_handler) {
+			list_for_each_entry_rcu(list_p, &old_p->list, list) {
+				if ((list_p != p) && (list_p->post_handler))
+					goto noclean;
+			}
+			old_p->post_handler = NULL;
+		}
+noclean:
 		list_del_rcu(&p->list);
-		cleanup_p = 0;
 	}
+	return 0;
+}
 
-	mutex_unlock(&kprobe_mutex);
+static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
+{
+	struct module *mod;
+	struct kprobe *old_p;
 
-	synchronize_sched();
 	if (p->mod_refcounted) {
 		mod = module_text_address((unsigned long)p->addr);
 		if (mod)
 			module_put(mod);
 	}
 
-	if (cleanup_p) {
-		if (p != old_p) {
-			list_del_rcu(&p->list);
+	if (list_empty(&p->list) || list_is_singular(&p->list)) {
+		if (!list_empty(&p->list)) {
+			/* "p" is the last child of an aggr_kprobe */
+			old_p = list_entry(p->list.next, struct kprobe, list);
+			list_del(&p->list);
 			kfree(old_p);
 		}
 		arch_remove_kprobe(p);
-	} else {
-		mutex_lock(&kprobe_mutex);
-		if (p->break_handler)
-			old_p->break_handler = NULL;
-		if (p->post_handler){
-			list_for_each_entry_rcu(list_p, &old_p->list, list){
-			if (list_p->post_handler){
-				cleanup_p = 2;
-				break;
-			}
-			}
-		if (cleanup_p == 0)
-			old_p->post_handler = NULL;
-		}
-		mutex_unlock(&kprobe_mutex);
-	}
+	}
+}
+
+static int __register_kprobes(struct kprobe **kps, int num,
+	unsigned long called_from)
+{
+	int i, ret = 0;
+
+	if (num <= 0)
+		return -EINVAL;
+	for (i = 0; i < num; i++) {
+		ret = __register_kprobe(kps[i], called_from);
+		if (ret < 0 && i > 0) {
+			unregister_kprobes(kps, i);
+			break;
+		}
+	}
+	return ret;
+}
+
+/*
+ * Registration and unregistration functions for kprobe.
+ */
+int __kprobes register_kprobe(struct kprobe *p)
+{
+	return __register_kprobes(&p, 1,
+				  (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kprobe(struct kprobe *p)
+{
+	unregister_kprobes(&p, 1);
+}
+
+int __kprobes register_kprobes(struct kprobe **kps, int num)
+{
+	return __register_kprobes(kps, num,
+				  (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kprobes(struct kprobe **kps, int num)
+{
+	int i;
+
+	if (num <= 0)
+		return;
+	mutex_lock(&kprobe_mutex);
+	for (i = 0; i < num; i++)
+		if (__unregister_kprobe_top(kps[i]) < 0)
+			kps[i]->addr = NULL;
+	mutex_unlock(&kprobe_mutex);
+
+	synchronize_sched();
+	for (i = 0; i < num; i++)
+		if (kps[i]->addr)
+			__unregister_kprobe_bottom(kps[i]);
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
@@ -667,24 +755,69 @@ unsigned long __weak arch_deref_entry_point(void *entry)
 	return (unsigned long)entry;
 }
 
+static int __register_jprobes(struct jprobe **jps, int num,
+	unsigned long called_from)
+{
+	struct jprobe *jp;
+	int ret = 0, i;
+
+	if (num <= 0)
+		return -EINVAL;
+	for (i = 0; i < num; i++) {
+		unsigned long addr;
+		jp = jps[i];
+		addr = arch_deref_entry_point(jp->entry);
+
+		if (!kernel_text_address(addr))
+			ret = -EINVAL;
+		else {
+			/* Todo: Verify probepoint is a function entry point */
+			jp->kp.pre_handler = setjmp_pre_handler;
+			jp->kp.break_handler = longjmp_break_handler;
+			ret = __register_kprobe(&jp->kp, called_from);
+		}
+		if (ret < 0 && i > 0) {
+			unregister_jprobes(jps, i);
+			break;
+		}
+	}
+	return ret;
+}
+
 int __kprobes register_jprobe(struct jprobe *jp)
 {
-	unsigned long addr = arch_deref_entry_point(jp->entry);
-
-	if (!kernel_text_address(addr))
-		return -EINVAL;
-
-	/* Todo: Verify probepoint is a function entry point */
-	jp->kp.pre_handler = setjmp_pre_handler;
-	jp->kp.break_handler = longjmp_break_handler;
-
-	return __register_kprobe(&jp->kp,
+	return __register_jprobes(&jp, 1,
 		(unsigned long)__builtin_return_address(0));
 }
 
 void __kprobes unregister_jprobe(struct jprobe *jp)
 {
-	unregister_kprobe(&jp->kp);
+	unregister_jprobes(&jp, 1);
 }
 
+int __kprobes register_jprobes(struct jprobe **jps, int num)
+{
+	return __register_jprobes(jps, num,
+		(unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_jprobes(struct jprobe **jps, int num)
+{
+	int i;
+
+	if (num <= 0)
+		return;
+	mutex_lock(&kprobe_mutex);
+	for (i = 0; i < num; i++)
+		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
+			jps[i]->kp.addr = NULL;
+	mutex_unlock(&kprobe_mutex);
+
+	synchronize_sched();
+	for (i = 0; i < num; i++) {
+		if (jps[i]->kp.addr)
+			__unregister_kprobe_bottom(&jps[i]->kp);
+	}
+}
+
 #ifdef CONFIG_KRETPROBES
@@ -725,7 +858,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 	return 0;
 }
 
-int __kprobes register_kretprobe(struct kretprobe *rp)
+static int __kprobes __register_kretprobe(struct kretprobe *rp,
+	unsigned long called_from)
 {
 	int ret = 0;
 	struct kretprobe_instance *inst;
@@ -771,46 +905,101 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 
 	rp->nmissed = 0;
 	/* Establish function entry probe point */
-	if ((ret = __register_kprobe(&rp->kp,
-		(unsigned long)__builtin_return_address(0))) != 0)
+	ret = __register_kprobe(&rp->kp, called_from);
+	if (ret != 0)
 		free_rp_inst(rp);
 	return ret;
 }
 
+static int __register_kretprobes(struct kretprobe **rps, int num,
+	unsigned long called_from)
+{
+	int ret = 0, i;
+
+	if (num <= 0)
+		return -EINVAL;
+	for (i = 0; i < num; i++) {
+		ret = __register_kretprobe(rps[i], called_from);
+		if (ret < 0 && i > 0) {
+			unregister_kretprobes(rps, i);
+			break;
+		}
+	}
+	return ret;
+}
+
+int __kprobes register_kretprobe(struct kretprobe *rp)
+{
+	return __register_kretprobes(&rp, 1,
+		(unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kretprobe(struct kretprobe *rp)
+{
+	unregister_kretprobes(&rp, 1);
+}
+
+int __kprobes register_kretprobes(struct kretprobe **rps, int num)
+{
+	return __register_kretprobes(rps, num,
+		(unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
+{
+	int i;
+
+	if (num <= 0)
+		return;
+	mutex_lock(&kprobe_mutex);
+	for (i = 0; i < num; i++)
+		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
+			rps[i]->kp.addr = NULL;
+	mutex_unlock(&kprobe_mutex);
+
+	synchronize_sched();
+	for (i = 0; i < num; i++) {
+		if (rps[i]->kp.addr) {
+			__unregister_kprobe_bottom(&rps[i]->kp);
+			cleanup_rp_inst(rps[i]);
+		}
+	}
+}
+
 #else /* CONFIG_KRETPROBES */
 int __kprobes register_kretprobe(struct kretprobe *rp)
 {
 	return -ENOSYS;
 }
 
-static int __kprobes pre_handler_kretprobe(struct kprobe *p,
-					   struct pt_regs *regs)
+int __kprobes register_kretprobes(struct kretprobe **rps, int num)
 {
-	return 0;
+	return -ENOSYS;
 }
-#endif /* CONFIG_KRETPROBES */
-
 void __kprobes unregister_kretprobe(struct kretprobe *rp)
 {
-	unsigned long flags;
-	struct kretprobe_instance *ri;
-	struct hlist_node *pos, *next;
+}
 
-	unregister_kprobe(&rp->kp);
+void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
+{
+}
 
-	/* No race here */
-	spin_lock_irqsave(&kretprobe_lock, flags);
-	hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
-		ri->rp = NULL;
-		hlist_del(&ri->uflist);
-	}
-	spin_unlock_irqrestore(&kretprobe_lock, flags);
-	free_rp_inst(rp);
+static int __kprobes pre_handler_kretprobe(struct kprobe *p,
+					   struct pt_regs *regs)
+{
+	return 0;
 }
 
+#endif /* CONFIG_KRETPROBES */
+
 static int __init init_kprobes(void)
 {
 	int i, err = 0;
+	unsigned long offset = 0, size = 0;
+	char *modname, namebuf[128];
+	const char *symbol_name;
+	void *addr;
+	struct kprobe_blackpoint *kb;
 
 	/* FIXME allocate the probe table, currently defined statically */
 	/* initialize all list heads */
@@ -819,6 +1008,28 @@ static int __init init_kprobes(void)
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
 	}
 
+	/*
+	 * Lookup and populate the kprobe_blacklist.
+	 *
+	 * Unlike the kretprobe blacklist, we'll need to determine
+	 * the range of addresses that belong to the said functions,
+	 * since a kprobe need not necessarily be at the beginning
+	 * of a function.
+	 */
+	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+		kprobe_lookup_name(kb->name, addr);
+		if (!addr)
+			continue;
+
+		kb->start_addr = (unsigned long)addr;
+		symbol_name = kallsyms_lookup(kb->start_addr,
+				&size, &offset, &modname, namebuf);
+		if (!symbol_name)
+			kb->range = 0;
+		else
+			kb->range = size;
+	}
+
 	if (kretprobe_blacklist_size) {
 		/* lookup the function address from its name */
 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
@@ -1066,8 +1277,12 @@ module_init(init_kprobes);
 
 EXPORT_SYMBOL_GPL(register_kprobe);
 EXPORT_SYMBOL_GPL(unregister_kprobe);
+EXPORT_SYMBOL_GPL(register_kprobes);
+EXPORT_SYMBOL_GPL(unregister_kprobes);
 EXPORT_SYMBOL_GPL(register_jprobe);
 EXPORT_SYMBOL_GPL(unregister_jprobe);
+EXPORT_SYMBOL_GPL(register_jprobes);
+EXPORT_SYMBOL_GPL(unregister_jprobes);
 #ifdef CONFIG_KPROBES
 EXPORT_SYMBOL_GPL(jprobe_return);
 #endif
@@ -1075,4 +1290,6 @@ EXPORT_SYMBOL_GPL(jprobe_return);
 #ifdef CONFIG_KPROBES
 EXPORT_SYMBOL_GPL(register_kretprobe);
 EXPORT_SYMBOL_GPL(unregister_kretprobe);
+EXPORT_SYMBOL_GPL(register_kretprobes);
+EXPORT_SYMBOL_GPL(unregister_kretprobes);
 #endif
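The new register_kprobes()/unregister_kprobes() batch calls (and their jprobe/kretprobe counterparts) take an array of probes, register them with all-or-nothing semantics (a failure unregisters whatever was registered so far), and pay a single synchronize_sched() for the whole batch on unregistration. A minimal module using the batch API might look like the sketch below; the probed symbols and the empty pre-handler are illustrative:

/* Sketch: batch registration of two kprobes. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

static int pre(struct kprobe *p, struct pt_regs *regs)
{
	return 0;		/* observe only; let execution continue */
}

static struct kprobe kp1 = { .symbol_name = "do_fork", .pre_handler = pre };
static struct kprobe kp2 = { .symbol_name = "do_exit", .pre_handler = pre };
static struct kprobe *kps[] = { &kp1, &kp2 };

static int __init batch_init(void)
{
	/* If the second registration fails, the first is unregistered too. */
	return register_kprobes(kps, ARRAY_SIZE(kps));
}

static void __exit batch_exit(void)
{
	/* One synchronize_sched() covers the whole array. */
	unregister_kprobes(kps, ARRAY_SIZE(kps));
}

module_init(batch_init);
module_exit(batch_exit);
MODULE_LICENSE("GPL");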
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 89bcf4973ee5..b8628be2a465 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -7,17 +7,39 @@
 #include <linux/vt_kern.h>
 #include <linux/kbd_kern.h>
 #include <linux/console.h>
+#include <linux/module.h>
 #include "power.h"
 
 #if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
 #define SUSPEND_CONSOLE	(MAX_NR_CONSOLES-1)
 
 static int orig_fgconsole, orig_kmsg;
+static int disable_vt_switch;
+
+/*
+ * Normally during a suspend, we allocate a new console and switch to it.
+ * When we resume, we switch back to the original console.  This switch
+ * can be slow, so on systems where the framebuffer can handle restoration
+ * of video registers anyways, there's little point in doing the console
+ * switch.  This function allows you to disable it by passing it '0'.
+ */
+void pm_set_vt_switch(int do_switch)
+{
+	acquire_console_sem();
+	disable_vt_switch = !do_switch;
+	release_console_sem();
+}
+EXPORT_SYMBOL(pm_set_vt_switch);
 
 int pm_prepare_console(void)
 {
 	acquire_console_sem();
 
+	if (disable_vt_switch) {
+		release_console_sem();
+		return 0;
+	}
+
 	orig_fgconsole = fg_console;
 
 	if (vc_allocate(SUSPEND_CONSOLE)) {
@@ -50,9 +72,12 @@ int pm_prepare_console(void)
 void pm_restore_console(void)
 {
 	acquire_console_sem();
+	if (disable_vt_switch) {
+		release_console_sem();
+		return;
+	}
 	set_console(orig_fgconsole);
 	release_console_sem();
 	kmsg_redirect = orig_kmsg;
-	return;
 }
 #endif
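A framebuffer driver that restores the video state itself can now call pm_set_vt_switch(0) once at init to skip the slow VT switch across suspend/resume. A hypothetical probe function, for illustration only (the driver name is made up, and the header carrying the pm_set_vt_switch() prototype is an assumption):

/* Sketch: opting out of the suspend console switch from a driver. */
#include <linux/platform_device.h>

static int myfb_probe(struct platform_device *pdev)
{
	/* ... map registers, register the framebuffer ... */

	pm_set_vt_switch(0);	/* we restore video registers ourselves */
	return 0;
}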
diff --git a/kernel/sys.c b/kernel/sys.c
index 6a0cc71ee88d..f2a451366953 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1632,10 +1632,9 @@ asmlinkage long sys_umask(int mask)
 asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 			  unsigned long arg4, unsigned long arg5)
 {
-	long error;
+	long uninitialized_var(error);
 
-	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
-	if (error)
+	if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error))
 		return error;
 
 	switch (option) {
@@ -1688,17 +1687,6 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 		error = -EINVAL;
 		break;
 
-	case PR_GET_KEEPCAPS:
-		if (current->keep_capabilities)
-			error = 1;
-		break;
-	case PR_SET_KEEPCAPS:
-		if (arg2 != 0 && arg2 != 1) {
-			error = -EINVAL;
-			break;
-		}
-		current->keep_capabilities = arg2;
-		break;
 	case PR_SET_NAME: {
 		struct task_struct *me = current;
 		unsigned char ncomm[sizeof(me->comm)];
@@ -1732,17 +1720,6 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 	case PR_SET_SECCOMP:
 		error = prctl_set_seccomp(arg2);
 		break;
-
-	case PR_CAPBSET_READ:
-		if (!cap_valid(arg2))
-			return -EINVAL;
-		return !!cap_raised(current->cap_bset, arg2);
-	case PR_CAPBSET_DROP:
-#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
-		return cap_prctl_drop(arg2);
-#else
-		return -EINVAL;
-#endif
 	case PR_GET_TSC:
 		error = GET_TSC_CTL(arg2);
 		break;
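The PR_GET/SET_KEEPCAPS and PR_CAPBSET_* cases move out of sys_prctl() into the security module: security_task_prctl() now both filters and handles options, reporting the result through the new &error out-parameter. The userspace ABI is unchanged; a small sketch of a program exercising it:

/* Userspace sketch: keep-capabilities prctl still behaves as before. */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	printf("keepcaps: %d\n", prctl(PR_GET_KEEPCAPS, 0, 0, 0, 0));
	if (prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0) == 0)	/* retain caps across setuid */
		printf("keepcaps now: %d\n", prctl(PR_GET_KEEPCAPS, 0, 0, 0, 0));
	return 0;
}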