author	Russell King <rmk+kernel@arm.linux.org.uk>	2014-03-19 16:15:46 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2014-03-19 16:15:46 -0400
commit	566b60c04ab230b8cc3845f964306f99504b18df (patch)
tree	1897a526488c3496f4ffe5eebb39a1dd41ab731d /kernel/sched
parent	3ba4cea21901d90d703b52e4a806fbafa86037a6 (diff)
parent	c7edc9e326d53ca5ef9bed82de0740c6b107d55b (diff)
Merge branch 'uprobes-v7' of git://git.linaro.org/people/dave.long/linux into devel-stable
This patch series adds basic uprobes support to ARM. It is based on patches developed earlier by Rabin Vincent; that earlier approach, which added hooks directly into the kprobes instruction parsing code, was not well received. The present series instead separates the ARM instruction parsing code in kprobes out into a distinct set of functions that both kprobes and uprobes can use, with each of the two providing its own table of semantic actions to process the results of the parsing.
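The shape of that split can be illustrated with a small sketch. Nothing below is actual code from the series; the names (arm_probe_decode, struct decode_actions) are hypothetical, and only the idea — one client-agnostic decoder dispatching into a caller-supplied action table — is taken from the description above.

	/*
	 * Hypothetical sketch of a shared decoder with per-client action
	 * tables.  kprobes and uprobes would each fill in their own
	 * struct decode_actions; the parser itself stays common.
	 */
	#include <stdint.h>

	enum probe_result { INSN_REJECTED, INSN_HANDLED };

	/* One handler per instruction class the decoder can recognise. */
	struct decode_actions {
		enum probe_result (*branch)(uint32_t insn, void *ctx);
		enum probe_result (*data_proc)(uint32_t insn, void *ctx);
	};

	/* Common parsing code: classifies the instruction, then defers
	 * all semantics to the client's table. */
	static enum probe_result arm_probe_decode(uint32_t insn, void *ctx,
						  const struct decode_actions *acts)
	{
		if ((insn & 0x0e000000) == 0x0a000000)	/* B/BL class */
			return acts->branch(insn, ctx);
		if ((insn & 0x0c000000) == 0x00000000)	/* data-processing */
			return acts->data_proc(insn, ctx);
		return INSN_REJECTED;	/* unknown: refuse to probe */
	}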
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c		28
-rw-r--r--	kernel/sched/cpudeadline.c	 6
-rw-r--r--	kernel/sched/deadline.c		10
-rw-r--r--	kernel/sched/fair.c		 2
-rw-r--r--	kernel/sched/sched.h		 1
5 files changed, 24 insertions(+), 23 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b46131ef6aab..6edbef296ece 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1952,7 +1952,7 @@ static int dl_overflow(struct task_struct *p, int policy,
 {
 
 	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
-	u64 period = attr->sched_period;
+	u64 period = attr->sched_period ?: attr->sched_deadline;
 	u64 runtime = attr->sched_runtime;
 	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
 	int cpus, err = -1;
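The new right-hand side uses GNU C's binary `?:` operator: `a ?: b` yields `a` when `a` is non-zero and `b` otherwise. A hypothetical helper spelling the same thing out, for readers unfamiliar with the extension:

	/* Equivalent to "attr->sched_period ?: attr->sched_deadline":
	 * a zero sched_period now falls back to sched_deadline, i.e. an
	 * unspecified period is treated as equal to the deadline.
	 * (Hypothetical helper, not part of the patch.)
	 */
	static inline u64 dl_effective_period(const struct sched_attr *attr)
	{
		return attr->sched_period ? attr->sched_period
					  : attr->sched_deadline;
	}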
@@ -3661,13 +3661,14 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
  */
-SYSCALL_DEFINE2(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr)
+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
+			       unsigned int, flags)
 {
 	struct sched_attr attr;
 	struct task_struct *p;
 	int retval;
 
-	if (!uattr || pid < 0)
+	if (!uattr || pid < 0 || flags)
 		return -EINVAL;
 
 	if (sched_copy_attr(uattr, &attr))
@@ -3786,7 +3787,7 @@ static int sched_read_attr(struct sched_attr __user *uattr,
 		attr->size = usize;
 	}
 
-	ret = copy_to_user(uattr, attr, usize);
+	ret = copy_to_user(uattr, attr, attr->size);
 	if (ret)
 		return -EFAULT;
 
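The apparent motivation here is an information leak: `usize` is the size the caller claimed, while `attr` is a fixed-size kernel stack structure, so copying `usize` bytes could disclose stack memory beyond the struct. A sketch of the hazard, with made-up sizes:

	/* Hypothetical sizes for illustration only. */
	struct sched_attr attr = { .size = sizeof(attr) };	/* say 48 bytes   */
	unsigned int usize = 128;				/* caller's claim */

	/* old: copies 128 bytes from a 48-byte object -> 80 bytes of
	 *      uninitialised kernel stack reach userspace */
	copy_to_user(uattr, &attr, usize);

	/* new: copies only the bytes the kernel actually filled in */
	copy_to_user(uattr, &attr, attr.size);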
@@ -3804,8 +3805,8 @@ err_size:
  * @uattr: structure containing the extended parameters.
  * @size: sizeof(attr) for fwd/bwd comp.
  */
-SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
-		unsigned int, size)
+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
+		unsigned int, size, unsigned int, flags)
 {
 	struct sched_attr attr = {
 		.size = sizeof(struct sched_attr),
@@ -3814,7 +3815,7 @@ SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	int retval;
 
 	if (!uattr || pid < 0 || size > PAGE_SIZE ||
-	    size < SCHED_ATTR_SIZE_VER0)
+	    size < SCHED_ATTR_SIZE_VER0 || flags)
 		return -EINVAL;
 
 	rcu_read_lock();
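Both syscalls gain a trailing `flags` argument that must be zero for now; rejecting unknown flags from day one is what makes the argument usable for future extensions without a new syscall number. A hypothetical userspace caller (glibc had no wrapper at the time), with the uapi struct layout copied locally as the manpage examples do:

	#define _GNU_SOURCE
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <sys/types.h>
	#include <stdint.h>

	/* Local copy of the uapi layout; field order as in the kernel. */
	struct sched_attr {
		uint32_t size;
		uint32_t sched_policy;
		uint64_t sched_flags;
		int32_t  sched_nice;		/* SCHED_NORMAL/SCHED_BATCH */
		uint32_t sched_priority;	/* SCHED_FIFO/SCHED_RR      */
		uint64_t sched_runtime;		/* SCHED_DEADLINE           */
		uint64_t sched_deadline;
		uint64_t sched_period;
	};

	static int my_sched_setattr(pid_t pid, struct sched_attr *attr)
	{
		/* The final argument is the new flags word: anything
		 * non-zero makes the kernel above return -EINVAL. */
		return syscall(__NR_sched_setattr, pid, attr, 0);
	}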
@@ -7422,6 +7423,7 @@ static int sched_dl_global_constraints(void)
 	u64 period = global_rt_period();
 	u64 new_bw = to_ratio(period, runtime);
 	int cpu, ret = 0;
+	unsigned long flags;
 
 	/*
 	 * Here we want to check the bandwidth not being set to some
@@ -7435,10 +7437,10 @@ static int sched_dl_global_constraints(void)
 	for_each_possible_cpu(cpu) {
 		struct dl_bw *dl_b = dl_bw_of(cpu);
 
-		raw_spin_lock(&dl_b->lock);
+		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		if (new_bw < dl_b->total_bw)
 			ret = -EBUSY;
-		raw_spin_unlock(&dl_b->lock);
+		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
 		if (ret)
 			break;
@@ -7451,6 +7453,7 @@ static void sched_dl_do_global(void)
 {
 	u64 new_bw = -1;
 	int cpu;
+	unsigned long flags;
 
 	def_dl_bandwidth.dl_period = global_rt_period();
 	def_dl_bandwidth.dl_runtime = global_rt_runtime();
@@ -7464,9 +7467,9 @@ static void sched_dl_do_global(void)
 	for_each_possible_cpu(cpu) {
 		struct dl_bw *dl_b = dl_bw_of(cpu);
 
-		raw_spin_lock(&dl_b->lock);
+		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		dl_b->bw = new_bw;
-		raw_spin_unlock(&dl_b->lock);
+		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 	}
 }
 
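All four lock sites above switch from `raw_spin_lock()` to the `_irqsave` variant. The rule being enforced: once a lock is ever taken with interrupts disabled elsewhere, every acquisition must disable interrupts too, or an interrupt arriving while the lock is held can try to take it again and deadlock. A minimal sketch of the pattern, with a hypothetical structure standing in for `dl_bw`:

	/* Hypothetical lock-protected counter; the point is the IRQ
	 * discipline, not the data. */
	struct guarded_bw {
		raw_spinlock_t lock;
		u64 bw;
	};

	static void set_bw(struct guarded_bw *g, u64 val)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&g->lock, flags);	/* IRQs off, state saved */
		g->bw = val;
		raw_spin_unlock_irqrestore(&g->lock, flags); /* prior state back */
	}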
@@ -7475,7 +7478,8 @@ static int sched_rt_global_validate(void)
 	if (sysctl_sched_rt_period <= 0)
 		return -EINVAL;
 
-	if (sysctl_sched_rt_runtime > sysctl_sched_rt_period)
+	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
+		(sysctl_sched_rt_runtime > sysctl_sched_rt_period))
 		return -EINVAL;
 
 	return 0;
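The extra test matters because of a signed/unsigned trap: `sysctl_sched_rt_runtime` is a signed int where -1 (`RUNTIME_INF`) means unlimited, while `sysctl_sched_rt_period` is unsigned, so the old comparison promoted -1 to `UINT_MAX` and rejected the unlimited setting. A standalone demonstration, assuming those in-tree types:

	#include <stdio.h>

	int main(void)
	{
		int runtime = -1;		/* RUNTIME_INF sentinel: unlimited */
		unsigned int period = 1000000;	/* 1s, the default                 */

		/* Usual arithmetic conversions turn -1 into UINT_MAX here,
		 * so the old check spuriously failed for "unlimited". */
		if (runtime > period)
			printf("old check: -EINVAL for runtime=-1\n");
		return 0;
	}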
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 045fc74e3f09..5b8838b56d1c 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx)
 
 static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
 {
-	WARN_ON(idx > num_present_cpus() || idx == IDX_INVALID);
+	WARN_ON(!cpu_present(idx) || idx == IDX_INVALID);
 
 	if (dl_time_before(new_dl, cp->elements[idx].dl)) {
 		cp->elements[idx].dl = new_dl;
@@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	}
 
 out:
-	WARN_ON(best_cpu > num_present_cpus() && best_cpu != -1);
+	WARN_ON(!cpu_present(best_cpu) && best_cpu != -1);
 
 	return best_cpu;
 }
@@ -137,7 +137,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 	int old_idx, new_cpu;
 	unsigned long flags;
 
-	WARN_ON(cpu > num_present_cpus());
+	WARN_ON(!cpu_present(cpu));
 
 	raw_spin_lock_irqsave(&cp->lock, flags);
 	old_idx = cp->cpu_to_idx[cpu];
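All three WARN_ONs move from comparing an index against `num_present_cpus()` (a count) to testing `cpu_present()` (the mask). The count-based check misfires whenever CPU IDs are sparse, as this hypothetical layout shows:

	/* Suppose CPUs {0, 1, 4, 6} are present: num_present_cpus() == 4.
	 *
	 *   idx == 3: absent,  but 3 > 4 is false -> old check stays silent
	 *   idx == 6: present, but 6 > 4 is true  -> old check warns falsely
	 *
	 * cpu_present(idx) tests the cpumask bit itself and gets both right:
	 */
	WARN_ON(!cpu_present(idx) || idx == IDX_INVALID);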
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0dd5e0971a07..15cbc17fbf84 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -121,7 +121,7 @@ static inline void dl_clear_overload(struct rq *rq)
 
 static void update_dl_migration(struct dl_rq *dl_rq)
 {
-	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) {
+	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 		if (!dl_rq->overloaded) {
 			dl_set_overload(rq_of_dl_rq(dl_rq));
 			dl_rq->overloaded = 1;
@@ -137,7 +137,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	struct task_struct *p = dl_task_of(dl_se);
 	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
-	dl_rq->dl_nr_total++;
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory++;
 
@@ -149,7 +148,6 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	struct task_struct *p = dl_task_of(dl_se);
 	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
-	dl_rq->dl_nr_total--;
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory--;
 
@@ -717,6 +715,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 
 	WARN_ON(!dl_prio(prio));
 	dl_rq->dl_nr_running++;
+	inc_nr_running(rq_of_dl_rq(dl_rq));
 
 	inc_dl_deadline(dl_rq, deadline);
 	inc_dl_migration(dl_se, dl_rq);
@@ -730,6 +729,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	WARN_ON(!dl_prio(prio));
 	WARN_ON(!dl_rq->dl_nr_running);
 	dl_rq->dl_nr_running--;
+	dec_nr_running(rq_of_dl_rq(dl_rq));
 
 	dec_dl_deadline(dl_rq, dl_se->deadline);
 	dec_dl_migration(dl_se, dl_rq);
@@ -836,8 +836,6 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_dl_task(rq, p);
-
-	inc_nr_running(rq);
 }
 
 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
@@ -850,8 +848,6 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_curr_dl(rq);
 	__dequeue_task_dl(rq, p, flags);
-
-	dec_nr_running(rq);
 }
 
 /*
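The deadline class's contribution to `rq->nr_running` moves out of `enqueue_task_dl()`/`dequeue_task_dl()` and into `inc_dl_tasks()`/`dec_dl_tasks()`, so the global counter changes in lockstep with `dl_nr_running` on every path; that also makes the separate `dl_nr_total` field redundant (`update_dl_migration()` now reads `dl_nr_running`, and the field is deleted from sched.h at the end of this diff). A condensed, hypothetical view of the invariant after the change:

	/* After this patch the two counters are adjusted at a single
	 * site, so dl_rq->dl_nr_running and the dl share of
	 * rq->nr_running can never drift apart.  (Condensed sketch,
	 * not the full functions.) */
	static void account_dl_task(struct dl_rq *dl_rq, int inc)
	{
		if (inc) {
			dl_rq->dl_nr_running++;			/* class-local */
			inc_nr_running(rq_of_dl_rq(dl_rq));	/* global rq   */
		} else {
			dl_rq->dl_nr_running--;
			dec_nr_running(rq_of_dl_rq(dl_rq));
		}
	}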
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 966cc2bfcb77..78157099b167 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1757,6 +1757,8 @@ void task_numa_work(struct callback_head *work)
 			start = end;
 			if (pages <= 0)
 				goto out;
+
+			cond_resched();
 		} while (end != vma->vm_end);
 	}
 
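`task_numa_work()` can scan a large address space in one go; dropping a `cond_resched()` into the loop adds a voluntary preemption point so non-preemptible kernels don't stall other runnable tasks for the duration. The general pattern for any long kernel loop, sketched with a hypothetical work function:

	/* Hypothetical long-running scan; the shape is what matters. */
	static void scan_range(unsigned long start, unsigned long end)
	{
		unsigned long addr;

		for (addr = start; addr < end; addr += PAGE_SIZE) {
			process_one_page(addr);	/* placeholder for real work */
			cond_resched();		/* yield here if needed      */
		}
	}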
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c2119fd20f8b..f964add50f38 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -462,7 +462,6 @@ struct dl_rq {
 	} earliest_dl;
 
 	unsigned long dl_nr_migratory;
-	unsigned long dl_nr_total;
 	int overloaded;
 
 	/*