 fs/notify/fanotify/fanotify_user.c | 10 +++++-----
 kernel/sched/core.c                | 13 +++++--------
 kernel/sched/deadline.c            | 25 ++++---------------------
 kernel/sched/fair.c                |  6 +++++-
 4 files changed, 19 insertions(+), 35 deletions(-)
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index c991616acca9..bff8567aa42d 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -259,16 +259,15 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
 	struct fsnotify_event *kevent;
 	char __user *start;
 	int ret;
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	start = buf;
 	group = file->private_data;
 
 	pr_debug("%s: group=%p\n", __func__, group);
 
+	add_wait_queue(&group->notification_waitq, &wait);
 	while (1) {
-		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
-
 		mutex_lock(&group->notification_mutex);
 		kevent = get_one_event(group, count);
 		mutex_unlock(&group->notification_mutex);
@@ -289,7 +288,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
 
 			if (start != buf)
 				break;
-			schedule();
+
+			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 			continue;
 		}
 
@@ -318,8 +318,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
 		buf += ret;
 		count -= ret;
 	}
+	remove_wait_queue(&group->notification_waitq, &wait);
 
-	finish_wait(&group->notification_waitq, &wait);
 	if (start != buf && ret != -EFAULT)
 		ret = buf - start;
 	return ret;
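
The fanotify change above replaces the open-coded prepare_to_wait()/schedule()/finish_wait() loop with the wait_woken() idiom. A minimal sketch of that idiom follows; my_waitq and condition_ready() are placeholder names for illustration only, not part of this patch:

/*
 * Sketch of the wait_woken() pattern: woken_wake_function() records the
 * wakeup in the wait entry itself, so wait_woken() returns immediately if
 * a wakeup raced with the condition check, avoiding the lost-wakeup
 * window of an open-coded prepare_to_wait()/schedule() loop.
 */
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);	/* placeholder wait queue */
bool condition_ready(void);			/* placeholder: the event being waited for */

static int wait_for_condition(void)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	add_wait_queue(&my_waitq, &wait);
	while (!condition_ready()) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Sleeps unless a wakeup already hit this wait entry. */
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&my_waitq, &wait);
	return ret;
}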
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b5797b78add6..c0accc00566e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7113,9 +7113,6 @@ void __init sched_init(void)
 #ifdef CONFIG_RT_GROUP_SCHED
 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
 #endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	alloc_size += num_possible_cpus() * cpumask_size();
-#endif
 	if (alloc_size) {
 		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
@@ -7135,13 +7132,13 @@ void __init sched_init(void)
 		ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
+	}
 #ifdef CONFIG_CPUMASK_OFFSTACK
-		for_each_possible_cpu(i) {
-			per_cpu(load_balance_mask, i) = (void *)ptr;
-			ptr += cpumask_size();
-		}
-#endif /* CONFIG_CPUMASK_OFFSTACK */
+	for_each_possible_cpu(i) {
+		per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
 	}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
 
 	init_rt_bandwidth(&def_rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
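
The sched_init() change above stops carving the CONFIG_CPUMASK_OFFSTACK cpumasks out of the single bootstrap kzalloc() block and instead gives each possible CPU its own buffer from its home NUMA node. A minimal sketch of that allocation pattern, assuming CONFIG_CPUMASK_OFFSTACK=y (pcpu_mask is a placeholder standing in for load_balance_mask):

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/topology.h>

/* With CONFIG_CPUMASK_OFFSTACK, cpumask_var_t is a struct cpumask pointer. */
static DEFINE_PER_CPU(cpumask_var_t, pcpu_mask);

static void __init alloc_pcpu_masks(void)
{
	int i;

	/* One cpumask-sized allocation per CPU, placed on that CPU's node. */
	for_each_possible_cpu(i)
		per_cpu(pcpu_mask, i) = (cpumask_var_t)kzalloc_node(
				cpumask_size(), GFP_KERNEL, cpu_to_node(i));
}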
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index e5db8c6feebd..b52092f2636d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -570,24 +570,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
 static
 int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 {
-	int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
-	int rorun = dl_se->runtime <= 0;
-
-	if (!rorun && !dmiss)
-		return 0;
-
-	/*
-	 * If we are beyond our current deadline and we are still
-	 * executing, then we have already used some of the runtime of
-	 * the next instance. Thus, if we do not account that, we are
-	 * stealing bandwidth from the system at each deadline miss!
-	 */
-	if (dmiss) {
-		dl_se->runtime = rorun ? dl_se->runtime : 0;
-		dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
-	}
-
-	return 1;
+	return (dl_se->runtime <= 0);
 }
 
 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
 	 * parameters of the task might need updating. Otherwise,
 	 * we want a replenishment of its runtime.
 	 */
-	if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
-		replenish_dl_entity(dl_se, pi_se);
-	else
+	if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
 		update_dl_entity(dl_se, pi_se);
+	else if (flags & ENQUEUE_REPLENISH)
+		replenish_dl_entity(dl_se, pi_se);
 
 	__enqueue_dl_entity(dl_se);
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df2cdf77f899..40667cbf371b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4005,6 +4005,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
+	/* init_cfs_bandwidth() was not called */
+	if (!cfs_b->throttled_cfs_rq.next)
+		return;
+
 	hrtimer_cancel(&cfs_b->period_timer);
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
@@ -4424,7 +4428,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		 * wl = S * s'_i; see (2)
 		 */
 		if (W > 0 && w < W)
-			wl = (w * tg->shares) / W;
+			wl = (w * (long)tg->shares) / W;
 		else
 			wl = tg->shares;
 
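
In effective_load(), w is a signed long that can go negative, while tg->shares is an unsigned long; without the (long) cast the multiplication is performed in unsigned arithmetic, so a negative weight wraps to a huge value before the division. A standalone user-space demo of the difference (not kernel code; the values are made up):

/* Demo of why the (long) cast matters: an unsigned multiplicand drags the
 * whole expression into unsigned arithmetic, so a negative 'w' wraps around
 * before the division instead of yielding a small negative result. */
#include <stdio.h>

int main(void)
{
	long w = -512;			/* effective_load() can pass a negative weight */
	unsigned long shares = 1024;	/* tg->shares is unsigned long */
	long W = 4096;

	long wrong = (w * shares) / W;		/* w converted to unsigned long */
	long right = (w * (long)shares) / W;	/* arithmetic stays signed */

	printf("unsigned mul: %ld\n", wrong);	/* large positive garbage */
	printf("signed mul:   %ld\n", right);	/* -128, as intended */
	return 0;
}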