Diffstat (limited to 'fs/select.c'):
 fs/select.c | 64
 1 file changed, 62 insertions(+), 2 deletions(-)
diff --git a/fs/select.c b/fs/select.c
index f6dceb56793f..5e61b43d0766 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -28,6 +28,58 @@
 
 #include <asm/uaccess.h>
 
+
+/*
+ * Estimate expected accuracy in ns from a timeval.
+ *
+ * After quite a bit of churning around, we've settled on
+ * a simple thing of taking 0.1% of the timeout as the
+ * slack, with a cap of 100 msec.
+ * "nice" tasks get a 0.5% slack instead.
+ *
+ * Consider this comment an open invitation to come up with even
+ * better solutions..
+ */
+
+static unsigned long __estimate_accuracy(struct timespec *tv)
+{
+	unsigned long slack;
+	int divfactor = 1000;
+
+	if (task_nice(current))
+		divfactor = divfactor / 5;
+
+	slack = tv->tv_nsec / divfactor;
+	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);
+
+	if (slack > 100 * NSEC_PER_MSEC)
+		slack = 100 * NSEC_PER_MSEC;
+	return slack;
+}
+
+static unsigned long estimate_accuracy(struct timespec *tv)
+{
+	unsigned long ret;
+	struct timespec now;
+
+	/*
+	 * Realtime tasks get a slack of 0 for obvious reasons.
+	 */
+
+	if (current->policy == SCHED_FIFO ||
+	    current->policy == SCHED_RR)
+		return 0;
+
+	ktime_get_ts(&now);
+	now = timespec_sub(*tv, now);
+	ret = __estimate_accuracy(&now);
+	if (ret < current->timer_slack_ns)
+		return current->timer_slack_ns;
+	return ret;
+}
+
+
+
 struct poll_table_page {
 	struct poll_table_page * next;
 	struct poll_table_entry * entry;
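
For a sense of the magnitudes the 0.1%-of-timeout rule above produces, here is a stand-alone user-space sketch of the same arithmetic (illustration only; sketch_slack and the locally defined NSEC_* constants are not part of the patch). A 10 ms timeout gets about 10 us of slack, 1 s gets 1 ms, and anything long enough hits the 100 ms cap.

/* Stand-alone sketch of the slack arithmetic above (user-space
 * re-implementation for illustration, not the kernel code itself). */
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC	1000000000L
#define NSEC_PER_MSEC	1000000L

static long sketch_slack(const struct timespec *tv, int niced)
{
	long divfactor = niced ? 200 : 1000;	/* 0.5% vs. 0.1% */
	long slack = tv->tv_nsec / divfactor +
		     tv->tv_sec * (NSEC_PER_SEC / divfactor);

	if (slack > 100 * NSEC_PER_MSEC)	/* cap at 100 msec */
		slack = 100 * NSEC_PER_MSEC;
	return slack;
}

int main(void)
{
	struct timespec t10ms = { 0, 10 * NSEC_PER_MSEC };
	struct timespec t1s   = { 1, 0 };
	struct timespec t10m  = { 600, 0 };

	/* Expected output: 10000 1000000 100000000 (last one capped) */
	printf("%ld %ld %ld\n",
	       sketch_slack(&t10ms, 0),
	       sketch_slack(&t1s, 0),
	       sketch_slack(&t10m, 0));
	return 0;
}

The niced path mirrors the divfactor / 5 case above for tasks running at a non-default nice level.
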
@@ -262,6 +314,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 	struct poll_wqueues table;
 	poll_table *wait;
 	int retval, i, timed_out = 0;
+	unsigned long slack = 0;
 
 	rcu_read_lock();
 	retval = max_select_fd(n, fds);
@@ -278,6 +331,9 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 		timed_out = 1;
 	}
 
+	if (end_time)
+		slack = estimate_accuracy(end_time);
+
 	retval = 0;
 	for (;;) {
 		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
@@ -353,7 +409,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 			to = &expire;
 		}
 
-		if (!schedule_hrtimeout(to, HRTIMER_MODE_ABS))
+		if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
 			timed_out = 1;
 	}
 	__set_current_state(TASK_RUNNING);
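
The user-visible effect of switching from schedule_hrtimeout() to schedule_hrtimeout_range() is that the timeout may now expire anywhere in [timeout, timeout + slack], which lets the kernel coalesce nearby wakeups. A rough user-space probe of that overshoot, using only standard POSIX calls (illustrative; the number printed depends on kernel version, timer hardware, and load):

/* Measure how late select() returns relative to its nominal timeout. */
#include <stdio.h>
#include <sys/select.h>
#include <time.h>

int main(void)
{
	struct timespec before, after;
	struct timeval tv = { 1, 0 };		/* nominal 1 s timeout */

	clock_gettime(CLOCK_MONOTONIC, &before);
	select(0, NULL, NULL, NULL, &tv);	/* no fds: pure timeout */
	clock_gettime(CLOCK_MONOTONIC, &after);

	long overshoot_ns = (after.tv_sec - before.tv_sec) * 1000000000L +
			    (after.tv_nsec - before.tv_nsec) - 1000000000L;
	printf("returned %ld ns past the nominal timeout\n", overshoot_ns);
	return 0;
}

With this patch, a 1 s timeout may be overshot by up to roughly 1 ms (the estimated slack) rather than by a single hrtimer expiry's worth of latency.
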
@@ -593,6 +649,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
 	poll_table* pt = &wait->pt;
 	ktime_t expire, *to = NULL;
 	int timed_out = 0, count = 0;
+	unsigned long slack = 0;
 
 	/* Optimise the no-wait case */
 	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
@@ -600,6 +657,9 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
 		timed_out = 1;
 	}
 
+	if (end_time)
+		slack = estimate_accuracy(end_time);
+
 	for (;;) {
 		struct poll_list *walk;
 
@@ -646,7 +706,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
 			to = &expire;
 		}
 
-		if (!schedule_hrtimeout(to, HRTIMER_MODE_ABS))
+		if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
 			timed_out = 1;
 	}
 	__set_current_state(TASK_RUNNING);
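
Because estimate_accuracy() never returns less than current->timer_slack_ns, a task that wants tighter select()/poll() wakeups can shrink its per-task slack with prctl(PR_SET_TIMERSLACK), the standard interface to timer_slack_ns (available in kernels of roughly this vintage and later). A minimal sketch:

/*
 * Sketch only: lower the per-task timer slack that estimate_accuracy()
 * uses as a floor.  PR_SET_TIMERSLACK takes the new slack in nanoseconds;
 * a value of 1 asks for the minimum, 0 restores the task's default.
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TIMERSLACK
#define PR_SET_TIMERSLACK 29		/* in case <sys/prctl.h> predates it */
#endif

int main(void)
{
	if (prctl(PR_SET_TIMERSLACK, 1UL, 0, 0, 0) != 0)
		perror("PR_SET_TIMERSLACK");

	/*
	 * select()/poll() in this task now use a 1 ns floor instead of the
	 * default (50 us in kernels of this era), although the 0.1%-of-timeout
	 * estimate above still dominates for long timeouts.
	 */
	return 0;
}

Conversely, power-sensitive applications can raise the value to encourage more wakeup batching; realtime SCHED_FIFO/SCHED_RR tasks bypass the slack entirely, as estimate_accuracy() above shows.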