author		Eliezer Tamir <eliezer.tamir@linux.intel.com>	2013-07-08 09:20:34 -0400
committer	David S. Miller <davem@davemloft.net>	2013-07-08 22:25:45 -0400
commit		cbf55001b2ddb814329735641be5d29b08c82b08 (patch)
tree		110c1191f4b6699bef04ebdf45e4677c623a7ceb /fs/select.c
parent		c7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1 (diff)
net: rename low latency sockets functions to busy poll
Rename functions in include/net/ll_poll.h to busy wait.
Clarify documentation about expected power use increase.
Rename POLL_LL to POLL_BUSY_LOOP.
Add need_resched() testing to poll/select busy loops.

Note that in select and poll, can_busy_poll is dynamic and is updated
continuously to reflect the existence of supported sockets with valid
queue information.

Signed-off-by: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
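For orientation, the busy-wait path renamed here is only taken when busy polling is enabled system-wide; an ordinary select() loop then picks it up with no application changes. The following is a minimal, hypothetical userspace sketch, assuming the busy-poll sysctl from this series (net.core.busy_poll in its final naming, a timeout in microseconds) is set and the NIC driver supports busy polling; the port number and buffer size are arbitrary illustrations, not part of this patch.

/*
 * Minimal sketch: a plain select() receive loop on a UDP socket.
 * With e.g. "sysctl net.core.busy_poll=50" and a busy-poll capable
 * driver, do_select() below busy-waits up to that many microseconds
 * in busy_loop_range() before falling back to an ordinary sleep.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr;
	char buf[2048];

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(12345);	/* arbitrary example port */
	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("socket/bind");
		return 1;
	}

	for (;;) {
		fd_set rfds;

		FD_ZERO(&rfds);
		FD_SET(fd, &rfds);

		/* this is where the patched do_select() may busy poll */
		if (select(fd + 1, &rfds, NULL, NULL, NULL) < 0) {
			perror("select");
			break;
		}
		if (FD_ISSET(fd, &rfds)) {
			ssize_t n = recv(fd, buf, sizeof(buf), 0);
			if (n > 0)
				printf("got %zd bytes\n", n);
		}
	}
	close(fd);
	return 0;
}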
Diffstat (limited to 'fs/select.c')
 fs/select.c | 60 +++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 37 insertions(+), 23 deletions(-)
diff --git a/fs/select.c b/fs/select.c
index f28a58592725..25cac5faf6d6 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -402,9 +402,9 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 	poll_table *wait;
 	int retval, i, timed_out = 0;
 	unsigned long slack = 0;
-	unsigned int ll_flag = ll_get_flag();
-	u64 ll_start = ll_start_time(ll_flag);
-	u64 ll_time = ll_run_time();
+	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
+	u64 busy_start = busy_loop_start_time(busy_flag);
+	u64 busy_end = busy_loop_end_time();
 
 	rcu_read_lock();
 	retval = max_select_fd(n, fds);
@@ -427,7 +427,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 	retval = 0;
 	for (;;) {
 		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
-		bool can_ll = false;
+		bool can_busy_loop = false;
 
 		inp = fds->in; outp = fds->out; exp = fds->ex;
 		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;
@@ -456,7 +456,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 					mask = DEFAULT_POLLMASK;
 					if (f_op && f_op->poll) {
 						wait_key_set(wait, in, out,
-							     bit, ll_flag);
+							     bit, busy_flag);
 						mask = (*f_op->poll)(f.file, wait);
 					}
 					fdput(f);
@@ -475,11 +475,18 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 						retval++;
 						wait->_qproc = NULL;
 					}
-					if (mask & POLL_LL)
-						can_ll = true;
 					/* got something, stop busy polling */
-					if (retval)
-						ll_flag = 0;
+					if (retval) {
+						can_busy_loop = false;
+						busy_flag = 0;
+
+					/*
+					 * only remember a returned
+					 * POLL_BUSY_LOOP if we asked for it
+					 */
+					} else if (busy_flag & mask)
+						can_busy_loop = true;
+
 				}
 			}
 			if (res_in)
@@ -498,8 +505,9 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 			break;
 		}
 
-		/* only if on, have sockets with POLL_LL and not out of time */
-		if (ll_flag && can_ll && can_poll_ll(ll_start, ll_time))
+		/* only if found POLL_BUSY_LOOP sockets && not out of time */
+		if (!need_resched() && can_busy_loop &&
+		    busy_loop_range(busy_start, busy_end))
 			continue;
 
 		/*
@@ -734,7 +742,8 @@ struct poll_list {
  * if pwait->_qproc is non-NULL.
  */
 static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
-				     bool *can_ll, unsigned int ll_flag)
+				     bool *can_busy_poll,
+				     unsigned int busy_flag)
 {
 	unsigned int mask;
 	int fd;
@@ -748,10 +757,10 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
 			mask = DEFAULT_POLLMASK;
 			if (f.file->f_op && f.file->f_op->poll) {
 				pwait->_key = pollfd->events|POLLERR|POLLHUP;
-				pwait->_key |= ll_flag;
+				pwait->_key |= busy_flag;
 				mask = f.file->f_op->poll(f.file, pwait);
-				if (mask & POLL_LL)
-					*can_ll = true;
+				if (mask & busy_flag)
+					*can_busy_poll = true;
 			}
 			/* Mask out unneeded events. */
 			mask &= pollfd->events | POLLERR | POLLHUP;
@@ -770,9 +779,10 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
 	ktime_t expire, *to = NULL;
 	int timed_out = 0, count = 0;
 	unsigned long slack = 0;
-	unsigned int ll_flag = ll_get_flag();
-	u64 ll_start = ll_start_time(ll_flag);
-	u64 ll_time = ll_run_time();
+	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
+	u64 busy_start = busy_loop_start_time(busy_flag);
+	u64 busy_end = busy_loop_end_time();
+
 
 	/* Optimise the no-wait case */
 	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
@@ -785,7 +795,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
 
 	for (;;) {
 		struct poll_list *walk;
-		bool can_ll = false;
+		bool can_busy_loop = false;
 
 		for (walk = list; walk != NULL; walk = walk->next) {
 			struct pollfd * pfd, * pfd_end;
@@ -800,10 +810,13 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
 				 * this. They'll get immediately deregistered
 				 * when we break out and return.
 				 */
-				if (do_pollfd(pfd, pt, &can_ll, ll_flag)) {
+				if (do_pollfd(pfd, pt, &can_busy_loop,
+					      busy_flag)) {
 					count++;
 					pt->_qproc = NULL;
-					ll_flag = 0;
+					/* found something, stop busy polling */
+					busy_flag = 0;
+					can_busy_loop = false;
 				}
 			}
 		}
@@ -820,8 +833,9 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
 		if (count || timed_out)
 			break;
 
-		/* only if on, have sockets with POLL_LL and not out of time */
-		if (ll_flag && can_ll && can_poll_ll(ll_start, ll_time))
+		/* only if found POLL_BUSY_LOOP sockets && not out of time */
+		if (!need_resched() && can_busy_loop &&
+		    busy_loop_range(busy_start, busy_end))
 			continue;
 
 		/*