summaryrefslogtreecommitdiffstats
path: root/fs/eventpoll.c
diff options
context:
space:
mode:
authorDavidlohr Bueso <dave@stgolabs.net>2019-01-03 18:27:02 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2019-01-04 16:13:46 -0500
commit74bdc129850c32eaddc625ce557da560303fbf25 (patch)
tree20b4654a2ec7b6e83a7a4bdc0c85f9f07fba2798 /fs/eventpoll.c
parent77b8c0a8e47484e205b01dfedcd224770aa9d800 (diff)
fs/epoll: remove max_nests argument from ep_call_nested()
Patch series "epoll: some miscellaneous optimizations". The following are some incremental optimizations on some of the epoll core. Each patch has the details, but together, the series is seen to shave off measurable cycles on a number of systems and workloads. For example, on a 40-core IB, a pipetest as well as parallel epoll_wait() benchmark show around a 20-30% increase in raw operations per second when the box is fully occupied (incremental thread counts), and up to 15% performance improvement with lower counts. Passes ltp epoll related testcases. This patch (of 6): All callers pass the EP_MAX_NESTS constant already, so let's simplify this a tad and get rid of the redundant parameter for nested eventpolls. Link: http://lkml.kernel.org/r/20181108051006.18751-2-dave@stgolabs.net Signed-off-by: Davidlohr Bueso <dbueso@suse.de> Reviewed-by: Andrew Morton <akpm@linux-foundation.org> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Jason Baron <jbaron@akamai.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r--fs/eventpoll.c14
1 files changed, 6 insertions, 8 deletions
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 8a5a1010886b..be50799737f4 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -471,7 +471,6 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
471 * no re-entered. 471 * no re-entered.
472 * 472 *
473 * @ncalls: Pointer to the nested_calls structure to be used for this call. 473 * @ncalls: Pointer to the nested_calls structure to be used for this call.
474 * @max_nests: Maximum number of allowed nesting calls.
475 * @nproc: Nested call core function pointer. 474 * @nproc: Nested call core function pointer.
476 * @priv: Opaque data to be passed to the @nproc callback. 475 * @priv: Opaque data to be passed to the @nproc callback.
477 * @cookie: Cookie to be used to identify this nested call. 476 * @cookie: Cookie to be used to identify this nested call.
@@ -480,7 +479,7 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
480 * Returns: Returns the code returned by the @nproc callback, or -1 if 479 * Returns: Returns the code returned by the @nproc callback, or -1 if
481 * the maximum recursion limit has been exceeded. 480 * the maximum recursion limit has been exceeded.
482 */ 481 */
483static int ep_call_nested(struct nested_calls *ncalls, int max_nests, 482static int ep_call_nested(struct nested_calls *ncalls,
484 int (*nproc)(void *, void *, int), void *priv, 483 int (*nproc)(void *, void *, int), void *priv,
485 void *cookie, void *ctx) 484 void *cookie, void *ctx)
486{ 485{
@@ -499,7 +498,7 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
499 */ 498 */
500 list_for_each_entry(tncur, lsthead, llink) { 499 list_for_each_entry(tncur, lsthead, llink) {
501 if (tncur->ctx == ctx && 500 if (tncur->ctx == ctx &&
502 (tncur->cookie == cookie || ++call_nests > max_nests)) { 501 (tncur->cookie == cookie || ++call_nests > EP_MAX_NESTS)) {
503 /* 502 /*
504 * Ops ... loop detected or maximum nest level reached. 503 * Ops ... loop detected or maximum nest level reached.
505 * We abort this wake by breaking the cycle itself. 504 * We abort this wake by breaking the cycle itself.
@@ -573,7 +572,7 @@ static void ep_poll_safewake(wait_queue_head_t *wq)
573{ 572{
574 int this_cpu = get_cpu(); 573 int this_cpu = get_cpu();
575 574
576 ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, 575 ep_call_nested(&poll_safewake_ncalls,
577 ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); 576 ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
578 577
579 put_cpu(); 578 put_cpu();
@@ -1333,7 +1332,6 @@ static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
1333 } 1332 }
1334 } else { 1333 } else {
1335 error = ep_call_nested(&poll_loop_ncalls, 1334 error = ep_call_nested(&poll_loop_ncalls,
1336 EP_MAX_NESTS,
1337 reverse_path_check_proc, 1335 reverse_path_check_proc,
1338 child_file, child_file, 1336 child_file, child_file,
1339 current); 1337 current);
@@ -1367,7 +1365,7 @@ static int reverse_path_check(void)
1367 /* let's call this for all tfiles */ 1365 /* let's call this for all tfiles */
1368 list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) { 1366 list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
1369 path_count_init(); 1367 path_count_init();
1370 error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, 1368 error = ep_call_nested(&poll_loop_ncalls,
1371 reverse_path_check_proc, current_file, 1369 reverse_path_check_proc, current_file,
1372 current_file, current); 1370 current_file, current);
1373 if (error) 1371 if (error)
@@ -1876,7 +1874,7 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
1876 ep_tovisit = epi->ffd.file->private_data; 1874 ep_tovisit = epi->ffd.file->private_data;
1877 if (ep_tovisit->visited) 1875 if (ep_tovisit->visited)
1878 continue; 1876 continue;
1879 error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, 1877 error = ep_call_nested(&poll_loop_ncalls,
1880 ep_loop_check_proc, epi->ffd.file, 1878 ep_loop_check_proc, epi->ffd.file,
1881 ep_tovisit, current); 1879 ep_tovisit, current);
1882 if (error != 0) 1880 if (error != 0)
@@ -1916,7 +1914,7 @@ static int ep_loop_check(struct eventpoll *ep, struct file *file)
1916 int ret; 1914 int ret;
1917 struct eventpoll *ep_cur, *ep_next; 1915 struct eventpoll *ep_cur, *ep_next;
1918 1916
1919 ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, 1917 ret = ep_call_nested(&poll_loop_ncalls,
1920 ep_loop_check_proc, file, ep, current); 1918 ep_loop_check_proc, file, ep, current);
1921 /* clear visited list */ 1919 /* clear visited list */
1922 list_for_each_entry_safe(ep_cur, ep_next, &visited_list, 1920 list_for_each_entry_safe(ep_cur, ep_next, &visited_list,