author		Eric Dumazet <eric.dumazet@gmail.com>	2011-07-28 14:04:09 -0400
committer	J. Bruce Fields <bfields@redhat.com>	2011-08-19 13:25:36 -0400
commit		11fd165c68b73434ca1273e21f21db5eecc90926 (patch)
tree		75c3e2d97b2d59ebaaa4571df2ead80a4c4f35a5 /net
parent		c1f24ef4ed46f58ea5e524a2364c93b6847fb164 (diff)
sunrpc: use better NUMA affinities
Use NUMA aware allocations to reduce latencies and increase throughput.

sunrpc kthreads can use kthread_create_on_node() if pool_mode is
"percpu" or "pernode", and svc_prepare_thread()/svc_init_buffer() can
also take into account NUMA node affinity for memory allocations.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: "J. Bruce Fields" <bfields@fieldses.org>
CC: Neil Brown <neilb@suse.de>
CC: David Miller <davem@davemloft.net>
Reviewed-by: Greg Banks <gnb@fastmail.fm>
[bfields@redhat.com: fix up caller nfs41_callback_up]
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
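The pattern the commit applies is: resolve a NUMA node once per pool, then steer every per-thread allocation and the kthread itself to that node. The following is a minimal sketch of that pattern, not sunrpc code; worker_state, worker_fn and start_worker_on_node are hypothetical names standing in for struct svc_rqst and the sunrpc thread function, and passing NUMA_NO_NODE keeps the old node-agnostic behaviour.

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kthread.h>
#include <linux/numa.h>
#include <linux/slab.h>

/* Hypothetical per-thread state, standing in for struct svc_rqst. */
struct worker_state {
	struct page *buf_page;
};

static int worker_fn(void *data);	/* thread body, assumed elsewhere */

/*
 * Allocate the worker's state and one buffer page on @node, then create
 * its kthread on the same node, so the thread's stack and its data are
 * node-local.  Passing NUMA_NO_NODE degrades to the plain
 * kzalloc()/alloc_page()/kthread_create() placement.
 */
static struct task_struct *start_worker_on_node(int node)
{
	struct worker_state *ws;
	struct task_struct *task;

	ws = kzalloc_node(sizeof(*ws), GFP_KERNEL, node);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->buf_page = alloc_pages_node(node, GFP_KERNEL, 0);
	if (!ws->buf_page) {
		kfree(ws);
		return ERR_PTR(-ENOMEM);
	}

	task = kthread_create_on_node(worker_fn, ws, node, "worker/%d", node);
	if (IS_ERR(task)) {
		__free_page(ws->buf_page);
		kfree(ws);
	}
	return task;
}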
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/svc.c	33
1 file changed, 24 insertions, 9 deletions
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6a69a1131fb7..30d70abb4e2c 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -295,6 +295,18 @@ svc_pool_map_put(void)
 }
 
 
+static int svc_pool_map_get_node(unsigned int pidx)
+{
+	const struct svc_pool_map *m = &svc_pool_map;
+
+	if (m->count) {
+		if (m->mode == SVC_POOL_PERCPU)
+			return cpu_to_node(m->pool_to[pidx]);
+		if (m->mode == SVC_POOL_PERNODE)
+			return m->pool_to[pidx];
+	}
+	return NUMA_NO_NODE;
+}
 /*
  * Set the given thread's cpus_allowed mask so that it
  * will only run on cpus in the given pool.
@@ -499,7 +511,7 @@ EXPORT_SYMBOL_GPL(svc_destroy);
  * We allocate pages and place them in rq_argpages.
  */
 static int
-svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
+svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
 {
 	unsigned int pages, arghi;
 
@@ -513,7 +525,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
 	arghi = 0;
 	BUG_ON(pages > RPCSVC_MAXPAGES);
 	while (pages) {
-		struct page *p = alloc_page(GFP_KERNEL);
+		struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
 		if (!p)
 			break;
 		rqstp->rq_pages[arghi++] = p;
@@ -536,11 +548,11 @@ svc_release_buffer(struct svc_rqst *rqstp)
 }
 
 struct svc_rqst *
-svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
+svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
 {
 	struct svc_rqst *rqstp;
 
-	rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
+	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
 	if (!rqstp)
 		goto out_enomem;
 
@@ -554,15 +566,15 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
 	rqstp->rq_server = serv;
 	rqstp->rq_pool = pool;
 
-	rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
+	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
 	if (!rqstp->rq_argp)
 		goto out_thread;
 
-	rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
+	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
 	if (!rqstp->rq_resp)
 		goto out_thread;
 
-	if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
+	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
 		goto out_thread;
 
 	return rqstp;
@@ -647,6 +659,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 	struct svc_pool *chosen_pool;
 	int error = 0;
 	unsigned int state = serv->sv_nrthreads-1;
+	int node;
 
 	if (pool == NULL) {
 		/* The -1 assumes caller has done a svc_get() */
@@ -662,14 +675,16 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 		nrservs--;
 		chosen_pool = choose_pool(serv, pool, &state);
 
-		rqstp = svc_prepare_thread(serv, chosen_pool);
+		node = svc_pool_map_get_node(chosen_pool->sp_id);
+		rqstp = svc_prepare_thread(serv, chosen_pool, node);
 		if (IS_ERR(rqstp)) {
 			error = PTR_ERR(rqstp);
 			break;
 		}
 
 		__module_get(serv->sv_module);
-		task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
+		task = kthread_create_on_node(serv->sv_function, rqstp,
+					      node, serv->sv_name);
 		if (IS_ERR(task)) {
			error = PTR_ERR(task);
			module_put(serv->sv_module);
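Note the fallback path: svc_pool_map_get_node() returns NUMA_NO_NODE whenever the pool map is unused (m->count == 0), and the *_node() allocators and kthread_create_on_node() treat NUMA_NO_NODE as "no preference", so the default global pool mode behaves exactly as before the patch. Callers that bypass the pool machinery, such as the nfs41 backchannel mentioned in the fixup note, can pass NUMA_NO_NODE themselves. A hedged sketch of such a caller against the new signature follows; the function name is illustrative, not the exact fs/nfs code.

#include <linux/numa.h>
#include <linux/sunrpc/svc.h>

/*
 * Sketch only: a caller with no cpu/node pool mapping passes NUMA_NO_NODE,
 * keeping the pre-patch kzalloc()/kmalloc()/alloc_page() placement.
 */
static struct svc_rqst *prepare_backchannel_thread(struct svc_serv *serv)
{
	return svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
}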