author	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-16 11:20:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-16 11:20:19 -0400
commit	bd2895eeade5f11f3e5906283c630bbdb4b57454 (patch)
tree	4d98f4fcd80c7d062afce28823d08aee53e66f82 /net
parent	016aa2ed1cc9cf704cf76d8df07751b6daa9750f (diff)
parent	24d51add7438f9696a7205927bf9de3c5c787a58 (diff)
Merge branch 'for-2.6.39' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
* 'for-2.6.39' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: fix build failure introduced by s/freezeable/freezable/
  workqueue: add system_freezeable_wq
  rds/ib: use system_wq instead of rds_ib_fmr_wq
  net/9p: replace p9_poll_task with a work
  net/9p: use system_wq instead of p9_mux_wq
  xfs: convert to alloc_workqueue()
  reiserfs: make commit_wq use the default concurrency level
  ocfs2: use system_wq instead of ocfs2_quota_wq
  ext4: convert to alloc_workqueue()
  scsi/scsi_tgt_lib: scsi_tgtd isn't used in memory reclaim path
  scsi/be2iscsi,qla2xxx: convert to alloc_workqueue()
  misc/iwmc3200top: use system_wq instead of dedicated workqueues
  i2o: use alloc_workqueue() instead of create_workqueue()
  acpi: kacpi*_wq don't need WQ_MEM_RECLAIM
  fs/aio: aio_wq isn't used in memory reclaim path
  input/tps6507x-ts: use system_wq instead of dedicated workqueue
  cpufreq: use system_wq instead of dedicated workqueues
  wireless/ipw2x00: use system_wq instead of dedicated workqueues
  arm/omap: use system_wq in mailbox
  workqueue: use WQ_MEM_RECLAIM instead of WQ_RESCUER
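The recurring pattern in this series: subsystems that created a dedicated workqueue with create_workqueue() and fed it via queue_work() are switched to the shared system workqueue (schedule_work()/schedule_delayed_work()), or to alloc_workqueue() where a dedicated queue is still justified. A minimal sketch of the before/after, using a hypothetical driver "foo" rather than any code in this merge:

#include <linux/workqueue.h>

static void foo_workfn(struct work_struct *work);
static DECLARE_WORK(foo_work, foo_workfn);

/* Before: a private queue the driver had to create, feed and destroy.
 *
 *	static struct workqueue_struct *foo_wq;
 *	foo_wq = create_workqueue("foo");	// at init, may fail
 *	queue_work(foo_wq, &foo_work);
 *	destroy_workqueue(foo_wq);		// at exit
 */

/* After: reuse the shared system workqueue. No setup and no failure
 * path at init; the only obligation left is flushing pending work
 * before the module goes away. */
static void foo_kick(void)
{
	schedule_work(&foo_work);	/* queues on system_wq */
}

static void foo_exit(void)
{
	flush_work_sync(&foo_work);	/* wait out a pending run */
}

static void foo_workfn(struct work_struct *work)
{
	/* deferred processing */
}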
Diffstat (limited to 'net')
-rw-r--r--	net/9p/trans_fd.c	52
-rw-r--r--	net/rds/ib.c	9
-rw-r--r--	net/rds/ib.h	2
-rw-r--r--	net/rds/ib_rdma.c	27
-rw-r--r--	net/sunrpc/sched.c	2
5 files changed, 18 insertions(+), 74 deletions(-)
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 078eb162d9bf..a30471e51740 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -153,10 +153,11 @@ struct p9_conn {
 	unsigned long wsched;
 };
 
+static void p9_poll_workfn(struct work_struct *work);
+
 static DEFINE_SPINLOCK(p9_poll_lock);
 static LIST_HEAD(p9_poll_pending_list);
-static struct workqueue_struct *p9_mux_wq;
-static struct task_struct *p9_poll_task;
+static DECLARE_WORK(p9_poll_work, p9_poll_workfn);
 
 static void p9_mux_poll_stop(struct p9_conn *m)
 {
@@ -384,7 +385,7 @@ static void p9_read_work(struct work_struct *work)
 
 		if (n & POLLIN) {
 			P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
-			queue_work(p9_mux_wq, &m->rq);
+			schedule_work(&m->rq);
 		} else
 			clear_bit(Rworksched, &m->wsched);
 	} else
@@ -497,7 +498,7 @@ static void p9_write_work(struct work_struct *work)
 
 		if (n & POLLOUT) {
 			P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
-			queue_work(p9_mux_wq, &m->wq);
+			schedule_work(&m->wq);
 		} else
 			clear_bit(Wworksched, &m->wsched);
 	} else
@@ -516,15 +517,14 @@ static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
 			container_of(wait, struct p9_poll_wait, wait);
 	struct p9_conn *m = pwait->conn;
 	unsigned long flags;
-	DECLARE_WAITQUEUE(dummy_wait, p9_poll_task);
 
 	spin_lock_irqsave(&p9_poll_lock, flags);
 	if (list_empty(&m->poll_pending_link))
 		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
 	spin_unlock_irqrestore(&p9_poll_lock, flags);
 
-	/* perform the default wake up operation */
-	return default_wake_function(&dummy_wait, mode, sync, key);
+	schedule_work(&p9_poll_work);
+	return 1;
 }
 
 /**
@@ -629,7 +629,7 @@ static void p9_poll_mux(struct p9_conn *m)
 		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
 		if (!test_and_set_bit(Rworksched, &m->wsched)) {
 			P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
-			queue_work(p9_mux_wq, &m->rq);
+			schedule_work(&m->rq);
 		}
 	}
 
@@ -639,7 +639,7 @@ static void p9_poll_mux(struct p9_conn *m)
 		if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
 		    !test_and_set_bit(Wworksched, &m->wsched)) {
 			P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
-			queue_work(p9_mux_wq, &m->wq);
+			schedule_work(&m->wq);
 		}
 	}
 }
@@ -677,7 +677,7 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
 	n = p9_fd_poll(m->client, NULL);
 
 	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
-		queue_work(p9_mux_wq, &m->wq);
+		schedule_work(&m->wq);
 
 	return 0;
 }
@@ -1047,12 +1047,12 @@ static struct p9_trans_module p9_fd_trans = {
  *
  */
 
-static int p9_poll_proc(void *a)
+static void p9_poll_workfn(struct work_struct *work)
 {
 	unsigned long flags;
 
 	P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current);
-repeat:
+
 	spin_lock_irqsave(&p9_poll_lock, flags);
 	while (!list_empty(&p9_poll_pending_list)) {
 		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
@@ -1067,35 +1067,11 @@ static int p9_poll_proc(void *a)
 	}
 	spin_unlock_irqrestore(&p9_poll_lock, flags);
 
-	set_current_state(TASK_INTERRUPTIBLE);
-	if (list_empty(&p9_poll_pending_list)) {
-		P9_DPRINTK(P9_DEBUG_TRANS, "sleeping...\n");
-		schedule();
-	}
-	__set_current_state(TASK_RUNNING);
-
-	if (!kthread_should_stop())
-		goto repeat;
-
 	P9_DPRINTK(P9_DEBUG_TRANS, "finish\n");
-	return 0;
 }
 
 int p9_trans_fd_init(void)
 {
-	p9_mux_wq = create_workqueue("v9fs");
-	if (!p9_mux_wq) {
-		printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
-		return -ENOMEM;
-	}
-
-	p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll");
-	if (IS_ERR(p9_poll_task)) {
-		destroy_workqueue(p9_mux_wq);
-		printk(KERN_WARNING "v9fs: mux: creating poll task failed\n");
-		return PTR_ERR(p9_poll_task);
-	}
-
 	v9fs_register_trans(&p9_tcp_trans);
 	v9fs_register_trans(&p9_unix_trans);
 	v9fs_register_trans(&p9_fd_trans);
@@ -1105,10 +1081,8 @@ int p9_trans_fd_init(void)
 
 void p9_trans_fd_exit(void)
 {
-	kthread_stop(p9_poll_task);
+	flush_work_sync(&p9_poll_work);
 	v9fs_unregister_trans(&p9_tcp_trans);
 	v9fs_unregister_trans(&p9_unix_trans);
 	v9fs_unregister_trans(&p9_fd_trans);
-
-	destroy_workqueue(p9_mux_wq);
 }
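The trans_fd.c change above is also a kthread-to-work conversion: the p9_poll_proc() sleep/wake loop becomes a work item that drains p9_poll_pending_list once per run. This works because schedule_work() may be called from the wait-queue callback (atomic context) and does nothing while the work is already pending, so concurrent wakers coalesce into a single pass. A rough illustration of that coalescing, with hypothetical names rather than the 9p code itself:

#include <linux/workqueue.h>

static void drain_workfn(struct work_struct *work);
static DECLARE_WORK(drain_work, drain_workfn);

/* May be called from interrupt/atomic context, many times in a row. */
static void on_event(void)
{
	/* Returns false and does nothing if drain_work is already
	 * pending: N back-to-back events trigger one run. */
	schedule_work(&drain_work);
}

static void drain_workfn(struct work_struct *work)
{
	/* Drain everything that accumulated before this run started;
	 * events arriving after this point re-queue the work. */
}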
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 4123967d4d65..cce19f95c624 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -364,7 +364,6 @@ void rds_ib_exit(void)
 	rds_ib_sysctl_exit();
 	rds_ib_recv_exit();
 	rds_trans_unregister(&rds_ib_transport);
-	rds_ib_fmr_exit();
 }
 
 struct rds_transport rds_ib_transport = {
@@ -400,13 +399,9 @@ int rds_ib_init(void)
 
 	INIT_LIST_HEAD(&rds_ib_devices);
 
-	ret = rds_ib_fmr_init();
-	if (ret)
-		goto out;
-
 	ret = ib_register_client(&rds_ib_client);
 	if (ret)
-		goto out_fmr_exit;
+		goto out;
 
 	ret = rds_ib_sysctl_init();
 	if (ret)
@@ -430,8 +425,6 @@ out_sysctl:
 	rds_ib_sysctl_exit();
 out_ibreg:
 	rds_ib_unregister_client();
-out_fmr_exit:
-	rds_ib_fmr_exit();
 out:
 	return ret;
 }
diff --git a/net/rds/ib.h b/net/rds/ib.h
index e34ad032b66d..4297d92788dc 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -307,8 +307,6 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
 void rds_ib_sync_mr(void *trans_private, int dir);
 void rds_ib_free_mr(void *trans_private, int invalidate);
 void rds_ib_flush_mrs(void);
-int rds_ib_fmr_init(void);
-void rds_ib_fmr_exit(void);
 
 /* ib_recv.c */
 int rds_ib_recv_init(void);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 18a833c450c8..819c35a0d9cb 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -38,8 +38,6 @@
 #include "ib.h"
 #include "xlist.h"
 
-static struct workqueue_struct *rds_ib_fmr_wq;
-
 static DEFINE_PER_CPU(unsigned long, clean_list_grace);
 #define CLEAN_LIST_BUSY_BIT 0
 
@@ -307,7 +305,7 @@ static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
 	int err = 0, iter = 0;
 
 	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+		schedule_delayed_work(&pool->flush_worker, 10);
 
 	while (1) {
 		ibmr = rds_ib_reuse_fmr(pool);
@@ -696,24 +694,6 @@ out_nolock:
 	return ret;
 }
 
-int rds_ib_fmr_init(void)
-{
-	rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
-	if (!rds_ib_fmr_wq)
-		return -ENOMEM;
-	return 0;
-}
-
-/*
- * By the time this is called all the IB devices should have been torn down and
- * had their pools freed. As each pool is freed its work struct is waited on,
- * so the pool flushing work queue should be idle by the time we get here.
- */
-void rds_ib_fmr_exit(void)
-{
-	destroy_workqueue(rds_ib_fmr_wq);
-}
-
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
 {
 	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
@@ -741,7 +721,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 	/* If we've pinned too many pages, request a flush */
 	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
 	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+		schedule_delayed_work(&pool->flush_worker, 10);
 
 	if (invalidate) {
 		if (likely(!in_interrupt())) {
@@ -749,8 +729,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 		} else {
 			/* We get here if the user created a MR marked
 			 * as use_once and invalidate at the same time. */
-			queue_delayed_work(rds_ib_fmr_wq,
-					   &pool->flush_worker, 10);
+			schedule_delayed_work(&pool->flush_worker, 10);
 		}
 	}
 
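In ib_rdma.c the flush_worker is a struct delayed_work embedded in the MR pool, so the work function recovers its pool with container_of(); only the queue it runs on changes, and the delay argument is still in jiffies. A self-contained sketch of that embedding (the demo_* names are illustrative, not RDS code):

#include <linux/workqueue.h>
#include <linux/atomic.h>

/* Hypothetical pool, mirroring the flush_worker pattern above. */
struct demo_pool {
	atomic_t		dirty_count;
	struct delayed_work	flush_worker;
};

static void demo_flush_worker(struct work_struct *work)
{
	/* delayed_work embeds a work_struct; recover the pool from it */
	struct demo_pool *pool =
		container_of(work, struct demo_pool, flush_worker.work);

	/* ... flush the pool ... */
	atomic_set(&pool->dirty_count, 0);
}

static void demo_pool_init(struct demo_pool *pool)
{
	atomic_set(&pool->dirty_count, 0);
	INIT_DELAYED_WORK(&pool->flush_worker, demo_flush_worker);
}

static void demo_mark_dirty(struct demo_pool *pool)
{
	if (atomic_inc_return(&pool->dirty_count) >= 10)
		/* runs on the system workqueue after ~10 jiffies;
		 * a no-op if the delayed work is already pending */
		schedule_delayed_work(&pool->flush_worker, 10);
}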
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 59e599498e37..3fc8624fcd17 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -955,7 +955,7 @@ static int rpciod_start(void)
 	 * Create the rpciod thread and wait for it to start.
 	 */
 	dprintk("RPC: creating workqueue rpciod\n");
-	wq = alloc_workqueue("rpciod", WQ_RESCUER, 0);
+	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
 	rpciod_workqueue = wq;
 	return rpciod_workqueue != NULL;
 }
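The sunrpc hunk is the flag rename from the last commit in the series: WQ_RESCUER exposed an implementation detail (the rescuer thread), while WQ_MEM_RECLAIM names the guarantee callers actually want, forward progress under memory pressure. rpciod keeps the flag because RPC traffic can sit in the writeback path. A usage sketch with a hypothetical queue name:

#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int demo_start(void)
{
	/* WQ_MEM_RECLAIM guarantees at least one worker (the rescuer)
	 * even when new kthreads cannot be allocated; max_active of 0
	 * selects the default concurrency limit. */
	demo_wq = alloc_workqueue("demo", WQ_MEM_RECLAIM, 0);
	return demo_wq ? 0 : -ENOMEM;
}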