Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/mux.c                      | 16
-rw-r--r--  fs/aio.c                         | 16
-rw-r--r--  fs/bio.c                         |  6
-rw-r--r--  fs/file.c                        |  6
-rw-r--r--  fs/gfs2/glock.c                  |  8
-rw-r--r--  fs/ncpfs/inode.c                 |  8
-rw-r--r--  fs/ncpfs/sock.c                  | 20
-rw-r--r--  fs/nfs/client.c                  |  2
-rw-r--r--  fs/nfs/namespace.c               |  8
-rw-r--r--  fs/nfs/nfs4_fs.h                 |  2
-rw-r--r--  fs/nfs/nfs4renewd.c              |  5
-rw-r--r--  fs/nfsd/nfs4state.c              |  7
-rw-r--r--  fs/ocfs2/alloc.c                 |  9
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c     | 10
-rw-r--r--  fs/ocfs2/cluster/quorum.c        |  4
-rw-r--r--  fs/ocfs2/cluster/tcp.c           | 78
-rw-r--r--  fs/ocfs2/cluster/tcp_internal.h  |  8
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h         |  2
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c         |  2
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c       |  5
-rw-r--r--  fs/ocfs2/dlm/userdlm.c           | 10
-rw-r--r--  fs/ocfs2/journal.c               |  7
-rw-r--r--  fs/ocfs2/journal.h               |  2
-rw-r--r--  fs/ocfs2/ocfs2.h                 |  2
-rw-r--r--  fs/ocfs2/super.c                 |  2
-rw-r--r--  fs/reiserfs/journal.c            | 12
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c      | 21
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c       |  9
28 files changed, 158 insertions, 129 deletions
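
Every hunk below is the same mechanical conversion to the reworked workqueue interface: the work callback now receives the work_struct itself rather than a caller-supplied void pointer, the enclosing object is recovered with container_of(), and items that are queued with a delay become struct delayed_work. A minimal sketch of the converted pattern, for orientation only (struct foo and the foo_* names are illustrative, not code from any file above):

	#include <linux/workqueue.h>

	struct foo {
		int value;
		struct work_struct work;	/* plain work item */
		struct delayed_work dwork;	/* delayed work item */
	};

	static void foo_work_fn(struct work_struct *work)
	{
		/* the old API passed the object as void *data; now recover it */
		struct foo *f = container_of(work, struct foo, work);
		printk(KERN_INFO "foo: value %d\n", f->value);
	}

	static void foo_dwork_fn(struct work_struct *work)
	{
		/* delayed_work embeds a work_struct, so go through .work */
		struct foo *f = container_of(work, struct foo, dwork.work);
		printk(KERN_INFO "foo: delayed value %d\n", f->value);
	}

	static void foo_setup(struct foo *f)
	{
		INIT_WORK(&f->work, foo_work_fn);	/* was INIT_WORK(&f->work, fn, f) */
		INIT_DELAYED_WORK(&f->dwork, foo_dwork_fn);
		schedule_work(&f->work);
		schedule_delayed_work(&f->dwork, HZ);
	}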
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index 90a79c784549..944273c3dbff 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -110,8 +110,8 @@ struct v9fs_mux_rpc {
 };
 
 static int v9fs_poll_proc(void *);
-static void v9fs_read_work(void *);
-static void v9fs_write_work(void *);
+static void v9fs_read_work(struct work_struct *work);
+static void v9fs_write_work(struct work_struct *work);
 static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
 			  poll_table * p);
 static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
@@ -297,8 +297,8 @@ struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
 	m->rbuf = NULL;
 	m->wpos = m->wsize = 0;
 	m->wbuf = NULL;
-	INIT_WORK(&m->rq, v9fs_read_work, m);
-	INIT_WORK(&m->wq, v9fs_write_work, m);
+	INIT_WORK(&m->rq, v9fs_read_work);
+	INIT_WORK(&m->wq, v9fs_write_work);
 	m->wsched = 0;
 	memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
 	m->poll_task = NULL;
@@ -458,13 +458,13 @@ static int v9fs_poll_proc(void *a)
 /**
  * v9fs_write_work - called when a transport can send some data
  */
-static void v9fs_write_work(void *a)
+static void v9fs_write_work(struct work_struct *work)
 {
 	int n, err;
 	struct v9fs_mux_data *m;
 	struct v9fs_req *req;
 
-	m = a;
+	m = container_of(work, struct v9fs_mux_data, wq);
 
 	if (m->err < 0) {
 		clear_bit(Wworksched, &m->wsched);
@@ -564,7 +564,7 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
 /**
  * v9fs_read_work - called when there is some data to be read from a transport
  */
-static void v9fs_read_work(void *a)
+static void v9fs_read_work(struct work_struct *work)
 {
 	int n, err;
 	struct v9fs_mux_data *m;
@@ -572,7 +572,7 @@ static void v9fs_read_work(void *a)
 	struct v9fs_fcall *rcall;
 	char *rbuf;
 
-	m = a;
+	m = container_of(work, struct v9fs_mux_data, rq);
 
 	if (m->err < 0)
 		return;
diff --git a/fs/aio.c b/fs/aio.c
index 277a5f2d18ad..287a1bc7a182 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -53,13 +53,13 @@ static kmem_cache_t *kioctx_cachep;
 static struct workqueue_struct *aio_wq;
 
 /* Used for rare fput completion. */
-static void aio_fput_routine(void *);
-static DECLARE_WORK(fput_work, aio_fput_routine, NULL);
+static void aio_fput_routine(struct work_struct *);
+static DECLARE_WORK(fput_work, aio_fput_routine);
 
 static DEFINE_SPINLOCK(fput_lock);
 static LIST_HEAD(fput_head);
 
-static void aio_kick_handler(void *);
+static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
 /* aio_setup
@@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
 	INIT_LIST_HEAD(&ctx->active_reqs);
 	INIT_LIST_HEAD(&ctx->run_list);
-	INIT_WORK(&ctx->wq, aio_kick_handler, ctx);
+	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
 
 	if (aio_setup_ring(ctx) < 0)
 		goto out_freectx;
@@ -469,7 +469,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 	wake_up(&ctx->wait);
 }
 
-static void aio_fput_routine(void *data)
+static void aio_fput_routine(struct work_struct *data)
 {
 	spin_lock_irq(&fput_lock);
 	while (likely(!list_empty(&fput_head))) {
@@ -857,9 +857,9 @@ static inline void aio_run_all_iocbs(struct kioctx *ctx)
  * space.
  * Run on aiod's context.
  */
-static void aio_kick_handler(void *data)
+static void aio_kick_handler(struct work_struct *work)
 {
-	struct kioctx *ctx = data;
+	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
 	mm_segment_t oldfs = get_fs();
 	int requeue;
 
@@ -874,7 +874,7 @@ static void aio_kick_handler(void *data)
 	 * we're in a worker thread already, don't use queue_delayed_work,
 	 */
 	if (requeue)
-		queue_work(aio_wq, &ctx->wq);
+		queue_delayed_work(aio_wq, &ctx->wq, 0);
 }
 
 
diff --git a/fs/bio.c b/fs/bio.c
index aa4d09bd4e71..50c40ce2cead 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -940,16 +940,16 @@ static void bio_release_pages(struct bio *bio)
  * run one bio_put() against the BIO.
  */
 
-static void bio_dirty_fn(void *data);
+static void bio_dirty_fn(struct work_struct *work);
 
-static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
 static DEFINE_SPINLOCK(bio_dirty_lock);
 static struct bio *bio_dirty_list;
 
 /*
  * This runs in process context
  */
-static void bio_dirty_fn(void *data)
+static void bio_dirty_fn(struct work_struct *work)
 {
 	unsigned long flags;
 	struct bio *bio;
diff --git a/fs/file.c b/fs/file.c
index 8e81775c5dc8..3787e82f54c1 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -91,8 +91,10 @@ out:
 	spin_unlock(&fddef->lock);
 }
 
-static void free_fdtable_work(struct fdtable_defer *f)
+static void free_fdtable_work(struct work_struct *work)
 {
+	struct fdtable_defer *f =
+		container_of(work, struct fdtable_defer, wq);
 	struct fdtable *fdt;
 
 	spin_lock_bh(&f->lock);
@@ -351,7 +353,7 @@ static void __devinit fdtable_defer_list_init(int cpu)
 {
 	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
 	spin_lock_init(&fddef->lock);
-	INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef);
+	INIT_WORK(&fddef->wq, free_fdtable_work);
 	init_timer(&fddef->timer);
 	fddef->timer.data = (unsigned long)fddef;
 	fddef->timer.function = fdtable_timer;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 78fe0fae23ff..55f5333dae99 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -35,7 +35,7 @@
 
 struct greedy {
 	struct gfs2_holder gr_gh;
-	struct work_struct gr_work;
+	struct delayed_work gr_work;
 };
 
 struct gfs2_gl_hash_bucket {
@@ -1368,9 +1368,9 @@ static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
 		glops->go_xmote_th(gl, state, flags);
 }
 
-static void greedy_work(void *data)
+static void greedy_work(struct work_struct *work)
 {
-	struct greedy *gr = data;
+	struct greedy *gr = container_of(work, struct greedy, gr_work.work);
 	struct gfs2_holder *gh = &gr->gr_gh;
 	struct gfs2_glock *gl = gh->gh_gl;
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
@@ -1422,7 +1422,7 @@ int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
 
 	gfs2_holder_init(gl, 0, 0, gh);
 	set_bit(HIF_GREEDY, &gh->gh_iflags);
-	INIT_WORK(&gr->gr_work, greedy_work, gr);
+	INIT_DELAYED_WORK(&gr->gr_work, greedy_work);
 
 	set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
 	schedule_delayed_work(&gr->gr_work, time);
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 42e3bef270c9..72dad552aa00 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -577,12 +577,12 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
 		server->rcv.ptr = (unsigned char*)&server->rcv.buf;
 		server->rcv.len = 10;
 		server->rcv.state = 0;
-		INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server);
-		INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server);
+		INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc);
+		INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc);
 		sock->sk->sk_write_space = ncp_tcp_write_space;
 	} else {
-		INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server);
-		INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server);
+		INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc);
+		INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc);
 		server->timeout_tm.data = (unsigned long)server;
 		server->timeout_tm.function = ncpdgram_timeout_call;
 	}
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 11c2b252ebed..e496d8b65e92 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -350,9 +350,10 @@ static void info_server(struct ncp_server *server, unsigned int id, const void *
 	}
 }
 
-void ncpdgram_rcv_proc(void *s)
+void ncpdgram_rcv_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, rcv.tq);
 	struct socket* sock;
 
 	sock = server->ncp_sock;
@@ -468,9 +469,10 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server)
 	}
 }
 
-void ncpdgram_timeout_proc(void *s)
+void ncpdgram_timeout_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, timeout_tq);
 	mutex_lock(&server->rcv.creq_mutex);
 	__ncpdgram_timeout_proc(server);
 	mutex_unlock(&server->rcv.creq_mutex);
@@ -652,18 +654,20 @@ skipdata:;
 	}
 }
 
-void ncp_tcp_rcv_proc(void *s)
+void ncp_tcp_rcv_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, rcv.tq);
 
 	mutex_lock(&server->rcv.creq_mutex);
 	__ncptcp_rcv_proc(server);
 	mutex_unlock(&server->rcv.creq_mutex);
 }
 
-void ncp_tcp_tx_proc(void *s)
+void ncp_tcp_tx_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, tx.tq);
 
 	mutex_lock(&server->rcv.creq_mutex);
 	__ncptcp_try_send(server);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 5fea638743e4..23ab145daa2d 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
 	INIT_LIST_HEAD(&clp->cl_state_owners);
 	INIT_LIST_HEAD(&clp->cl_unused);
 	spin_lock_init(&clp->cl_lock);
-	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
+	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
 	clp->cl_boot_time = CURRENT_TIME;
 	clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index ec1114b33d89..371b804e7cc8 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -18,10 +18,10 @@
 
 #define NFSDBG_FACILITY NFSDBG_VFS
 
-static void nfs_expire_automounts(void *list);
+static void nfs_expire_automounts(struct work_struct *work);
 
 LIST_HEAD(nfs_automount_list);
-static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list);
+static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
 int nfs_mountpoint_expiry_timeout = 500 * HZ;
 
 static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
@@ -164,9 +164,9 @@ struct inode_operations nfs_referral_inode_operations = {
 	.follow_link	= nfs_follow_mountpoint,
 };
 
-static void nfs_expire_automounts(void *data)
+static void nfs_expire_automounts(struct work_struct *work)
 {
-	struct list_head *list = (struct list_head *)data;
+	struct list_head *list = &nfs_automount_list;
 
 	mark_mounts_for_expiry(list);
 	if (!list_empty(list))
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 6f346677332d..c26cd978c7cc 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -185,7 +185,7 @@ extern const u32 nfs4_fs_locations_bitmap[2];
 extern void nfs4_schedule_state_renewal(struct nfs_client *);
 extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
 extern void nfs4_kill_renewd(struct nfs_client *);
-extern void nfs4_renew_state(void *);
+extern void nfs4_renew_state(struct work_struct *);
 
 /* nfs4state.c */
 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp);
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 7b6df1852e75..823298561c0a 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -59,9 +59,10 @@
 #define NFSDBG_FACILITY	NFSDBG_PROC
 
 void
-nfs4_renew_state(void *data)
+nfs4_renew_state(struct work_struct *work)
 {
-	struct nfs_client *clp = (struct nfs_client *)data;
+	struct nfs_client *clp =
+		container_of(work, struct nfs_client, cl_renewd.work);
 	struct rpc_cred *cred;
 	long lease, timeout;
 	unsigned long last, now;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 293b6495829f..e431e93ab503 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1829,9 +1829,8 @@ out:
 }
 
 static struct workqueue_struct *laundry_wq;
-static struct work_struct laundromat_work;
-static void laundromat_main(void *);
-static DECLARE_WORK(laundromat_work, laundromat_main, NULL);
+static void laundromat_main(struct work_struct *);
+static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
 
 __be32
 nfsd4_renew(clientid_t *clid)
@@ -1940,7 +1939,7 @@ nfs4_laundromat(void)
 }
 
 void
-laundromat_main(void *not_used)
+laundromat_main(struct work_struct *not_used)
 {
 	time_t t;
 
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index f43bc5f18a35..0b2ad163005e 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1205,10 +1205,12 @@ int ocfs2_flush_truncate_log(struct ocfs2_super *osb)
 	return status;
 }
 
-static void ocfs2_truncate_log_worker(void *data)
+static void ocfs2_truncate_log_worker(struct work_struct *work)
 {
 	int status;
-	struct ocfs2_super *osb = data;
+	struct ocfs2_super *osb =
+		container_of(work, struct ocfs2_super,
+			     osb_truncate_log_wq.work);
 
 	mlog_entry_void();
 
@@ -1441,7 +1443,8 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb)
 	/* ocfs2_truncate_log_shutdown keys on the existence of
 	 * osb->osb_tl_inode so we don't set any of the osb variables
 	 * until we're sure all is well. */
-	INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb);
+	INIT_DELAYED_WORK(&osb->osb_truncate_log_wq,
+			  ocfs2_truncate_log_worker);
 	osb->osb_tl_bh = tl_bh;
 	osb->osb_tl_inode = tl_inode;
 
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 305cba3681fe..4cd9a9580456 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -141,7 +141,7 @@ struct o2hb_region {
 	 * recognizes a node going up and down in one iteration */
 	u64			hr_generation;
 
-	struct work_struct	hr_write_timeout_work;
+	struct delayed_work	hr_write_timeout_work;
 	unsigned long		hr_last_timeout_start;
 
 	/* Used during o2hb_check_slot to hold a copy of the block
@@ -156,9 +156,11 @@ struct o2hb_bio_wait_ctxt {
 	int               wc_error;
 };
 
-static void o2hb_write_timeout(void *arg)
+static void o2hb_write_timeout(struct work_struct *work)
 {
-	struct o2hb_region *reg = arg;
+	struct o2hb_region *reg =
+		container_of(work, struct o2hb_region,
+			     hr_write_timeout_work.work);
 
 	mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
 	     "milliseconds\n", reg->hr_dev_name,
@@ -1404,7 +1406,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
 		goto out;
 	}
 
-	INIT_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout, reg);
+	INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
 
 	/*
 	 * A node is considered live after it has beat LIVE_THRESHOLD
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 7bba98fbfc15..4705d659fe57 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -88,7 +88,7 @@ void o2quo_disk_timeout(void)
 	o2quo_fence_self();
 }
 
-static void o2quo_make_decision(void *arg)
+static void o2quo_make_decision(struct work_struct *work)
 {
 	int quorum;
 	int lowest_hb, lowest_reachable = 0, fence = 0;
@@ -306,7 +306,7 @@ void o2quo_init(void)
 	struct o2quo_state *qs = &o2quo_state;
 
 	spin_lock_init(&qs->qs_lock);
-	INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL);
+	INIT_WORK(&qs->qs_work, o2quo_make_decision);
 }
 
 void o2quo_exit(void)
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index b650efa8c8be..9b3209dc0b16 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -140,11 +140,11 @@ static int o2net_sys_err_translations[O2NET_ERR_MAX] =
 	[O2NET_ERR_DIED]	= -EHOSTDOWN,};
 
 /* can't quite avoid *all* internal declarations :/ */
-static void o2net_sc_connect_completed(void *arg);
-static void o2net_rx_until_empty(void *arg);
-static void o2net_shutdown_sc(void *arg);
+static void o2net_sc_connect_completed(struct work_struct *work);
+static void o2net_rx_until_empty(struct work_struct *work);
+static void o2net_shutdown_sc(struct work_struct *work);
 static void o2net_listen_data_ready(struct sock *sk, int bytes);
-static void o2net_sc_send_keep_req(void *arg);
+static void o2net_sc_send_keep_req(struct work_struct *work);
 static void o2net_idle_timer(unsigned long data);
 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
 
@@ -308,10 +308,10 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
 	o2nm_node_get(node);
 	sc->sc_node = node;
 
-	INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed, sc);
-	INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc);
-	INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc);
-	INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc);
+	INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
+	INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
+	INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
+	INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
 
 	init_timer(&sc->sc_idle_timeout);
 	sc->sc_idle_timeout.function = o2net_idle_timer;
@@ -342,7 +342,7 @@ static void o2net_sc_queue_work(struct o2net_sock_container *sc,
 	sc_put(sc);
 }
 static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
-					struct work_struct *work,
+					struct delayed_work *work,
 					int delay)
 {
 	sc_get(sc);
@@ -350,7 +350,7 @@ static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
 	sc_put(sc);
 }
 static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
-					 struct work_struct *work)
+					 struct delayed_work *work)
 {
 	if (cancel_delayed_work(work))
 		sc_put(sc);
@@ -564,9 +564,11 @@ static void o2net_ensure_shutdown(struct o2net_node *nn,
  * ourselves as state_change couldn't get the nn_lock and call set_nn_state
  * itself.
  */
-static void o2net_shutdown_sc(void *arg)
+static void o2net_shutdown_sc(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container,
+			     sc_shutdown_work);
 	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
 
 	sclog(sc, "shutting down\n");
@@ -1201,9 +1203,10 @@ out:
 /* this work func is triggerd by data ready.  it reads until it can read no
  * more.  it interprets 0, eof, as fatal.  if data_ready hits while we're doing
  * our work the work struct will be marked and we'll be called again. */
-static void o2net_rx_until_empty(void *arg)
+static void o2net_rx_until_empty(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container, sc_rx_work);
 	int ret;
 
 	do {
@@ -1249,9 +1252,11 @@ static int o2net_set_nodelay(struct socket *sock)
 
 /* called when a connect completes and after a sock is accepted. the
  * rx path will see the response and mark the sc valid */
-static void o2net_sc_connect_completed(void *arg)
+static void o2net_sc_connect_completed(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container,
+			     sc_connect_work);
 
 	mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
 		(unsigned long long)O2NET_PROTOCOL_VERSION,
@@ -1262,9 +1267,11 @@ static void o2net_sc_connect_completed(void *arg)
 }
 
 /* this is called as a work_struct func. */
-static void o2net_sc_send_keep_req(void *arg)
+static void o2net_sc_send_keep_req(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container,
+			     sc_keepalive_work.work);
 
 	o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
 	sc_put(sc);
@@ -1314,14 +1321,15 @@ static void o2net_sc_postpone_idle(struct o2net_sock_container *sc)
  * having a connect attempt fail, etc. This centralizes the logic which decides
  * if a connect attempt should be made or if we should give up and all future
  * transmit attempts should fail */
-static void o2net_start_connect(void *arg)
+static void o2net_start_connect(struct work_struct *work)
 {
-	struct o2net_node *nn = arg;
+	struct o2net_node *nn =
+		container_of(work, struct o2net_node, nn_connect_work.work);
 	struct o2net_sock_container *sc = NULL;
 	struct o2nm_node *node = NULL, *mynode = NULL;
 	struct socket *sock = NULL;
 	struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
-	int ret = 0;
+	int ret = 0, stop;
 
 	/* if we're greater we initiate tx, otherwise we accept */
 	if (o2nm_this_node() <= o2net_num_from_nn(nn))
@@ -1342,10 +1350,9 @@ static void o2net_start_connect(void *arg)
 
 	spin_lock(&nn->nn_lock);
 	/* see if we already have one pending or have given up */
-	if (nn->nn_sc || nn->nn_persistent_error)
-		arg = NULL;
+	stop = (nn->nn_sc || nn->nn_persistent_error);
 	spin_unlock(&nn->nn_lock);
-	if (arg == NULL) /* *shrug*, needed some indicator */
+	if (stop)
 		goto out;
 
 	nn->nn_last_connect_attempt = jiffies;
@@ -1421,9 +1428,10 @@ out:
 	return;
 }
 
-static void o2net_connect_expired(void *arg)
+static void o2net_connect_expired(struct work_struct *work)
 {
-	struct o2net_node *nn = arg;
+	struct o2net_node *nn =
+		container_of(work, struct o2net_node, nn_connect_expired.work);
 
 	spin_lock(&nn->nn_lock);
 	if (!nn->nn_sc_valid) {
@@ -1436,9 +1444,10 @@ static void o2net_connect_expired(void *arg)
 	spin_unlock(&nn->nn_lock);
 }
 
-static void o2net_still_up(void *arg)
+static void o2net_still_up(struct work_struct *work)
 {
-	struct o2net_node *nn = arg;
+	struct o2net_node *nn =
+		container_of(work, struct o2net_node, nn_still_up.work);
 
 	o2quo_hb_still_up(o2net_num_from_nn(nn));
 }
@@ -1644,9 +1653,9 @@ out:
 	return ret;
 }
 
-static void o2net_accept_many(void *arg)
+static void o2net_accept_many(struct work_struct *work)
 {
-	struct socket *sock = arg;
+	struct socket *sock = o2net_listen_sock;
 	while (o2net_accept_one(sock) == 0)
 		cond_resched();
 }
@@ -1700,7 +1709,7 @@ static int o2net_open_listening_sock(__be16 port)
 	write_unlock_bh(&sock->sk->sk_callback_lock);
 
 	o2net_listen_sock = sock;
-	INIT_WORK(&o2net_listen_work, o2net_accept_many, sock);
+	INIT_WORK(&o2net_listen_work, o2net_accept_many);
 
 	sock->sk->sk_reuse = 1;
 	ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
@@ -1819,9 +1828,10 @@ int o2net_init(void)
 		struct o2net_node *nn = o2net_nn_from_num(i);
 
 		spin_lock_init(&nn->nn_lock);
-		INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn);
-		INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn);
-		INIT_WORK(&nn->nn_still_up, o2net_still_up, nn);
+		INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect);
+		INIT_DELAYED_WORK(&nn->nn_connect_expired,
+				  o2net_connect_expired);
+		INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up);
 		/* until we see hb from a node we'll return einval */
 		nn->nn_persistent_error = -ENOTCONN;
 		init_waitqueue_head(&nn->nn_sc_wq);
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 4b46aac7d243..daebbd3a2c8c 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -86,18 +86,18 @@ struct o2net_node {
 	 * connect attempt fails and so can be self-arming.  shutdown is
 	 * careful to first mark the nn such that no connects will be attempted
 	 * before canceling delayed connect work and flushing the queue. */
-	struct work_struct		nn_connect_work;
+	struct delayed_work		nn_connect_work;
 	unsigned long			nn_last_connect_attempt;
 
 	/* this is queued as nodes come up and is canceled when a connection is
 	 * established. this expiring gives up on the node and errors out
 	 * transmits */
-	struct work_struct		nn_connect_expired;
+	struct delayed_work		nn_connect_expired;
 
 	/* after we give up on a socket we wait a while before deciding
 	 * that it is still heartbeating and that we should do some
 	 * quorum work */
-	struct work_struct		nn_still_up;
+	struct delayed_work		nn_still_up;
 };
 
 struct o2net_sock_container {
@@ -129,7 +129,7 @@ struct o2net_sock_container {
 	struct work_struct	sc_shutdown_work;
 
 	struct timer_list	sc_idle_timeout;
-	struct work_struct	sc_keepalive_work;
+	struct delayed_work	sc_keepalive_work;
 
 	unsigned		sc_handshake_ok:1;
 
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index fa968180b072..6b6ff76538c5 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -153,7 +153,7 @@ static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned
  * called functions that cannot be directly called from the
  * net message handlers for some reason, usually because
  * they need to send net messages of their own. */
-void dlm_dispatch_work(void *data);
+void dlm_dispatch_work(struct work_struct *work);
 
 struct dlm_lock_resource;
 struct dlm_work_item;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 8d1065f8b3bd..637646e6922e 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1296,7 +1296,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
 
 	spin_lock_init(&dlm->work_lock);
 	INIT_LIST_HEAD(&dlm->work_list);
-	INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm);
+	INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
 
 	kref_init(&dlm->dlm_refs);
 	dlm->dlm_state = DLM_CTXT_NEW;
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 9d950d7cea38..fb3e2b0817f1 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -153,9 +153,10 @@ static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
 }
 
 /* Worker function used during recovery. */
-void dlm_dispatch_work(void *data)
+void dlm_dispatch_work(struct work_struct *work)
 {
-	struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
+	struct dlm_ctxt *dlm =
+		container_of(work, struct dlm_ctxt, dispatched_work);
 	LIST_HEAD(tmp_list);
 	struct list_head *iter, *iter2;
 	struct dlm_work_item *item;
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
index eead48bbfac6..7d2f578b267d 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -171,15 +171,14 @@ static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
 		BUG();
 }
 
-static void user_dlm_unblock_lock(void *opaque);
+static void user_dlm_unblock_lock(struct work_struct *work);
 
 static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
 {
 	if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
 		user_dlm_grab_inode_ref(lockres);
 
-		INIT_WORK(&lockres->l_work, user_dlm_unblock_lock,
-				lockres);
+		INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);
 
 		queue_work(user_dlm_worker, &lockres->l_work);
 		lockres->l_flags |= USER_LOCK_QUEUED;
@@ -279,10 +278,11 @@ static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
 	iput(inode);
 }
 
-static void user_dlm_unblock_lock(void *opaque)
+static void user_dlm_unblock_lock(struct work_struct *work)
 {
 	int new_level, status;
-	struct user_lock_res *lockres = (struct user_lock_res *) opaque;
+	struct user_lock_res *lockres =
+		container_of(work, struct user_lock_res, l_work);
 	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
 
 	mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index fd9734def551..d95ee2720e6e 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -911,11 +911,12 @@ struct ocfs2_la_recovery_item {
  * NOTE: This function can and will sleep on recovery of other nodes
  * during cluster locking, just like any other ocfs2 process.
  */
-void ocfs2_complete_recovery(void *data)
+void ocfs2_complete_recovery(struct work_struct *work)
 {
 	int ret;
-	struct ocfs2_super *osb = data;
-	struct ocfs2_journal *journal = osb->journal;
+	struct ocfs2_journal *journal =
+		container_of(work, struct ocfs2_journal, j_recovery_work);
+	struct ocfs2_super *osb = journal->j_osb;
 	struct ocfs2_dinode *la_dinode, *tl_dinode;
 	struct ocfs2_la_recovery_item *item;
 	struct list_head *p, *n;
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 2f3a6acdac45..5be161a4ad9f 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -172,7 +172,7 @@ static inline void ocfs2_handle_set_sync(struct ocfs2_journal_handle *handle, in
 }
 
 /* Exported only for the journal struct init code in super.c. Do not call. */
-void ocfs2_complete_recovery(void *data);
+void ocfs2_complete_recovery(struct work_struct *work);
 
 /*
  * Journal Control:
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 0462a7f4e21b..9b1bad1d48ec 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -283,7 +283,7 @@ struct ocfs2_super
 	/* Truncate log info */
 	struct inode			*osb_tl_inode;
 	struct buffer_head		*osb_tl_bh;
-	struct work_struct		osb_truncate_log_wq;
+	struct delayed_work		osb_truncate_log_wq;
 
 	struct ocfs2_node_map		osb_recovering_orphan_dirs;
 	unsigned int			*osb_orphan_wipes;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 76b46ebbb10c..9a8089030f55 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1365,7 +1365,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
 	spin_lock_init(&journal->j_lock);
 	journal->j_trans_id = (unsigned long) 1;
 	INIT_LIST_HEAD(&journal->j_la_cleanups);
-	INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb);
+	INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
 	journal->j_state = OCFS2_JOURNAL_FREE;
 
 	/* get some pseudo constants for clustersize bits */
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index ac93174c9639..7280a23ef344 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -104,7 +104,7 @@ static int release_journal_dev(struct super_block *super,
 				     struct reiserfs_journal *journal);
 static int dirty_one_transaction(struct super_block *s,
 				 struct reiserfs_journal_list *jl);
-static void flush_async_commits(void *p);
+static void flush_async_commits(struct work_struct *work);
 static void queue_log_writer(struct super_block *s);
 
 /* values for join in do_journal_begin_r */
@@ -2836,7 +2836,8 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
 	if (reiserfs_mounted_fs_count <= 1)
 		commit_wq = create_workqueue("reiserfs");
 
-	INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
+	INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
+	journal->j_work_sb = p_s_sb;
 	return 0;
       free_and_return:
 	free_journal_ram(p_s_sb);
@@ -3447,10 +3448,11 @@ int journal_end_sync(struct reiserfs_transaction_handle *th,
 /*
 ** writeback the pending async commits to disk
 */
-static void flush_async_commits(void *p)
+static void flush_async_commits(struct work_struct *work)
 {
-	struct super_block *p_s_sb = p;
-	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
+	struct reiserfs_journal *journal =
+		container_of(work, struct reiserfs_journal, j_work.work);
+	struct super_block *p_s_sb = journal->j_work_sb;
 	struct reiserfs_journal_list *jl;
 	struct list_head *entry;
 
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 09360cf1e1f2..8e6b56fc1cad 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -149,9 +149,10 @@ xfs_destroy_ioend(
  */
 STATIC void
 xfs_end_bio_delalloc(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 
 	xfs_destroy_ioend(ioend);
 }
@@ -161,9 +162,10 @@ xfs_end_bio_delalloc(
  */
 STATIC void
 xfs_end_bio_written(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 
 	xfs_destroy_ioend(ioend);
 }
@@ -176,9 +178,10 @@ xfs_end_bio_written(
  */
 STATIC void
 xfs_end_bio_unwritten(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 	bhv_vnode_t		*vp = ioend->io_vnode;
 	xfs_off_t		offset = ioend->io_offset;
 	size_t			size = ioend->io_size;
@@ -220,11 +223,11 @@ xfs_alloc_ioend(
 	ioend->io_size = 0;
 
 	if (type == IOMAP_UNWRITTEN)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
 	else if (type == IOMAP_DELAY)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
 	else
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
 
 	return ioend;
 }
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index d3382843698e..eef4a0ba11e9 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -994,9 +994,10 @@ xfs_buf_wait_unpin(
 
 STATIC void
 xfs_buf_iodone_work(
-	void			*v)
+	struct work_struct	*work)
 {
-	xfs_buf_t		*bp = (xfs_buf_t *)v;
+	xfs_buf_t		*bp =
+		container_of(work, xfs_buf_t, b_iodone_work);
 
 	if (bp->b_iodone)
 		(*(bp->b_iodone))(bp);
@@ -1017,10 +1018,10 @@ xfs_buf_ioend(
 
 	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
 		if (schedule) {
-			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
+			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
 			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
 		} else {
-			xfs_buf_iodone_work(bp);
+			xfs_buf_iodone_work(&bp->b_iodone_work);
 		}
 	} else {
 		up(&bp->b_iodonesema);