author    David Howells <dhowells@redhat.com>  2006-11-22 09:55:48 -0500
committer David Howells <dhowells@redhat.com>  2006-11-22 09:55:48 -0500
commit    65f27f38446e1976cc98fd3004b110fedcddd189 (patch)
tree      68f8be93feae31dfa018c22db392a05546b63ee1 /fs
parent    365970a1ea76d81cb1ad2f652acb605f06dae256 (diff)
WorkStruct: Pass the work_struct pointer instead of context data
Pass the work_struct pointer to the work function rather than context data. The work function can use container_of() to work out the data.

For the cases where the container of the work_struct may go away the moment the pending bit is cleared, it is made possible to defer the release of the structure by deferring the clearing of the pending bit. To make this work, an extra flag is introduced into the management side of the work_struct: it governs auto-release of the structure upon execution.

Ordinarily, the work queue executor releases the work_struct for further scheduling or deallocation by clearing the pending bit prior to jumping to the work function. This means that, unless the driver itself guarantees that the work_struct won't go away, the work function may not access anything else in the work_struct or its container lest they be deallocated. This is a problem if the auxiliary data is taken away (as done by the last patch).

However, if the pending bit is *not* cleared before jumping to the work function, then the work function *may* access the work_struct and its container with no problems. But then the work function must itself release the work_struct by calling work_release().

In most cases, automatic release is fine, so this is the default. Special initiators exist for the non-auto-release case (ending in _NAR).

Signed-off-by: David Howells <dhowells@redhat.com>
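For illustration, here is a minimal sketch of the handler pattern this change converts callers to. The struct, field, and function names (frob_ctx, frob_handler, frob_setup) are hypothetical and not taken from the patch; only the work_struct handler signature, INIT_WORK(), and container_of() are the kernel API as changed here.

#include <linux/workqueue.h>

/* Hypothetical container structure embedding a work_struct. */
struct frob_ctx {
        int pending;
        struct work_struct work;
};

/*
 * New-style handler: it is handed the work_struct pointer and recovers
 * its container with container_of(), instead of receiving a void *
 * context argument as before this change.
 */
static void frob_handler(struct work_struct *work)
{
        struct frob_ctx *ctx = container_of(work, struct frob_ctx, work);

        /*
         * With the default auto-release behaviour the pending bit has
         * already been cleared by the time this runs, so something else
         * (e.g. a reference held by the submitter) must keep ctx alive.
         */
        ctx->pending--;
}

static void frob_setup(struct frob_ctx *ctx)
{
        /* Before this change: INIT_WORK(&ctx->work, frob_handler, ctx); */
        INIT_WORK(&ctx->work, frob_handler);
}

For containers that may be freed as soon as the pending bit clears, the _NAR initiators described above leave the pending bit set, so the handler may safely use the container provided it then calls work_release() itself.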
Diffstat (limited to 'fs')
-rw-r--r--  fs/aio.c             14
-rw-r--r--  fs/bio.c              6
-rw-r--r--  fs/file.c             6
-rw-r--r--  fs/nfs/client.c       2
-rw-r--r--  fs/nfs/namespace.c    9
-rw-r--r--  fs/nfs/nfs4_fs.h      2
-rw-r--r--  fs/nfs/nfs4renewd.c   5
7 files changed, 23 insertions, 21 deletions
diff --git a/fs/aio.c b/fs/aio.c
index 11a1a7100ad6..ca1c5180a17f 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -53,13 +53,13 @@ static kmem_cache_t *kioctx_cachep;
 static struct workqueue_struct *aio_wq;
 
 /* Used for rare fput completion. */
-static void aio_fput_routine(void *);
-static DECLARE_WORK(fput_work, aio_fput_routine, NULL);
+static void aio_fput_routine(struct work_struct *);
+static DECLARE_WORK(fput_work, aio_fput_routine);
 
 static DEFINE_SPINLOCK(fput_lock);
 static LIST_HEAD(fput_head);
 
-static void aio_kick_handler(void *);
+static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
 /* aio_setup
@@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
         INIT_LIST_HEAD(&ctx->active_reqs);
         INIT_LIST_HEAD(&ctx->run_list);
-        INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler, ctx);
+        INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
 
         if (aio_setup_ring(ctx) < 0)
                 goto out_freectx;
@@ -470,7 +470,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
         wake_up(&ctx->wait);
 }
 
-static void aio_fput_routine(void *data)
+static void aio_fput_routine(struct work_struct *data)
 {
         spin_lock_irq(&fput_lock);
         while (likely(!list_empty(&fput_head))) {
@@ -859,9 +859,9 @@ static inline void aio_run_all_iocbs(struct kioctx *ctx)
  * space.
  * Run on aiod's context.
  */
-static void aio_kick_handler(void *data)
+static void aio_kick_handler(struct work_struct *work)
 {
-        struct kioctx *ctx = data;
+        struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
         mm_segment_t oldfs = get_fs();
         int requeue;
 
diff --git a/fs/bio.c b/fs/bio.c
index f95c8749499f..c6c07ca5b5a9 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -955,16 +955,16 @@ static void bio_release_pages(struct bio *bio)
  * run one bio_put() against the BIO.
  */
 
-static void bio_dirty_fn(void *data);
+static void bio_dirty_fn(struct work_struct *work);
 
-static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
 static DEFINE_SPINLOCK(bio_dirty_lock);
 static struct bio *bio_dirty_list;
 
 /*
  * This runs in process context
  */
-static void bio_dirty_fn(void *data)
+static void bio_dirty_fn(struct work_struct *work)
 {
         unsigned long flags;
         struct bio *bio;
diff --git a/fs/file.c b/fs/file.c
index 8e81775c5dc8..3787e82f54c1 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -91,8 +91,10 @@ out:
         spin_unlock(&fddef->lock);
 }
 
-static void free_fdtable_work(struct fdtable_defer *f)
+static void free_fdtable_work(struct work_struct *work)
 {
+        struct fdtable_defer *f =
+                container_of(work, struct fdtable_defer, wq);
         struct fdtable *fdt;
 
         spin_lock_bh(&f->lock);
@@ -351,7 +353,7 @@ static void __devinit fdtable_defer_list_init(int cpu)
 {
         struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
         spin_lock_init(&fddef->lock);
-        INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef);
+        INIT_WORK(&fddef->wq, free_fdtable_work);
         init_timer(&fddef->timer);
         fddef->timer.data = (unsigned long)fddef;
         fddef->timer.function = fdtable_timer;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 6f0487d6f44a..23ab145daa2d 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
         INIT_LIST_HEAD(&clp->cl_state_owners);
         INIT_LIST_HEAD(&clp->cl_unused);
         spin_lock_init(&clp->cl_lock);
-        INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
+        INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
         rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
         clp->cl_boot_time = CURRENT_TIME;
         clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 5ed798bc1cf7..371b804e7cc8 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -18,11 +18,10 @@
 
 #define NFSDBG_FACILITY NFSDBG_VFS
 
-static void nfs_expire_automounts(void *list);
+static void nfs_expire_automounts(struct work_struct *work);
 
 LIST_HEAD(nfs_automount_list);
-static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts,
-                                                &nfs_automount_list);
+static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
 int nfs_mountpoint_expiry_timeout = 500 * HZ;
 
 static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
@@ -165,9 +164,9 @@ struct inode_operations nfs_referral_inode_operations = {
         .follow_link = nfs_follow_mountpoint,
 };
 
-static void nfs_expire_automounts(void *data)
+static void nfs_expire_automounts(struct work_struct *work)
 {
-        struct list_head *list = (struct list_head *)data;
+        struct list_head *list = &nfs_automount_list;
 
         mark_mounts_for_expiry(list);
         if (!list_empty(list))
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 6f346677332d..c26cd978c7cc 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -185,7 +185,7 @@ extern const u32 nfs4_fs_locations_bitmap[2];
 extern void nfs4_schedule_state_renewal(struct nfs_client *);
 extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
 extern void nfs4_kill_renewd(struct nfs_client *);
-extern void nfs4_renew_state(void *);
+extern void nfs4_renew_state(struct work_struct *);
 
 /* nfs4state.c */
 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp);
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 7b6df1852e75..823298561c0a 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -59,9 +59,10 @@
 #define NFSDBG_FACILITY NFSDBG_PROC
 
 void
-nfs4_renew_state(void *data)
+nfs4_renew_state(struct work_struct *work)
 {
-        struct nfs_client *clp = (struct nfs_client *)data;
+        struct nfs_client *clp =
+                container_of(work, struct nfs_client, cl_renewd.work);
         struct rpc_cred *cred;
         long lease, timeout;
         unsigned long last, now;