author		David Howells <dhowells@redhat.com>	2006-11-22 09:54:01 -0500
committer	David Howells <dhowells@redhat.com>	2006-11-22 09:54:01 -0500
commit		52bad64d95bd89e08c49ec5a071fa6dcbe5a1a9c (patch)
tree		5849b4e3c17daa70a7e81cfdeaddac9ac8a0e953
parent		0f9005a6f7a82f4aacbd72f7b92322a8ca1c3f97 (diff)
WorkStruct: Separate delayable and non-delayable events.
Separate delayable work items from non-delayable work items by splitting them into a separate structure (delayed_work), which incorporates a work_struct and the timer_list removed from work_struct.

The work_struct struct is huge, and this limits its usefulness. On a 64-bit architecture it's nearly 100 bytes in size. This reduces that by half for the non-delayable type of event.

Signed-off-by: David Howells <dhowells@redhat.com>
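For reference, the split looks roughly like the sketch below (field layout abbreviated from the include/linux/workqueue.h hunk further down; not a verbatim copy of the header):

/* Before: every work item carried a timer, delayed or not. */
struct work_struct {
	unsigned long pending;
	struct list_head entry;
	void (*func)(void *);
	void *data;
	void *wq_data;
	struct timer_list timer;	/* pure overhead for immediate work */
};

/* After: only delayable items pay for the timer. */
struct work_struct {
	unsigned long pending;
	struct list_head entry;
	void (*func)(void *);
	void *data;
	void *wq_data;
};

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

Dropping timer_list from the common case is what accounts for the roughly-half saving described above.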
-rw-r--r--	arch/x86_64/kernel/mce.c		2
-rw-r--r--	drivers/ata/libata-core.c		11
-rw-r--r--	drivers/ata/libata-eh.c			2
-rw-r--r--	drivers/char/random.c			2
-rw-r--r--	drivers/char/tty_io.c			2
-rw-r--r--	fs/aio.c				4
-rw-r--r--	fs/nfs/client.c				2
-rw-r--r--	fs/nfs/namespace.c			3
-rw-r--r--	include/linux/aio.h			2
-rw-r--r--	include/linux/kbd_kern.h		2
-rw-r--r--	include/linux/libata.h			4
-rw-r--r--	include/linux/nfs_fs_sb.h		2
-rw-r--r--	include/linux/sunrpc/rpc_pipe_fs.h	2
-rw-r--r--	include/linux/sunrpc/xprt.h		2
-rw-r--r--	include/linux/tty.h			2
-rw-r--r--	include/linux/workqueue.h		44
-rw-r--r--	kernel/workqueue.c			51
-rw-r--r--	mm/slab.c				8
-rw-r--r--	net/core/link_watch.c			9
-rw-r--r--	net/sunrpc/cache.c			4
-rw-r--r--	net/sunrpc/rpc_pipe.c			3
-rw-r--r--	net/sunrpc/xprtsock.c			6
22 files changed, 96 insertions, 73 deletions
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index bbea88801d88..5306f2630905 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -307,7 +307,7 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
 
 static int check_interval = 5 * 60; /* 5 minutes */
 static void mcheck_timer(void *data);
-static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);
+static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer, NULL);
 
 static void mcheck_check_cpu(void *info)
 {
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 915a55a6cc14..0bb4b4dced76 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -937,12 +937,9 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
 	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
 		return;
 
-	PREPARE_WORK(&ap->port_task, fn, data);
+	PREPARE_DELAYED_WORK(&ap->port_task, fn, data);
 
-	if (!delay)
-		rc = queue_work(ata_wq, &ap->port_task);
-	else
-		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
+	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
 
 	/* rc == 0 means that another user is using port task */
 	WARN_ON(rc == 0);
@@ -5320,8 +5317,8 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host,
 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
 #endif
 
-	INIT_WORK(&ap->port_task, NULL, NULL);
-	INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
+	INIT_DELAYED_WORK(&ap->port_task, NULL, NULL);
+	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
 	INIT_LIST_HEAD(&ap->eh_done_q);
 	init_waitqueue_head(&ap->eh_wait_q);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 02b2b2787d9b..9f6b7cc74fd9 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -332,7 +332,7 @@ void ata_scsi_error(struct Scsi_Host *host)
 		if (ap->pflags & ATA_PFLAG_LOADING)
 			ap->pflags &= ~ATA_PFLAG_LOADING;
 		else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
-			queue_work(ata_aux_wq, &ap->hotplug_task);
+			queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
 
 		if (ap->pflags & ATA_PFLAG_RECOVERED)
 			ata_port_printk(ap, KERN_INFO, "EH complete\n");
diff --git a/drivers/char/random.c b/drivers/char/random.c
index eb6b13f4211a..f2ab61f3e8ae 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1424,7 +1424,7 @@ static unsigned int ip_cnt;
 
 static void rekey_seq_generator(void *private_);
 
-static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
+static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator, NULL);
 
 /*
  * Lock avoidance:
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index e90ea39c7c4b..7297acfe520c 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -3580,7 +3580,7 @@ static void initialize_tty_struct(struct tty_struct *tty)
 	tty->overrun_time = jiffies;
 	tty->buf.head = tty->buf.tail = NULL;
 	tty_buffer_init(tty);
-	INIT_WORK(&tty->buf.work, flush_to_ldisc, tty);
+	INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc, tty);
 	init_MUTEX(&tty->buf.pty_sem);
 	mutex_init(&tty->termios_mutex);
 	init_waitqueue_head(&tty->write_wait);
diff --git a/fs/aio.c b/fs/aio.c
index 94766599db00..11a1a7100ad6 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
 	INIT_LIST_HEAD(&ctx->active_reqs);
 	INIT_LIST_HEAD(&ctx->run_list);
-	INIT_WORK(&ctx->wq, aio_kick_handler, ctx);
+	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler, ctx);
 
 	if (aio_setup_ring(ctx) < 0)
 		goto out_freectx;
@@ -876,7 +876,7 @@ static void aio_kick_handler(void *data)
 	 * we're in a worker thread already, don't use queue_delayed_work,
 	 */
 	if (requeue)
-		queue_work(aio_wq, &ctx->wq);
+		queue_delayed_work(aio_wq, &ctx->wq, 0);
 }
 
 
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 5fea638743e4..6f0487d6f44a 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
 	INIT_LIST_HEAD(&clp->cl_state_owners);
 	INIT_LIST_HEAD(&clp->cl_unused);
 	spin_lock_init(&clp->cl_lock);
-	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
+	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
 	clp->cl_boot_time = CURRENT_TIME;
 	clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index ec1114b33d89..5ed798bc1cf7 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -21,7 +21,8 @@
 static void nfs_expire_automounts(void *list);
 
 LIST_HEAD(nfs_automount_list);
-static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list);
+static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts,
+						&nfs_automount_list);
 int nfs_mountpoint_expiry_timeout = 500 * HZ;
 
 static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 0d71c0041f13..9e350fd44d77 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -194,7 +194,7 @@ struct kioctx {
 
 	struct aio_ring_info	ring_info;
 
-	struct work_struct	wq;
+	struct delayed_work	wq;
 };
 
 /* prototypes */
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index efe0ee4cc80b..06c58c423fe1 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -158,7 +158,7 @@ static inline void con_schedule_flip(struct tty_struct *t)
 	if (t->buf.tail != NULL)
 		t->buf.tail->commit = t->buf.tail->used;
 	spin_unlock_irqrestore(&t->buf.lock, flags);
-	schedule_work(&t->buf.work);
+	schedule_delayed_work(&t->buf.work, 0);
 }
 
 #endif
diff --git a/include/linux/libata.h b/include/linux/libata.h
index abd2debebca2..5f04006e8dd2 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -568,8 +568,8 @@ struct ata_port {
 	struct ata_host		*host;
 	struct device 		*dev;
 
-	struct work_struct	port_task;
-	struct work_struct	hotplug_task;
+	struct delayed_work	port_task;
+	struct delayed_work	hotplug_task;
 	struct work_struct	scsi_rescan_task;
 
 	unsigned int		hsm_task_state;
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 7ccfc7ef0a83..95796e6924f1 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -51,7 +51,7 @@ struct nfs_client {
 
 	unsigned long		cl_lease_time;
 	unsigned long		cl_last_renewal;
-	struct work_struct	cl_renewd;
+	struct delayed_work	cl_renewd;
 
 	struct rpc_wait_queue	cl_rpcwaitq;
 
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index a2eb9b4a9de3..4a68125b6de6 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -30,7 +30,7 @@ struct rpc_inode {
 #define RPC_PIPE_WAIT_FOR_OPEN	1
 	int flags;
 	struct rpc_pipe_ops *ops;
-	struct work_struct queue_timeout;
+	struct delayed_work queue_timeout;
 };
 
 static inline struct rpc_inode *
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 60394fbc4c70..3e04c1512fc4 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -177,7 +177,7 @@ struct rpc_xprt {
 	unsigned long		connect_timeout,
 				bind_timeout,
 				reestablish_timeout;
-	struct work_struct	connect_worker;
+	struct delayed_work	connect_worker;
 	unsigned short		port;
 
 	/*
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 44091c0db0b4..c1f716446161 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -53,7 +53,7 @@ struct tty_buffer {
 };
 
 struct tty_bufhead {
-	struct work_struct		work;
+	struct delayed_work		work;
 	struct semaphore pty_sem;
 	spinlock_t lock;
 	struct tty_buffer *head;	/* Queue head */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 9bca3539a1e5..9faaccae570e 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -17,6 +17,10 @@ struct work_struct {
 	void (*func)(void *);
 	void *data;
 	void *wq_data;
+};
+
+struct delayed_work {
+	struct work_struct work;
 	struct timer_list timer;
 };
 
@@ -28,32 +32,48 @@ struct execute_work {
 	.entry	= { &(n).entry, &(n).entry },		\
 	.func = (f),					\
 	.data = (d),					\
+	}
+
+#define __DELAYED_WORK_INITIALIZER(n, f, d) {		\
+	.work = __WORK_INITIALIZER((n).work, (f), (d)),	\
 	.timer = TIMER_INITIALIZER(NULL, 0, 0),		\
 	}
 
 #define DECLARE_WORK(n, f, d)				\
 	struct work_struct n = __WORK_INITIALIZER(n, f, d)
 
+#define DECLARE_DELAYED_WORK(n, f, d)			\
+	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, d)
+
 /*
- * initialize a work-struct's func and data pointers:
+ * initialize a work item's function and data pointers
  */
 #define PREPARE_WORK(_work, _func, _data)		\
 	do {						\
-		(_work)->func = _func;			\
-		(_work)->data = _data;			\
+		(_work)->func = (_func);		\
+		(_work)->data = (_data);		\
 	} while (0)
 
+#define PREPARE_DELAYED_WORK(_work, _func, _data)	\
+	PREPARE_WORK(&(_work)->work, (_func), (_data))
+
 /*
- * initialize all of a work-struct:
+ * initialize all of a work item in one go
  */
 #define INIT_WORK(_work, _func, _data)			\
 	do {						\
 		INIT_LIST_HEAD(&(_work)->entry);	\
 		(_work)->pending = 0;			\
 		PREPARE_WORK((_work), (_func), (_data));\
+	} while (0)
+
+#define INIT_DELAYED_WORK(_work, _func, _data)		\
+	do {						\
+		INIT_WORK(&(_work)->work, (_func), (_data));	\
 		init_timer(&(_work)->timer);		\
 	} while (0)
 
+
 extern struct workqueue_struct *__create_workqueue(const char *name,
 						    int singlethread);
 #define create_workqueue(name) __create_workqueue((name), 0)
@@ -62,24 +82,24 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
 extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
+extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-	struct work_struct *work, unsigned long delay);
+	struct delayed_work *work, unsigned long delay);
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
+extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
 
-extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
 extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
 extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);
 
 extern void init_workqueues(void);
-void cancel_rearming_delayed_work(struct work_struct *work);
+void cancel_rearming_delayed_work(struct delayed_work *work);
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
-				       struct work_struct *);
+				       struct delayed_work *);
 int execute_in_process_context(void (*fn)(void *), void *,
 			       struct execute_work *);
 
@@ -88,13 +108,13 @@ int execute_in_process_context(void (*fn)(void *), void *,
  * function may still be running on return from cancel_delayed_work(). Run
  * flush_scheduled_work() to wait on it.
  */
-static inline int cancel_delayed_work(struct work_struct *work)
+static inline int cancel_delayed_work(struct delayed_work *work)
 {
 	int ret;
 
 	ret = del_timer_sync(&work->timer);
 	if (ret)
-		clear_bit(0, &work->pending);
+		clear_bit(0, &work->work.pending);
 	return ret;
 }
 
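With the header in place, the conversion pattern for users is mechanical. A hedged caller-side sketch follows; my_dev, my_poll and my_dev_* are hypothetical names for illustration, not code from this patch:

/* Hypothetical driver conversion, illustrating the new API. */
struct my_dev {
	struct delayed_work poll_work;	/* was: struct work_struct */
};

static void my_poll(void *data);	/* handler signature is unchanged */

static void my_dev_init(struct my_dev *dev)
{
	/* was: INIT_WORK(&dev->poll_work, my_poll, dev); */
	INIT_DELAYED_WORK(&dev->poll_work, my_poll, dev);
}

static void my_dev_kick(struct my_dev *dev)
{
	/* was: schedule_work(&dev->poll_work); for the no-delay case */
	schedule_delayed_work(&dev->poll_work, 0);
}

Work items that are never delayed keep using work_struct with INIT_WORK(), queue_work() and schedule_work() unchanged, and simply get smaller.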
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 17c2f03d2c27..44fc54b7decf 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -122,29 +122,33 @@ EXPORT_SYMBOL_GPL(queue_work);
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
-	struct work_struct *work = (struct work_struct *)__data;
-	struct workqueue_struct *wq = work->wq_data;
+	struct delayed_work *dwork = (struct delayed_work *)__data;
+	struct workqueue_struct *wq = dwork->work.wq_data;
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
 		cpu = singlethread_cpu;
 
-	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
 }
 
 /**
  * queue_delayed_work - queue work on a workqueue after delay
  * @wq: workqueue to use
- * @work: work to queue
+ * @work: delayable work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
 	int ret = 0;
-	struct timer_list *timer = &work->timer;
+	struct timer_list *timer = &dwork->timer;
+	struct work_struct *work = &dwork->work;
+
+	if (delay == 0)
+		return queue_work(wq, work);
 
 	if (!test_and_set_bit(0, &work->pending)) {
 		BUG_ON(timer_pending(timer));
@@ -153,7 +157,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
 		/* This stores wq for the moment, for the timer_fn */
 		work->wq_data = wq;
 		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
+		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
 		add_timer(timer);
 		ret = 1;
@@ -172,10 +176,11 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
 	int ret = 0;
-	struct timer_list *timer = &work->timer;
+	struct timer_list *timer = &dwork->timer;
+	struct work_struct *work = &dwork->work;
 
 	if (!test_and_set_bit(0, &work->pending)) {
 		BUG_ON(timer_pending(timer));
@@ -184,7 +189,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 		/* This stores wq for the moment, for the timer_fn */
 		work->wq_data = wq;
 		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
+		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
 		add_timer_on(timer, cpu);
 		ret = 1;
@@ -468,31 +473,31 @@ EXPORT_SYMBOL(schedule_work);
 
 /**
  * schedule_delayed_work - put work task in global workqueue after delay
- * @work: job to be done
- * @delay: number of jiffies to wait
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue.
  */
-int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work(keventd_wq, work, delay);
+	return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
- * @work: job to be done
+ * @dwork: job to be done
  * @delay: number of jiffies to wait
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue on the specified CPU.
  */
 int schedule_delayed_work_on(int cpu,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
+	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
@@ -539,12 +544,12 @@ EXPORT_SYMBOL(flush_scheduled_work);
  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
  * work whose handler rearms the delayed work.
  * @wq:   the controlling workqueue structure
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-				       struct work_struct *work)
+				       struct delayed_work *dwork)
 {
-	while (!cancel_delayed_work(work))
+	while (!cancel_delayed_work(dwork))
 		flush_workqueue(wq);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
@@ -552,11 +557,11 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 /**
  * cancel_rearming_delayed_work - reliably kill off a delayed keventd
  * work whose handler rearms the delayed work.
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
-void cancel_rearming_delayed_work(struct work_struct *work)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
-	cancel_rearming_delayed_workqueue(keventd_wq, work);
+	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
 
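Note the behavioural nicety added to queue_delayed_work() above: a zero delay now short-circuits into a plain queue_work() call, so callers no longer need to pick the primitive themselves. That is what lets the libata hunk earlier collapse its if (!delay) branch. Sketched with hypothetical my_wq, my_work and my_dwork variables:

/* Old caller-side pattern: branch on the delay. */
if (!delay)
	rc = queue_work(my_wq, &my_work);
else
	rc = queue_delayed_work(my_wq, &my_dwork, delay);

/* New pattern: one call covers both cases. */
rc = queue_delayed_work(my_wq, &my_dwork, delay);	/* 0 == queue now */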
diff --git a/mm/slab.c b/mm/slab.c
index 3c4a7e34eddc..a65bc5e992c3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -753,7 +753,7 @@ int slab_is_available(void)
 	return g_cpucache_up == FULL;
 }
 
-static DEFINE_PER_CPU(struct work_struct, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -916,16 +916,16 @@ static void next_reap_node(void)
  */
 static void __devinit start_cpu_timer(int cpu)
 {
-	struct work_struct *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
 	 * init_workqueues() has already run, so keventd will be setup
 	 * at that time.
 	 */
-	if (keventd_up() && reap_work->func == NULL) {
+	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_WORK(reap_work, cache_reap, NULL);
+		INIT_DELAYED_WORK(reap_work, cache_reap, NULL);
 		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
 	}
 }
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4b36114744c5..f2ed09e25dfd 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -35,7 +35,7 @@ static unsigned long linkwatch_flags;
 static unsigned long linkwatch_nextevent;
 
 static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event, NULL);
 
 static LIST_HEAD(lweventlist);
 static DEFINE_SPINLOCK(lweventlist_lock);
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev)
 			unsigned long delay = linkwatch_nextevent - jiffies;
 
 			/* If we wrap around we'll delay it by at most HZ. */
-			if (!delay || delay > HZ)
-				schedule_work(&linkwatch_work);
-			else
-				schedule_delayed_work(&linkwatch_work, delay);
+			if (delay > HZ)
+				delay = 0;
+			schedule_delayed_work(&linkwatch_work, delay);
 		}
 	}
 }
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 00cb388ece03..d5725cb1491e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -285,7 +285,7 @@ static struct file_operations content_file_operations;
 static struct file_operations cache_flush_operations;
 
 static void do_cache_clean(void *data);
-static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
+static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean, NULL);
 
 void cache_register(struct cache_detail *cd)
 {
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd)
 	spin_unlock(&cache_list_lock);
 
 	/* start the cleaning process */
-	schedule_work(&cache_cleaner);
+	schedule_delayed_work(&cache_cleaner, 0);
 }
 
 int cache_unregister(struct cache_detail *cd)
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9a0b41a97f90..97be3f7fed44 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -837,7 +837,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
 		INIT_LIST_HEAD(&rpci->pipe);
 		rpci->pipelen = 0;
 		init_waitqueue_head(&rpci->waitq);
-		INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+		INIT_DELAYED_WORK(&rpci->queue_timeout,
+				  rpc_timeout_upcall_queue, rpci);
 		rpci->ops = NULL;
 	}
 }
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 757fc91ef25d..3c7532cd009e 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1262,7 +1262,7 @@ static void xs_connect(struct rpc_task *task)
 		xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
 	} else {
 		dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
-		schedule_work(&xprt->connect_worker);
+		schedule_delayed_work(&xprt->connect_worker, 0);
 
 		/* flush_scheduled_work can sleep... */
 		if (!RPC_IS_ASYNC(task))
@@ -1375,7 +1375,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	/* XXX: header size can vary due to auth type, IPv6, etc. */
 	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
-	INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_UDP_CONN_TO;
 	xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1420,7 +1420,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
 	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
 
-	INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_TCP_CONN_TO;
 	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;