-rw-r--r--  arch/x86_64/kernel/mce.c            |  6
-rw-r--r--  arch/x86_64/kernel/smpboot.c        | 12
-rw-r--r--  arch/x86_64/kernel/time.c           |  4
-rw-r--r--  block/as-iosched.c                  |  7
-rw-r--r--  block/cfq-iosched.c                 |  8
-rw-r--r--  block/ll_rw_blk.c                   |  8
-rw-r--r--  crypto/cryptomgr.c                  |  7
-rw-r--r--  drivers/acpi/osl.c                  | 25
-rw-r--r--  drivers/ata/libata-core.c           | 20
-rw-r--r--  drivers/ata/libata-scsi.c           | 14
-rw-r--r--  drivers/ata/libata.h                |  4
-rw-r--r--  drivers/block/floppy.c              |  6
-rw-r--r--  drivers/char/random.c               |  6
-rw-r--r--  drivers/char/sysrq.c                |  4
-rw-r--r--  drivers/char/tty_io.c               | 31
-rw-r--r--  drivers/char/vt.c                   |  6
-rw-r--r--  drivers/cpufreq/cpufreq.c           | 10
-rw-r--r--  drivers/input/keyboard/atkbd.c      |  6
-rw-r--r--  drivers/input/serio/libps2.c        |  6
-rw-r--r--  drivers/net/e1000/e1000_main.c      | 10
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c       |  2
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.h       |  2
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c  |  8
-rw-r--r--  drivers/scsi/scsi_scan.c            |  7
-rw-r--r--  drivers/scsi/scsi_sysfs.c           | 10
-rw-r--r--  fs/aio.c                            | 14
-rw-r--r--  fs/bio.c                            |  6
-rw-r--r--  fs/file.c                           |  6
-rw-r--r--  fs/nfs/client.c                     |  2
-rw-r--r--  fs/nfs/namespace.c                  |  9
-rw-r--r--  fs/nfs/nfs4_fs.h                    |  2
-rw-r--r--  fs/nfs/nfs4renewd.c                 |  5
-rw-r--r--  include/linux/libata.h              |  3
-rw-r--r--  include/linux/workqueue.h           | 99
-rw-r--r--  include/net/inet_timewait_sock.h    |  2
-rw-r--r--  ipc/util.c                          |  7
-rw-r--r--  kernel/kmod.c                       | 16
-rw-r--r--  kernel/kthread.c                    | 13
-rw-r--r--  kernel/power/poweroff.c             |  4
-rw-r--r--  kernel/sys.c                        |  4
-rw-r--r--  kernel/workqueue.c                  | 19
-rw-r--r--  mm/slab.c                           |  6
-rw-r--r--  net/core/link_watch.c               |  6
-rw-r--r--  net/ipv4/inet_timewait_sock.c       |  5
-rw-r--r--  net/ipv4/tcp_minisocks.c            |  3
-rw-r--r--  net/sunrpc/cache.c                  |  6
-rw-r--r--  net/sunrpc/rpc_pipe.c               |  7
-rw-r--r--  net/sunrpc/sched.c                  |  8
-rw-r--r--  net/sunrpc/xprt.c                   |  7
-rw-r--r--  net/sunrpc/xprtsock.c               | 18
-rw-r--r--  security/keys/key.c                 |  6
51 files changed, 293 insertions, 219 deletions
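
Every hunk in this patch applies the same work_struct API rework: a work function now receives the work_struct pointer itself rather than an opaque void *data, and it recovers its private state with container_of() from the structure the work item is embedded in, since INIT_WORK()/DECLARE_WORK()/PREPARE_WORK() no longer carry a data argument. A minimal sketch of the pattern, using a hypothetical driver structure (struct foo and its names are illustrative, not from this commit):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo {
	struct work_struct work;	/* work item embedded in its context */
	int value;			/* state formerly smuggled through the void * */
};

/* New-style handler: the work_struct itself is the argument... */
static void foo_handler(struct work_struct *work)
{
	/* ...and container_of() replaces the old void * cast */
	struct foo *f = container_of(work, struct foo, work);

	printk(KERN_INFO "foo: value %d\n", f->value);
}

static void foo_kick(struct foo *f)
{
	INIT_WORK(&f->work, foo_handler);	/* no third argument any more */
	schedule_work(&f->work);
}

The same mechanical rewrite repeats in each file below: change the handler signature, drop the data argument from the init macros, and replace the void * cast with container_of().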
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 5306f2630905..c7587fc39015 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -306,8 +306,8 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
  */
 
 static int check_interval = 5 * 60; /* 5 minutes */
-static void mcheck_timer(void *data);
-static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer, NULL);
+static void mcheck_timer(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
 
 static void mcheck_check_cpu(void *info)
 {
@@ -315,7 +315,7 @@ static void mcheck_check_cpu(void *info)
 	do_machine_check(NULL, 0);
 }
 
-static void mcheck_timer(void *data)
+static void mcheck_timer(struct work_struct *work)
 {
 	on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
 	schedule_delayed_work(&mcheck_work, check_interval * HZ);
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 62c2e747af58..9800147c4c68 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -753,14 +753,16 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
 }
 
 struct create_idle {
+	struct work_struct work;
 	struct task_struct *idle;
 	struct completion done;
 	int cpu;
 };
 
-void do_fork_idle(void *_c_idle)
+void do_fork_idle(struct work_struct *work)
 {
-	struct create_idle *c_idle = _c_idle;
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
 
 	c_idle->idle = fork_idle(c_idle->cpu);
 	complete(&c_idle->done);
@@ -775,10 +777,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 	int timeout;
 	unsigned long start_rip;
 	struct create_idle c_idle = {
+		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
 		.cpu = cpu,
 		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 	};
-	DECLARE_WORK(work, do_fork_idle, &c_idle);
 
 	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
 	if (!cpu_gdt_descr[cpu].address &&
@@ -825,9 +827,9 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 	 * thread.
 	 */
 	if (!keventd_up() || current_is_keventd())
-		work.func(work.data);
+		c_idle.work.func(&c_idle.work);
 	else {
-		schedule_work(&work);
+		schedule_work(&c_idle.work);
 		wait_for_completion(&c_idle.done);
 	}
 
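
The smpboot conversion above is the on-stack variant of the same idea: since the only way back to the context is container_of(), an on-stack work item must be embedded in the structure holding its data and set up with __WORK_INITIALIZER(), and the stack frame has to outlive the handler. A sketch under those assumptions (names are illustrative, not from the tree):

#include <linux/completion.h>
#include <linux/workqueue.h>

struct stack_ctx {
	struct work_struct work;
	struct completion done;
	int arg;
};

static void stack_handler(struct work_struct *work)
{
	struct stack_ctx *ctx = container_of(work, struct stack_ctx, work);

	/* ... consume ctx->arg ... */
	complete(&ctx->done);
}

static void run_and_wait(int arg)
{
	struct stack_ctx ctx = {
		.work = __WORK_INITIALIZER(ctx.work, stack_handler),
		.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
		.arg  = arg,
	};

	schedule_work(&ctx.work);
	wait_for_completion(&ctx.done);	/* keeps ctx valid until the handler finishes */
}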
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index e3ef544d2cfb..9f05bc9b2dad 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -563,7 +563,7 @@ static unsigned int cpufreq_delayed_issched = 0;
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
 
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *v)
 {
 	unsigned int cpu;
 	for_each_online_cpu(cpu) {
@@ -639,7 +639,7 @@ static struct notifier_block time_cpufreq_notifier_block = {
 
 static int __init cpufreq_tsc(void)
 {
-	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
 	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
 				       CPUFREQ_TRANSITION_NOTIFIER))
 		cpufreq_init = 1;
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 50b95e4c1425..f371c9359999 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1274,9 +1274,10 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
  *
  * FIXME! dispatch queue is not a queue at all!
  */
-static void as_work_handler(void *data)
+static void as_work_handler(struct work_struct *work)
 {
-	struct request_queue *q = data;
+	struct as_data *ad = container_of(work, struct as_data, antic_work);
+	struct request_queue *q = ad->q;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
@@ -1332,7 +1333,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
 	ad->antic_timer.function = as_antic_timeout;
 	ad->antic_timer.data = (unsigned long)q;
 	init_timer(&ad->antic_timer);
-	INIT_WORK(&ad->antic_work, as_work_handler, q);
+	INIT_WORK(&ad->antic_work, as_work_handler);
 
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1d9c3c70a9a0..6cec3a1dccb8 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1841,9 +1841,11 @@ queue_fail:
 	return 1;
 }
 
-static void cfq_kick_queue(void *data)
+static void cfq_kick_queue(struct work_struct *work)
 {
-	request_queue_t *q = data;
+	struct cfq_data *cfqd =
+		container_of(work, struct cfq_data, unplug_work);
+	request_queue_t *q = cfqd->queue;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
@@ -1987,7 +1989,7 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 	cfqd->idle_class_timer.function = cfq_idle_class_timer;
 	cfqd->idle_class_timer.data = (unsigned long) cfqd;
 
-	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
+	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 9eaee6640535..eb4cf6df7374 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -34,7 +34,7 @@
  */
 #include <scsi/scsi_cmnd.h>
 
-static void blk_unplug_work(void *data);
+static void blk_unplug_work(struct work_struct *work);
 static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
@@ -227,7 +227,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 	if (q->unplug_delay == 0)
 		q->unplug_delay = 1;
 
-	INIT_WORK(&q->unplug_work, blk_unplug_work, q);
+	INIT_WORK(&q->unplug_work, blk_unplug_work);
 
 	q->unplug_timer.function = blk_unplug_timeout;
 	q->unplug_timer.data = (unsigned long)q;
@@ -1631,9 +1631,9 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 	}
 }
 
-static void blk_unplug_work(void *data)
+static void blk_unplug_work(struct work_struct *work)
 {
-	request_queue_t *q = data;
+	request_queue_t *q = container_of(work, request_queue_t, unplug_work);
 
 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
 				q->rq.count[READ] + q->rq.count[WRITE]);
diff --git a/crypto/cryptomgr.c b/crypto/cryptomgr.c
index 9b5b15601068..2ebffb84f1d9 100644
--- a/crypto/cryptomgr.c
+++ b/crypto/cryptomgr.c
@@ -40,9 +40,10 @@ struct cryptomgr_param {
 	char template[CRYPTO_MAX_ALG_NAME];
 };
 
-static void cryptomgr_probe(void *data)
+static void cryptomgr_probe(struct work_struct *work)
 {
-	struct cryptomgr_param *param = data;
+	struct cryptomgr_param *param =
+		container_of(work, struct cryptomgr_param, work);
 	struct crypto_template *tmpl;
 	struct crypto_instance *inst;
 	int err;
@@ -112,7 +113,7 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
 	param->larval.type = larval->alg.cra_flags;
 	param->larval.mask = larval->mask;
 
-	INIT_WORK(&param->work, cryptomgr_probe, param);
+	INIT_WORK(&param->work, cryptomgr_probe);
 	schedule_work(&param->work);
 
 	return NOTIFY_STOP;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 068fe4f100b0..02b30ae6a68e 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -50,6 +50,7 @@ ACPI_MODULE_NAME("osl")
 struct acpi_os_dpc {
 	acpi_osd_exec_callback function;
 	void *context;
+	struct work_struct work;
 };
 
 #ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -564,12 +565,9 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
 	acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
 }
 
-static void acpi_os_execute_deferred(void *context)
+static void acpi_os_execute_deferred(struct work_struct *work)
 {
-	struct acpi_os_dpc *dpc = NULL;
-
-
-	dpc = (struct acpi_os_dpc *)context;
+	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
 	if (!dpc) {
 		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
 		return;
@@ -602,7 +600,6 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 {
 	acpi_status status = AE_OK;
 	struct acpi_os_dpc *dpc;
-	struct work_struct *task;
 
 	ACPI_FUNCTION_TRACE("os_queue_for_execution");
 
@@ -615,28 +612,22 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 
 	/*
 	 * Allocate/initialize DPC structure.  Note that this memory will be
-	 * freed by the callee.  The kernel handles the tq_struct list in a
+	 * freed by the callee.  The kernel handles the work_struct list in a
 	 * way that allows us to also free its memory inside the callee.
 	 * Because we may want to schedule several tasks with different
 	 * parameters we can't use the approach some kernel code uses of
-	 * having a static tq_struct.
-	 * We can save time and code by allocating the DPC and tq_structs
-	 * from the same memory.
+	 * having a static work_struct.
 	 */
 
-	dpc =
-	    kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
-		    GFP_ATOMIC);
+	dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
 	if (!dpc)
 		return_ACPI_STATUS(AE_NO_MEMORY);
 
 	dpc->function = function;
 	dpc->context = context;
 
-	task = (void *)(dpc + 1);
-	INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
-
-	if (!queue_work(kacpid_wq, task)) {
+	INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+	if (!queue_work(kacpid_wq, &dpc->work)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
 				  "Call to queue_work() failed.\n"));
 		kfree(dpc);
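
The ACPI hunk shows the heap-allocated flavour of the conversion: rather than tacking an anonymous work_struct onto the end of the same kmalloc() and passing the DPC back as the data pointer, the work item becomes a named member of the container, and the handler frees the whole thing once it is done. Roughly, with hypothetical names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct dpc {
	void (*function)(void *);
	void *context;
	struct work_struct work;	/* named member, not trailing memory */
};

static void dpc_exec(struct work_struct *work)
{
	struct dpc *d = container_of(work, struct dpc, work);

	d->function(d->context);
	kfree(d);			/* one allocation covers DPC and work item */
}

static int dpc_queue(void (*fn)(void *), void *ctx)
{
	struct dpc *d = kmalloc(sizeof(*d), GFP_ATOMIC);

	if (!d)
		return -ENOMEM;
	d->function = fn;
	d->context = ctx;
	INIT_WORK(&d->work, dpc_exec);
	if (!schedule_work(&d->work)) {	/* work item was already pending */
		kfree(d);
		return -EBUSY;
	}
	return 0;
}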
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 0bb4b4dced76..b5f2da6ac80e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -914,7 +914,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
  *	ata_port_queue_task - Queue port_task
  *	@ap: The ata_port to queue port_task for
  *	@fn: workqueue function to be scheduled
- *	@data: data value to pass to workqueue function
+ *	@data: data for @fn to use
  *	@delay: delay time for workqueue function
  *
  *	Schedule @fn(@data) for execution after @delay jiffies using
@@ -929,7 +929,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
  *	LOCKING:
  *	Inherited from caller.
  */
-void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
+void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
 			 unsigned long delay)
 {
 	int rc;
@@ -937,7 +937,8 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
 	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
 		return;
 
-	PREPARE_DELAYED_WORK(&ap->port_task, fn, data);
+	PREPARE_DELAYED_WORK(&ap->port_task, fn);
+	ap->port_task_data = data;
 
 	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
 
@@ -4292,10 +4293,11 @@ fsm_start:
 	return poll_next;
 }
 
-static void ata_pio_task(void *_data)
+static void ata_pio_task(struct work_struct *work)
 {
-	struct ata_queued_cmd *qc = _data;
-	struct ata_port *ap = qc->ap;
+	struct ata_port *ap =
+		container_of(work, struct ata_port, port_task.work);
+	struct ata_queued_cmd *qc = ap->port_task_data;
 	u8 status;
 	int poll_next;
 
@@ -5317,9 +5319,9 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host,
 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
 #endif
 
-	INIT_DELAYED_WORK(&ap->port_task, NULL, NULL);
-	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
-	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
+	INIT_DELAYED_WORK(&ap->port_task, NULL);
+	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
+	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
 	INIT_LIST_HEAD(&ap->eh_done_q);
 	init_waitqueue_head(&ap->eh_wait_q);
 
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 5c1fc467fc7f..c872b324dbd3 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3079,7 +3079,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
 
 /**
  *	ata_scsi_hotplug - SCSI part of hotplug
- *	@data: Pointer to ATA port to perform SCSI hotplug on
+ *	@work: Pointer to ATA port to perform SCSI hotplug on
  *
  *	Perform SCSI part of hotplug.  It's executed from a separate
  *	workqueue after EH completes.  This is necessary because SCSI
@@ -3089,9 +3089,10 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
  *	LOCKING:
  *	Kernel thread context (may sleep).
  */
-void ata_scsi_hotplug(void *data)
+void ata_scsi_hotplug(struct work_struct *work)
 {
-	struct ata_port *ap = data;
+	struct ata_port *ap =
+		container_of(work, struct ata_port, hotplug_task.work);
 	int i;
 
 	if (ap->pflags & ATA_PFLAG_UNLOADING) {
@@ -3190,7 +3191,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
 
 /**
  *	ata_scsi_dev_rescan - initiate scsi_rescan_device()
- *	@data: Pointer to ATA port to perform scsi_rescan_device()
+ *	@work: Pointer to ATA port to perform scsi_rescan_device()
  *
  *	After ATA pass thru (SAT) commands are executed successfully,
  *	libata need to propagate the changes to SCSI layer.  This
@@ -3200,9 +3201,10 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
  *	LOCKING:
  *	Kernel thread context (may sleep).
  */
-void ata_scsi_dev_rescan(void *data)
+void ata_scsi_dev_rescan(struct work_struct *work)
 {
-	struct ata_port *ap = data;
+	struct ata_port *ap =
+		container_of(work, struct ata_port, scsi_rescan_task);
 	struct ata_device *dev;
 	unsigned int i;
 
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 0ed263be652a..7e0f3aff873d 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -81,7 +81,7 @@ extern struct scsi_transport_template ata_scsi_transport_template;
 
 extern void ata_scsi_scan_host(struct ata_port *ap);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_hotplug(void *data);
+extern void ata_scsi_hotplug(struct work_struct *work);
 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
 				       unsigned int buflen);
 
@@ -111,7 +111,7 @@ extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
 			       unsigned int (*actor) (struct ata_scsi_args *args,
 						      u8 *rbuf, unsigned int buflen));
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
-extern void ata_scsi_dev_rescan(void *data);
+extern void ata_scsi_dev_rescan(struct work_struct *work);
 extern int ata_bus_probe(struct ata_port *ap);
 
 /* libata-eh.c */
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index aa1eb4466f9d..3f1b38276e96 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -992,11 +992,11 @@ static void empty(void)
 {
 }
 
-static DECLARE_WORK(floppy_work, NULL, NULL);
+static DECLARE_WORK(floppy_work, NULL);
 
 static void schedule_bh(void (*handler) (void))
 {
-	PREPARE_WORK(&floppy_work, (work_func_t)handler, NULL);
+	PREPARE_WORK(&floppy_work, (work_func_t)handler);
 	schedule_work(&floppy_work);
 }
 
@@ -1008,7 +1008,7 @@ static void cancel_activity(void)
 
 	spin_lock_irqsave(&floppy_lock, flags);
 	do_floppy = NULL;
-	PREPARE_WORK(&floppy_work, (work_func_t)empty, NULL);
+	PREPARE_WORK(&floppy_work, (work_func_t)empty);
 	del_timer(&fd_timer);
 	spin_unlock_irqrestore(&floppy_lock, flags);
 }
diff --git a/drivers/char/random.c b/drivers/char/random.c
index f2ab61f3e8ae..fa764688cad1 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1422,9 +1422,9 @@ static struct keydata {
 
 static unsigned int ip_cnt;
 
-static void rekey_seq_generator(void *private_);
+static void rekey_seq_generator(struct work_struct *work);
 
-static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator, NULL);
+static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
 
 /*
  * Lock avoidance:
@@ -1438,7 +1438,7 @@ static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator, NULL);
  * happen, and even if that happens only a not perfectly compliant
  * ISN is generated, nothing fatal.
  */
-static void rekey_seq_generator(void *private_)
+static void rekey_seq_generator(struct work_struct *work)
 {
 	struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
 
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 5f49280779fb..c64f5bcff947 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -219,13 +219,13 @@ static struct sysrq_key_op sysrq_term_op = {
 	.enable_mask	= SYSRQ_ENABLE_SIGNAL,
 };
 
-static void moom_callback(void *ignored)
+static void moom_callback(struct work_struct *ignored)
 {
 	out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL],
 		      GFP_KERNEL, 0);
 }
 
-static DECLARE_WORK(moom_work, moom_callback, NULL);
+static DECLARE_WORK(moom_work, moom_callback);
 
 static void sysrq_handle_moom(int key, struct tty_struct *tty)
 {
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 7297acfe520c..83e9e7d9b58c 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1254,7 +1254,7 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
 
 /**
  *	do_tty_hangup - actual handler for hangup events
- *	@data: tty device
+ *	@work: tty device
  *
  *	This can be called by the "eventd" kernel thread.  That is process
  *	synchronous but doesn't hold any locks, so we need to make sure we
@@ -1274,9 +1274,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
  *	tasklist_lock to walk task list for hangup event
  *
  */
-static void do_tty_hangup(void *data)
+static void do_tty_hangup(struct work_struct *work)
 {
-	struct tty_struct *tty = (struct tty_struct *) data;
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, hangup_work);
 	struct file * cons_filp = NULL;
 	struct file *filp, *f = NULL;
 	struct task_struct *p;
@@ -1433,7 +1434,7 @@ void tty_vhangup(struct tty_struct * tty)
 
 	printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
 #endif
-	do_tty_hangup((void *) tty);
+	do_tty_hangup(&tty->hangup_work);
 }
 EXPORT_SYMBOL(tty_vhangup);
 
@@ -3304,12 +3305,13 @@ int tty_ioctl(struct inode * inode, struct file * file,
  * Nasty bug: do_SAK is being called in interrupt context.  This can
  * deadlock.  We punt it up to process context.  AKPM - 16Mar2001
  */
-static void __do_SAK(void *arg)
+static void __do_SAK(struct work_struct *work)
 {
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, SAK_work);
 #ifdef TTY_SOFT_SAK
 	tty_hangup(tty);
 #else
-	struct tty_struct *tty = arg;
 	struct task_struct *g, *p;
 	int session;
 	int i;
@@ -3388,7 +3390,7 @@ void do_SAK(struct tty_struct *tty)
 {
 	if (!tty)
 		return;
-	PREPARE_WORK(&tty->SAK_work, __do_SAK, tty);
+	PREPARE_WORK(&tty->SAK_work, __do_SAK);
 	schedule_work(&tty->SAK_work);
 }
 
@@ -3396,7 +3398,7 @@ EXPORT_SYMBOL(do_SAK);
 
 /**
  *	flush_to_ldisc
- *	@private_: tty structure passed from work queue.
+ *	@work: tty structure passed from work queue.
  *
  *	This routine is called out of the software interrupt to flush data
  *	from the buffer chain to the line discipline.
@@ -3406,9 +3408,10 @@ EXPORT_SYMBOL(do_SAK);
  *	receive_buf method is single threaded for each tty instance.
  */
 
-static void flush_to_ldisc(void *private_)
+static void flush_to_ldisc(struct work_struct *work)
 {
-	struct tty_struct *tty = (struct tty_struct *) private_;
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, buf.work.work);
 	unsigned long flags;
 	struct tty_ldisc *disc;
 	struct tty_buffer *tbuf, *head;
@@ -3553,7 +3556,7 @@ void tty_flip_buffer_push(struct tty_struct *tty)
 	spin_unlock_irqrestore(&tty->buf.lock, flags);
 
 	if (tty->low_latency)
-		flush_to_ldisc((void *) tty);
+		flush_to_ldisc(&tty->buf.work.work);
 	else
 		schedule_delayed_work(&tty->buf.work, 1);
 }
@@ -3580,17 +3583,17 @@ static void initialize_tty_struct(struct tty_struct *tty)
 	tty->overrun_time = jiffies;
 	tty->buf.head = tty->buf.tail = NULL;
 	tty_buffer_init(tty);
-	INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc, tty);
+	INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
 	init_MUTEX(&tty->buf.pty_sem);
 	mutex_init(&tty->termios_mutex);
 	init_waitqueue_head(&tty->write_wait);
 	init_waitqueue_head(&tty->read_wait);
-	INIT_WORK(&tty->hangup_work, do_tty_hangup, tty);
+	INIT_WORK(&tty->hangup_work, do_tty_hangup);
 	mutex_init(&tty->atomic_read_lock);
 	mutex_init(&tty->atomic_write_lock);
 	spin_lock_init(&tty->read_lock);
 	INIT_LIST_HEAD(&tty->tty_files);
-	INIT_WORK(&tty->SAK_work, NULL, NULL);
+	INIT_WORK(&tty->SAK_work, NULL);
 }
 
 /*
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 8e4413f6fbaf..8ee04adc37f0 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -155,7 +155,7 @@ static void con_flush_chars(struct tty_struct *tty);
 static void set_vesa_blanking(char __user *p);
 static void set_cursor(struct vc_data *vc);
 static void hide_cursor(struct vc_data *vc);
-static void console_callback(void *ignored);
+static void console_callback(struct work_struct *ignored);
 static void blank_screen_t(unsigned long dummy);
 static void set_palette(struct vc_data *vc);
 
@@ -174,7 +174,7 @@ static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
 static int blankinterval = 10*60*HZ;
 static int vesa_off_interval;
 
-static DECLARE_WORK(console_work, console_callback, NULL);
+static DECLARE_WORK(console_work, console_callback);
 
 /*
  * fg_console is the current virtual console,
@@ -2154,7 +2154,7 @@ out:
  * with other console code and prevention of re-entrancy is
  * ensured with console_sem.
  */
-static void console_callback(void *ignored)
+static void console_callback(struct work_struct *ignored)
 {
 	acquire_console_sem();
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index dd0c2623e27b..7a7c6e6dfe4f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
 
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
-static void handle_update(void *data);
+static void handle_update(struct work_struct *work);
 
 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -665,7 +665,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	mutex_init(&policy->lock);
 	mutex_lock(&policy->lock);
 	init_completion(&policy->kobj_unregister);
-	INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
+	INIT_WORK(&policy->update, handle_update);
 
 	/* call driver. From then on the cpufreq must be able
 	 * to accept all calls to ->verify and ->setpolicy for this CPU
@@ -895,9 +895,11 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 }
 
 
-static void handle_update(void *data)
+static void handle_update(struct work_struct *work)
 {
-	unsigned int cpu = (unsigned int)(long)data;
+	struct cpufreq_policy *policy =
+		container_of(work, struct cpufreq_policy, update);
+	unsigned int cpu = policy->cpu;
 	dprintk("handle_update for cpu %u called\n", cpu);
 	cpufreq_update_policy(cpu);
 }
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index cbb93669d1ce..8451b29a3db5 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -567,9 +567,9 @@ static int atkbd_set_leds(struct atkbd *atkbd)
  * interrupt context.
  */
 
-static void atkbd_event_work(void *data)
+static void atkbd_event_work(struct work_struct *work)
 {
-	struct atkbd *atkbd = data;
+	struct atkbd *atkbd = container_of(work, struct atkbd, event_work);
 
 	mutex_lock(&atkbd->event_mutex);
 
@@ -943,7 +943,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
 
 	atkbd->dev = dev;
 	ps2_init(&atkbd->ps2dev, serio);
-	INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd);
+	INIT_WORK(&atkbd->event_work, atkbd_event_work);
 	mutex_init(&atkbd->event_mutex);
 
 	switch (serio->id.type) {
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index e5b1b60757bb..b3e84d3bb7f7 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -251,9 +251,9 @@ EXPORT_SYMBOL(ps2_command);
  * ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.)
  */
 
-static void ps2_execute_scheduled_command(void *data)
+static void ps2_execute_scheduled_command(struct work_struct *work)
 {
-	struct ps2work *ps2work = data;
+	struct ps2work *ps2work = container_of(work, struct ps2work, work);
 
 	ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command);
 	kfree(ps2work);
@@ -278,7 +278,7 @@ int ps2_schedule_command(struct ps2dev *ps2dev, unsigned char *param, int comman
 	ps2work->ps2dev = ps2dev;
 	ps2work->command = command;
 	memcpy(ps2work->param, param, send);
-	INIT_WORK(&ps2work->work, ps2_execute_scheduled_command, ps2work);
+	INIT_WORK(&ps2work->work, ps2_execute_scheduled_command);
 
 	if (!schedule_work(&ps2work->work)) {
 		kfree(ps2work);
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 726ec5e88ab2..03294400bc90 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -183,7 +183,7 @@ void e1000_set_ethtool_ops(struct net_device *netdev);
 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
 static void e1000_tx_timeout(struct net_device *dev);
-static void e1000_reset_task(struct net_device *dev);
+static void e1000_reset_task(struct work_struct *work);
 static void e1000_smartspeed(struct e1000_adapter *adapter);
 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                        struct sk_buff *skb);
@@ -908,8 +908,7 @@ e1000_probe(struct pci_dev *pdev,
 	adapter->phy_info_timer.function = &e1000_update_phy_info;
 	adapter->phy_info_timer.data = (unsigned long) adapter;
 
-	INIT_WORK(&adapter->reset_task,
-		(void (*)(void *))e1000_reset_task, netdev);
+	INIT_WORK(&adapter->reset_task, e1000_reset_task);
 
 	e1000_check_options(adapter);
 
@@ -3154,9 +3153,10 @@ e1000_tx_timeout(struct net_device *netdev)
 }
 
 static void
-e1000_reset_task(struct net_device *netdev)
+e1000_reset_task(struct work_struct *work)
 {
-	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_adapter *adapter =
+		container_of(work, struct e1000_adapter, reset_task);
 
 	e1000_reinit_locked(adapter);
 }
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 04c43ef529ac..55866b6b26fa 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -160,7 +160,7 @@ static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev)
 	rpc->e_lock = SPIN_LOCK_UNLOCKED;
 
 	rpc->rpd = dev;
-	INIT_WORK(&rpc->dpc_handler, aer_isr, (void *)dev);
+	INIT_WORK(&rpc->dpc_handler, aer_isr);
 	rpc->prod_idx = rpc->cons_idx = 0;
 	mutex_init(&rpc->rpc_mutex);
 	init_waitqueue_head(&rpc->wait_release);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index daf0cad88fc8..3c0a58f64dd8 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -118,7 +118,7 @@ extern struct bus_type pcie_port_bus_type;
 extern void aer_enable_rootport(struct aer_rpc *rpc);
 extern void aer_delete_rootport(struct aer_rpc *rpc);
 extern int aer_init(struct pcie_device *dev);
-extern void aer_isr(void *context);
+extern void aer_isr(struct work_struct *work);
 extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
 extern int aer_osc_setup(struct pci_dev *dev);
 
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 1c7e660d6535..08e13033ced8 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -690,14 +690,14 @@ static void aer_isr_one_error(struct pcie_device *p_device,
 
 /**
  * aer_isr - consume errors detected by root port
- * @context: pointer to a private data of pcie device
+ * @work: definition of this work item
  *
  * Invoked, as DPC, when root port records new detected error
  **/
-void aer_isr(void *context)
+void aer_isr(struct work_struct *work)
 {
-	struct pcie_device *p_device = (struct pcie_device *) context;
-	struct aer_rpc *rpc = get_service_data(p_device);
+	struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
+	struct pcie_device *p_device = rpc->rpd;
 	struct aer_err_source *e_src;
 
 	mutex_lock(&rpc->rpc_mutex);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 94a274645f6f..d3c5e964c964 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -362,9 +362,10 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
 	goto retry;
 }
 
-static void scsi_target_reap_usercontext(void *data)
+static void scsi_target_reap_usercontext(struct work_struct *work)
 {
-	struct scsi_target *starget = data;
+	struct scsi_target *starget =
+		container_of(work, struct scsi_target, ew.work);
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 	unsigned long flags;
 
@@ -400,7 +401,7 @@ void scsi_target_reap(struct scsi_target *starget)
 		starget->state = STARGET_DEL;
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		execute_in_process_context(scsi_target_reap_usercontext,
-					   starget, &starget->ew);
+					   &starget->ew);
 		return;
 
 	}
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e1a91665d1c2..259c90cfa367 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -218,16 +218,16 @@ static void scsi_device_cls_release(struct class_device *class_dev)
 	put_device(&sdev->sdev_gendev);
 }
 
-static void scsi_device_dev_release_usercontext(void *data)
+static void scsi_device_dev_release_usercontext(struct work_struct *work)
 {
-	struct device *dev = data;
 	struct scsi_device *sdev;
 	struct device *parent;
 	struct scsi_target *starget;
 	unsigned long flags;
 
-	parent = dev->parent;
-	sdev = to_scsi_device(dev);
+	sdev = container_of(work, struct scsi_device, ew.work);
+
+	parent = sdev->sdev_gendev.parent;
 	starget = to_scsi_target(parent);
 
 	spin_lock_irqsave(sdev->host->host_lock, flags);
@@ -258,7 +258,7 @@ static void scsi_device_dev_release_usercontext(void *data)
 static void scsi_device_dev_release(struct device *dev)
 {
 	struct scsi_device *sdp = to_scsi_device(dev);
-	execute_in_process_context(scsi_device_dev_release_usercontext, dev,
+	execute_in_process_context(scsi_device_dev_release_usercontext,
 				   &sdp->ew);
 }
 
diff --git a/fs/aio.c b/fs/aio.c
index 11a1a7100ad6..ca1c5180a17f 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -53,13 +53,13 @@ static kmem_cache_t *kioctx_cachep;
 static struct workqueue_struct *aio_wq;
 
 /* Used for rare fput completion. */
-static void aio_fput_routine(void *);
-static DECLARE_WORK(fput_work, aio_fput_routine, NULL);
+static void aio_fput_routine(struct work_struct *);
+static DECLARE_WORK(fput_work, aio_fput_routine);
 
 static DEFINE_SPINLOCK(fput_lock);
 static LIST_HEAD(fput_head);
 
-static void aio_kick_handler(void *);
+static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
 /* aio_setup
@@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
 	INIT_LIST_HEAD(&ctx->active_reqs);
 	INIT_LIST_HEAD(&ctx->run_list);
-	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler, ctx);
+	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
 
 	if (aio_setup_ring(ctx) < 0)
 		goto out_freectx;
@@ -470,7 +470,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 	wake_up(&ctx->wait);
 }
 
-static void aio_fput_routine(void *data)
+static void aio_fput_routine(struct work_struct *data)
 {
 	spin_lock_irq(&fput_lock);
 	while (likely(!list_empty(&fput_head))) {
@@ -859,9 +859,9 @@ static inline void aio_run_all_iocbs(struct kioctx *ctx)
  * space.
  * Run on aiod's context.
  */
-static void aio_kick_handler(void *data)
+static void aio_kick_handler(struct work_struct *work)
 {
-	struct kioctx *ctx = data;
+	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
 	mm_segment_t oldfs = get_fs();
 	int requeue;
 
diff --git a/fs/bio.c b/fs/bio.c
index f95c8749499f..c6c07ca5b5a9 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -955,16 +955,16 @@ static void bio_release_pages(struct bio *bio)
  * run one bio_put() against the BIO.
  */
 
-static void bio_dirty_fn(void *data);
+static void bio_dirty_fn(struct work_struct *work);
 
-static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
 static DEFINE_SPINLOCK(bio_dirty_lock);
 static struct bio *bio_dirty_list;
 
 /*
  * This runs in process context
  */
-static void bio_dirty_fn(void *data)
+static void bio_dirty_fn(struct work_struct *work)
 {
 	unsigned long flags;
 	struct bio *bio;
diff --git a/fs/file.c b/fs/file.c
index 8e81775c5dc8..3787e82f54c1 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -91,8 +91,10 @@ out:
 	spin_unlock(&fddef->lock);
 }
 
-static void free_fdtable_work(struct fdtable_defer *f)
+static void free_fdtable_work(struct work_struct *work)
 {
+	struct fdtable_defer *f =
+		container_of(work, struct fdtable_defer, wq);
 	struct fdtable *fdt;
 
 	spin_lock_bh(&f->lock);
@@ -351,7 +353,7 @@ static void __devinit fdtable_defer_list_init(int cpu)
 {
 	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
 	spin_lock_init(&fddef->lock);
-	INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef);
+	INIT_WORK(&fddef->wq, free_fdtable_work);
 	init_timer(&fddef->timer);
 	fddef->timer.data = (unsigned long)fddef;
 	fddef->timer.function = fdtable_timer;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 6f0487d6f44a..23ab145daa2d 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
 	INIT_LIST_HEAD(&clp->cl_state_owners);
 	INIT_LIST_HEAD(&clp->cl_unused);
 	spin_lock_init(&clp->cl_lock);
-	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
+	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
 	clp->cl_boot_time = CURRENT_TIME;
 	clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 5ed798bc1cf7..371b804e7cc8 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -18,11 +18,10 @@
 
 #define NFSDBG_FACILITY NFSDBG_VFS
 
-static void nfs_expire_automounts(void *list);
+static void nfs_expire_automounts(struct work_struct *work);
 
 LIST_HEAD(nfs_automount_list);
-static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts,
-			    &nfs_automount_list);
+static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
 int nfs_mountpoint_expiry_timeout = 500 * HZ;
 
 static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
@@ -165,9 +164,9 @@ struct inode_operations nfs_referral_inode_operations = {
 	.follow_link	= nfs_follow_mountpoint,
 };
 
-static void nfs_expire_automounts(void *data)
+static void nfs_expire_automounts(struct work_struct *work)
 {
-	struct list_head *list = (struct list_head *)data;
+	struct list_head *list = &nfs_automount_list;
 
 	mark_mounts_for_expiry(list);
 	if (!list_empty(list))
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 6f346677332d..c26cd978c7cc 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -185,7 +185,7 @@ extern const u32 nfs4_fs_locations_bitmap[2];
 extern void nfs4_schedule_state_renewal(struct nfs_client *);
 extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
 extern void nfs4_kill_renewd(struct nfs_client *);
-extern void nfs4_renew_state(void *);
+extern void nfs4_renew_state(struct work_struct *);
 
 /* nfs4state.c */
 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp);
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 7b6df1852e75..823298561c0a 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -59,9 +59,10 @@
 #define NFSDBG_FACILITY	NFSDBG_PROC
 
 void
-nfs4_renew_state(void *data)
+nfs4_renew_state(struct work_struct *work)
 {
-	struct nfs_client *clp = (struct nfs_client *)data;
+	struct nfs_client *clp =
+		container_of(work, struct nfs_client, cl_renewd.work);
 	struct rpc_cred *cred;
 	long lease, timeout;
 	unsigned long last, now;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5f04006e8dd2..b3f32eadbef5 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -568,6 +568,7 @@ struct ata_port {
 	struct ata_host		*host;
 	struct device 		*dev;
 
+	void			*port_task_data;
 	struct delayed_work	port_task;
 	struct delayed_work	hotplug_task;
 	struct work_struct	scsi_rescan_task;
@@ -747,7 +748,7 @@ extern int ata_ratelimit(void);
 extern unsigned int ata_busy_sleep(struct ata_port *ap,
 				   unsigned long timeout_pat,
 				   unsigned long timeout);
-extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
+extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn,
 				void *data, unsigned long delay);
 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
 			     unsigned long interval_msec,
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index ecc017d24cf3..4a3ea83c6d16 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -11,18 +11,19 @@
 
 struct workqueue_struct;
 
-typedef void (*work_func_t)(void *data);
+struct work_struct;
+typedef void (*work_func_t)(struct work_struct *work);
 
 struct work_struct {
-	/* the first word is the work queue pointer and the pending flag
-	 * rolled into one */
+	/* the first word is the work queue pointer and the flags rolled into
+	 * one */
 	unsigned long management;
 #define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
+#define WORK_STRUCT_NOAUTOREL 1		/* F if work item automatically released on exec */
 #define WORK_STRUCT_FLAG_MASK (3UL)
 #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
 	struct list_head entry;
 	work_func_t func;
-	void *data;
 };
 
 struct delayed_work {
@@ -34,48 +35,77 @@ struct execute_work {
34 struct work_struct work; 35 struct work_struct work;
35}; 36};
36 37
37#define __WORK_INITIALIZER(n, f, d) { \ 38#define __WORK_INITIALIZER(n, f) { \
39 .management = 0, \
38 .entry = { &(n).entry, &(n).entry }, \ 40 .entry = { &(n).entry, &(n).entry }, \
39 .func = (f), \ 41 .func = (f), \
40 .data = (d), \
41 } 42 }
42 43
43#define __DELAYED_WORK_INITIALIZER(n, f, d) { \ 44#define __WORK_INITIALIZER_NAR(n, f) { \
44 .work = __WORK_INITIALIZER((n).work, (f), (d)), \ 45 .management = (1 << WORK_STRUCT_NOAUTOREL), \
46 .entry = { &(n).entry, &(n).entry }, \
47 .func = (f), \
48 }
49
50#define __DELAYED_WORK_INITIALIZER(n, f) { \
51 .work = __WORK_INITIALIZER((n).work, (f)), \
52 .timer = TIMER_INITIALIZER(NULL, 0, 0), \
53 }
54
55#define __DELAYED_WORK_INITIALIZER_NAR(n, f) { \
56 .work = __WORK_INITIALIZER_NAR((n).work, (f)), \
45 .timer = TIMER_INITIALIZER(NULL, 0, 0), \ 57 .timer = TIMER_INITIALIZER(NULL, 0, 0), \
46 } 58 }
47 59
48#define DECLARE_WORK(n, f, d) \ 60#define DECLARE_WORK(n, f) \
49 struct work_struct n = __WORK_INITIALIZER(n, f, d) 61 struct work_struct n = __WORK_INITIALIZER(n, f)
62
63#define DECLARE_WORK_NAR(n, f) \
64 struct work_struct n = __WORK_INITIALIZER_NAR(n, f)
50 65
51#define DECLARE_DELAYED_WORK(n, f, d) \ 66#define DECLARE_DELAYED_WORK(n, f) \
52 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, d) 67 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
68
69#define DECLARE_DELAYED_WORK_NAR(n, f) \
 70	struct delayed_work n = __DELAYED_WORK_INITIALIZER_NAR(n, f)
53 71
54/* 72/*
55 * initialize a work item's function and data pointers 73 * initialize a work item's function pointer
56 */ 74 */
57#define PREPARE_WORK(_work, _func, _data) \ 75#define PREPARE_WORK(_work, _func) \
58 do { \ 76 do { \
59 (_work)->func = (_func); \ 77 (_work)->func = (_func); \
60 (_work)->data = (_data); \
61 } while (0) 78 } while (0)
62 79
63#define PREPARE_DELAYED_WORK(_work, _func, _data) \ 80#define PREPARE_DELAYED_WORK(_work, _func) \
64 PREPARE_WORK(&(_work)->work, (_func), (_data)) 81 PREPARE_WORK(&(_work)->work, (_func))
65 82
66/* 83/*
67 * initialize all of a work item in one go 84 * initialize all of a work item in one go
68 */ 85 */
69#define INIT_WORK(_work, _func, _data) \ 86#define INIT_WORK(_work, _func) \
70 do { \ 87 do { \
71 INIT_LIST_HEAD(&(_work)->entry); \
72 (_work)->management = 0; \ 88 (_work)->management = 0; \
73 PREPARE_WORK((_work), (_func), (_data)); \ 89 INIT_LIST_HEAD(&(_work)->entry); \
90 PREPARE_WORK((_work), (_func)); \
91 } while (0)
92
93#define INIT_WORK_NAR(_work, _func) \
94 do { \
95 (_work)->management = (1 << WORK_STRUCT_NOAUTOREL); \
96 INIT_LIST_HEAD(&(_work)->entry); \
97 PREPARE_WORK((_work), (_func)); \
98 } while (0)
99
100#define INIT_DELAYED_WORK(_work, _func) \
101 do { \
102 INIT_WORK(&(_work)->work, (_func)); \
103 init_timer(&(_work)->timer); \
74 } while (0) 104 } while (0)
75 105
76#define INIT_DELAYED_WORK(_work, _func, _data) \ 106#define INIT_DELAYED_WORK_NAR(_work, _func) \
77 do { \ 107 do { \
78 INIT_WORK(&(_work)->work, (_func), (_data)); \ 108 INIT_WORK_NAR(&(_work)->work, (_func)); \
79 init_timer(&(_work)->timer); \ 109 init_timer(&(_work)->timer); \
80 } while (0) 110 } while (0)
81 111
@@ -94,6 +124,27 @@ struct execute_work {
94#define delayed_work_pending(work) \ 124#define delayed_work_pending(work) \
95 test_bit(WORK_STRUCT_PENDING, &(work)->work.management) 125 test_bit(WORK_STRUCT_PENDING, &(work)->work.management)
96 126
127/**
128 * work_release - Release a work item under execution
129 * @work: The work item to release
130 *
131 * This is used to release a work item that has been initialised with automatic
132 * release mode disabled (WORK_STRUCT_NOAUTOREL is set). This gives the work
133 * function the opportunity to grab auxiliary data from the container of the
134 * work_struct before clearing the pending bit as the work_struct may be
135 * subject to deallocation the moment the pending bit is cleared.
136 *
137 * In such a case, this should be called in the work function after it has
 138 * fetched any data it may require from the container of the work_struct.
139 * After this function has been called, the work_struct may be scheduled for
140 * further execution or it may be deallocated unless other precautions are
141 * taken.
142 *
143 * This should also be used to release a delayed work item.
144 */
145#define work_release(work) \
146 clear_bit(WORK_STRUCT_PENDING, &(work)->management)
147
97 148
98extern struct workqueue_struct *__create_workqueue(const char *name, 149extern struct workqueue_struct *__create_workqueue(const char *name,
99 int singlethread); 150 int singlethread);
@@ -112,7 +163,7 @@ extern int FASTCALL(schedule_work(struct work_struct *work));
112extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay)); 163extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
113 164
114extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); 165extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
115extern int schedule_on_each_cpu(work_func_t func, void *info); 166extern int schedule_on_each_cpu(work_func_t func);
116extern void flush_scheduled_work(void); 167extern void flush_scheduled_work(void);
117extern int current_is_keventd(void); 168extern int current_is_keventd(void);
118extern int keventd_up(void); 169extern int keventd_up(void);
@@ -121,7 +172,7 @@ extern void init_workqueues(void);
121void cancel_rearming_delayed_work(struct delayed_work *work); 172void cancel_rearming_delayed_work(struct delayed_work *work);
122void cancel_rearming_delayed_workqueue(struct workqueue_struct *, 173void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
123 struct delayed_work *); 174 struct delayed_work *);
124int execute_in_process_context(work_func_t fn, void *, struct execute_work *); 175int execute_in_process_context(work_func_t fn, struct execute_work *);
125 176
126/* 177/*
127 * Kill off a pending schedule_delayed_work(). Note that the work callback 178 * Kill off a pending schedule_delayed_work(). Note that the work callback
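Note: the NOAUTOREL variants above exist for handlers that must read from the work item's container before the pending bit is dropped. A hedged usage sketch, with illustrative struct and function names: the handler copies out its payload, calls work_release(), and only then disposes of the container it owns.

    struct my_job {
    	struct work_struct work;
    	int payload;
    };

    static void my_job_fn(struct work_struct *work)
    {
    	struct my_job *job = container_of(work, struct my_job, work);
    	int payload = job->payload;	/* fetch before releasing */

    	work_release(work);	/* clears PENDING; item may now be requeued or freed */
    	kfree(job);		/* safe here only because nobody else holds a reference */
    	process(payload);	/* process() is hypothetical */
    }

    struct my_job *job = kmalloc(sizeof(*job), GFP_KERNEL);
    if (job) {
    	job->payload = 42;
    	INIT_WORK_NAR(&job->work, my_job_fn);
    	schedule_work(&job->work);
    }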
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 5f48748fe017..f7be1ac73601 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -84,7 +84,7 @@ struct inet_timewait_death_row {
84}; 84};
85 85
86extern void inet_twdr_hangman(unsigned long data); 86extern void inet_twdr_hangman(unsigned long data);
87extern void inet_twdr_twkill_work(void *data); 87extern void inet_twdr_twkill_work(struct work_struct *work);
88extern void inet_twdr_twcal_tick(unsigned long data); 88extern void inet_twdr_twcal_tick(unsigned long data);
89 89
90#if (BITS_PER_LONG == 64) 90#if (BITS_PER_LONG == 64)
diff --git a/ipc/util.c b/ipc/util.c
index cd8bb14a431f..a9b7a227b8d4 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -514,6 +514,11 @@ void ipc_rcu_getref(void *ptr)
514 container_of(ptr, struct ipc_rcu_hdr, data)->refcount++; 514 container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
515} 515}
516 516
517static void ipc_do_vfree(struct work_struct *work)
518{
519 vfree(container_of(work, struct ipc_rcu_sched, work));
520}
521
517/** 522/**
518 * ipc_schedule_free - free ipc + rcu space 523 * ipc_schedule_free - free ipc + rcu space
519 * @head: RCU callback structure for queued work 524 * @head: RCU callback structure for queued work
@@ -528,7 +533,7 @@ static void ipc_schedule_free(struct rcu_head *head)
528 struct ipc_rcu_sched *sched = 533 struct ipc_rcu_sched *sched =
529 container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]); 534 container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]);
530 535
531 INIT_WORK(&sched->work, vfree, sched); 536 INIT_WORK(&sched->work, ipc_do_vfree);
532 schedule_work(&sched->work); 537 schedule_work(&sched->work);
533} 538}
534 539
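Note: vfree() takes a bare pointer, so it can no longer be queued as a work function; the ipc_do_vfree() wrapper recovers the allocation via container_of(). This works because struct ipc_rcu_sched (not shown in this hunk) heads the vmalloc()ed block, roughly along these lines:

    struct ipc_rcu_sched {
    	struct work_struct work;
    	/* "void *" keeps the alignment of the following data sane */
    	void *data[0];		/* payload follows in the same allocation */
    };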
diff --git a/kernel/kmod.c b/kernel/kmod.c
index bb4e29d924e4..7dc7a9dad6ac 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -114,6 +114,7 @@ EXPORT_SYMBOL(request_module);
114#endif /* CONFIG_KMOD */ 114#endif /* CONFIG_KMOD */
115 115
116struct subprocess_info { 116struct subprocess_info {
117 struct work_struct work;
117 struct completion *complete; 118 struct completion *complete;
118 char *path; 119 char *path;
119 char **argv; 120 char **argv;
@@ -221,9 +222,10 @@ static int wait_for_helper(void *data)
221} 222}
222 223
223/* This is run by khelper thread */ 224/* This is run by khelper thread */
224static void __call_usermodehelper(void *data) 225static void __call_usermodehelper(struct work_struct *work)
225{ 226{
226 struct subprocess_info *sub_info = data; 227 struct subprocess_info *sub_info =
228 container_of(work, struct subprocess_info, work);
227 pid_t pid; 229 pid_t pid;
228 int wait = sub_info->wait; 230 int wait = sub_info->wait;
229 231
@@ -264,6 +266,8 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
264{ 266{
265 DECLARE_COMPLETION_ONSTACK(done); 267 DECLARE_COMPLETION_ONSTACK(done);
266 struct subprocess_info sub_info = { 268 struct subprocess_info sub_info = {
269 .work = __WORK_INITIALIZER(sub_info.work,
270 __call_usermodehelper),
267 .complete = &done, 271 .complete = &done,
268 .path = path, 272 .path = path,
269 .argv = argv, 273 .argv = argv,
@@ -272,7 +276,6 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
272 .wait = wait, 276 .wait = wait,
273 .retval = 0, 277 .retval = 0,
274 }; 278 };
275 DECLARE_WORK(work, __call_usermodehelper, &sub_info);
276 279
277 if (!khelper_wq) 280 if (!khelper_wq)
278 return -EBUSY; 281 return -EBUSY;
@@ -280,7 +283,7 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
280 if (path[0] == '\0') 283 if (path[0] == '\0')
281 return 0; 284 return 0;
282 285
283 queue_work(khelper_wq, &work); 286 queue_work(khelper_wq, &sub_info.work);
284 wait_for_completion(&done); 287 wait_for_completion(&done);
285 return sub_info.retval; 288 return sub_info.retval;
286} 289}
@@ -291,6 +294,8 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
291{ 294{
292 DECLARE_COMPLETION(done); 295 DECLARE_COMPLETION(done);
293 struct subprocess_info sub_info = { 296 struct subprocess_info sub_info = {
297 .work = __WORK_INITIALIZER(sub_info.work,
298 __call_usermodehelper),
294 .complete = &done, 299 .complete = &done,
295 .path = path, 300 .path = path,
296 .argv = argv, 301 .argv = argv,
@@ -298,7 +303,6 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
298 .retval = 0, 303 .retval = 0,
299 }; 304 };
300 struct file *f; 305 struct file *f;
301 DECLARE_WORK(work, __call_usermodehelper, &sub_info);
302 306
303 if (!khelper_wq) 307 if (!khelper_wq)
304 return -EBUSY; 308 return -EBUSY;
@@ -318,7 +322,7 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
318 } 322 }
319 sub_info.stdin = f; 323 sub_info.stdin = f;
320 324
321 queue_work(khelper_wq, &work); 325 queue_work(khelper_wq, &sub_info.work);
322 wait_for_completion(&done); 326 wait_for_completion(&done);
323 return sub_info.retval; 327 return sub_info.retval;
324} 328}
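Note: call_usermodehelper_keys() and call_usermodehelper_pipe() show the on-stack pattern that replaces DECLARE_WORK-with-data: the work_struct moves into the context struct, __WORK_INITIALIZER() names its own field, and a completion keeps the stack frame alive until the handler finishes. A reduced, hedged sketch of the same shape (all names illustrative):

    struct my_request {
    	struct work_struct work;
    	struct completion done;
    	int retval;
    };

    static void my_worker(struct work_struct *work)
    {
    	struct my_request *req = container_of(work, struct my_request, work);

    	req->retval = do_the_thing();	/* do_the_thing() is hypothetical */
    	complete(&req->done);		/* caller's frame stays live until here */
    }

    int my_call(struct workqueue_struct *wq)
    {
    	struct my_request req = {
    		.work = __WORK_INITIALIZER(req.work, my_worker),
    	};

    	init_completion(&req.done);
    	queue_work(wq, &req.work);
    	wait_for_completion(&req.done);
    	return req.retval;
    }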
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4f9c60ef95e8..1db8c72d0d38 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -31,6 +31,8 @@ struct kthread_create_info
31 /* Result passed back to kthread_create() from keventd. */ 31 /* Result passed back to kthread_create() from keventd. */
32 struct task_struct *result; 32 struct task_struct *result;
33 struct completion done; 33 struct completion done;
34
35 struct work_struct work;
34}; 36};
35 37
36struct kthread_stop_info 38struct kthread_stop_info
@@ -111,9 +113,10 @@ static int kthread(void *_create)
111} 113}
112 114
113/* We are keventd: create a thread. */ 115/* We are keventd: create a thread. */
114static void keventd_create_kthread(void *_create) 116static void keventd_create_kthread(struct work_struct *work)
115{ 117{
116 struct kthread_create_info *create = _create; 118 struct kthread_create_info *create =
119 container_of(work, struct kthread_create_info, work);
117 int pid; 120 int pid;
118 121
119 /* We want our own signal handler (we take no signals by default). */ 122 /* We want our own signal handler (we take no signals by default). */
@@ -154,20 +157,20 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
154 ...) 157 ...)
155{ 158{
156 struct kthread_create_info create; 159 struct kthread_create_info create;
157 DECLARE_WORK(work, keventd_create_kthread, &create);
158 160
159 create.threadfn = threadfn; 161 create.threadfn = threadfn;
160 create.data = data; 162 create.data = data;
161 init_completion(&create.started); 163 init_completion(&create.started);
162 init_completion(&create.done); 164 init_completion(&create.done);
165 INIT_WORK(&create.work, keventd_create_kthread);
163 166
164 /* 167 /*
165 * The workqueue needs to start up first: 168 * The workqueue needs to start up first:
166 */ 169 */
167 if (!helper_wq) 170 if (!helper_wq)
168 work.func(work.data); 171 create.work.func(&create.work);
169 else { 172 else {
170 queue_work(helper_wq, &work); 173 queue_work(helper_wq, &create.work);
171 wait_for_completion(&create.done); 174 wait_for_completion(&create.done);
172 } 175 }
173 if (!IS_ERR(create.result)) { 176 if (!IS_ERR(create.result)) {
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index f1f900ac3164..678ec736076b 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -16,12 +16,12 @@
16 * callback we use. 16 * callback we use.
17 */ 17 */
18 18
19static void do_poweroff(void *dummy) 19static void do_poweroff(struct work_struct *dummy)
20{ 20{
21 kernel_power_off(); 21 kernel_power_off();
22} 22}
23 23
24static DECLARE_WORK(poweroff_work, do_poweroff, NULL); 24static DECLARE_WORK(poweroff_work, do_poweroff);
25 25
26static void handle_poweroff(int key, struct tty_struct *tty) 26static void handle_poweroff(int key, struct tty_struct *tty)
27{ 27{
diff --git a/kernel/sys.c b/kernel/sys.c
index 98489d82801b..c87b461de38d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -880,7 +880,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
880 return 0; 880 return 0;
881} 881}
882 882
883static void deferred_cad(void *dummy) 883static void deferred_cad(struct work_struct *dummy)
884{ 884{
885 kernel_restart(NULL); 885 kernel_restart(NULL);
886} 886}
@@ -892,7 +892,7 @@ static void deferred_cad(void *dummy)
892 */ 892 */
893void ctrl_alt_del(void) 893void ctrl_alt_del(void)
894{ 894{
895 static DECLARE_WORK(cad_work, deferred_cad, NULL); 895 static DECLARE_WORK(cad_work, deferred_cad);
896 896
897 if (C_A_D) 897 if (C_A_D)
898 schedule_work(&cad_work); 898 schedule_work(&cad_work);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 967479756511..8d1e7cb8a51a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -241,14 +241,14 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
241 struct work_struct *work = list_entry(cwq->worklist.next, 241 struct work_struct *work = list_entry(cwq->worklist.next,
242 struct work_struct, entry); 242 struct work_struct, entry);
243 work_func_t f = work->func; 243 work_func_t f = work->func;
244 void *data = work->data;
245 244
246 list_del_init(cwq->worklist.next); 245 list_del_init(cwq->worklist.next);
247 spin_unlock_irqrestore(&cwq->lock, flags); 246 spin_unlock_irqrestore(&cwq->lock, flags);
248 247
249 BUG_ON(get_wq_data(work) != cwq); 248 BUG_ON(get_wq_data(work) != cwq);
250 clear_bit(WORK_STRUCT_PENDING, &work->management); 249 if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
251 f(data); 250 work_release(work);
251 f(work);
252 252
253 spin_lock_irqsave(&cwq->lock, flags); 253 spin_lock_irqsave(&cwq->lock, flags);
254 cwq->remove_sequence++; 254 cwq->remove_sequence++;
@@ -527,7 +527,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
527/** 527/**
528 * schedule_on_each_cpu - call a function on each online CPU from keventd 528 * schedule_on_each_cpu - call a function on each online CPU from keventd
529 * @func: the function to call 529 * @func: the function to call
530 * @info: a pointer to pass to func()
531 * 530 *
532 * Returns zero on success. 531 * Returns zero on success.
533 * Returns -ve errno on failure. 532 * Returns -ve errno on failure.
@@ -536,7 +535,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
536 * 535 *
537 * schedule_on_each_cpu() is very slow. 536 * schedule_on_each_cpu() is very slow.
538 */ 537 */
539int schedule_on_each_cpu(work_func_t func, void *info) 538int schedule_on_each_cpu(work_func_t func)
540{ 539{
541 int cpu; 540 int cpu;
542 struct work_struct *works; 541 struct work_struct *works;
@@ -547,7 +546,7 @@ int schedule_on_each_cpu(work_func_t func, void *info)
547 546
548 mutex_lock(&workqueue_mutex); 547 mutex_lock(&workqueue_mutex);
549 for_each_online_cpu(cpu) { 548 for_each_online_cpu(cpu) {
550 INIT_WORK(per_cpu_ptr(works, cpu), func, info); 549 INIT_WORK(per_cpu_ptr(works, cpu), func);
551 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), 550 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
552 per_cpu_ptr(works, cpu)); 551 per_cpu_ptr(works, cpu));
553 } 552 }
@@ -591,7 +590,6 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
591/** 590/**
592 * execute_in_process_context - reliably execute the routine with user context 591 * execute_in_process_context - reliably execute the routine with user context
593 * @fn: the function to execute 592 * @fn: the function to execute
594 * @data: data to pass to the function
595 * @ew: guaranteed storage for the execute work structure (must 593 * @ew: guaranteed storage for the execute work structure (must
596 * be available when the work executes) 594 * be available when the work executes)
597 * 595 *
@@ -601,15 +599,14 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
601 * Returns: 0 - function was executed 599 * Returns: 0 - function was executed
602 * 1 - function was scheduled for execution 600 * 1 - function was scheduled for execution
603 */ 601 */
604int execute_in_process_context(work_func_t fn, void *data, 602int execute_in_process_context(work_func_t fn, struct execute_work *ew)
605 struct execute_work *ew)
606{ 603{
607 if (!in_interrupt()) { 604 if (!in_interrupt()) {
608 fn(data); 605 fn(&ew->work);
609 return 0; 606 return 0;
610 } 607 }
611 608
612 INIT_WORK(&ew->work, fn, data); 609 INIT_WORK(&ew->work, fn);
613 schedule_work(&ew->work); 610 schedule_work(&ew->work);
614 611
615 return 1; 612 return 1;
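Note: run_workqueue() now releases the item itself unless WORK_STRUCT_NOAUTOREL is set, and hands the handler the work_struct rather than a data cookie. Callers of execute_in_process_context() therefore need their context reachable from the execute_work storage; a hedged sketch, with illustrative types and names:

    struct my_ctx {
    	struct execute_work ew;	/* must outlive a deferred call */
    	struct sk_buff *skb;
    };

    static void my_cleanup(struct work_struct *work)
    {
    	struct my_ctx *ctx = container_of(work, struct my_ctx, ew.work);

    	kfree_skb(ctx->skb);
    	kfree(ctx);		/* frees ew.work, so touch nothing afterwards */
    }

    /* runs my_cleanup() inline, or via keventd when called from IRQ context */
    execute_in_process_context(my_cleanup, &ctx->ew);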
diff --git a/mm/slab.c b/mm/slab.c
index a65bc5e992c3..5de81473df34 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache,
313static void free_block(struct kmem_cache *cachep, void **objpp, int len, 313static void free_block(struct kmem_cache *cachep, void **objpp, int len,
314 int node); 314 int node);
315static int enable_cpucache(struct kmem_cache *cachep); 315static int enable_cpucache(struct kmem_cache *cachep);
316static void cache_reap(void *unused); 316static void cache_reap(struct work_struct *unused);
317 317
318/* 318/*
319 * This function must be completely optimized away if a constant is passed to 319 * This function must be completely optimized away if a constant is passed to
@@ -925,7 +925,7 @@ static void __devinit start_cpu_timer(int cpu)
925 */ 925 */
926 if (keventd_up() && reap_work->work.func == NULL) { 926 if (keventd_up() && reap_work->work.func == NULL) {
927 init_reap_node(cpu); 927 init_reap_node(cpu);
928 INIT_DELAYED_WORK(reap_work, cache_reap, NULL); 928 INIT_DELAYED_WORK(reap_work, cache_reap);
929 schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); 929 schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
930 } 930 }
931} 931}
@@ -3815,7 +3815,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
3815 * If we cannot acquire the cache chain mutex then just give up - we'll try 3815 * If we cannot acquire the cache chain mutex then just give up - we'll try
3816 * again on the next iteration. 3816 * again on the next iteration.
3817 */ 3817 */
3818static void cache_reap(void *unused) 3818static void cache_reap(struct work_struct *unused)
3819{ 3819{
3820 struct kmem_cache *searchp; 3820 struct kmem_cache *searchp;
3821 struct kmem_list3 *l3; 3821 struct kmem_list3 *l3;
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index f2ed09e25dfd..549a2ce951b0 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -34,8 +34,8 @@ enum lw_bits {
34static unsigned long linkwatch_flags; 34static unsigned long linkwatch_flags;
35static unsigned long linkwatch_nextevent; 35static unsigned long linkwatch_nextevent;
36 36
37static void linkwatch_event(void *dummy); 37static void linkwatch_event(struct work_struct *dummy);
38static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event, NULL); 38static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
39 39
40static LIST_HEAD(lweventlist); 40static LIST_HEAD(lweventlist);
41static DEFINE_SPINLOCK(lweventlist_lock); 41static DEFINE_SPINLOCK(lweventlist_lock);
@@ -127,7 +127,7 @@ void linkwatch_run_queue(void)
127} 127}
128 128
129 129
130static void linkwatch_event(void *dummy) 130static void linkwatch_event(struct work_struct *dummy)
131{ 131{
132 /* Limit the number of linkwatch events to one 132 /* Limit the number of linkwatch events to one
133 * per second so that a runaway driver does not 133 * per second so that a runaway driver does not
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index cdd805344c61..8c74f9168b7d 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -197,9 +197,10 @@ EXPORT_SYMBOL_GPL(inet_twdr_hangman);
197 197
198extern void twkill_slots_invalid(void); 198extern void twkill_slots_invalid(void);
199 199
200void inet_twdr_twkill_work(void *data) 200void inet_twdr_twkill_work(struct work_struct *work)
201{ 201{
202 struct inet_timewait_death_row *twdr = data; 202 struct inet_timewait_death_row *twdr =
203 container_of(work, struct inet_timewait_death_row, twkill_work);
203 int i; 204 int i;
204 205
205 if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8)) 206 if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0163d9826907..af7b2c986b1f 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -45,8 +45,7 @@ struct inet_timewait_death_row tcp_death_row = {
45 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, 45 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
46 (unsigned long)&tcp_death_row), 46 (unsigned long)&tcp_death_row),
47 .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work, 47 .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work,
48 inet_twdr_twkill_work, 48 inet_twdr_twkill_work),
49 &tcp_death_row),
50/* Short-time timewait calendar */ 49/* Short-time timewait calendar */
51 50
52 .twcal_hand = -1, 51 .twcal_hand = -1,
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index d5725cb1491e..d96fd466a9a4 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -284,8 +284,8 @@ static struct file_operations cache_file_operations;
284static struct file_operations content_file_operations; 284static struct file_operations content_file_operations;
285static struct file_operations cache_flush_operations; 285static struct file_operations cache_flush_operations;
286 286
287static void do_cache_clean(void *data); 287static void do_cache_clean(struct work_struct *work);
288static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean, NULL); 288static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);
289 289
290void cache_register(struct cache_detail *cd) 290void cache_register(struct cache_detail *cd)
291{ 291{
@@ -461,7 +461,7 @@ static int cache_clean(void)
461/* 461/*
462 * We want to regularly clean the cache, so we need to schedule some work ... 462 * We want to regularly clean the cache, so we need to schedule some work ...
463 */ 463 */
464static void do_cache_clean(void *data) 464static void do_cache_clean(struct work_struct *work)
465{ 465{
466 int delay = 5; 466 int delay = 5;
467 if (cache_clean() == -1) 467 if (cache_clean() == -1)
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 97be3f7fed44..49dba5febbbd 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -54,10 +54,11 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
54} 54}
55 55
56static void 56static void
57rpc_timeout_upcall_queue(void *data) 57rpc_timeout_upcall_queue(struct work_struct *work)
58{ 58{
59 LIST_HEAD(free_list); 59 LIST_HEAD(free_list);
60 struct rpc_inode *rpci = (struct rpc_inode *)data; 60 struct rpc_inode *rpci =
61 container_of(work, struct rpc_inode, queue_timeout.work);
61 struct inode *inode = &rpci->vfs_inode; 62 struct inode *inode = &rpci->vfs_inode;
62 void (*destroy_msg)(struct rpc_pipe_msg *); 63 void (*destroy_msg)(struct rpc_pipe_msg *);
63 64
@@ -838,7 +839,7 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
838 rpci->pipelen = 0; 839 rpci->pipelen = 0;
839 init_waitqueue_head(&rpci->waitq); 840 init_waitqueue_head(&rpci->waitq);
840 INIT_DELAYED_WORK(&rpci->queue_timeout, 841 INIT_DELAYED_WORK(&rpci->queue_timeout,
841 rpc_timeout_upcall_queue, rpci); 842 rpc_timeout_upcall_queue);
842 rpci->ops = NULL; 843 rpci->ops = NULL;
843 } 844 }
844} 845}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index a1ab4eed41f4..eff44bcdc95a 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -41,7 +41,7 @@ static mempool_t *rpc_buffer_mempool __read_mostly;
41 41
42static void __rpc_default_timer(struct rpc_task *task); 42static void __rpc_default_timer(struct rpc_task *task);
43static void rpciod_killall(void); 43static void rpciod_killall(void);
44static void rpc_async_schedule(void *); 44static void rpc_async_schedule(struct work_struct *);
45 45
46/* 46/*
47 * RPC tasks sit here while waiting for conditions to improve. 47 * RPC tasks sit here while waiting for conditions to improve.
@@ -305,7 +305,7 @@ static void rpc_make_runnable(struct rpc_task *task)
305 if (RPC_IS_ASYNC(task)) { 305 if (RPC_IS_ASYNC(task)) {
306 int status; 306 int status;
307 307
308 INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task); 308 INIT_WORK(&task->u.tk_work, rpc_async_schedule);
309 status = queue_work(task->tk_workqueue, &task->u.tk_work); 309 status = queue_work(task->tk_workqueue, &task->u.tk_work);
310 if (status < 0) { 310 if (status < 0) {
311 printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); 311 printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
@@ -695,9 +695,9 @@ rpc_execute(struct rpc_task *task)
695 return __rpc_execute(task); 695 return __rpc_execute(task);
696} 696}
697 697
698static void rpc_async_schedule(void *arg) 698static void rpc_async_schedule(struct work_struct *work)
699{ 699{
700 __rpc_execute((struct rpc_task *)arg); 700 __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
701} 701}
702 702
703/** 703/**
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 80857470dc11..4f9a5d9791fb 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -479,9 +479,10 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
479 return status; 479 return status;
480} 480}
481 481
482static void xprt_autoclose(void *args) 482static void xprt_autoclose(struct work_struct *work)
483{ 483{
484 struct rpc_xprt *xprt = (struct rpc_xprt *)args; 484 struct rpc_xprt *xprt =
485 container_of(work, struct rpc_xprt, task_cleanup);
485 486
486 xprt_disconnect(xprt); 487 xprt_disconnect(xprt);
487 xprt->ops->close(xprt); 488 xprt->ops->close(xprt);
@@ -932,7 +933,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si
932 933
933 INIT_LIST_HEAD(&xprt->free); 934 INIT_LIST_HEAD(&xprt->free);
934 INIT_LIST_HEAD(&xprt->recv); 935 INIT_LIST_HEAD(&xprt->recv);
935 INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt); 936 INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
936 init_timer(&xprt->timer); 937 init_timer(&xprt->timer);
937 xprt->timer.function = xprt_init_autodisconnect; 938 xprt->timer.function = xprt_init_autodisconnect;
938 xprt->timer.data = (unsigned long) xprt; 939 xprt->timer.data = (unsigned long) xprt;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 3c7532cd009e..cfe3c15be948 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1060,13 +1060,14 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
1060 1060
1061/** 1061/**
1062 * xs_udp_connect_worker - set up a UDP socket 1062 * xs_udp_connect_worker - set up a UDP socket
1063 * @args: RPC transport to connect 1063 * @work: RPC transport to connect
1064 * 1064 *
1065 * Invoked by a work queue tasklet. 1065 * Invoked by a work queue tasklet.
1066 */ 1066 */
1067static void xs_udp_connect_worker(void *args) 1067static void xs_udp_connect_worker(struct work_struct *work)
1068{ 1068{
1069 struct rpc_xprt *xprt = (struct rpc_xprt *) args; 1069 struct rpc_xprt *xprt =
1070 container_of(work, struct rpc_xprt, connect_worker.work);
1070 struct socket *sock = xprt->sock; 1071 struct socket *sock = xprt->sock;
1071 int err, status = -EIO; 1072 int err, status = -EIO;
1072 1073
@@ -1144,13 +1145,14 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
1144 1145
1145/** 1146/**
1146 * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint 1147 * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
1147 * @args: RPC transport to connect 1148 * @work: RPC transport to connect
1148 * 1149 *
1149 * Invoked by a work queue tasklet. 1150 * Invoked by a work queue tasklet.
1150 */ 1151 */
1151static void xs_tcp_connect_worker(void *args) 1152static void xs_tcp_connect_worker(struct work_struct *work)
1152{ 1153{
1153 struct rpc_xprt *xprt = (struct rpc_xprt *)args; 1154 struct rpc_xprt *xprt =
1155 container_of(work, struct rpc_xprt, connect_worker.work);
1154 struct socket *sock = xprt->sock; 1156 struct socket *sock = xprt->sock;
1155 int err, status = -EIO; 1157 int err, status = -EIO;
1156 1158
@@ -1375,7 +1377,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1375 /* XXX: header size can vary due to auth type, IPv6, etc. */ 1377 /* XXX: header size can vary due to auth type, IPv6, etc. */
1376 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); 1378 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
1377 1379
1378 INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt); 1380 INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker);
1379 xprt->bind_timeout = XS_BIND_TO; 1381 xprt->bind_timeout = XS_BIND_TO;
1380 xprt->connect_timeout = XS_UDP_CONN_TO; 1382 xprt->connect_timeout = XS_UDP_CONN_TO;
1381 xprt->reestablish_timeout = XS_UDP_REEST_TO; 1383 xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1420,7 +1422,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1420 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); 1422 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
1421 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 1423 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
1422 1424
1423 INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); 1425 INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker);
1424 xprt->bind_timeout = XS_BIND_TO; 1426 xprt->bind_timeout = XS_BIND_TO;
1425 xprt->connect_timeout = XS_TCP_CONN_TO; 1427 xprt->connect_timeout = XS_TCP_CONN_TO;
1426 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 1428 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
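Note: for delayed work, container_of() must go through the embedded work member, because the handler receives the inner work_struct rather than the delayed_work; xs_udp_connect_worker() and xs_tcp_connect_worker() above both use connect_worker.work for exactly this reason. A generic, hedged sketch of a self-rearming delayed handler (names illustrative):

    static void my_poll(struct work_struct *work)
    {
    	struct my_dev *dev = container_of(work, struct my_dev, poll_work.work);

    	poll_hardware(dev);	/* poll_hardware() is hypothetical */
    	/* re-arm: a fired delayed_work may be queued again */
    	schedule_delayed_work(&dev->poll_work, HZ);
    }

    INIT_DELAYED_WORK(&dev->poll_work, my_poll);
    schedule_delayed_work(&dev->poll_work, HZ);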
diff --git a/security/keys/key.c b/security/keys/key.c
index 80de8c3e9cc3..70eacbe5abde 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -30,8 +30,8 @@ DEFINE_SPINLOCK(key_user_lock);
30static LIST_HEAD(key_types_list); 30static LIST_HEAD(key_types_list);
31static DECLARE_RWSEM(key_types_sem); 31static DECLARE_RWSEM(key_types_sem);
32 32
33static void key_cleanup(void *data); 33static void key_cleanup(struct work_struct *work);
34static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL); 34static DECLARE_WORK(key_cleanup_task, key_cleanup);
35 35
36/* we serialise key instantiation and link */ 36/* we serialise key instantiation and link */
37DECLARE_RWSEM(key_construction_sem); 37DECLARE_RWSEM(key_construction_sem);
@@ -552,7 +552,7 @@ EXPORT_SYMBOL(key_negate_and_link);
552 * do cleaning up in process context so that we don't have to disable 552 * do cleaning up in process context so that we don't have to disable
553 * interrupts all over the place 553 * interrupts all over the place
554 */ 554 */
555static void key_cleanup(void *data) 555static void key_cleanup(struct work_struct *work)
556{ 556{
557 struct rb_node *_n; 557 struct rb_node *_n;
558 struct key *key; 558 struct key *key;